refactor(ui): extract chat model resolution state

This commit is contained in:
Peter Steinberger
2026-03-23 22:22:21 -07:00
parent 013385e5c2
commit 9082795b10
9 changed files with 578 additions and 185 deletions

View File

@@ -6,20 +6,18 @@ import { refreshChat } from "./app-chat.ts";
import { syncUrlWithSessionKey } from "./app-settings.ts";
import type { AppViewState } from "./app-view-state.ts";
import { OpenClawApp } from "./app.ts";
import { createChatModelOverride } from "./chat-model-ref.ts";
import {
buildChatModelOption,
createChatModelOverride,
formatChatModelDisplay,
normalizeChatModelOverrideValue,
resolveServerChatModelValue,
} from "./chat-model-ref.ts";
resolveChatModelOverrideValue,
resolveChatModelSelectState,
} from "./chat-model-select-state.ts";
import { ChatState, loadChatHistory } from "./controllers/chat.ts";
import { loadSessions } from "./controllers/sessions.ts";
import { icons } from "./icons.ts";
import { iconForTab, pathForTab, titleForTab, type Tab } from "./navigation.ts";
import type { ThemeTransitionContext } from "./theme-transition.ts";
import type { ThemeMode, ThemeName } from "./theme.ts";
import type { ModelCatalogEntry, SessionsListResult } from "./types.ts";
import type { SessionsListResult } from "./types.ts";
type SessionDefaultsSnapshot = {
mainSessionKey?: string;
@@ -521,99 +519,8 @@ async function refreshSessionOptions(state: AppViewState) {
});
}
function resolveActiveSessionRow(state: AppViewState) {
return state.sessionsResult?.sessions?.find((row) => row.key === state.sessionKey);
}
function resolveModelOverrideValue(state: AppViewState): string {
// Prefer the local cache — it reflects in-flight patches before sessionsResult refreshes.
const cached = state.chatModelOverrides[state.sessionKey];
if (cached) {
return normalizeChatModelOverrideValue(cached, state.chatModelCatalog ?? []);
}
// cached === null means explicitly cleared to default.
if (cached === null) {
return "";
}
// No local override recorded yet — fall back to server data.
// Use the bare model name and resolve provider from the catalog rather than
// trusting the session's modelProvider, which may be the session default and
// not the model's actual provider (e.g. "zai" for a "deepseek-chat" model).
const activeRow = resolveActiveSessionRow(state);
if (activeRow && typeof activeRow.model === "string" && activeRow.model.trim()) {
const rawOverride = createChatModelOverride(activeRow.model.trim());
if (rawOverride) {
const normalized = normalizeChatModelOverrideValue(rawOverride, state.chatModelCatalog ?? []);
if (normalized) {
return normalized;
}
}
// Fallback: use server-provided provider if catalog lookup fails.
return resolveServerChatModelValue(activeRow.model, activeRow.modelProvider);
}
return "";
}
function resolveDefaultModelValue(state: AppViewState): string {
const defaults = state.sessionsResult?.defaults;
const model = defaults?.model;
if (typeof model !== "string" || !model.trim()) {
return "";
}
const rawOverride = createChatModelOverride(model.trim());
if (rawOverride) {
const normalized = normalizeChatModelOverrideValue(rawOverride, state.chatModelCatalog ?? []);
if (normalized) {
return normalized;
}
}
return resolveServerChatModelValue(model, defaults?.modelProvider);
}
function buildChatModelOptions(
catalog: ModelCatalogEntry[],
currentOverride: string,
defaultModel: string,
): Array<{ value: string; label: string }> {
const seen = new Set<string>();
const options: Array<{ value: string; label: string }> = [];
const addOption = (value: string, label?: string) => {
const trimmed = value.trim();
if (!trimmed) {
return;
}
const key = trimmed.toLowerCase();
if (seen.has(key)) {
return;
}
seen.add(key);
options.push({ value: trimmed, label: label ?? trimmed });
};
for (const entry of catalog) {
const option = buildChatModelOption(entry);
addOption(option.value, option.label);
}
if (currentOverride) {
addOption(currentOverride);
}
if (defaultModel) {
addOption(defaultModel);
}
return options;
}
function renderChatModelSelect(state: AppViewState) {
const currentOverride = resolveModelOverrideValue(state);
const defaultModel = resolveDefaultModelValue(state);
const options = buildChatModelOptions(
state.chatModelCatalog ?? [],
currentOverride,
defaultModel,
);
const defaultDisplay = formatChatModelDisplay(defaultModel);
const defaultLabel = defaultModel ? `Default (${defaultDisplay})` : "Default model";
const { currentOverride, defaultLabel, options } = resolveChatModelSelectState(state);
const busy =
state.chatLoading || state.chatSending || Boolean(state.chatRunId) || state.chatStream !== null;
const disabled =
@@ -647,7 +554,7 @@ async function switchChatModel(state: AppViewState, nextModel: string) {
if (!state.client || !state.connected) {
return;
}
const currentOverride = resolveModelOverrideValue(state);
const currentOverride = resolveChatModelOverrideValue(state);
if (currentOverride === nextModel) {
return;
}

View File

@@ -4,14 +4,22 @@ import {
createChatModelOverride,
formatChatModelDisplay,
normalizeChatModelOverrideValue,
resolveChatModelOverride,
resolvePreferredServerChatModel,
resolveServerChatModelValue,
} from "./chat-model-ref.ts";
import type { ModelCatalogEntry } from "./types.ts";
import {
createAmbiguousModelCatalog,
createModelCatalog,
DEEPSEEK_CHAT_MODEL,
OPENAI_GPT5_MINI_MODEL,
} from "./chat-model.test-helpers.ts";
const catalog: ModelCatalogEntry[] = [
{ id: "gpt-5-mini", name: "GPT-5 Mini", provider: "openai" },
{ id: "claude-sonnet-4-5", name: "Claude Sonnet 4.5", provider: "anthropic" },
];
const catalog = createModelCatalog(OPENAI_GPT5_MINI_MODEL, {
id: "claude-sonnet-4-5",
name: "Claude Sonnet 4.5",
provider: "anthropic",
});
describe("chat-model-ref helpers", () => {
it("builds provider-qualified option values and labels", () => {
@@ -28,13 +36,11 @@ describe("chat-model-ref helpers", () => {
});
it("keeps ambiguous raw overrides unchanged", () => {
const ambiguousCatalog: ModelCatalogEntry[] = [
{ id: "gpt-5-mini", name: "GPT-5 Mini", provider: "openai" },
{ id: "gpt-5-mini", name: "GPT-5 Mini", provider: "openrouter" },
];
expect(
normalizeChatModelOverrideValue(createChatModelOverride("gpt-5-mini"), ambiguousCatalog),
normalizeChatModelOverrideValue(
createChatModelOverride("gpt-5-mini"),
createAmbiguousModelCatalog("gpt-5-mini", "openai", "openrouter"),
),
).toBe("gpt-5-mini");
});
@@ -47,4 +53,50 @@ describe("chat-model-ref helpers", () => {
expect(resolveServerChatModelValue("gpt-5-mini", "openai")).toBe("openai/gpt-5-mini");
expect(resolveServerChatModelValue("alias-only", null)).toBe("alias-only");
});
it("reports the override resolution source for unique catalog matches", () => {
expect(resolveChatModelOverride(createChatModelOverride("gpt-5-mini"), catalog)).toEqual({
value: "openai/gpt-5-mini",
source: "catalog",
});
});
it("reports ambiguous raw overrides without guessing a provider", () => {
expect(
resolveChatModelOverride(
createChatModelOverride("gpt-5-mini"),
createAmbiguousModelCatalog("gpt-5-mini", "openai", "openrouter"),
),
).toEqual({
value: "gpt-5-mini",
source: "raw",
reason: "ambiguous",
});
});
it("prefers the catalog provider over a stale server provider when the match is unique", () => {
expect(resolvePreferredServerChatModel("deepseek-chat", "zai", [DEEPSEEK_CHAT_MODEL])).toEqual({
value: "deepseek/deepseek-chat",
source: "catalog",
});
});
it("falls back to the server provider when the catalog misses or is ambiguous", () => {
expect(resolvePreferredServerChatModel("gpt-5-mini", "openai", [])).toEqual({
value: "openai/gpt-5-mini",
source: "server",
reason: "missing",
});
expect(
resolvePreferredServerChatModel(
"gpt-5-mini",
"openai",
createAmbiguousModelCatalog("gpt-5-mini", "openai", "openrouter"),
),
).toEqual({
value: "openai/gpt-5-mini",
source: "server",
reason: "ambiguous",
});
});
});

View File

@@ -10,6 +10,16 @@ export type ChatModelOverride =
value: string;
};
/** Where a resolved chat-model value came from. */
export type ChatModelResolutionSource = "empty" | "qualified" | "catalog" | "raw" | "server";

/** Why resolution could not fully qualify the value (absent on a clean match). */
export type ChatModelResolutionReason = "empty" | "missing" | "ambiguous";

/** Result of resolving a chat-model reference: the value plus its provenance. */
export type ChatModelResolution = {
  value: string;
  source: ChatModelResolutionSource;
  // Only set when the source is a fallback ("raw"/"server") or the input was empty.
  reason?: ChatModelResolutionReason;
};
export function buildQualifiedChatModelValue(model: string, provider?: string | null): string {
const trimmedModel = model.trim();
if (!trimmedModel) {
@@ -34,15 +44,22 @@ export function normalizeChatModelOverrideValue(
override: ChatModelOverride | null | undefined,
catalog: ModelCatalogEntry[],
): string {
return resolveChatModelOverride(override, catalog).value;
}
export function resolveChatModelOverride(
override: ChatModelOverride | null | undefined,
catalog: ModelCatalogEntry[],
): ChatModelResolution {
if (!override) {
return "";
return { value: "", source: "empty", reason: "empty" };
}
const trimmed = override?.value.trim();
if (!trimmed) {
return "";
return { value: "", source: "empty", reason: "empty" };
}
if (override.kind === "qualified") {
return trimmed;
return { value: trimmed, source: "qualified" };
}
let matchedValue = "";
@@ -56,10 +73,13 @@ export function normalizeChatModelOverrideValue(
continue;
}
if (matchedValue.toLowerCase() !== candidate.toLowerCase()) {
return trimmed;
return { value: trimmed, source: "raw", reason: "ambiguous" };
}
}
return matchedValue || trimmed;
if (matchedValue) {
return { value: matchedValue, source: "catalog" };
}
return { value: trimmed, source: "raw", reason: "missing" };
}
export function resolveServerChatModelValue(
@@ -72,6 +92,42 @@ export function resolveServerChatModelValue(
return buildQualifiedChatModelValue(model, provider);
}
/**
 * Resolve a server-reported model into a provider-qualified value, preferring
 * the catalog's provider over the server's (which may be a stale session default).
 *
 * Returns the resolution value plus provenance: "qualified"/"catalog" when the
 * catalog settled it, "server" when we fell back to the reported provider.
 */
export function resolvePreferredServerChatModel(
  model: string | null | undefined,
  provider: string | null | undefined,
  catalog: ModelCatalogEntry[],
): ChatModelResolution {
  // A non-string or blank model resolves to the empty value.
  const name = typeof model === "string" ? model.trim() : "";
  if (!name) {
    return { value: "", source: "empty", reason: "empty" };
  }
  // Catalog first: an already-qualified ref or a unique catalog match wins.
  const fromCatalog = resolveChatModelOverride(createChatModelOverride(name), catalog);
  switch (fromCatalog.source) {
    case "qualified":
    case "catalog":
      return fromCatalog;
    default:
      // Catalog missed or was ambiguous — use the server-reported provider,
      // surfacing the catalog's reason so callers can tell why.
      return {
        value: resolveServerChatModelValue(name, provider),
        source: "server",
        reason: fromCatalog.reason,
      };
  }
}
/** Convenience wrapper for callers that only need the resolved value, not provenance. */
export function resolvePreferredServerChatModelValue(
  model: string | null | undefined,
  provider: string | null | undefined,
  catalog: ModelCatalogEntry[],
): string {
  const { value } = resolvePreferredServerChatModel(model, provider, catalog);
  return value;
}
export function formatChatModelDisplay(value: string): string {
const trimmed = value.trim();
if (!trimmed) {

View File

@@ -0,0 +1,58 @@
import { describe, expect, it } from "vitest";
import {
resolveChatModelOverrideValue,
resolveChatModelSelectState,
} from "./chat-model-select-state.ts";
import {
createModelCatalog,
createSessionsListResult,
DEEPSEEK_CHAT_MODEL,
DEFAULT_CHAT_MODEL_CATALOG,
} from "./chat-model.test-helpers.ts";
// Unit coverage for the extracted chat-model select-state helpers.
describe("chat-model-select-state", () => {
  it("prefers the catalog provider when the active session provider is stale", () => {
    // The session reports provider "zai", but the catalog uniquely maps
    // deepseek-chat to "deepseek" — the catalog must win.
    const state = {
      sessionKey: "main",
      chatModelOverrides: {},
      chatModelCatalog: createModelCatalog(DEEPSEEK_CHAT_MODEL),
      sessionsResult: createSessionsListResult({
        model: "deepseek-chat",
        modelProvider: "zai",
      }),
    };
    expect(resolveChatModelOverrideValue(state)).toBe("deepseek/deepseek-chat");
  });
  it("falls back to the server-qualified value when catalog lookup fails", () => {
    // Empty catalog: resolution must still qualify using the server provider.
    const state = {
      sessionKey: "main",
      chatModelOverrides: {},
      chatModelCatalog: [],
      sessionsResult: createSessionsListResult({
        model: "gpt-5-mini",
        modelProvider: "openai",
      }),
    };
    expect(resolveChatModelOverrideValue(state)).toBe("openai/gpt-5-mini");
  });
  it("builds picker options without introducing a bare duplicate", () => {
    // The current override is already in the catalog; the options list must
    // contain only the provider-qualified value, never the bare model id.
    const state = {
      sessionKey: "main",
      chatModelOverrides: {},
      chatModelCatalog: createModelCatalog(...DEFAULT_CHAT_MODEL_CATALOG),
      sessionsResult: createSessionsListResult({
        model: "gpt-5-mini",
        modelProvider: "openai",
      }),
    };
    const resolved = resolveChatModelSelectState(state);
    expect(resolved.currentOverride).toBe("openai/gpt-5-mini");
    expect(resolved.options.map((option) => option.value)).toContain("openai/gpt-5-mini");
    expect(resolved.options.map((option) => option.value)).not.toContain("gpt-5-mini");
  });
});

View File

@@ -0,0 +1,105 @@
import type { AppViewState } from "./app-view-state.ts";
import {
buildChatModelOption,
formatChatModelDisplay,
normalizeChatModelOverrideValue,
resolvePreferredServerChatModelValue,
} from "./chat-model-ref.ts";
import type { ModelCatalogEntry } from "./types.ts";
// Narrow view of AppViewState: only the fields the select-state helpers read.
type ChatModelSelectStateInput = Pick<
  AppViewState,
  "sessionKey" | "chatModelOverrides" | "chatModelCatalog" | "sessionsResult"
>;

/** One entry in the chat model picker dropdown. */
export type ChatModelSelectOption = {
  value: string;
  label: string;
};

/** Fully resolved state needed to render the chat model picker. */
export type ChatModelSelectState = {
  currentOverride: string;
  defaultModel: string;
  defaultDisplay: string;
  defaultLabel: string;
  options: ChatModelSelectOption[];
};
// Look up the session row matching the currently selected session key.
function resolveActiveSessionRow(state: ChatModelSelectStateInput) {
  const rows = state.sessionsResult?.sessions ?? [];
  return rows.find((row) => row.key === state.sessionKey);
}
/**
 * Resolve the model override value for the active session.
 *
 * Precedence: local override cache (reflects in-flight patches before
 * sessionsResult refreshes), then the server-reported session row.
 */
export function resolveChatModelOverrideValue(state: ChatModelSelectStateInput): string {
  const catalog = state.chatModelCatalog ?? [];
  const cached = state.chatModelOverrides[state.sessionKey];
  if (cached !== undefined) {
    // null means the override was explicitly cleared back to the default.
    return cached ? normalizeChatModelOverrideValue(cached, catalog) : "";
  }
  // No local record — fall back to the server row, letting the catalog
  // correct a potentially stale session provider.
  const row = resolveActiveSessionRow(state);
  return resolvePreferredServerChatModelValue(row?.model, row?.modelProvider, catalog);
}
// Resolve the gateway-wide default model, preferring catalog provider info.
function resolveDefaultModelValue(state: ChatModelSelectStateInput): string {
  const defaults = state.sessionsResult?.defaults;
  return resolvePreferredServerChatModelValue(
    defaults?.model,
    defaults?.modelProvider,
    state.chatModelCatalog ?? [],
  );
}
/**
 * Build the picker option list: every catalog entry, plus the current
 * override and the default model when they are not already present.
 * Values are deduped case-insensitively, first occurrence (and label) wins.
 */
function buildChatModelOptions(
  catalog: ModelCatalogEntry[],
  currentOverride: string,
  defaultModel: string,
): ChatModelSelectOption[] {
  const byKey = new Map<string, ChatModelSelectOption>();
  const add = (value: string, label?: string) => {
    const trimmed = value.trim();
    if (!trimmed) {
      return;
    }
    const key = trimmed.toLowerCase();
    if (!byKey.has(key)) {
      byKey.set(key, { value: trimmed, label: label ?? trimmed });
    }
  };
  for (const entry of catalog) {
    const option = buildChatModelOption(entry);
    add(option.value, option.label);
  }
  for (const extra of [currentOverride, defaultModel]) {
    if (extra) {
      add(extra);
    }
  }
  // Map preserves insertion order, so the catalog ordering is kept.
  return [...byKey.values()];
}
/**
 * Compute everything the chat model picker needs for a render pass:
 * the resolved current override, default model, display strings, and options.
 */
export function resolveChatModelSelectState(
  state: ChatModelSelectStateInput,
): ChatModelSelectState {
  const currentOverride = resolveChatModelOverrideValue(state);
  const defaultModel = resolveDefaultModelValue(state);
  const defaultDisplay = formatChatModelDisplay(defaultModel);
  const defaultLabel = defaultModel ? `Default (${defaultDisplay})` : "Default model";
  return {
    currentOverride,
    defaultModel,
    defaultDisplay,
    defaultLabel,
    options: buildChatModelOptions(state.chatModelCatalog ?? [], currentOverride, defaultModel),
  };
}

View File

@@ -0,0 +1,110 @@
import type {
GatewaySessionRow,
ModelCatalogEntry,
SessionsListResult,
SessionsPatchResult,
} from "./types.ts";
// Canonical catalog fixtures shared across chat-model tests.
export const OPENAI_GPT5_MODEL: ModelCatalogEntry = {
  id: "gpt-5",
  name: "GPT-5",
  provider: "openai",
};

export const OPENAI_GPT5_MINI_MODEL: ModelCatalogEntry = {
  id: "gpt-5-mini",
  name: "GPT-5 Mini",
  provider: "openai",
};

export const DEEPSEEK_CHAT_MODEL: ModelCatalogEntry = {
  id: "deepseek-chat",
  name: "DeepSeek Chat",
  provider: "deepseek",
};

// Default two-entry catalog used by most chat header tests.
// `satisfies` keeps the literal element types while validating the shape.
export const DEFAULT_CHAT_MODEL_CATALOG = [
  OPENAI_GPT5_MODEL,
  OPENAI_GPT5_MINI_MODEL,
] satisfies ModelCatalogEntry[];
// Build a catalog from the given entries; returns a fresh array so tests
// can mutate their copy without affecting shared fixtures.
export function createModelCatalog(...entries: ModelCatalogEntry[]): ModelCatalogEntry[] {
  return entries.slice();
}
// Build a catalog where the same model id appears under several providers,
// so provider resolution is ambiguous. Entry name mirrors the id.
export function createAmbiguousModelCatalog(
  id: string,
  ...providers: string[]
): ModelCatalogEntry[] {
  const entries: ModelCatalogEntry[] = [];
  for (const provider of providers) {
    entries.push({ id, name: id, provider });
  }
  return entries;
}
// Minimal "main" direct-session row; caller-supplied overrides replace any base field.
export function createMainSessionRow(
  overrides: Partial<GatewaySessionRow> = {},
): GatewaySessionRow {
  const base: GatewaySessionRow = {
    key: "main",
    kind: "direct",
    updatedAt: null,
  };
  return { ...base, ...overrides };
}
export function createSessionsListResult(
params: {
model?: string | null;
modelProvider?: string | null;
defaultsModel?: string | null;
defaultsProvider?: string | null;
omitSessionFromList?: boolean;
} = {},
): SessionsListResult {
const {
model = null,
modelProvider = model ? "openai" : null,
defaultsModel = "gpt-5",
defaultsProvider = defaultsModel ? "openai" : null,
omitSessionFromList = false,
} = params;
return {
ts: 0,
path: "",
count: omitSessionFromList ? 0 : 1,
defaults: {
modelProvider: defaultsProvider,
model: defaultsModel,
contextTokens: null,
},
sessions: omitSessionFromList
? []
: [
createMainSessionRow({
...(modelProvider ? { modelProvider } : {}),
...(model ? { model } : {}),
}),
],
};
}
// Build a successful sessions.patch response whose `resolved` block carries
// the given model and (optionally) provider. A null provider is normalized
// to undefined, matching the gateway's omission semantics.
export function createResolvedModelPatch(
  model: string,
  modelProvider?: string | null,
): SessionsPatchResult {
  const resolved = {
    model,
    modelProvider: modelProvider ?? undefined,
  };
  return {
    ok: true,
    path: "",
    key: "main",
    entry: { sessionId: "main" },
    resolved,
  };
}

View File

@@ -1,4 +1,10 @@
import { describe, expect, it, vi } from "vitest";
import {
createResolvedModelPatch,
createModelCatalog,
DEEPSEEK_CHAT_MODEL,
OPENAI_GPT5_MINI_MODEL,
} from "../chat-model.test-helpers.ts";
import type { GatewayBrowserClient } from "../gateway.ts";
import type { GatewaySessionRow } from "../types.ts";
import { executeSlashCommand } from "./slash-command-executor.ts";
@@ -268,14 +274,10 @@ describe("executeSlashCommand directives", () => {
it("mirrors resolved provider-qualified model refs after /model changes", async () => {
const request = vi.fn(async (method: string, _payload?: unknown) => {
if (method === "sessions.patch") {
return {
ok: true,
key: "main",
resolved: {
modelProvider: "openai",
model: "gpt-5-mini",
},
};
return createResolvedModelPatch("gpt-5-mini", "openai");
}
if (method === "models.list") {
return { models: createModelCatalog(OPENAI_GPT5_MINI_MODEL) };
}
throw new Error(`unexpected method: ${method}`);
});
@@ -330,6 +332,77 @@ describe("executeSlashCommand directives", () => {
});
});
it("corrects stale patched providers with the catalog after /model", async () => {
const request = vi.fn(async (method: string, _payload?: unknown) => {
if (method === "sessions.patch") {
return createResolvedModelPatch("deepseek-chat", "zai");
}
if (method === "models.list") {
return { models: createModelCatalog(DEEPSEEK_CHAT_MODEL) };
}
throw new Error(`unexpected method: ${method}`);
});
const result = await executeSlashCommand(
{ request } as unknown as GatewayBrowserClient,
"main",
"model",
"deepseek-chat",
);
expect(result.sessionPatch?.modelOverride).toEqual({
kind: "qualified",
value: "deepseek/deepseek-chat",
});
});
it("falls back to the patched server provider when catalog lookup fails", async () => {
const request = vi.fn(async (method: string, _payload?: unknown) => {
if (method === "sessions.patch") {
return createResolvedModelPatch("gpt-5-mini", "openai");
}
if (method === "models.list") {
throw new Error("models unavailable");
}
throw new Error(`unexpected method: ${method}`);
});
const result = await executeSlashCommand(
{ request } as unknown as GatewayBrowserClient,
"main",
"model",
"gpt-5-mini",
);
expect(result.sessionPatch?.modelOverride).toEqual({
kind: "qualified",
value: "openai/gpt-5-mini",
});
});
it("reuses a provided model catalog for /model updates without refetching", async () => {
const request = vi.fn(async (method: string, _payload?: unknown) => {
if (method === "sessions.patch") {
return createResolvedModelPatch("gpt-5-mini", "openai");
}
throw new Error(`unexpected method: ${method}`);
});
const result = await executeSlashCommand(
{ request } as unknown as GatewayBrowserClient,
"main",
"model",
"gpt-5-mini",
{ modelCatalog: createModelCatalog(OPENAI_GPT5_MINI_MODEL) },
);
expect(result.sessionPatch?.modelOverride).toEqual({
kind: "qualified",
value: "openai/gpt-5-mini",
});
expect(request).toHaveBeenCalledTimes(1);
expect(request).not.toHaveBeenCalledWith("models.list", {});
});
it("resolves the legacy main alias for /usage", async () => {
const request = vi.fn(async (method: string, _payload?: unknown) => {
if (method === "sessions.list") {

View File

@@ -15,11 +15,7 @@ import {
isSubagentSessionKey,
parseAgentSessionKey,
} from "../../../../src/routing/session-key.js";
import {
createChatModelOverride,
normalizeChatModelOverrideValue,
resolveServerChatModelValue,
} from "../chat-model-ref.ts";
import { createChatModelOverride, resolvePreferredServerChatModel } from "../chat-model-ref.ts";
import type { GatewayBrowserClient } from "../gateway.ts";
import type {
AgentsListResult,
@@ -52,8 +48,8 @@ export type SlashCommandResult = {
export type SlashCommandContext = {
chatModelCatalog?: ModelCatalogEntry[];
modelCatalog?: ModelCatalogEntry[];
};
export async function executeSlashCommand(
client: GatewayBrowserClient,
sessionKey: string,
@@ -136,15 +132,16 @@ async function executeModel(
args: string,
context: SlashCommandContext,
): Promise<SlashCommandResult> {
const modelCatalog = context.chatModelCatalog ?? context.modelCatalog;
if (!args) {
try {
const [sessions, models] = await Promise.all([
client.request<SessionsListResult>("sessions.list", {}),
client.request<{ models: ModelCatalogEntry[] }>("models.list", {}),
modelCatalog ? Promise.resolve(modelCatalog) : loadModelCatalog(client),
]);
const session = resolveCurrentSession(sessions, sessionKey);
const model = session?.model || sessions?.defaults?.model || "default";
const available = models?.models?.map((m: ModelCatalogEntry) => m.id) ?? [];
const available = models.map((m: ModelCatalogEntry) => m.id);
const lines = [`**Current model:** \`${model}\``];
if (available.length > 0) {
lines.push(
@@ -161,17 +158,20 @@ async function executeModel(
}
try {
const patched = await client.request<SessionsPatchResult>("sessions.patch", {
key: sessionKey,
model: args.trim(),
});
const patchedModel = patched.resolved?.model ?? args.trim();
const rawOverride = createChatModelOverride(patchedModel.trim());
const chatModelCatalog = context.chatModelCatalog ?? [];
const resolvedValue = rawOverride
? normalizeChatModelOverrideValue(rawOverride, chatModelCatalog) ||
resolveServerChatModelValue(patchedModel, patched.resolved?.modelProvider)
: resolveServerChatModelValue(patchedModel, patched.resolved?.modelProvider);
const [patched, resolvedModelCatalog] = await Promise.all([
client.request<SessionsPatchResult>("sessions.patch", {
key: sessionKey,
model: args.trim(),
}),
modelCatalog
? Promise.resolve(modelCatalog)
: loadModelCatalog(client, { allowFailure: true }),
]);
const resolvedValue = resolvePreferredServerChatModel(
patched.resolved?.model ?? args.trim(),
patched.resolved?.modelProvider,
resolvedModelCatalog,
).value;
return {
content: `Model set to \`${args.trim()}\`.`,
action: "refresh",
@@ -559,14 +559,29 @@ function resolveCurrentSession(
async function loadThinkingCommandState(client: GatewayBrowserClient, sessionKey: string) {
const [sessions, models] = await Promise.all([
client.request<SessionsListResult>("sessions.list", {}),
client.request<{ models: ModelCatalogEntry[] }>("models.list", {}),
loadModelCatalog(client),
]);
return {
session: resolveCurrentSession(sessions, sessionKey),
models: models?.models ?? [],
models,
};
}
async function loadModelCatalog(
client: GatewayBrowserClient,
opts?: { allowFailure?: boolean },
): Promise<ModelCatalogEntry[]> {
try {
const result = await client.request<{ models: ModelCatalogEntry[] }>("models.list", {});
return result?.models ?? [];
} catch (err) {
if (opts?.allowFailure) {
return [];
}
throw err;
}
}
function resolveCurrentThinkingLevel(
session: GatewaySessionRow | undefined,
models: ModelCatalogEntry[],

View File

@@ -6,6 +6,12 @@ import { i18n } from "../../i18n/index.ts";
import { getSafeLocalStorage } from "../../local-storage.ts";
import { renderChatSessionSelect } from "../app-render.helpers.ts";
import type { AppViewState } from "../app-view-state.ts";
import {
createModelCatalog,
createSessionsListResult,
DEEPSEEK_CHAT_MODEL,
DEFAULT_CHAT_MODEL_CATALOG,
} from "../chat-model.test-helpers.ts";
import type { GatewayBrowserClient } from "../gateway.ts";
import type { ModelCatalogEntry } from "../types.ts";
import type { SessionsListResult } from "../types.ts";
@@ -25,17 +31,15 @@ function createSessions(): SessionsListResult {
function createChatHeaderState(
overrides: {
model?: string | null;
modelProvider?: string | null;
models?: ModelCatalogEntry[];
omitSessionFromList?: boolean;
} = {},
): { state: AppViewState; request: ReturnType<typeof vi.fn> } {
let currentModel = overrides.model ?? null;
let currentModelProvider = currentModel ? "openai" : null;
let currentModelProvider = overrides.modelProvider ?? (currentModel ? "openai" : null);
const omitSessionFromList = overrides.omitSessionFromList ?? false;
const catalog = overrides.models ?? [
{ id: "gpt-5", name: "GPT-5", provider: "openai" },
{ id: "gpt-5-mini", name: "GPT-5 Mini", provider: "openai" },
];
const catalog = overrides.models ?? createModelCatalog(...DEFAULT_CHAT_MODEL_CATALOG);
const request = vi.fn(async (method: string, params: Record<string, unknown>) => {
if (method === "sessions.patch") {
const nextModel = (params.model as string | null | undefined) ?? null;
@@ -64,23 +68,11 @@ function createChatHeaderState(
return { messages: [], thinkingLevel: null };
}
if (method === "sessions.list") {
return {
ts: 0,
path: "",
count: omitSessionFromList ? 0 : 1,
defaults: { modelProvider: "openai", model: "gpt-5", contextTokens: null },
sessions: omitSessionFromList
? []
: [
{
key: "main",
kind: "direct",
updatedAt: null,
modelProvider: currentModelProvider,
model: currentModel,
},
],
};
return createSessionsListResult({
model: currentModel,
modelProvider: currentModelProvider,
omitSessionFromList,
});
}
if (method === "models.list") {
return { models: catalog };
@@ -91,23 +83,11 @@ function createChatHeaderState(
sessionKey: "main",
connected: true,
sessionsHideCron: true,
sessionsResult: {
ts: 0,
path: "",
count: omitSessionFromList ? 0 : 1,
defaults: { modelProvider: "openai", model: "gpt-5", contextTokens: null },
sessions: omitSessionFromList
? []
: [
{
key: "main",
kind: "direct",
updatedAt: null,
modelProvider: currentModelProvider,
model: currentModel,
},
],
},
sessionsResult: createSessionsListResult({
model: currentModel,
modelProvider: currentModelProvider,
omitSessionFromList,
}),
chatModelOverrides: {},
chatModelCatalog: catalog,
chatModelsLoading: false,
@@ -850,7 +830,7 @@ describe("chat view", () => {
key: "main",
model: null,
});
expect(state.sessionsResult?.sessions[0]?.model).toBeNull();
expect(state.sessionsResult?.sessions[0]?.model).toBeUndefined();
vi.unstubAllGlobals();
});
@@ -916,6 +896,43 @@ describe("chat view", () => {
expect(optionValues).not.toContain("gpt-5-mini");
});
it("prefers the catalog provider when the active session reports a stale provider", () => {
const { state } = createChatHeaderState({
model: "deepseek-chat",
modelProvider: "zai",
models: createModelCatalog(DEEPSEEK_CHAT_MODEL),
});
const container = document.createElement("div");
render(renderChatSessionSelect(state), container);
const modelSelect = container.querySelector<HTMLSelectElement>(
'select[data-chat-model-select="true"]',
);
expect(modelSelect?.value).toBe("deepseek/deepseek-chat");
});
it("falls back to the server-qualified session model when catalog lookup fails", () => {
const { state } = createChatHeaderState({
model: "gpt-5-mini",
models: [],
});
const container = document.createElement("div");
render(renderChatSessionSelect(state), container);
const modelSelect = container.querySelector<HTMLSelectElement>(
'select[data-chat-model-select="true"]',
);
expect(modelSelect?.value).toBe("openai/gpt-5-mini");
const optionValues = Array.from(modelSelect?.querySelectorAll("option") ?? []).map(
(option) => option.value,
);
expect(optionValues).toContain("openai/gpt-5-mini");
expect(optionValues).not.toContain("gpt-5-mini");
});
it("prefers the session label over displayName in the grouped chat session selector", () => {
const { state } = createChatHeaderState({ omitSessionFromList: true });
state.sessionKey = "agent:main:subagent:4f2146de-887b-4176-9abe-91140082959b";