@@ -13,7 +13,11 @@ import type { Toast } from "../ChatInputToast";
 import { ChatInputToast } from "../ChatInputToast";
 import { createCommandToast, createErrorToast } from "../ChatInputToasts";
 import { parseCommand } from "@/browser/utils/slashCommands/parser";
-import { usePersistedState, updatePersistedState } from "@/browser/hooks/usePersistedState";
+import {
+  readPersistedState,
+  usePersistedState,
+  updatePersistedState,
+} from "@/browser/hooks/usePersistedState";
 import { useSettings } from "@/browser/contexts/SettingsContext";
 import { useWorkspaceContext } from "@/browser/contexts/WorkspaceContext";
 import { useMode } from "@/browser/contexts/ModeContext";
@@ -26,9 +30,11 @@ import { enforceThinkingPolicy } from "@/common/utils/thinking/policy";
 import { useSendMessageOptions } from "@/browser/hooks/useSendMessageOptions";
 import {
   getModelKey,
+  getThinkingLevelKey,
   getWorkspaceAISettingsByModeKey,
   getInputKey,
   getInputImagesKey,
+  MODE_AI_DEFAULTS_KEY,
   VIM_ENABLED_KEY,
   getProjectScopeId,
   getPendingScopeId,
@@ -74,7 +80,8 @@ import {
   processImageFiles,
 } from "@/browser/utils/imageHandling";
 
-import type { ThinkingLevel } from "@/common/types/thinking";
+import type { ModeAiDefaults } from "@/common/types/modeAiDefaults";
+import { coerceThinkingLevel, type ThinkingLevel } from "@/common/types/thinking";
 import type { MuxFrontendMetadata } from "@/common/types/message";
 import { prepareUserMessageForSend } from "@/common/types/message";
 import { MODEL_ABBREVIATION_EXAMPLES } from "@/common/constants/knownModels";
@@ -270,6 +277,14 @@ const ChatInputInner: React.FC<ChatInputProps> = (props) => {
     defaultModel,
     setDefaultModel,
   } = useModelsFromSettings();
+
+  const [modeAiDefaults] = usePersistedState<ModeAiDefaults>(
+    MODE_AI_DEFAULTS_KEY,
+    {},
+    {
+      listener: true,
+    }
+  );
   const commandListId = useId();
   const telemetry = useTelemetry();
   const [vimEnabled, setVimEnabled] = usePersistedState<boolean>(VIM_ENABLED_KEY, false, {
@@ -445,23 +460,41 @@ const ChatInputInner: React.FC<ChatInputProps> = (props) => {
   const hasReviews = attachedReviews.length > 0;
   const canSend = (hasTypedText || hasImages || hasReviews) && !disabled && !isSendInFlight;
 
-  // When entering creation mode, initialize the project-scoped model to the
-  // default so previous manual picks don't bleed into new creation flows.
-  // Only runs once per creation session (not when defaultModel changes, which
-  // would clobber the user's intentional model selection).
-  const creationModelInitialized = useRef<string | null>(null);
+  const creationProjectPath = variant === "creation" ? props.projectPath : "";
+
+  // Creation variant: keep the project-scoped model/thinking in sync with global per-mode defaults
+  // so switching Plan/Exec uses the configured defaults (and respects "inherit" semantics).
   useEffect(() => {
-    if (variant === "creation" && defaultModel) {
-      // Only initialize once per project scope
-      if (creationModelInitialized.current !== storageKeys.modelKey) {
-        creationModelInitialized.current = storageKeys.modelKey;
-        updatePersistedState(storageKeys.modelKey, defaultModel);
-      }
-    } else if (variant !== "creation") {
-      // Reset when leaving creation mode so re-entering triggers initialization
-      creationModelInitialized.current = null;
+    if (variant !== "creation") {
+      return;
+    }
+
+    const scopeId = getProjectScopeId(creationProjectPath);
+    const modelKey = getModelKey(scopeId);
+    const thinkingKey = getThinkingLevelKey(scopeId);
+
+    const fallbackModel = defaultModel;
+
+    const existingModel = readPersistedState<string>(modelKey, fallbackModel);
+    const candidateModel = modeAiDefaults[mode]?.modelString ?? existingModel;
+    const resolvedModel =
+      typeof candidateModel === "string" && candidateModel.trim().length > 0
+        ? candidateModel
+        : fallbackModel;
+
+    const existingThinking = readPersistedState<ThinkingLevel>(thinkingKey, "off");
+    const candidateThinking = modeAiDefaults[mode]?.thinkingLevel ?? existingThinking ?? "off";
+    const resolvedThinking = coerceThinkingLevel(candidateThinking) ?? "off";
+    const effectiveThinking = enforceThinkingPolicy(resolvedModel, resolvedThinking);
+
+    if (existingModel !== resolvedModel) {
+      updatePersistedState(modelKey, resolvedModel);
+    }
+
+    if (existingThinking !== effectiveThinking) {
+      updatePersistedState(thinkingKey, effectiveThinking);
     }
-  }, [variant, defaultModel, storageKeys.modelKey]);
+  }, [creationProjectPath, defaultModel, mode, modeAiDefaults, variant]);
 
   // Expose ChatInput auto-focus completion for Storybook/tests.
   const chatInputSectionRef = useRef<HTMLDivElement | null>(null);
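
For reference, a minimal self-contained sketch of the resolution order the new effect applies: per-mode default, then the previously persisted value, then the global fallback, with the thinking level clamped to what the chosen model allows. The types and helper signatures below are local stand-ins inferred from this diff, not the repo's actual exports.

// Local stand-ins for the shapes this diff exercises (assumed, not the real exports).
type ThinkingLevel = "off" | "low" | "medium" | "high"; // exact union assumed
type ModeAiDefaults = Partial<Record<string, { modelString?: string; thinkingLevel?: string }>>;

interface ResolveArgs {
  mode: string;
  modeAiDefaults: ModeAiDefaults;
  persistedModel: string; // e.g. readPersistedState(modelKey, defaultModel)
  persistedThinking: ThinkingLevel | undefined; // e.g. readPersistedState(thinkingKey, "off")
  defaultModel: string;
  coerceThinkingLevel: (value: unknown) => ThinkingLevel | undefined;
  enforceThinkingPolicy: (model: string, level: ThinkingLevel) => ThinkingLevel;
}

// Mirrors the effect body: prefer the per-mode default when present, otherwise keep the
// persisted value, otherwise fall back to the global default model / "off".
function resolveCreationAiSettings(args: ResolveArgs): { model: string; thinking: ThinkingLevel } {
  const perMode = args.modeAiDefaults[args.mode];

  const candidateModel = perMode?.modelString ?? args.persistedModel;
  const model =
    typeof candidateModel === "string" && candidateModel.trim().length > 0
      ? candidateModel
      : args.defaultModel;

  const candidateThinking = perMode?.thinkingLevel ?? args.persistedThinking ?? "off";
  const thinking = args.enforceThinkingPolicy(
    model,
    args.coerceThinkingLevel(candidateThinking) ?? "off"
  );

  return { model, thinking };
}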