@@ -13,7 +13,11 @@ import type { Toast } from "../ChatInputToast";
 import { ChatInputToast } from "../ChatInputToast";
 import { createCommandToast, createErrorToast } from "../ChatInputToasts";
 import { parseCommand } from "@/browser/utils/slashCommands/parser";
-import { usePersistedState, updatePersistedState } from "@/browser/hooks/usePersistedState";
+import {
+  readPersistedState,
+  usePersistedState,
+  updatePersistedState,
+} from "@/browser/hooks/usePersistedState";
 import { useSettings } from "@/browser/contexts/SettingsContext";
 import { useWorkspaceContext } from "@/browser/contexts/WorkspaceContext";
 import { useMode } from "@/browser/contexts/ModeContext";
@@ -26,9 +30,11 @@ import { enforceThinkingPolicy } from "@/common/utils/thinking/policy";
 import { useSendMessageOptions } from "@/browser/hooks/useSendMessageOptions";
 import {
   getModelKey,
+  getThinkingLevelKey,
   getWorkspaceAISettingsByModeKey,
   getInputKey,
   getInputImagesKey,
+  MODE_AI_DEFAULTS_KEY,
   VIM_ENABLED_KEY,
   getProjectScopeId,
   getPendingScopeId,
@@ -74,7 +80,8 @@ import {
   processImageFiles,
 } from "@/browser/utils/imageHandling";
 
-import type { ThinkingLevel } from "@/common/types/thinking";
+import type { ModeAiDefaults } from "@/common/types/modeAiDefaults";
+import { coerceThinkingLevel, type ThinkingLevel } from "@/common/types/thinking";
 import type { MuxFrontendMetadata } from "@/common/types/message";
 import { prepareUserMessageForSend } from "@/common/types/message";
 import { MODEL_ABBREVIATION_EXAMPLES } from "@/common/constants/knownModels";
@@ -277,6 +284,14 @@ const ChatInputInner: React.FC<ChatInputProps> = (props) => {
     defaultModel,
     setDefaultModel,
   } = useModelsFromSettings();
+
+  const [modeAiDefaults] = usePersistedState<ModeAiDefaults>(
+    MODE_AI_DEFAULTS_KEY,
+    {},
+    {
+      listener: true,
+    }
+  );
   const commandListId = useId();
   const telemetry = useTelemetry();
   const [vimEnabled, setVimEnabled] = usePersistedState<boolean>(VIM_ENABLED_KEY, false, {
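
The `modeAiDefaults` subscription above relies on `listener: true` to re-render when another part of the UI (for example, a settings screen) writes the same key. As a point of reference, here is a hypothetical stand-in for that contract; the real hook lives in `@/browser/hooks/usePersistedState`, and the localStorage backing and custom event wiring below are assumptions for illustration, not the repo's implementation:

```ts
// Hypothetical sketch of a persisted-state trio with an opt-in change listener.
// Assumed details: localStorage storage, JSON serialization, and a same-tab
// "persisted-state" CustomEvent (the native "storage" event only fires cross-tab).
import { useEffect, useState } from "react";

function readPersistedState<T>(key: string, fallback: T): T {
  const raw = window.localStorage.getItem(key);
  return raw === null ? fallback : (JSON.parse(raw) as T);
}

function updatePersistedState<T>(key: string, value: T): void {
  window.localStorage.setItem(key, JSON.stringify(value));
  // Notify subscribers in this tab that the key changed.
  window.dispatchEvent(new CustomEvent("persisted-state", { detail: { key } }));
}

function usePersistedState<T>(
  key: string,
  defaultValue: T,
  options?: { listener?: boolean }
): [T, (next: T) => void] {
  const [value, setValue] = useState<T>(() => readPersistedState(key, defaultValue));

  // With listener: true, refresh local state whenever the key is updated elsewhere.
  useEffect(() => {
    if (!options?.listener) return;
    const onChange = (event: Event) => {
      if ((event as CustomEvent<{ key: string }>).detail.key === key) {
        setValue(readPersistedState(key, defaultValue));
      }
    };
    window.addEventListener("persisted-state", onChange);
    return () => window.removeEventListener("persisted-state", onChange);
  }, [key, defaultValue, options?.listener]);

  const set = (next: T) => {
    setValue(next);
    updatePersistedState(key, next);
  };
  return [value, set];
}
```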
@@ -452,23 +467,41 @@ const ChatInputInner: React.FC<ChatInputProps> = (props) => {
   const hasReviews = attachedReviews.length > 0;
   const canSend = (hasTypedText || hasImages || hasReviews) && !disabled && !isSendInFlight;
 
-  // When entering creation mode, initialize the project-scoped model to the
-  // default so previous manual picks don't bleed into new creation flows.
-  // Only runs once per creation session (not when defaultModel changes, which
-  // would clobber the user's intentional model selection).
-  const creationModelInitialized = useRef<string | null>(null);
+  const creationProjectPath = variant === "creation" ? props.projectPath : "";
+
+  // Creation variant: keep the project-scoped model/thinking in sync with global per-mode defaults
+  // so switching Plan/Exec uses the configured defaults (and respects "inherit" semantics).
   useEffect(() => {
-    if (variant === "creation" && defaultModel) {
-      // Only initialize once per project scope
-      if (creationModelInitialized.current !== storageKeys.modelKey) {
-        creationModelInitialized.current = storageKeys.modelKey;
-        updatePersistedState(storageKeys.modelKey, defaultModel);
-      }
-    } else if (variant !== "creation") {
-      // Reset when leaving creation mode so re-entering triggers initialization
-      creationModelInitialized.current = null;
+    if (variant !== "creation") {
+      return;
+    }
+
+    const scopeId = getProjectScopeId(creationProjectPath);
+    const modelKey = getModelKey(scopeId);
+    const thinkingKey = getThinkingLevelKey(scopeId);
+
+    const fallbackModel = defaultModel;
+
+    const existingModel = readPersistedState<string>(modelKey, fallbackModel);
+    const candidateModel = modeAiDefaults[mode]?.modelString ?? existingModel;
+    const resolvedModel =
+      typeof candidateModel === "string" && candidateModel.trim().length > 0
+        ? candidateModel
+        : fallbackModel;
+
+    const existingThinking = readPersistedState<ThinkingLevel>(thinkingKey, "off");
+    const candidateThinking = modeAiDefaults[mode]?.thinkingLevel ?? existingThinking ?? "off";
+    const resolvedThinking = coerceThinkingLevel(candidateThinking) ?? "off";
+    const effectiveThinking = enforceThinkingPolicy(resolvedModel, resolvedThinking);
+
+    if (existingModel !== resolvedModel) {
+      updatePersistedState(modelKey, resolvedModel);
+    }
+
+    if (existingThinking !== effectiveThinking) {
+      updatePersistedState(thinkingKey, effectiveThinking);
     }
-  }, [variant, defaultModel, storageKeys.modelKey]);
+  }, [creationProjectPath, defaultModel, mode, modeAiDefaults, variant]);
 
   // Expose ChatInput auto-focus completion for Storybook/tests.
   const chatInputSectionRef = useRef<HTMLDivElement | null>(null);
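
A standalone sketch of the precedence the new effect encodes: the per-mode default wins when set (an unset field means "inherit"), then the already-persisted value, then the global default model; the thinking level is coerced to a known value and clamped by the model policy. The `ModeAiDefault` shape, the thinking levels beyond "off", the sample model strings, and the `legacy/` rule in the stand-in policy are illustrative assumptions; the real types and policy come from the modules imported in the diff.

```ts
// Assumed types; the real ModeAiDefaults / ThinkingLevel live in the repo modules.
type ThinkingLevel = "off" | "low" | "medium" | "high";

interface ModeAiDefault {
  modelString?: string;
  thinkingLevel?: string; // persisted values are untrusted until coerced
}

type ModeAiDefaults = Record<string, ModeAiDefault | undefined>;

// Stand-in for coerceThinkingLevel: returns undefined for unrecognized values.
function coerceThinkingLevel(value: unknown): ThinkingLevel | undefined {
  return value === "off" || value === "low" || value === "medium" || value === "high"
    ? value
    : undefined;
}

// Stand-in for enforceThinkingPolicy: clamp to "off" for models that do not
// support thinking (hypothetical rule; the real policy lives in
// @/common/utils/thinking/policy).
function enforceThinkingPolicy(model: string, level: ThinkingLevel): ThinkingLevel {
  const supportsThinking = !model.startsWith("legacy/");
  return supportsThinking ? level : "off";
}

// Mirrors the effect's precedence: per-mode default → existing persisted value
// → global default model, then coerce and clamp the thinking level.
function resolveCreationAiSettings(
  mode: string,
  defaults: ModeAiDefaults,
  existingModel: string,
  existingThinking: ThinkingLevel,
  fallbackModel: string
): { model: string; thinking: ThinkingLevel } {
  const candidateModel = defaults[mode]?.modelString ?? existingModel;
  const model = candidateModel.trim().length > 0 ? candidateModel : fallbackModel;

  const candidateThinking = defaults[mode]?.thinkingLevel ?? existingThinking;
  const thinking = enforceThinkingPolicy(model, coerceThinkingLevel(candidateThinking) ?? "off");

  return { model, thinking };
}

// Example: Plan mode has a configured default, Exec inherits the existing pick.
const defaults: ModeAiDefaults = {
  plan: { modelString: "anthropic/claude-sonnet", thinkingLevel: "high" },
};
console.log(resolveCreationAiSettings("plan", defaults, "openai/gpt-4o", "off", "openai/gpt-4o"));
// → { model: "anthropic/claude-sonnet", thinking: "high" }
console.log(resolveCreationAiSettings("exec", defaults, "openai/gpt-4o", "low", "openai/gpt-4o"));
// → { model: "openai/gpt-4o", thinking: "low" }
```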