Commit c157e10
Merge pull request #121 from drivecore/feature/add-token-cache-option
Add tokenCache configuration option
2 parents 6201f39 + 689c58b commit c157e10

File tree

9 files changed: +40 -5 lines

.changeset/itchy-hounds-cross.md

Lines changed: 6 additions & 0 deletions

@@ -0,0 +1,6 @@
+---
+'mycoder-agent': patch
+'mycoder': patch
+---
+
+Add ability to enable/disable token caching via config values

packages/agent/src/core/tokens.ts

Lines changed: 1 addition & 0 deletions

@@ -73,6 +73,7 @@ export class TokenUsage {
 export class TokenTracker {
   public tokenUsage = new TokenUsage();
   public children: TokenTracker[] = [];
+  public tokenCache?: boolean;

   constructor(
     public readonly name: string = 'unnamed',

packages/agent/src/core/toolAgent/toolAgentCore.ts

Lines changed: 14 additions & 5 deletions

@@ -57,11 +57,20 @@ export const toolAgent = async (
     });
   });

-  // Apply cache control to messages for token caching
-  const messagesWithCacheControl = [
-    createCacheControlMessageFromSystemPrompt(systemPrompt),
-    ...addCacheControlToMessages(messages),
-  ];
+  // Apply cache control to messages for token caching if enabled
+  const messagesWithCacheControl =
+    tokenTracker.tokenCache !== false && context.tokenCache !== false
+      ? [
+          createCacheControlMessageFromSystemPrompt(systemPrompt),
+          ...addCacheControlToMessages(messages),
+        ]
+      : [
+          {
+            role: 'system',
+            content: systemPrompt,
+          } as CoreMessage,
+          ...messages,
+        ];

   const generateTextProps = {
     model: config.model,
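
Note that the guard above makes caching opt-out rather than opt-in: it only turns caching off when a flag is explicitly false, so an unset flag still means enabled. A minimal sketch of that semantics in isolation (the isTokenCacheEnabled helper is hypothetical, not part of this commit):

// Hypothetical helper; mirrors the `!== false` checks in toolAgentCore.ts.
function isTokenCacheEnabled(
  trackerFlag?: boolean,
  contextFlag?: boolean,
): boolean {
  // Caching stays on unless a flag is explicitly set to false,
  // so `undefined` still means "enabled".
  return trackerFlag !== false && contextFlag !== false;
}

console.log(isTokenCacheEnabled(undefined, undefined)); // true  (default)
console.log(isTokenCacheEnabled(true, undefined)); // true
console.log(isTokenCacheEnabled(true, false)); // false (either flag can opt out)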

packages/agent/src/core/types.ts

Lines changed: 1 addition & 0 deletions

@@ -18,6 +18,7 @@ export type ToolContext = {
   tokenTracker: TokenTracker;
   githubMode: boolean;
   customPrompt?: string;
+  tokenCache?: boolean;
 };

 export type Tool<TParams = Record<string, any>, TReturn = any> = {

packages/cli/README.md

Lines changed: 4 additions & 0 deletions

@@ -125,6 +125,7 @@ mycoder --modelProvider openai --modelName gpt-4o-2024-05-13 "Your prompt here"
 - `pageFilter`: Method to process webpage content: 'simple', 'none', or 'readability' (default: `none`)
 - `ollamaBaseUrl`: Base URL for Ollama API (default: `http://localhost:11434/api`)
 - `customPrompt`: Custom instructions to append to the system prompt for both main agent and sub-agents (default: `""`)
+- `tokenCache`: Enable token caching for LLM API calls (default: `true`)

 Example:

@@ -143,6 +144,9 @@ mycoder config set ollamaBaseUrl http://your-ollama-server:11434/api

 # Set custom instructions for the agent
 mycoder config set customPrompt "Always prioritize readability and simplicity in your code. Prefer TypeScript over JavaScript when possible."
+
+# Disable token caching for LLM API calls
+mycoder config set tokenCache false
 ```

 ## Environment Variables

packages/cli/src/commands/$default.ts

Lines changed: 6 additions & 0 deletions

@@ -92,6 +92,10 @@ export const command: CommandModule<SharedOptions, DefaultArgs> = {
     try {
       // Get configuration for model provider and name
       const userConfig = getConfig();
+      // Use command line option if provided, otherwise use config value
+      tokenTracker.tokenCache =
+        argv.tokenCache !== undefined ? argv.tokenCache : userConfig.tokenCache;
+
       const userModelProvider = argv.modelProvider || userConfig.modelProvider;
       const userModelName = argv.modelName || userConfig.modelName;
       const userMaxTokens = argv.maxTokens || userConfig.maxTokens;
@@ -181,6 +185,8 @@ export const command: CommandModule<SharedOptions, DefaultArgs> = {
       tokenTracker,
       githubMode: config.githubMode,
       customPrompt: config.customPrompt,
+      tokenCache:
+        argv.tokenCache !== undefined ? argv.tokenCache : config.tokenCache,
     });

     const output =
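
Both assignments above apply the same precedence rule: an explicit CLI flag overrides the stored config value. A minimal sketch of that rule (the resolveTokenCache helper and its name are illustrative, not part of the commit):

// Hypothetical helper; `!== undefined` mirrors the ternaries in $default.ts,
// so that an explicit `false` from the CLI is not swallowed by a truthiness check.
function resolveTokenCache(
  cliFlag: boolean | undefined,
  configValue: boolean,
): boolean {
  return cliFlag !== undefined ? cliFlag : configValue;
}

console.log(resolveTokenCache(undefined, true)); // true  (falls back to config)
console.log(resolveTokenCache(false, true)); // false (CLI flag wins)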

packages/cli/src/options.ts

Lines changed: 5 additions & 0 deletions

@@ -12,6 +12,7 @@ export type SharedOptions = {
   readonly maxTokens?: number;
   readonly temperature?: number;
   readonly profile?: boolean;
+  readonly tokenCache?: boolean;
 };

 export const sharedOptions = {
@@ -82,4 +83,8 @@ export const sharedOptions = {
     description: 'Custom Sentry DSN for error tracking',
     hidden: true,
   } as const,
+  tokenCache: {
+    type: 'boolean',
+    description: 'Enable token caching for LLM API calls',
+  } as const,
 };
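
Declared as an optional boolean with no default, the option stays undefined unless the user passes it, which is what lets $default.ts distinguish "not given" from an explicit opt-out. A minimal sketch, assuming yargs (a standalone parser for illustration, not the CLI's actual wiring):

import yargs from 'yargs';

// Parse only the tokenCache flag, mirroring the sharedOptions entry above.
const argv = yargs(process.argv.slice(2))
  .option('tokenCache', {
    type: 'boolean',
    description: 'Enable token caching for LLM API calls',
  })
  .parseSync();

// undefined when the flag is absent; true/false when passed explicitly,
// e.g. `--tokenCache=false` or (via yargs' default boolean negation) `--no-tokenCache`.
console.log(argv.tokenCache);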

packages/cli/src/settings/config.ts

Lines changed: 1 addition & 0 deletions

@@ -19,6 +19,7 @@ const defaultConfig = {
   ollamaBaseUrl: 'http://localhost:11434/api',
   customPrompt: '',
   profile: false,
+  tokenCache: true,
 };

 export type Config = typeof defaultConfig;

packages/cli/tests/settings/config.test.ts

Lines changed: 2 additions & 0 deletions

@@ -48,6 +48,7 @@ describe('Config', () => {
       ollamaBaseUrl: 'http://localhost:11434/api',
       profile: false,
       customPrompt: '',
+      tokenCache: true,
     });
     expect(fs.existsSync).toHaveBeenCalledWith(mockConfigFile);
   });
@@ -84,6 +85,7 @@ describe('Config', () => {
       ollamaBaseUrl: 'http://localhost:11434/api',
       profile: false,
       customPrompt: '',
+      tokenCache: true,
     });
   });
 });
