
Commit 949b4c2

Add tokenCache configuration option (fixes #119)

Parent: 3911370

7 files changed (+25 -5 lines)


packages/agent/src/core/tokens.ts

Lines changed: 1 addition & 0 deletions
```diff
@@ -73,6 +73,7 @@ export class TokenUsage {
 export class TokenTracker {
   public tokenUsage = new TokenUsage();
   public children: TokenTracker[] = [];
+  public tokenCache?: boolean;

   constructor(
     public readonly name: string = 'unnamed',
```
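The new field defaults to `undefined`, which the agent core (below) treats the same as `true`, so existing `TokenTracker` instances keep caching enabled. A minimal sketch of toggling it, assuming the remaining constructor parameters are optional:

```ts
import { TokenTracker } from './tokens';

// Caching stays on unless the flag is explicitly set to false.
const tracker = new TokenTracker('main-agent');
tracker.tokenCache = false; // opt this tracker out of token caching
```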

packages/agent/src/core/toolAgent/toolAgentCore.ts

Lines changed: 8 additions & 5 deletions
```diff
@@ -57,11 +57,14 @@ export const toolAgent = async (
     });
   });

-  // Apply cache control to messages for token caching
-  const messagesWithCacheControl = [
-    createCacheControlMessageFromSystemPrompt(systemPrompt),
-    ...addCacheControlToMessages(messages),
-  ];
+  // Apply cache control to messages for token caching if enabled
+  const messagesWithCacheControl =
+    tokenTracker.tokenCache !== false && context.tokenCache !== false
+      ? [
+          createCacheControlMessageFromSystemPrompt(systemPrompt),
+          ...addCacheControlToMessages(messages),
+        ]
+      : [{ role: 'system', content: systemPrompt }, ...messages];

   const generateTextProps = {
     model: config.model,
```
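The `!== false` comparisons make caching opt-out: an `undefined` flag on either the tracker or the context leaves caching on, and either one can disable it. A standalone sketch of that resolution rule (the function name is illustrative, not from the codebase):

```ts
// Mirrors the "enabled unless explicitly false" rule used above.
function isTokenCacheEnabled(
  trackerFlag?: boolean,
  contextFlag?: boolean,
): boolean {
  return trackerFlag !== false && contextFlag !== false;
}

isTokenCacheEnabled(undefined, undefined); // true: defaults keep caching on
isTokenCacheEnabled(true, undefined); // true
isTokenCacheEnabled(undefined, false); // false: either flag can opt out
```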

packages/agent/src/core/types.ts

Lines changed: 1 addition & 0 deletions
```diff
@@ -18,6 +18,7 @@ export type ToolContext = {
   tokenTracker: TokenTracker;
   githubMode: boolean;
   customPrompt?: string;
+  tokenCache?: boolean;
 };

 export type Tool<TParams = Record<string, any>, TReturn = any> = {
```
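Since the field is optional, existing `ToolContext` literals continue to type-check. A hypothetical context showing the new field (only the members visible in this diff are included, hence the `Partial`):

```ts
import type { ToolContext } from './types';
import { TokenTracker } from './tokens';

// Hypothetical example; ToolContext has more members than this diff shows.
const context: Partial<ToolContext> = {
  tokenTracker: new TokenTracker('main'),
  githubMode: false,
  tokenCache: false, // disable token caching for this run
};
```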

packages/cli/README.md

Lines changed: 4 additions & 0 deletions
````diff
@@ -125,6 +125,7 @@ mycoder --modelProvider openai --modelName gpt-4o-2024-05-13 "Your prompt here"
 - `pageFilter`: Method to process webpage content: 'simple', 'none', or 'readability' (default: `none`)
 - `ollamaBaseUrl`: Base URL for Ollama API (default: `http://localhost:11434/api`)
 - `customPrompt`: Custom instructions to append to the system prompt for both main agent and sub-agents (default: `""`)
+- `tokenCache`: Enable token caching for LLM API calls (default: `true`)

 Example:

@@ -143,6 +144,9 @@ mycoder config set ollamaBaseUrl http://your-ollama-server:11434/api

 # Set custom instructions for the agent
 mycoder config set customPrompt "Always prioritize readability and simplicity in your code. Prefer TypeScript over JavaScript when possible."
+
+# Disable token caching for LLM API calls
+mycoder config set tokenCache false
 ```

 ## Environment Variables
````
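The stored config value acts as the default; the matching `--tokenCache` CLI flag (added in `options.ts` below) overrides it per invocation. Assuming yargs's standard boolean negation syntax, a one-off run might look like:

```sh
# Override the stored config for a single run (assumes yargs boolean negation)
mycoder --no-tokenCache "Refactor the parser module"
```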

packages/cli/src/commands/$default.ts

Lines changed: 5 additions & 0 deletions
```diff
@@ -88,6 +88,9 @@ export const command: CommandModule<SharedOptions, DefaultArgs> = {
     undefined,
     argv.tokenUsage ? LogLevel.info : LogLevel.debug,
   );
+  // Use command line option if provided, otherwise use config value
+  tokenTracker.tokenCache =
+    argv.tokenCache !== undefined ? argv.tokenCache : userConfig.tokenCache;

   try {
     // Get configuration for model provider and name
@@ -177,6 +180,8 @@ export const command: CommandModule<SharedOptions, DefaultArgs> = {
     tokenTracker,
     githubMode: config.githubMode,
     customPrompt: config.customPrompt,
+    tokenCache:
+      argv.tokenCache !== undefined ? argv.tokenCache : config.tokenCache,
   });

   const output =
```
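Both assignments apply the same precedence rule: an explicit CLI flag wins, otherwise the stored config value is used. Extracted as a hypothetical helper (not in the codebase) for clarity:

```ts
// Hypothetical helper: explicit CLI values win over config defaults.
function resolveFlag(
  cliValue: boolean | undefined,
  configValue: boolean,
): boolean {
  return cliValue !== undefined ? cliValue : configValue;
}

resolveFlag(undefined, true); // true: config default keeps caching on
resolveFlag(false, true); // false: an explicit flag overrides config
```

Since only `undefined` should fall through to the config value, this is equivalent to `cliValue ?? configValue`.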

packages/cli/src/options.ts

Lines changed: 5 additions & 0 deletions
```diff
@@ -10,6 +10,7 @@ export type SharedOptions = {
   readonly modelProvider?: string;
   readonly modelName?: string;
   readonly profile?: boolean;
+  readonly tokenCache?: boolean;
 };

 export const sharedOptions = {
@@ -72,4 +73,8 @@ export const sharedOptions = {
     description: 'Custom Sentry DSN for error tracking',
     hidden: true,
   } as const,
+  tokenCache: {
+    type: 'boolean',
+    description: 'Enable token caching for LLM API calls',
+  } as const,
 };
```
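Leaving out a `default` means the flag parses to `undefined` when omitted, which is what lets the config value take over. A sketch of how these shared options are typically registered with yargs (the registration itself is not part of this diff):

```ts
import yargs from 'yargs';
import { sharedOptions } from './options';

// Sketch: register the shared options so --tokenCache is recognized.
const argv = yargs(process.argv.slice(2)).options(sharedOptions).parseSync();

// boolean | undefined: undefined means "defer to the config file".
console.log(argv.tokenCache);
```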

packages/cli/src/settings/config.ts

Lines changed: 1 addition & 0 deletions
```diff
@@ -17,6 +17,7 @@ const defaultConfig = {
   ollamaBaseUrl: 'http://localhost:11434/api',
   customPrompt: '',
   profile: false,
+  tokenCache: true,
 };

 export type Config = typeof defaultConfig;
```
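Because `Config` is derived with `typeof defaultConfig`, adding the key here extends both the runtime default and the type in one place. A hedged sketch of the merge pattern this enables, written as if placed alongside `defaultConfig` (the actual loader is not shown in this diff):

```ts
// Sketch: user settings override defaults key-by-key; the real loader
// in mycoder may differ.
function loadConfig(userConfig: Partial<Config>): Config {
  return { ...defaultConfig, ...userConfig };
}
```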
