
Commit ef81c9b

fix: reasoning support for Anthropic via OpenAI provider
1 parent ccff52b

File tree

6 files changed: +91 −3 lines changed

package.json

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@
     "caching": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/caching.ts --name 'Jo' --location 'New York, NY'",
     "thinking": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/thinking.ts --name 'Jo' --location 'New York, NY'",
     "memory": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/memory.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
-    "tool-test": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/tools.ts --provider 'bedrock' --name 'Jo' --location 'New York, NY'",
+    "tool-test": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/tools.ts --provider 'anthropicLITELLM' --name 'Jo' --location 'New York, NY'",
     "search": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/search.ts --provider 'anthropic' --name 'Jo' --location 'New York, NY'",
     "ant_web_search": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/ant_web_search.ts --name 'Jo' --location 'New York, NY'",
     "abort": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/abort.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",

src/llm/openai/index.ts

Lines changed: 4 additions & 0 deletions
@@ -345,6 +345,10 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
       } else if ('reasoning' in delta) {
         chunk.additional_kwargs.reasoning_content = delta.reasoning;
       }
+      if ('provider_specific_fields' in delta) {
+        chunk.additional_kwargs.provider_specific_fields =
+          delta.provider_specific_fields;
+      }
       defaultRole = delta.role ?? defaultRole;
       const newTokenIndices = {
         prompt: options.promptIndex ?? 0,
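
For orientation, a minimal sketch of the streamed delta this handler now consumes; the shape is inferred from the ThinkingBlocks type added in src/messages/prune.ts below, not taken from a published LiteLLM type:

// Sketch only: assumed shape of a delta from an OpenAI-compatible LiteLLM
// endpoint fronting an Anthropic model. The handler above copies
// provider_specific_fields onto chunk.additional_kwargs so signed thinking
// blocks survive streaming.
type LiteLLMStreamDelta = {
  role?: string;
  content?: string | null;
  reasoning?: string; // flat reasoning text, handled by the existing branch
  provider_specific_fields?: {
    thinking_blocks?: Array<{
      type: 'thinking';
      thinking: string;
      signature: string; // Anthropic's signature must round-trip unchanged
    }>;
  };
};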

src/llm/openai/utils/index.ts

Lines changed: 7 additions & 1 deletion
@@ -298,10 +298,16 @@ export function _convertMessagesToOpenAIParams(
     role = 'developer';
   }
 
+  let hasAnthropicThinkingBlock: boolean = false;
+
   const content =
     typeof message.content === 'string'
       ? message.content
       : message.content.map((m) => {
+          if ('type' in m && m.type === 'thinking') {
+            hasAnthropicThinkingBlock = true;
+            return m;
+          }
           if (isDataContentBlock(m)) {
             return convertToProviderContentBlock(
               m,
@@ -326,7 +332,7 @@ export function _convertMessagesToOpenAIParams(
     completionParam.tool_calls = message.tool_calls.map(
       convertLangChainToolCallToOpenAI
     );
-    completionParam.content = '';
+    completionParam.content = hasAnthropicThinkingBlock ? content : '';
   } else {
     if (message.additional_kwargs.tool_calls != null) {
       completionParam.tool_calls = message.additional_kwargs.tool_calls;
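
The net effect: previously, content was forced to '' whenever an assistant message carried tool calls, which dropped Anthropic thinking blocks. A hypothetical message that exercises the new branch (values are illustrative):

// Illustrative input only. The map() above sees the 'thinking' block, flips
// hasAnthropicThinkingBlock, and returns the block unchanged, so
// completionParam.content keeps it instead of collapsing to ''.
const assistantWithThinking = {
  role: 'assistant',
  content: [
    {
      type: 'thinking',
      thinking: 'The user wants the weather; call the tool first.',
      signature: 'sig_abc123',
    },
  ],
  tool_calls: [
    { id: 'call_1', name: 'get_weather', args: { location: 'New York, NY' } },
  ],
};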

src/messages/prune.ts

Lines changed: 51 additions & 0 deletions
@@ -389,6 +389,14 @@ export function checkValidNumber(value: unknown): value is number {
   return typeof value === 'number' && !isNaN(value) && value > 0;
 }
 
+type ThinkingBlocks = {
+  thinking_blocks?: Array<{
+    type: 'thinking';
+    thinking: string;
+    signature: string;
+  }>;
+};
+
 export function createPruneMessages(factoryParams: PruneMessagesFactoryParams) {
   const indexTokenCountMap = { ...factoryParams.indexTokenCountMap };
   let lastTurnStartIndex = factoryParams.startIndex;
@@ -402,6 +410,49 @@ export function createPruneMessages(factoryParams: PruneMessagesFactoryParams) {
     context: BaseMessage[];
     indexTokenCountMap: Record<string, number | undefined>;
   } {
+    if (
+      factoryParams.provider === Providers.OPENAI &&
+      factoryParams.thinkingEnabled === true
+    ) {
+      for (let i = lastTurnStartIndex; i < params.messages.length; i++) {
+        const m = params.messages[i];
+        if (
+          m.getType() === 'ai' &&
+          typeof m.additional_kwargs.reasoning_content === 'string' &&
+          Array.isArray(
+            (
+              m.additional_kwargs.provider_specific_fields as
+                | ThinkingBlocks
+                | undefined
+            )?.thinking_blocks
+          ) &&
+          (m as AIMessage).tool_calls &&
+          ((m as AIMessage).tool_calls?.length ?? 0) > 0
+        ) {
+          const message = m as AIMessage;
+          const thinkingBlocks = (
+            message.additional_kwargs.provider_specific_fields as ThinkingBlocks
+          ).thinking_blocks;
+          const signature =
+            thinkingBlocks?.[thinkingBlocks.length - 1].signature;
+          const thinkingBlock: ThinkingContentText = {
+            signature,
+            type: ContentTypes.THINKING,
+            thinking: message.additional_kwargs.reasoning_content as string,
+          };
+
+          params.messages[i] = new AIMessage({
+            ...message,
+            content: [thinkingBlock],
+            additional_kwargs: {
+              ...message.additional_kwargs,
+              reasoning_content: undefined,
+            },
+          });
+        }
+      }
+    }
+
     let currentUsage: UsageMetadata | undefined;
     if (
       params.usageMetadata &&
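
In effect, this pass rebuilds each streamed tool-calling AI message into Anthropic's native block form before pruning. A hypothetical before/after (illustrative values; AIMessage is from @langchain/core/messages):

import { AIMessage } from '@langchain/core/messages';

// Before: reasoning landed on additional_kwargs during streaming.
const streamed = new AIMessage({
  content: '',
  tool_calls: [{ id: 'call_1', name: 'get_weather', args: { location: 'NYC' } }],
  additional_kwargs: {
    reasoning_content: 'The user wants the weather; call the tool first.',
    provider_specific_fields: {
      thinking_blocks: [
        {
          type: 'thinking',
          thinking: 'The user wants the weather; call the tool first.',
          signature: 'sig_abc123',
        },
      ],
    },
  },
});

// After the new pass: content becomes a signed thinking block and the flat
// reasoning_content is cleared, matching what the loop above constructs.
const rebuilt = new AIMessage({
  ...streamed,
  content: [
    {
      type: 'thinking',
      thinking: 'The user wants the weather; call the tool first.',
      signature: 'sig_abc123',
    },
  ],
  additional_kwargs: { ...streamed.additional_kwargs, reasoning_content: undefined },
});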

src/scripts/tools.ts

Lines changed: 4 additions & 1 deletion
@@ -127,7 +127,10 @@ async function testStandardStreaming(): Promise<void> {
   const inputs = {
     messages: conversationHistory,
   };
-  const finalContentParts = await run.processStream(inputs, config);
+  const finalContentParts = await run.processStream(inputs, config, {
+    indexTokenCountMap: { 0: 35 },
+    maxContextTokens: 89000,
+  });
   const finalMessages = run.getRunMessages();
   if (finalMessages) {
     conversationHistory.push(...finalMessages);
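
The added third argument supplies context-management options, which appears to exercise the new prune path above: indexTokenCountMap seeds a token count for the first message and maxContextTokens caps the usable context window.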

src/utils/llmConfig.ts

Lines changed: 24 additions & 0 deletions
@@ -12,6 +12,30 @@ export const llmConfigs: Record<string, t.LLMConfig | undefined> = {
     streamUsage: true,
     // disableStreaming: true,
   },
+  anthropicLITELLM: {
+    provider: Providers.OPENAI,
+    streaming: true,
+    streamUsage: false,
+    apiKey: 'sk-1234',
+    model: 'claude-sonnet-4',
+    maxTokens: 8192,
+    modelKwargs: {
+      metadata: {
+        user_id: 'some_user_id',
+      },
+      thinking: {
+        type: 'enabled',
+        budget_tokens: 2000,
+      },
+    },
+    configuration: {
+      baseURL: 'http://host.docker.internal:4000/v1',
+      defaultHeaders: {
+        'anthropic-beta': 'prompt-caching-2024-07-31,context-1m-2025-08-07',
+      },
+    },
+    // disableStreaming: true,
+  },
   [Providers.XAI]: {
     provider: Providers.XAI,
     model: 'grok-2-latest',
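
This entry drives the OpenAI provider through a local LiteLLM proxy (host.docker.internal:4000), which translates OpenAI-style requests into Anthropic's API: modelKwargs.thinking enables extended thinking with a 2,000-token budget, and the anthropic-beta header opts into prompt caching and the 1M-token context window. The sk-1234 key is presumably a proxy-side placeholder rather than a real Anthropic key.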
