Skip to content

Commit 71a2b37

Browse files
committed
add langchain support
1 parent 808d05a commit 71a2b37

File tree

1 file changed

+16
-0
lines changed

1 file changed

+16
-0
lines changed

langfuse/callback/langchain.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -845,6 +845,7 @@ def on_llm_end(
845845
self.runs[run_id] = self.runs[run_id].end(
846846
output=extracted_response,
847847
usage=llm_usage,
848+
usage_details=llm_usage,
848849
version=self.version,
849850
input=kwargs.get("inputs"),
850851
model=model,
@@ -1028,12 +1029,14 @@ def _parse_usage_model(usage: typing.Union[pydantic.BaseModel, dict]):
10281029
# https://pypi.org/project/langchain-anthropic/ (works also for Bedrock-Anthropic)
10291030
("input_tokens", "input"),
10301031
("output_tokens", "output"),
1032+
("total_tokens", "total"),
10311033
# https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/get-token-count
10321034
("prompt_token_count", "input"),
10331035
("candidates_token_count", "output"),
10341036
# Bedrock: https://docs.aws.amazon.com/bedrock/latest/userguide/monitoring-cw.html#runtime-cloudwatch-metrics
10351037
("inputTokenCount", "input"),
10361038
("outputTokenCount", "output"),
1039+
("totalTokenCount", "total"),
10371040
# langchain-ibm https://pypi.org/project/langchain-ibm/
10381041
("input_token_count", "input"),
10391042
("generated_token_count", "output"),
@@ -1051,6 +1054,19 @@ def _parse_usage_model(usage: typing.Union[pydantic.BaseModel, dict]):
10511054

10521055
usage_model[langfuse_key] = final_count # Translate key and keep the value
10531056

1057+
if isinstance(usage_model, dict):
1058+
if "input_token_details" in usage_model:
1059+
input_token_details = usage_model.pop("input_token_details", {})
1060+
1061+
for key, value in input_token_details.items():
1062+
usage_model[f"input_{key}"] = value
1063+
1064+
if "output_token_details" in usage_model:
1065+
output_token_details = usage_model.pop("output_token_details", {})
1066+
1067+
for key, value in output_token_details.items():
1068+
usage_model[f"output_{key}"] = value
1069+
10541070
return usage_model if usage_model else None
10551071

10561072

0 commit comments

Comments (0)