From 1065560d083334d846f7e0ac75eb96cd1df17984 Mon Sep 17 00:00:00 2001
From: Hassieb Pakzad <68423100+hassiebp@users.noreply.github.com>
Date: Tue, 6 May 2025 14:46:48 +0200
Subject: [PATCH] fix(vertex-ai-langchain): token count parsing

---
 langfuse/callback/langchain.py | 48 ++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/langfuse/callback/langchain.py b/langfuse/callback/langchain.py
index 674ab0b21..81b3bea25 100644
--- a/langfuse/callback/langchain.py
+++ b/langfuse/callback/langchain.py
@@ -1114,6 +1114,54 @@ def _parse_usage_model(usage: typing.Union[pydantic.BaseModel, dict]):
         if "output" in usage_model:
             usage_model["output"] = max(0, usage_model["output"] - value)
 
+    # Vertex AI
+    if "prompt_tokens_details" in usage_model and isinstance(
+        usage_model["prompt_tokens_details"], list
+    ):
+        prompt_tokens_details = usage_model.pop("prompt_tokens_details")
+
+        for item in prompt_tokens_details:
+            if (
+                isinstance(item, dict)
+                and "modality" in item
+                and "token_count" in item
+            ):
+                usage_model[f"input_modality_{item['modality']}"] = item[
+                    "token_count"
+                ]
+
+    # Vertex AI
+    if "candidates_tokens_details" in usage_model and isinstance(
+        usage_model["candidates_tokens_details"], list
+    ):
+        candidates_tokens_details = usage_model.pop("candidates_tokens_details")
+
+        for item in candidates_tokens_details:
+            if (
+                isinstance(item, dict)
+                and "modality" in item
+                and "token_count" in item
+            ):
+                usage_model[f"output_modality_{item['modality']}"] = item[
+                    "token_count"
+                ]
+
+    # Vertex AI
+    if "cache_tokens_details" in usage_model and isinstance(
+        usage_model["cache_tokens_details"], list
+    ):
+        cache_tokens_details = usage_model.pop("cache_tokens_details")
+
+        for item in cache_tokens_details:
+            if (
+                isinstance(item, dict)
+                and "modality" in item
+                and "token_count" in item
+            ):
+                usage_model[f"cached_modality_{item['modality']}"] = item[
+                    "token_count"
+                ]
+
     return usage_model if usage_model else None
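
What the change does, as a minimal runnable sketch: the loop below is a
condensed restatement of the three added blocks (not the library code
itself), and the sample payload and token counts are hypothetical,
shaped after Vertex AI's per-modality usage details.

# Hypothetical usage dict as _parse_usage_model might see it after the
# earlier normalization steps; values are made up for illustration.
usage_model = {
    "input": 120,
    "output": 40,
    "prompt_tokens_details": [
        {"modality": "TEXT", "token_count": 100},
        {"modality": "IMAGE", "token_count": 20},
    ],
    "candidates_tokens_details": [{"modality": "TEXT", "token_count": 40}],
    "cache_tokens_details": [{"modality": "TEXT", "token_count": 10}],
}

# Each list-valued *_tokens_details key is popped and flattened into
# modality-suffixed counts, using the same prefixes as the diff.
for key, prefix in [
    ("prompt_tokens_details", "input"),
    ("candidates_tokens_details", "output"),
    ("cache_tokens_details", "cached"),
]:
    # As in the patch, only list-valued entries are popped and flattened.
    if isinstance(usage_model.get(key), list):
        for item in usage_model.pop(key):
            if isinstance(item, dict) and "modality" in item and "token_count" in item:
                usage_model[f"{prefix}_modality_{item['modality']}"] = item["token_count"]

assert usage_model == {
    "input": 120,
    "output": 40,
    "input_modality_TEXT": 100,
    "input_modality_IMAGE": 20,
    "output_modality_TEXT": 40,
    "cached_modality_TEXT": 10,
}

The net effect is that nested modality breakdowns become flat scalar
usage keys, which is what downstream token accounting expects.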