We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent d97e39e · commit 9a33085 (Copy full SHA for 9a33085)
langfuse/callback/langchain.py
@@ -863,7 +863,7 @@ def on_llm_end(
863
extracted_response = (
864
self._convert_message_to_dict(generation.message)
865
if isinstance(generation, ChatGeneration)
866
- else _extract_raw_esponse(generation)
+ else _extract_raw_response(generation)
867
)
868
869
llm_usage = _parse_usage(response)
@@ -1029,7 +1029,7 @@ def _log_debug_event(
1029
1030
1031
1032
-def _extract_raw_esponse(last_response):
+def _extract_raw_response(last_response):
1033
"""Extract the response from the last response of the LLM call."""
1034
# We return the text of the response if not empty
1035
if last_response.text is not None and last_response.text.strip() != "":
0 commit comments