From 0c76a101456815f2522345f4f7871e4aeb8f8b19 Mon Sep 17 00:00:00 2001 From: Gabriel Huang Date: Mon, 23 Jun 2025 18:24:12 -0400 Subject: [PATCH] fix count tokens --- src/agentlab/llm/llm_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/agentlab/llm/llm_utils.py b/src/agentlab/llm/llm_utils.py index 2536200e..41f36892 100644 --- a/src/agentlab/llm/llm_utils.py +++ b/src/agentlab/llm/llm_utils.py @@ -188,8 +188,8 @@ def get_tokenizer(model_name="gpt-4"): logging.info(f"Could not find a tokenizer for model {model_name}. Trying HuggingFace.") try: return AutoTokenizer.from_pretrained(model_name) - except OSError: - logging.info(f"Could not find a tokenizer for model {model_name}. Defaulting to gpt-4.") + except Exception as e: + logging.info(f"Could not find a tokenizer for model {model_name}: {e}. Defaulting to gpt-4.") return tiktoken.encoding_for_model("gpt-4")