diff --git a/docs/graph_qa_chain.ipynb b/docs/graph_qa_chain.ipynb
index 25c18a01..af222656 100644
--- a/docs/graph_qa_chain.ipynb
+++ b/docs/graph_qa_chain.ipynb
@@ -499,7 +499,7 @@
     "from IPython.core.display import HTML\n",
     "\n",
     "# Initialize llm object\n",
-    "llm = ChatVertexAI(model=\"gemini-1.5-flash-002\", temperature=0)\n",
+    "llm = ChatVertexAI(model=\"gemini-2.0-flash-001\", temperature=0)\n",
     "\n",
     "# Initialize GraphQAChain\n",
     "chain = SpannerGraphQAChain.from_llm(\n",
diff --git a/tests/integration/test_spanner_graph_qa.py b/tests/integration/test_spanner_graph_qa.py
index 8bac7b87..a0778ef2 100644
--- a/tests/integration/test_spanner_graph_qa.py
+++ b/tests/integration/test_spanner_graph_qa.py
@@ -37,7 +37,7 @@ def random_string(num_char=3):
 
 def get_llm():
     llm = ChatVertexAI(
-        model="gemini-1.5-flash-002",
+        model="gemini-2.0-flash-001",
         temperature=0,
     )
     return llm
diff --git a/tests/integration/test_spanner_graph_retriever.py b/tests/integration/test_spanner_graph_retriever.py
index b3fa0f62..b44d2d54 100644
--- a/tests/integration/test_spanner_graph_retriever.py
+++ b/tests/integration/test_spanner_graph_retriever.py
@@ -39,7 +39,7 @@ def random_string(num_char=3):
 
 def get_llm():
     llm = ChatVertexAI(
-        model="gemini-1.5-flash-002",
+        model="gemini-2.0-flash-001",
         temperature=0,
     )
     return llm
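
All three files construct the LLM the same way, so the model bump is the only behavioral change. A minimal sketch of the updated initialization, assuming langchain_google_vertexai is installed and Vertex AI application-default credentials are configured (the SpannerGraphQAChain wiring from the notebook is not reproduced here):

```python
# Minimal sketch of the LLM setup as it reads after this change.
# Assumes langchain_google_vertexai is installed and Vertex AI
# credentials/project are already configured in the environment.
from langchain_google_vertexai import ChatVertexAI

llm = ChatVertexAI(
    model="gemini-2.0-flash-001",  # updated from gemini-1.5-flash-002
    temperature=0,                 # deterministic output for QA and integration tests
)
```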