
Commit 1aa305e

feat(ai): Add support for AI features
jira: GDAI-185 risk: low
1 parent 3d4cd5a commit 1aa305e

2 files changed: +209 additions, -4 deletions


gooddata-sdk/gooddata_sdk/compute/service.py

Lines changed: 92 additions & 4 deletions
@@ -9,6 +9,8 @@
 from gooddata_api_client.model.chat_history_result import ChatHistoryResult
 from gooddata_api_client.model.chat_request import ChatRequest
 from gooddata_api_client.model.chat_result import ChatResult
+from gooddata_api_client.model.search_request import SearchRequest
+from gooddata_api_client.model.search_result import SearchResult
 
 from gooddata_sdk.client import GoodDataApiClient
 from gooddata_sdk.compute.model.execution import Execution, ExecutionDefinition, ResultCacheMetadata
@@ -91,17 +93,27 @@ def ai_chat(self, workspace_id: str, question: str) -> ChatResult:
         response = self._actions_api.ai_chat(workspace_id, chat_request, _check_return_type=False)
         return response
 
-    def ai_chat_history(self, workspace_id: str, chat_history_interaction_id: int = 0) -> ChatHistoryResult:
+    def ai_chat_history(
+        self, workspace_id: str, chat_history_interaction_id: str = None, thread_id_suffix: str = None
+    ) -> ChatHistoryResult:
         """
         Get chat history with AI in GoodData workspace.
 
         Args:
             workspace_id: workspace identifier
-            chat_history_interaction_id: collect history starting from this interaction id
+            chat_history_interaction_id: collect history starting from this interaction id. If None, complete chat history is returned.
+            thread_id_suffix: suffix to identify a specific chat thread. If provided, chat_history_interaction_id is ignored.
         Returns:
-            str: Chat history response
+            ChatHistoryResult: Chat history response containing interactions and other metadata
         """
-        chat_history_request = ChatHistoryRequest(chat_history_interaction_id=chat_history_interaction_id)
+
+        if chat_history_interaction_id is None:
+            chat_history_interaction_id = ""
+        if thread_id_suffix is None:
+            thread_id_suffix = ""
+        chat_history_request = ChatHistoryRequest(
+            chat_history_interaction_id=chat_history_interaction_id, reset=False, thread_id_suffix=thread_id_suffix
+        )
         response = self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)
         return response
 
@@ -115,6 +127,72 @@ def ai_chat_history_reset(self, workspace_id: str) -> None:
         chat_history_request = ChatHistoryRequest(reset=True)
         self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)
 
+    def ai_chat_history_feedback(
+        self, workspace_id: str, user_feedback: str, chat_history_interaction_id: str, thread_id_suffix: str = None
+    ) -> None:
+        """
+        Provide feedback for a specific chat history interaction.
+
+        Args:
+            workspace_id: workspace identifier
+            user_feedback: feedback to provide ("POSITIVE", "NEGATIVE" or "NONE")
+            chat_history_interaction_id: interaction id to provide feedback for
+            thread_id_suffix: suffix to identify a specific chat thread
+        """
+        if thread_id_suffix is None:
+            thread_id_suffix = ""
+        chat_history_request = ChatHistoryRequest(
+            user_feedback=user_feedback,
+            chat_history_interaction_id=chat_history_interaction_id,
+            thread_id_suffix=thread_id_suffix,
+            reset=False,
+        )
+        self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)
+
+    def ai_search(
+        self,
+        workspace_id: str,
+        question: str,
+        deep_search: bool = None,
+        limit: int = None,
+        object_types: list[str] = None,
+        relevant_score_threshold: float = None,
+        title_to_descriptor_ratio: float = None,
+    ) -> SearchResult:
+        """
+        Search for metadata objects using similarity search.
+
+        Args:
+            workspace_id: workspace identifier
+            question: keyword/sentence input for search
+            deep_search: turn on deep search - if true, content of complex objects will be searched as well
+            limit: maximum number of results to return
+            object_types: list of object types to search for. Enum items: "attribute", "metric", "fact",
+                "label", "date", "dataset", "visualization" and "dashboard"
+            relevant_score_threshold: minimum relevance score threshold for results
+            title_to_descriptor_ratio: ratio of title score to descriptor score
+
+        Returns:
+            SearchResult: Search results
+
+        Note:
+            Default values for optional parameters are documented in the AI Search endpoint of the GoodData API.
+        """
+        search_params = {}
+        if deep_search is not None:
+            search_params["deep_search"] = deep_search
+        if limit is not None:
+            search_params["limit"] = limit
+        if object_types is not None:
+            search_params["object_types"] = object_types
+        if relevant_score_threshold is not None:
+            search_params["relevant_score_threshold"] = relevant_score_threshold
+        if title_to_descriptor_ratio is not None:
+            search_params["title_to_descriptor_ratio"] = title_to_descriptor_ratio
+        search_request = SearchRequest(question=question, **search_params)
+        response = self._actions_api.ai_search(workspace_id, search_request, _check_return_type=False)
+        return response
+
     def cancel_executions(self, executions: dict[str, dict[str, str]]) -> None:
         """
         Try to cancel given executions using the cancel api endpoint.
@@ -132,3 +210,13 @@ def cancel_executions(self, executions: dict[str, dict[str, str]]) -> None:
             )
         except ApiException as e:
             print("Exception when calling ActionsApi->cancel_executions: %s\n", e)
+
+    def metadata_sync(self, workspace_id: str, async_req: bool = False) -> None:
+        """
+        Sync metadata to other services.
+
+        Args:
+            workspace_id: workspace identifier
+            async_req: if True, the metadata sync will be performed asynchronously
+        """
+        self._actions_api.metadata_sync(workspace_id, async_req=async_req, _check_return_type=False)
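A rough usage sketch of the methods added above, for orientation only. It assumes the ComputeService is exposed as sdk.compute on a GoodDataSdk instance; the host, token, workspace id, and search parameters below are placeholders, not values from this commit.

# Illustrative sketch - the method names come from this commit; the sdk wiring is assumed.
from gooddata_sdk import GoodDataSdk

sdk = GoodDataSdk.create("https://example.gooddata.com", "<token>")  # placeholder host and token

# Similarity search over workspace metadata (ai_search added in this commit)
results = sdk.compute.ai_search(
    "workspace1",
    "revenue by region",
    limit=5,
    object_types=["metric", "visualization"],
)

# Trigger metadata sync to other services (metadata_sync added in this commit)
sdk.compute.metadata_sync("workspace1")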
Lines changed: 117 additions & 0 deletions
@@ -0,0 +1,117 @@
+# (C) 2025 GoodData Corporation
+from unittest.mock import MagicMock
+
+from gooddata_api_client.model.chat_history_request import ChatHistoryRequest
+from gooddata_api_client.model.search_request import SearchRequest
+from gooddata_sdk.compute.service import ComputeService
+
+
+def test_ai_search_minimal_params():
+    api_client = MagicMock()
+    actions_api = MagicMock()
+    api_client.actions_api = actions_api
+    compute_service = ComputeService(api_client)
+
+    compute_service.ai_search("workspace1", "revenue")
+    actions_api.ai_search.assert_called_with("workspace1", SearchRequest(question="revenue"), _check_return_type=False)
+
+
+def test_ai_search_all_optional_params():
+    api_client = MagicMock()
+    actions_api = MagicMock()
+    api_client.actions_api = actions_api
+    compute_service = ComputeService(api_client)
+
+    compute_service.ai_search(
+        "workspace1",
+        "revenue",
+        deep_search=True,
+        limit=5,
+        object_types=["metric", "attribute"],
+        relevant_score_threshold=0.3,
+        title_to_descriptor_ratio=0.7,
+    )
+    actions_api.ai_search.assert_called_with(
+        "workspace1",
+        SearchRequest(
+            question="revenue",
+            deep_search=True,
+            limit=5,
+            object_types=["metric", "attribute"],
+            relevant_score_threshold=0.3,
+            title_to_descriptor_ratio=0.7,
+        ),
+        _check_return_type=False,
+    )
+
+
+def test_ai_chat_history_complete():
+    api_client = MagicMock()
+    actions_api = MagicMock()
+    api_client.actions_api = actions_api
+    compute_service = ComputeService(api_client)
+
+    compute_service.ai_chat_history("workspace1")
+    actions_api.ai_chat_history.assert_called_with(
+        "workspace1",
+        ChatHistoryRequest(chat_history_interaction_id="", reset=False, thread_id_suffix=""),
+        _check_return_type=False,
+    )
+
+
+def test_ai_chat_history_with_interaction_id():
+    api_client = MagicMock()
+    actions_api = MagicMock()
+    api_client.actions_api = actions_api
+    compute_service = ComputeService(api_client)
+
+    compute_service.ai_chat_history("workspace1", "interaction123", "thread123")
+    actions_api.ai_chat_history.assert_called_with(
+        "workspace1",
+        ChatHistoryRequest(chat_history_interaction_id="interaction123", reset=False, thread_id_suffix="thread123"),
+        _check_return_type=False,
+    )
+
+
+def test_ai_chat_history_reset():
+    api_client = MagicMock()
+    actions_api = MagicMock()
+    api_client.actions_api = actions_api
+    compute_service = ComputeService(api_client)
+
+    compute_service.ai_chat_history_reset("workspace1")
+    actions_api.ai_chat_history.assert_called_with(
+        "workspace1", ChatHistoryRequest(reset=True), _check_return_type=False
+    )
+
+
+def test_ai_chat_history_feedback():
+    api_client = MagicMock()
+    actions_api = MagicMock()
+    api_client.actions_api = actions_api
+    compute_service = ComputeService(api_client)
+
+    compute_service.ai_chat_history_feedback("workspace1", "POSITIVE", "interaction123", "thread123")
+    actions_api.ai_chat_history.assert_called_with(
+        "workspace1",
+        ChatHistoryRequest(
+            user_feedback="POSITIVE",
+            chat_history_interaction_id="interaction123",
+            thread_id_suffix="thread123",
+            reset=False,
+        ),
+        _check_return_type=False,
+    )
+
+
+def test_metadata_sync():
+    api_client = MagicMock()
+    actions_api = MagicMock()
+    api_client.actions_api = actions_api
+    compute_service = ComputeService(api_client)
+
+    compute_service.metadata_sync("workspace1")
+    actions_api.metadata_sync.assert_called_with("workspace1", async_req=False, _check_return_type=False)
+
+    compute_service.metadata_sync("workspace1", async_req=True)
+    actions_api.metadata_sync.assert_called_with("workspace1", async_req=True, _check_return_type=False)
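A similarly hedged sketch of the chat-history flow these tests exercise, this time against a live client instead of mocks. The sdk.compute wiring, workspace id, and interaction id are placeholders; per the docstring above, user_feedback must be "POSITIVE", "NEGATIVE", or "NONE".

# Illustrative sketch only - placeholder host, token, workspace id, and interaction id.
from gooddata_sdk import GoodDataSdk

sdk = GoodDataSdk.create("https://example.gooddata.com", "<token>")

history = sdk.compute.ai_chat_history("workspace1")  # complete history as ChatHistoryResult
sdk.compute.ai_chat_history_feedback("workspace1", "POSITIVE", "interaction123")  # placeholder interaction id
sdk.compute.ai_chat_history_reset("workspace1")  # clear the chat history when finished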
