Skip to content

Commit 04b0c83

Browse files
committed
feat(ai): Add support for AI features
jira: GDAI-185 risk: low
1 parent 3d4cd5a commit 04b0c83

File tree

2 files changed

+518
-4
lines changed

2 files changed

+518
-4
lines changed

gooddata-sdk/gooddata_sdk/compute/service.py

Lines changed: 266 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,10 @@
99
from gooddata_api_client.model.chat_history_result import ChatHistoryResult
1010
from gooddata_api_client.model.chat_request import ChatRequest
1111
from gooddata_api_client.model.chat_result import ChatResult
12+
from gooddata_api_client.model.json_api_llm_endpoint_out_document import JsonApiLlmEndpointOutDocument
13+
from gooddata_api_client.model.json_api_llm_endpoint_out_list import JsonApiLlmEndpointOutList
14+
from gooddata_api_client.model.search_request import SearchRequest
15+
from gooddata_api_client.model.search_result import SearchResult
1216

1317
from gooddata_sdk.client import GoodDataApiClient
1418
from gooddata_sdk.compute.model.execution import Execution, ExecutionDefinition, ResultCacheMetadata
@@ -26,6 +30,7 @@ class ComputeService:
2630
def __init__(self, api_client: GoodDataApiClient):
2731
self._api_client = api_client
2832
self._actions_api = self._api_client.actions_api
33+
self._entities_api = self._api_client.entities_api
2934

3035
def for_exec_def(self, workspace_id: str, exec_def: ExecutionDefinition) -> Execution:
3136
"""
@@ -91,17 +96,27 @@ def ai_chat(self, workspace_id: str, question: str) -> ChatResult:
9196
response = self._actions_api.ai_chat(workspace_id, chat_request, _check_return_type=False)
9297
return response
9398

94-
def ai_chat_history(self, workspace_id: str, chat_history_interaction_id: int = 0) -> ChatHistoryResult:
99+
def ai_chat_history(
    self, workspace_id: str, chat_history_interaction_id: str | None = None, thread_id_suffix: str | None = None
) -> ChatHistoryResult:
    """Fetch the AI chat history of a GoodData workspace.

    Args:
        workspace_id: workspace identifier
        chat_history_interaction_id: collect history starting from this interaction id.
            If None, complete chat history is returned.
        thread_id_suffix: suffix to identify a specific chat thread.
            If provided, chat_history_interaction_id is ignored.

    Returns:
        ChatHistoryResult: Chat history response containing interactions and other metadata
    """
    # The API takes empty strings (not nulls) for omitted optional fields.
    history_request = ChatHistoryRequest(
        chat_history_interaction_id=chat_history_interaction_id or "",
        reset=False,
        thread_id_suffix=thread_id_suffix or "",
    )
    return self._actions_api.ai_chat_history(workspace_id, history_request, _check_return_type=False)
107122

@@ -115,6 +130,76 @@ def ai_chat_history_reset(self, workspace_id: str) -> None:
115130
chat_history_request = ChatHistoryRequest(reset=True)
116131
self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)
117132

133+
def ai_chat_history_feedback(
    self,
    workspace_id: str,
    user_feedback: str,
    chat_history_interaction_id: str,
    thread_id_suffix: str | None = None,
) -> None:
    """Attach user feedback to a single chat history interaction.

    Args:
        workspace_id: workspace identifier
        user_feedback: feedback to provide ("POSITIVE", "NEGATIVE" or "NONE")
        chat_history_interaction_id: interaction id to provide feedback for
        thread_id_suffix: suffix to identify a specific chat thread
    """
    # An omitted thread suffix is sent as an empty string, as the API expects.
    feedback_request = ChatHistoryRequest(
        user_feedback=user_feedback,
        chat_history_interaction_id=chat_history_interaction_id,
        thread_id_suffix="" if thread_id_suffix is None else thread_id_suffix,
        reset=False,
    )
    self._actions_api.ai_chat_history(workspace_id, feedback_request, _check_return_type=False)
158+
159+
def ai_search(
    self,
    workspace_id: str,
    question: str,
    deep_search: bool | None = None,
    limit: int | None = None,
    object_types: list[str] | None = None,
    relevant_score_threshold: float | None = None,
    title_to_descriptor_ratio: float | None = None,
) -> SearchResult:
    """Run a similarity search over metadata objects of a workspace.

    Args:
        workspace_id: workspace identifier
        question: keyword/sentence input for search
        deep_search: turn on deep search - if true, content of complex objects will be searched as well
        limit: maximum number of results to return
        object_types: list of object types to search for. Enum items: "attribute", "metric", "fact",
            "label", "date", "dataset", "visualization" and "dashboard"
        relevant_score_threshold: minimum relevance score threshold for results
        title_to_descriptor_ratio: ratio of title score to descriptor score

    Returns:
        SearchResult: Search results

    Note:
        Default values for optional parameters are documented in the AI Search endpoint of the GoodData API.
    """
    candidate_params = {
        "deep_search": deep_search,
        "limit": limit,
        "object_types": object_types,
        "relevant_score_threshold": relevant_score_threshold,
        "title_to_descriptor_ratio": title_to_descriptor_ratio,
    }
    # Forward only explicitly provided parameters so server-side defaults apply.
    search_request = SearchRequest(
        question=question,
        **{name: value for name, value in candidate_params.items() if value is not None},
    )
    return self._actions_api.ai_search(workspace_id, search_request, _check_return_type=False)
202+
118203
def cancel_executions(self, executions: dict[str, dict[str, str]]) -> None:
119204
"""
120205
Try to cancel given executions using the cancel api endpoint.
@@ -132,3 +217,180 @@ def cancel_executions(self, executions: dict[str, dict[str, str]]) -> None:
132217
)
133218
except ApiException as e:
134219
print("Exception when calling ActionsApi->cancel_executions: %s\n", e)
220+
221+
def metadata_sync(self, workspace_id: str, async_req: bool = False) -> None:
    """Propagate workspace metadata to other services.

    Args:
        workspace_id: workspace identifier
        async_req: if True, the metadata sync will be performed asynchronously
    """
    self._actions_api.metadata_sync(workspace_id, async_req=async_req, _check_return_type=False)
230+
231+
def get_llm_endpoint(self, id: str) -> JsonApiLlmEndpointOutDocument:
    """Retrieve a single LLM endpoint by its identifier.

    Args:
        id: LLM endpoint identifier

    Returns:
        JsonApiLlmEndpointOutDocument: Retrieved LLM endpoint
    """
    return self._entities_api.get_entity_llm_endpoints(id, _check_return_type=False)
242+
243+
def list_llm_endpoints(
    self,
    filter: str | None = None,
    page: int | None = None,
    size: int | None = None,
    sort: list[str] | None = None,
    meta_include: list[str] | None = None,
) -> JsonApiLlmEndpointOutList:
    """List all LLM endpoints of the organization.

    Args:
        filter: Optional filter string
        page: Zero-based page index (0..N)
        size: The size of the page to be returned
        sort: Sorting criteria in the format: property,(asc|desc). Multiple sort criteria are supported.
        meta_include: Include Meta objects

    Returns:
        JsonApiLlmEndpointOutList: List of LLM endpoints

    Note:
        Default values for optional parameters are documented in the LLM endpoints of the GoodData API.
    """
    candidate_params = {
        "filter": filter,
        "page": page,
        "size": size,
        "sort": sort,
        "meta_include": meta_include,
    }
    # Pass only the parameters the caller actually supplied.
    call_kwargs = {name: value for name, value in candidate_params.items() if value is not None}
    call_kwargs["_check_return_type"] = False
    return self._entities_api.get_all_entities_llm_endpoints(**call_kwargs)
281+
282+
def create_llm_endpoint(
    self,
    id: str,
    title: str,
    token: str,
    description: str | None = None,
    provider: str | None = None,
    base_url: str | None = None,
    llm_organization: str | None = None,
    llm_model: str | None = None,
    workspaces_ids: list[str] | None = None,
) -> JsonApiLlmEndpointOutDocument:
    """Create a new LLM endpoint.

    Args:
        id: Identifier of the LLM endpoint
        title: User-facing title of the LLM Provider
        token: The token to use to connect to the LLM provider
        description: Optional user-facing description of the LLM endpoint
        provider: Optional LLM provider name (e.g., "openai")
        base_url: Optional base URL for custom LLM endpoint
        llm_organization: Optional LLM organization identifier
        llm_model: Optional LLM default model override
        workspaces_ids: Optional list of workspace IDs for which LLM endpoint is valid.
            If empty, it is valid for all workspaces.

    Returns:
        JsonApiLlmEndpointOutDocument: Created LLM endpoint
    """
    # Mandatory attributes first; optional ones are appended only when provided
    # (dict insertion order keeps the payload layout stable).
    attributes: dict = {"title": title, "token": token}
    optional_attributes = {
        "description": description,
        "provider": provider,
        "baseUrl": base_url,
        "llmOrganization": llm_organization,
        "llmModel": llm_model,
        "workspacesIds": workspaces_ids,
    }
    attributes.update({key: value for key, value in optional_attributes.items() if value is not None})

    llm_endpoint_document = {
        "data": {
            "id": id,
            "type": "llmEndpoint",
            "attributes": attributes,
        }
    }
    return self._entities_api.create_entity_llm_endpoints(llm_endpoint_document, _check_return_type=False)
337+
338+
def update_llm_endpoint(
    self,
    id: str,
    title: str | None = None,
    token: str | None = None,
    description: str | None = None,
    provider: str | None = None,
    base_url: str | None = None,
    llm_organization: str | None = None,
    llm_model: str | None = None,
    workspaces_ids: list[str] | None = None,
) -> JsonApiLlmEndpointOutDocument:
    """Update an existing LLM endpoint.

    Args:
        id: Identifier of the LLM endpoint
        title: User-facing title of the LLM Provider
        token: The token to use to connect to the LLM provider
        description: User-facing description of the LLM endpoint
        provider: LLM provider name (e.g., "openai")
        base_url: Base URL for custom LLM endpoint
        llm_organization: LLM organization identifier
        llm_model: LLM default model override
        workspaces_ids: List of workspace IDs for which LLM endpoint is valid.
            If empty, it is valid for all workspaces.

    Returns:
        JsonApiLlmEndpointOutDocument: Updated LLM endpoint
    """
    # Only attributes the caller supplied are sent; order matches the
    # parameter order so payloads stay deterministic.
    attributes = {
        key: value
        for key, value in {
            "title": title,
            "token": token,
            "description": description,
            "provider": provider,
            "baseUrl": base_url,
            "llmOrganization": llm_organization,
            "llmModel": llm_model,
            "workspacesIds": workspaces_ids,
        }.items()
        if value is not None
    }
    llm_endpoint_document = {"data": {"id": id, "type": "llmEndpoint", "attributes": attributes}}
    return self._entities_api.update_entity_llm_endpoints(id, llm_endpoint_document, _check_return_type=False)
388+
389+
def delete_llm_endpoint(self, id: str) -> None:
    """Remove an LLM endpoint from the organization.

    Args:
        id: LLM endpoint identifier
    """
    self._entities_api.delete_entity_llm_endpoints(id, _check_return_type=False)

0 commit comments

Comments
 (0)