from __future__ import annotations

import logging
+from typing import Any

from gooddata_api_client import ApiException
from gooddata_api_client.model.afm_cancel_tokens import AfmCancelTokens
from gooddata_api_client.model.chat_history_request import ChatHistoryRequest
from gooddata_api_client.model.chat_history_result import ChatHistoryResult
from gooddata_api_client.model.chat_request import ChatRequest
from gooddata_api_client.model.chat_result import ChatResult
+from gooddata_api_client.model.json_api_llm_endpoint_out_document import JsonApiLlmEndpointOutDocument
+from gooddata_api_client.model.json_api_llm_endpoint_out_list import JsonApiLlmEndpointOutList
+from gooddata_api_client.model.search_request import SearchRequest
+from gooddata_api_client.model.search_result import SearchResult

from gooddata_sdk.client import GoodDataApiClient
from gooddata_sdk.compute.model.execution import Execution, ExecutionDefinition, ResultCacheMetadata
@@ -26,6 +31,7 @@ class ComputeService:
    def __init__(self, api_client: GoodDataApiClient):
        self._api_client = api_client
        self._actions_api = self._api_client.actions_api
+        self._entities_api = self._api_client.entities_api

    def for_exec_def(self, workspace_id: str, exec_def: ExecutionDefinition) -> Execution:
        """
@@ -91,17 +97,27 @@ def ai_chat(self, workspace_id: str, question: str) -> ChatResult:
        response = self._actions_api.ai_chat(workspace_id, chat_request, _check_return_type=False)
        return response

-    def ai_chat_history(self, workspace_id: str, chat_history_interaction_id: int = 0) -> ChatHistoryResult:
+    def ai_chat_history(
+        self, workspace_id: str, chat_history_interaction_id: str | None = None, thread_id_suffix: str | None = None
+    ) -> ChatHistoryResult:
        """
        Get chat history with AI in GoodData workspace.

        Args:
            workspace_id: workspace identifier
-            chat_history_interaction_id: collect history starting from this interaction id
+            chat_history_interaction_id: collect history starting from this interaction id. If None, the complete chat history is returned.
+            thread_id_suffix: suffix to identify a specific chat thread. If provided, chat_history_interaction_id is ignored.
        Returns:
-            str: Chat history response
+            ChatHistoryResult: Chat history response containing interactions and other metadata
        """
-        chat_history_request = ChatHistoryRequest(chat_history_interaction_id=chat_history_interaction_id)
+
+        if chat_history_interaction_id is None:
+            chat_history_interaction_id = ""
+        if thread_id_suffix is None:
+            thread_id_suffix = ""
+        chat_history_request = ChatHistoryRequest(
+            chat_history_interaction_id=chat_history_interaction_id, reset=False, thread_id_suffix=thread_id_suffix
+        )
        response = self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)
        return response

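    # Usage sketch: assuming `sdk` is an initialized GoodDataSdk instance (e.g. created via
    # GoodDataSdk.create(host, token)) that exposes this service as `sdk.compute`, and with
    # placeholder workspace/interaction ids, the reworked history call could look like:
    #
    #     full_history = sdk.compute.ai_chat_history("my_workspace_id")
    #     tail = sdk.compute.ai_chat_history("my_workspace_id", chat_history_interaction_id="42")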
@@ -115,6 +131,76 @@ def ai_chat_history_reset(self, workspace_id: str) -> None:
        chat_history_request = ChatHistoryRequest(reset=True)
        self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)

+    def ai_chat_history_feedback(
+        self,
+        workspace_id: str,
+        user_feedback: str,
+        chat_history_interaction_id: str,
+        thread_id_suffix: str | None = None,
+    ) -> None:
+        """
+        Provide feedback for a specific chat history interaction.
+
+        Args:
+            workspace_id: workspace identifier
+            user_feedback: feedback to provide ("POSITIVE", "NEGATIVE" or "NONE")
+            chat_history_interaction_id: interaction id to provide feedback for
+            thread_id_suffix: suffix to identify a specific chat thread
+        """
+        if thread_id_suffix is None:
+            thread_id_suffix = ""
+        chat_history_request = ChatHistoryRequest(
+            user_feedback=user_feedback,
+            chat_history_interaction_id=chat_history_interaction_id,
+            thread_id_suffix=thread_id_suffix,
+            reset=False,
+        )
+        self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)
+
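    # Usage sketch (same assumed `sdk` instance as above; ids are placeholders):
    #
    #     sdk.compute.ai_chat_history_feedback(
    #         "my_workspace_id", user_feedback="POSITIVE", chat_history_interaction_id="42"
    #     )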
+    def ai_search(
+        self,
+        workspace_id: str,
+        question: str,
+        deep_search: bool | None = None,
+        limit: int | None = None,
+        object_types: list[str] | None = None,
+        relevant_score_threshold: float | None = None,
+        title_to_descriptor_ratio: float | None = None,
+    ) -> SearchResult:
+        """
+        Search for metadata objects using similarity search.
+
+        Args:
+            workspace_id: workspace identifier
+            question: keyword/sentence input for search
+            deep_search: turn on deep search - if True, the content of complex objects is searched as well
+            limit: maximum number of results to return
+            object_types: list of object types to search for. Enum items: "attribute", "metric", "fact",
+                "label", "date", "dataset", "visualization" and "dashboard"
+            relevant_score_threshold: minimum relevance score threshold for results
+            title_to_descriptor_ratio: ratio of title score to descriptor score
+
+        Returns:
+            SearchResult: Search results
+
+        Note:
+            Default values for optional parameters are documented in the AI Search endpoint of the GoodData API.
+        """
+        search_params: dict[str, Any] = {}
+        if deep_search is not None:
+            search_params["deep_search"] = deep_search
+        if limit is not None:
+            search_params["limit"] = limit
+        if object_types is not None:
+            search_params["object_types"] = object_types
+        if relevant_score_threshold is not None:
+            search_params["relevant_score_threshold"] = relevant_score_threshold
+        if title_to_descriptor_ratio is not None:
+            search_params["title_to_descriptor_ratio"] = title_to_descriptor_ratio
+        search_request = SearchRequest(question=question, **search_params)
+        response = self._actions_api.ai_search(workspace_id, search_request, _check_return_type=False)
+        return response
+
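    # Usage sketch (same assumed `sdk` instance as above): a narrowed similarity search over
    # metrics and dashboards, keeping only the five best matches, might look like:
    #
    #     results = sdk.compute.ai_search(
    #         "my_workspace_id", "revenue by region", limit=5, object_types=["metric", "dashboard"]
    #     )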
    def cancel_executions(self, executions: dict[str, dict[str, str]]) -> None:
        """
        Try to cancel given executions using the cancel api endpoint.
@@ -132,3 +218,187 @@ def cancel_executions(self, executions: dict[str, dict[str, str]]) -> None:
            )
        except ApiException as e:
            print("Exception when calling ActionsApi->cancel_executions: %s\n", e)
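    # Usage sketch (same assumed `sdk` instance as above): the shape of `executions` is assumed
    # here to map workspace_id -> {execution_result_id: cancel_token}, matching AfmCancelTokens:
    #
    #     sdk.compute.cancel_executions({"my_workspace_id": {"<result_id>": "<cancel_token>"}})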
+
+    def metadata_sync(self, workspace_id: str, async_req: bool = False) -> None:
+        """
+        Sync metadata to other services.
+
+        Args:
+            workspace_id: workspace identifier
+            async_req: if True, the metadata sync will be performed asynchronously
+        """
+        self._actions_api.metadata_sync(workspace_id, async_req=async_req, _check_return_type=False)
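    # Usage sketch (same assumed `sdk` instance as above):
    #
    #     sdk.compute.metadata_sync("my_workspace_id", async_req=True)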
+
+    def get_llm_endpoint(self, id: str) -> JsonApiLlmEndpointOutDocument:
+        """
+        Get LLM endpoint by ID.
+
+        Args:
+            id: LLM endpoint identifier
+
+        Returns:
+            JsonApiLlmEndpointOutDocument: Retrieved LLM endpoint
+        """
+        return self._entities_api.get_entity_llm_endpoints(id, _check_return_type=False)
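    # Usage sketch (same assumed `sdk` instance as above; "openai-prod" is a placeholder id):
    #
    #     endpoint = sdk.compute.get_llm_endpoint("openai-prod")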
+
+    def list_llm_endpoints(
+        self,
+        filter: str | None = None,
+        page: int | None = None,
+        size: int | None = None,
+        sort: list[str] | None = None,
+        meta_include: list[str] | None = None,
+    ) -> JsonApiLlmEndpointOutList:
+        """
+        List all LLM endpoints.
+
+        Args:
+            filter: Optional filter string
+            page: Zero-based page index (0..N)
+            size: The size of the page to be returned
+            sort: Sorting criteria in the format: property,(asc|desc). Multiple sort criteria are supported.
+            meta_include: Include Meta objects
+
+        Returns:
+            JsonApiLlmEndpointOutList: List of LLM endpoints
+
+        Note:
+            Default values for optional parameters are documented in the LLM endpoints section of the GoodData API.
+        """
+        kwargs: dict[str, Any] = {}
+        if filter is not None:
+            kwargs["filter"] = filter
+        if page is not None:
+            kwargs["page"] = page
+        if size is not None:
+            kwargs["size"] = size
+        if sort is not None:
+            kwargs["sort"] = sort
+        if meta_include is not None:
+            kwargs["meta_include"] = meta_include
+        kwargs["_check_return_type"] = False
+
+        return self._entities_api.get_all_entities_llm_endpoints(**kwargs)
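    # Usage sketch (same assumed `sdk` instance as above): first page of twenty endpoints,
    # sorted by title:
    #
    #     endpoints = sdk.compute.list_llm_endpoints(page=0, size=20, sort=["title,asc"])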
+
+    def create_llm_endpoint(
+        self,
+        id: str,
+        title: str,
+        token: str,
+        description: str | None = None,
+        provider: str | None = None,
+        base_url: str | None = None,
+        llm_organization: str | None = None,
+        llm_model: str | None = None,
+        workspaces_ids: list[str] | None = None,
+    ) -> JsonApiLlmEndpointOutDocument:
+        """
+        Create a new LLM endpoint.
+
+        Args:
+            id: Identifier of the LLM endpoint
+            title: User-facing title of the LLM provider
+            token: The token to use to connect to the LLM provider
+            description: Optional user-facing description of the LLM endpoint
+            provider: Optional LLM provider name (e.g., "openai")
+            base_url: Optional base URL for a custom LLM endpoint
+            llm_organization: Optional LLM organization identifier
+            llm_model: Optional LLM default model override
+            workspaces_ids: Optional list of workspace IDs for which the LLM endpoint is valid.
+                If empty, it is valid for all workspaces.
+
+        Returns:
+            JsonApiLlmEndpointOutDocument: Created LLM endpoint
+        """
+
+        llm_endpoint_document: dict[str, Any] = {
+            "data": {
+                "id": id,
+                "type": "llmEndpoint",
+                "attributes": {
+                    "title": title,
+                    "token": token,
+                },
+            }
+        }
+
+        if description is not None:
+            llm_endpoint_document["data"]["attributes"]["description"] = description
+        if provider is not None:
+            llm_endpoint_document["data"]["attributes"]["provider"] = provider
+        if base_url is not None:
+            llm_endpoint_document["data"]["attributes"]["baseUrl"] = base_url
+        if llm_organization is not None:
+            llm_endpoint_document["data"]["attributes"]["llmOrganization"] = llm_organization
+        if llm_model is not None:
+            llm_endpoint_document["data"]["attributes"]["llmModel"] = llm_model
+        if workspaces_ids is not None:
+            llm_endpoint_document["data"]["attributes"]["workspacesIds"] = workspaces_ids
+
+        return self._entities_api.create_entity_llm_endpoints(llm_endpoint_document, _check_return_type=False)
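    # Usage sketch (same assumed `sdk` instance as above; id, token, model and workspace values
    # are placeholders): id, title and token are mandatory, the remaining attributes are optional:
    #
    #     created = sdk.compute.create_llm_endpoint(
    #         id="openai-prod",
    #         title="OpenAI production",
    #         token="<provider-api-token>",
    #         provider="openai",
    #         llm_model="gpt-4o",
    #         workspaces_ids=["my_workspace_id"],
    #     )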
+
+    def update_llm_endpoint(
+        self,
+        id: str,
+        title: str | None = None,
+        token: str | None = None,
+        description: str | None = None,
+        provider: str | None = None,
+        base_url: str | None = None,
+        llm_organization: str | None = None,
+        llm_model: str | None = None,
+        workspaces_ids: list[str] | None = None,
+    ) -> JsonApiLlmEndpointOutDocument:
+        """
+        Update an existing LLM endpoint.
+
+        Args:
+            id: Identifier of the LLM endpoint
+            title: User-facing title of the LLM provider
+            token: The token to use to connect to the LLM provider
+            description: User-facing description of the LLM endpoint
+            provider: LLM provider name (e.g., "openai")
+            base_url: Base URL for a custom LLM endpoint
+            llm_organization: LLM organization identifier
+            llm_model: LLM default model override
+            workspaces_ids: List of workspace IDs for which the LLM endpoint is valid.
+                If empty, it is valid for all workspaces.
+
+        Returns:
+            JsonApiLlmEndpointOutDocument: Updated LLM endpoint
+        """
+        llm_endpoint_document: dict[str, Any] = {
+            "data": {
+                "id": id,
+                "type": "llmEndpoint",
+                "attributes": {},
+            }
+        }
+
+        if title is not None:
+            llm_endpoint_document["data"]["attributes"]["title"] = title
+        if token is not None:
+            llm_endpoint_document["data"]["attributes"]["token"] = token
+        if description is not None:
+            llm_endpoint_document["data"]["attributes"]["description"] = description
+        if provider is not None:
+            llm_endpoint_document["data"]["attributes"]["provider"] = provider
+        if base_url is not None:
+            llm_endpoint_document["data"]["attributes"]["baseUrl"] = base_url
+        if llm_organization is not None:
+            llm_endpoint_document["data"]["attributes"]["llmOrganization"] = llm_organization
+        if llm_model is not None:
+            llm_endpoint_document["data"]["attributes"]["llmModel"] = llm_model
+        if workspaces_ids is not None:
+            llm_endpoint_document["data"]["attributes"]["workspacesIds"] = workspaces_ids
+
+        return self._entities_api.update_entity_llm_endpoints(id, llm_endpoint_document, _check_return_type=False)
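    # Usage sketch (same assumed `sdk` instance as above): only attributes passed as non-None are
    # included in the update document, so a title-only change could be:
    #
    #     updated = sdk.compute.update_llm_endpoint("openai-prod", title="OpenAI (primary)")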
+
+    def delete_llm_endpoint(self, id: str) -> None:
+        """
+        Delete an LLM endpoint.
+
+        Args:
+            id: LLM endpoint identifier
+        """
+        self._entities_api.delete_entity_llm_endpoints(id, _check_return_type=False)
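    # Usage sketch (same assumed `sdk` instance as above):
    #
    #     sdk.compute.delete_llm_endpoint("openai-prod")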