 from gooddata_api_client.model.chat_history_result import ChatHistoryResult
 from gooddata_api_client.model.chat_request import ChatRequest
 from gooddata_api_client.model.chat_result import ChatResult
+from gooddata_api_client.model.json_api_llm_endpoint_out_document import JsonApiLlmEndpointOutDocument
+from gooddata_api_client.model.json_api_llm_endpoint_out_list import JsonApiLlmEndpointOutList
+from gooddata_api_client.model.search_request import SearchRequest
+from gooddata_api_client.model.search_result import SearchResult

 from gooddata_sdk.client import GoodDataApiClient
 from gooddata_sdk.compute.model.execution import Execution, ExecutionDefinition, ResultCacheMetadata
@@ -26,6 +30,7 @@ class ComputeService:
     def __init__(self, api_client: GoodDataApiClient):
         self._api_client = api_client
         self._actions_api = self._api_client.actions_api
+        self._entities_api = self._api_client.entities_api

     def for_exec_def(self, workspace_id: str, exec_def: ExecutionDefinition) -> Execution:
         """
@@ -91,17 +96,27 @@ def ai_chat(self, workspace_id: str, question: str) -> ChatResult:
         response = self._actions_api.ai_chat(workspace_id, chat_request, _check_return_type=False)
         return response

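A minimal usage sketch for the chat action above, assuming a GoodDataSdk handle that exposes this ComputeService as sdk.compute; the host, token, workspace id and question are placeholders:

from gooddata_sdk import GoodDataSdk

# create the SDK facade; ComputeService is assumed to be available as sdk.compute
sdk = GoodDataSdk.create(host="https://example.gooddata.com", token="<api-token>")
chat_result = sdk.compute.ai_chat("my_workspace", "What was the total revenue last quarter?")
print(chat_result)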
-    def ai_chat_history(self, workspace_id: str, chat_history_interaction_id: int = 0) -> ChatHistoryResult:
+    def ai_chat_history(
+        self, workspace_id: str, chat_history_interaction_id: str = None, thread_id_suffix: str = None
+    ) -> ChatHistoryResult:
         """
         Get chat history with AI in GoodData workspace.

         Args:
             workspace_id: workspace identifier
-            chat_history_interaction_id: collect history starting from this interaction id
+            chat_history_interaction_id: collect history starting from this interaction id. If None, complete chat history is returned.
+            thread_id_suffix: suffix to identify a specific chat thread. If provided, chat_history_interaction_id is ignored.
         Returns:
-            str: Chat history response
+            ChatHistoryResult: Chat history response containing interactions and other metadata
         """
-        chat_history_request = ChatHistoryRequest(chat_history_interaction_id=chat_history_interaction_id)
+
+        if chat_history_interaction_id is None:
+            chat_history_interaction_id = ""
+        if thread_id_suffix is None:
+            thread_id_suffix = ""
+        chat_history_request = ChatHistoryRequest(
+            chat_history_interaction_id=chat_history_interaction_id, reset=False, thread_id_suffix=thread_id_suffix
+        )
         response = self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)
         return response

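A sketch of reading chat history with the new signature, reusing the sdk handle from the previous sketch; the interaction id and thread suffix are placeholder values:

# complete history of the default thread
history = sdk.compute.ai_chat_history("my_workspace")
# history starting from a known interaction id
partial = sdk.compute.ai_chat_history("my_workspace", chat_history_interaction_id="42")
# history of a specific thread (thread_id_suffix takes precedence over the interaction id)
thread = sdk.compute.ai_chat_history("my_workspace", thread_id_suffix="analytics-thread")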
@@ -115,6 +130,72 @@ def ai_chat_history_reset(self, workspace_id: str) -> None:
         chat_history_request = ChatHistoryRequest(reset=True)
         self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)

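Resetting the history is a single call under the same assumptions:

sdk.compute.ai_chat_history_reset("my_workspace")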
+    def ai_chat_history_feedback(
+        self, workspace_id: str, user_feedback: str, chat_history_interaction_id: str, thread_id_suffix: str = None
+    ) -> None:
+        """
+        Provide feedback for a specific chat history interaction.
+
+        Args:
+            workspace_id: workspace identifier
+            user_feedback: feedback to provide ("POSITIVE", "NEGATIVE" or "NONE")
+            chat_history_interaction_id: interaction id to provide feedback for
+            thread_id_suffix: suffix to identify a specific chat thread
+        """
+        if thread_id_suffix is None:
+            thread_id_suffix = ""
+        chat_history_request = ChatHistoryRequest(
+            user_feedback=user_feedback,
+            chat_history_interaction_id=chat_history_interaction_id,
+            thread_id_suffix=thread_id_suffix,
+            reset=False,
+        )
+        self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)
+
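A sketch of recording feedback on one interaction; the interaction id would come from a previously returned chat or history result and is a placeholder here:

sdk.compute.ai_chat_history_feedback(
    workspace_id="my_workspace",
    user_feedback="POSITIVE",
    chat_history_interaction_id="42",
)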
+    def ai_search(
+        self,
+        workspace_id: str,
+        question: str,
+        deep_search: bool = None,
+        limit: int = None,
+        object_types: list[str] = None,
+        relevant_score_threshold: float = None,
+        title_to_descriptor_ratio: float = None,
+    ) -> SearchResult:
+        """
+        Search for metadata objects using similarity search.
+
+        Args:
+            workspace_id: workspace identifier
+            question: keyword/sentence input for search
+            deep_search: turn on deep search - if true, content of complex objects will be searched as well
+            limit: maximum number of results to return
+            object_types: list of object types to search for. Enum items: "attribute", "metric", "fact",
+                "label", "date", "dataset", "visualization" and "dashboard"
+            relevant_score_threshold: minimum relevance score threshold for results
+            title_to_descriptor_ratio: ratio of title score to descriptor score
+
+        Returns:
+            SearchResult: Search results
+
+        Note:
+            Default values for optional parameters are documented in the AI Search endpoint of the GoodData API.
+        """
+        search_params = {}
+        if deep_search is not None:
+            search_params["deep_search"] = deep_search
+        if limit is not None:
+            search_params["limit"] = limit
+        if object_types is not None:
+            search_params["object_types"] = object_types
+        if relevant_score_threshold is not None:
+            search_params["relevant_score_threshold"] = relevant_score_threshold
+        if title_to_descriptor_ratio is not None:
+            search_params["title_to_descriptor_ratio"] = title_to_descriptor_ratio
+        search_request = SearchRequest(question=question, **search_params)
+        response = self._actions_api.ai_search(workspace_id, search_request, _check_return_type=False)
+        return response
+
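A sketch of a similarity search limited to a few object types; reading the matches via search.results assumes the field name used by the generated SearchResult model and should be checked against your client version:

search = sdk.compute.ai_search(
    workspace_id="my_workspace",
    question="revenue by region",
    object_types=["metric", "dashboard"],
    limit=5,
)
for match in search.results:  # assumed accessor on the generated SearchResult model
    print(match)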
     def cancel_executions(self, executions: dict[str, dict[str, str]]) -> None:
         """
         Try to cancel given executions using the cancel api endpoint.
@@ -132,3 +213,180 @@ def cancel_executions(self, executions: dict[str, dict[str, str]]) -> None:
             )
         except ApiException as e:
             print("Exception when calling ActionsApi->cancel_executions: %s\n", e)
+
+    def metadata_sync(self, workspace_id: str, async_req: bool = False) -> None:
+        """
+        Sync metadata to other services.
+
+        Args:
+            workspace_id: workspace identifier
+            async_req: if True, the metadata sync will be performed asynchronously
+        """
+        self._actions_api.metadata_sync(workspace_id, async_req=async_req, _check_return_type=False)
+
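Triggering the sync is a one-liner; with async_req=True the generated openapi client typically returns an async result handle instead of blocking:

sdk.compute.metadata_sync("my_workspace")
# non-blocking variant (behaviour of the underlying generated client)
sdk.compute.metadata_sync("my_workspace", async_req=True)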
+    def get_llm_endpoint(self, id: str) -> JsonApiLlmEndpointOutDocument:
+        """
+        Get LLM endpoint by ID.
+
+        Args:
+            id: LLM endpoint identifier
+
+        Returns:
+            JsonApiLlmEndpointOutDocument: Retrieved LLM endpoint
+        """
+        return self._entities_api.get_entity_llm_endpoints(id, _check_return_type=False)
+
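Fetching a single endpoint; the JSON:API accessors below (data, attributes) are assumed from the generated model, and the endpoint id is a placeholder:

endpoint = sdk.compute.get_llm_endpoint("openai-prod")
print(endpoint.data.id, endpoint.data.attributes.title)  # assumed JSON:API accessors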
+    def list_llm_endpoints(
+        self,
+        filter: str = None,
+        page: int = None,
+        size: int = None,
+        sort: list[str] = None,
+        meta_include: list[str] = None,
+    ) -> JsonApiLlmEndpointOutList:
+        """
+        List all LLM endpoints.
+
+        Args:
+            filter: Optional filter string
+            page: Zero-based page index (0..N)
+            size: The size of the page to be returned
+            sort: Sorting criteria in the format: property,(asc|desc). Multiple sort criteria are supported.
+            meta_include: Include Meta objects
+
+        Returns:
+            JsonApiLlmEndpointOutList: List of LLM endpoints
+
+        Note:
+            Default values for optional parameters are documented in the LLM endpoints section of the GoodData API.
+        """
+        kwargs = {}
+        if filter is not None:
+            kwargs["filter"] = filter
+        if page is not None:
+            kwargs["page"] = page
+        if size is not None:
+            kwargs["size"] = size
+        if sort is not None:
+            kwargs["sort"] = sort
+        if meta_include is not None:
+            kwargs["meta_include"] = meta_include
+        kwargs["_check_return_type"] = False
+
+        return self._entities_api.get_all_entities_llm_endpoints(**kwargs)
+
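A paging and sorting sketch; sort strings follow the property,(asc|desc) format described in the docstring, and endpoints.data is the assumed JSON:API list accessor:

endpoints = sdk.compute.list_llm_endpoints(size=20, page=0, sort=["title,asc"])
for ep in endpoints.data:  # assumed JSON:API list accessor
    print(ep.id)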
+    def create_llm_endpoint(
+        self,
+        id: str,
+        title: str,
+        token: str,
+        description: str = None,
+        provider: str = None,
+        base_url: str = None,
+        llm_organization: str = None,
+        llm_model: str = None,
+        workspaces_ids: list[str] = None,
+    ) -> JsonApiLlmEndpointOutDocument:
+        """
+        Create a new LLM endpoint.
+
+        Args:
+            id: Identifier of the LLM endpoint
+            title: User-facing title of the LLM provider
+            token: The token used to connect to the LLM provider
+            description: Optional user-facing description of the LLM endpoint
+            provider: Optional LLM provider name (e.g., "openai")
+            base_url: Optional base URL for a custom LLM endpoint
+            llm_organization: Optional LLM organization identifier
+            llm_model: Optional LLM default model override
+            workspaces_ids: Optional list of workspace IDs for which the LLM endpoint is valid.
+                If empty, it is valid for all workspaces.
+
+        Returns:
+            JsonApiLlmEndpointOutDocument: Created LLM endpoint
+        """
+        llm_endpoint_document = {
+            "data": {
+                "id": id,
+                "type": "llmEndpoint",
+                "attributes": {
+                    "title": title,
+                    "token": token,
+                },
+            }
+        }
+
+        if description is not None:
+            llm_endpoint_document["data"]["attributes"]["description"] = description
+        if provider is not None:
+            llm_endpoint_document["data"]["attributes"]["provider"] = provider
+        if base_url is not None:
+            llm_endpoint_document["data"]["attributes"]["baseUrl"] = base_url
+        if llm_organization is not None:
+            llm_endpoint_document["data"]["attributes"]["llmOrganization"] = llm_organization
+        if llm_model is not None:
+            llm_endpoint_document["data"]["attributes"]["llmModel"] = llm_model
+        if workspaces_ids is not None:
+            llm_endpoint_document["data"]["attributes"]["workspacesIds"] = workspaces_ids
+
+        return self._entities_api.create_entity_llm_endpoints(llm_endpoint_document, _check_return_type=False)
+
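A creation sketch with placeholder values; the provider and model names must match what your GoodData deployment accepts, and the token is shown redacted:

created = sdk.compute.create_llm_endpoint(
    id="openai-prod",
    title="OpenAI (production)",
    token="<llm-provider-token>",
    provider="openai",
    llm_model="gpt-4o",
    workspaces_ids=["my_workspace"],
)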
+    def update_llm_endpoint(
+        self,
+        id: str,
+        title: str = None,
+        token: str = None,
+        description: str = None,
+        provider: str = None,
+        base_url: str = None,
+        llm_organization: str = None,
+        llm_model: str = None,
+        workspaces_ids: list[str] = None,
+    ) -> JsonApiLlmEndpointOutDocument:
+        """
+        Update an existing LLM endpoint.
+
+        Args:
+            id: Identifier of the LLM endpoint
+            title: User-facing title of the LLM provider
+            token: The token used to connect to the LLM provider
+            description: User-facing description of the LLM endpoint
+            provider: LLM provider name (e.g., "openai")
+            base_url: Base URL for a custom LLM endpoint
+            llm_organization: LLM organization identifier
+            llm_model: LLM default model override
+            workspaces_ids: List of workspace IDs for which the LLM endpoint is valid.
+                If empty, it is valid for all workspaces.
+
+        Returns:
+            JsonApiLlmEndpointOutDocument: Updated LLM endpoint
+        """
+        llm_endpoint_document = {"data": {"id": id, "type": "llmEndpoint", "attributes": {}}}
+
+        if title is not None:
+            llm_endpoint_document["data"]["attributes"]["title"] = title
+        if token is not None:
+            llm_endpoint_document["data"]["attributes"]["token"] = token
+        if description is not None:
+            llm_endpoint_document["data"]["attributes"]["description"] = description
+        if provider is not None:
+            llm_endpoint_document["data"]["attributes"]["provider"] = provider
+        if base_url is not None:
+            llm_endpoint_document["data"]["attributes"]["baseUrl"] = base_url
+        if llm_organization is not None:
+            llm_endpoint_document["data"]["attributes"]["llmOrganization"] = llm_organization
+        if llm_model is not None:
+            llm_endpoint_document["data"]["attributes"]["llmModel"] = llm_model
+        if workspaces_ids is not None:
+            llm_endpoint_document["data"]["attributes"]["workspacesIds"] = workspaces_ids
+
+        return self._entities_api.update_entity_llm_endpoints(id, llm_endpoint_document, _check_return_type=False)
+
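An update sketch changing a couple of attributes; only the values passed here end up in the JSON:API document, so whether omitted attributes are preserved depends on the entity API's update semantics:

updated = sdk.compute.update_llm_endpoint(
    id="openai-prod",
    title="OpenAI (EU region)",
    llm_model="gpt-4o-mini",
)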
+    def delete_llm_endpoint(self, id: str) -> None:
+        """
+        Delete an LLM endpoint.
+
+        Args:
+            id: LLM endpoint identifier
+        """
+        self._entities_api.delete_entity_llm_endpoints(id, _check_return_type=False)
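Removing an endpoint uses the same placeholder id:

sdk.compute.delete_llm_endpoint("openai-prod")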