From 4888d8ad39bf78f590cff1e73489811ab9a074d1 Mon Sep 17 00:00:00 2001 From: Xi Yan Date: Wed, 12 Mar 2025 16:00:37 -0700 Subject: [PATCH 1/2] Sync updates from stainless branch: main --- .../resources/agents/turn.py | 37 +- .../resources/eval_tasks.py | 328 ------- src/llama_stack_client/resources/memory.py | 320 ------- .../resources/memory_banks.py | 524 ----------- .../resources/tool_runtime.py | 323 ------- .../types/agent_delete_params.py | 17 - .../types/agents/agents_step.py | 18 - .../types/agents/agents_turn_stream_chunk.py | 12 - .../types/agents/session_delete_params.py | 19 - .../types/agents/step_retrieve_params.py | 23 - .../types/agents/turn_create_response.py | 17 - .../types/agents/turn_resume_params.py | 8 +- .../types/agents/turn_retrieve_params.py | 21 - .../types/agents/turn_stream_event.py | 98 -- .../types/batch_chat_completion.py | 12 - .../batch_inference_completion_response.py | 82 -- .../types/chat_completion_stream_chunk.py | 41 - .../types/completion_stream_chunk.py | 17 - .../types/dataset_create_params.py | 18 - .../types/dataset_delete_params.py | 15 - .../types/dataset_get_params.py | 15 - .../types/dataset_retrieve_params.py | 17 - .../types/dataset_unregister_params.py | 17 - .../types/eval/job_cancel_params.py | 19 - .../types/eval/job_result_params.py | 15 - .../types/eval/job_result_response.py | 19 - .../types/eval/job_retrieve_params.py | 17 - .../types/eval/job_status.py | 7 - .../types/eval/job_status_params.py | 17 - .../types/eval_evaluate_batch_params.py | 259 ------ .../types/eval_evaluate_params.py | 259 ------ .../types/eval_evaluate_response.py | 19 - src/llama_stack_client/types/eval_task.py | 24 - .../types/eval_task_config_param.py | 32 - .../types/eval_task_list_response.py | 10 - .../types/eval_task_register_params.py | 22 - .../types/eval_task_retrieve_params.py | 17 - .../types/evaluate/__init__.py | 9 - .../types/evaluate/evaluate_response.py | 19 - .../evaluate/evaluation_job_artifacts.py | 11 - .../evaluate/evaluation_job_log_stream.py | 11 - .../types/evaluate/evaluation_job_status.py | 11 - .../types/evaluate/job_artifacts_params.py | 15 - .../types/evaluate/job_artifacts_response.py | 10 - .../types/evaluate/job_cancel_params.py | 15 - .../types/evaluate/job_logs_params.py | 15 - .../types/evaluate/job_logs_response.py | 10 - .../types/evaluate/job_result_params.py | 15 - .../types/evaluate/job_status.py | 7 - .../types/evaluate/job_status_params.py | 15 - .../types/evaluate/job_status_response.py | 10 - .../types/evaluate/jobs/__init__.py | 7 - .../evaluate/jobs/artifact_list_params.py | 15 - .../types/evaluate/jobs/log_list_params.py | 15 - .../types/evaluate/jobs/status_list_params.py | 15 - .../question_answering_create_params.py | 16 - .../types/evaluate_evaluate_batch_params.py | 259 ------ .../types/evaluate_evaluate_params.py | 259 ------ .../evaluate_question_answering_params.py | 16 - .../types/evaluate_summarization_params.py | 16 - .../types/evaluation_job.py | 10 - .../types/evaluation_summarization_params.py | 16 - .../evaluation_text_generation_params.py | 16 - .../types/inference/__init__.py | 6 - .../inference/embedding_create_params.py | 23 - .../types/inference/embeddings.py | 11 - .../inference_chat_completion_response.py | 49 - .../types/inference_completion_response.py | 24 - .../types/list_eval_tasks_response.py | 11 - .../types/list_memory_banks_response.py | 78 -- .../types/memory/__init__.py | 7 - .../types/memory/document_delete_params.py | 18 - .../types/memory/document_retrieve_params.py | 18 - 
.../memory/document_retrieve_response.py | 22 - .../types/memory_bank_get_params.py | 15 - .../types/memory_bank_list_response.py | 81 -- .../types/memory_bank_register_params.py | 61 -- .../types/memory_bank_register_response.py | 73 -- .../types/memory_bank_retrieve_params.py | 17 - .../types/memory_bank_retrieve_response.py | 75 -- .../types/memory_bank_spec.py | 20 - .../types/memory_bank_unregister_params.py | 17 - .../types/memory_create_params.py | 57 -- .../types/memory_create_response.py | 59 -- .../types/memory_drop_params.py | 15 - .../types/memory_drop_response.py | 7 - .../types/memory_insert_params.py | 59 -- .../types/memory_list_response.py | 59 -- .../types/memory_query_params.py | 23 - .../types/memory_retrieve_params.py | 15 - .../types/memory_retrieve_response.py | 59 -- .../types/memory_update_params.py | 33 - .../types/model_def_with_provider.py | 17 - .../types/model_def_with_provider_param.py | 18 - .../types/model_delete_params.py | 15 - .../types/model_get_params.py | 15 - .../types/model_retrieve_params.py | 17 - .../types/model_retrieve_response.py | 23 - .../types/model_serving_spec.py | 23 - .../types/model_unregister_params.py | 17 - .../types/model_update_params.py | 22 - .../types/post_training/job_logs_params.py | 15 - .../types/post_training/job_logs_response.py | 13 - .../post_training_job_artifacts.py | 13 - .../post_training_job_log_stream.py | 13 - .../post_training/post_training_job_status.py | 25 - .../types/query_documents.py | 26 - .../types/query_documents_response.py | 22 - .../types/response_format_param.py | 23 - .../types/rest_api_execution_config_param.py | 20 - .../types/reward_scoring.py | 12 - .../types/reward_scoring_response.py | 40 - .../types/reward_scoring_score_params.py | 38 - .../types/run_sheid_response.py | 20 - .../types/score_batch_response.py | 19 - .../types/score_response.py | 17 - .../types/scored_dialog_generations.py | 28 - .../types/scoring_fn_def_with_provider.py | 84 -- .../scoring_fn_def_with_provider_param.py | 84 -- .../scoring_function_def_with_provider.py | 98 -- ...coring_function_def_with_provider_param.py | 98 -- .../types/scoring_function_retrieve_params.py | 17 - .../scoring_function_retrieve_response.py | 84 -- .../types/shared/attachment.py | 33 - .../code_interpreter_tool_definition.py | 21 - .../types/shared/content_array.py | 12 - .../shared/function_call_tool_definition.py | 26 - .../types/shared/graph_memory_bank_def.py | 15 - .../types/shared/image_media.py | 21 - .../types/shared/key_value_memory_bank_def.py | 15 - .../types/shared/keyword_memory_bank_def.py | 15 - .../types/shared/memory_tool_definition.py | 91 -- .../types/shared/photogen_tool_definition.py | 19 - .../types/shared/rest_api_execution_config.py | 21 - .../types/shared/search_tool_definition.py | 23 - src/llama_stack_client/types/shared/url.py | 10 - .../types/shared/vector_memory_bank_def.py | 22 - .../shared/wolfram_alpha_tool_definition.py | 21 - .../types/shared_params/attachment.py | 34 - .../code_interpreter_tool_definition.py | 22 - .../types/shared_params/content_array.py | 14 - .../function_call_tool_definition.py | 27 - .../shared_params/graph_memory_bank_def.py | 15 - .../types/shared_params/image_media.py | 21 - .../key_value_memory_bank_def.py | 15 - .../shared_params/keyword_memory_bank_def.py | 15 - .../shared_params/memory_tool_definition.py | 91 -- .../shared_params/photogen_tool_definition.py | 20 - .../rest_api_execution_config.py | 22 - .../shared_params/search_tool_definition.py | 24 - 
.../types/shared_params/url.py | 11 - .../shared_params/vector_memory_bank_def.py | 21 - .../wolfram_alpha_tool_definition.py | 22 - .../types/shield_def_with_provider.py | 17 - .../types/shield_def_with_provider_param.py | 18 - .../types/shield_get_params.py | 15 - .../types/shield_retrieve_params.py | 17 - .../types/shield_retrieve_response.py | 19 - src/llama_stack_client/types/shield_spec.py | 19 - .../types/span_with_children.py | 38 - .../types/synthetic_data_generation.py | 14 - .../types/telemetry_get_trace_params.py | 15 - .../types/telemetry_get_trace_response.py | 18 - .../types/telemetry_log_params.py | 96 -- .../types/tool_get_params.py | 17 - .../types/tool_param_definition_param.py | 18 - .../types/tool_runtime/document_param.py | 48 - .../types/tool_runtime/query_config_param.py | 40 - .../types/tool_runtime/query_result.py | 12 - .../types/toolgroup_get_params.py | 17 - .../types/toolgroup_unregister_params.py | 17 - .../types/train_eval_dataset.py | 16 - .../types/train_eval_dataset_param.py | 16 - tests/api_resources/eval/test_job.py | 259 ------ tests/api_resources/evaluate/__init__.py | 1 - tests/api_resources/evaluate/jobs/__init__.py | 1 - .../evaluate/jobs/test_artifacts.py | 100 -- .../api_resources/evaluate/jobs/test_logs.py | 100 -- .../evaluate/jobs/test_status.py | 100 -- tests/api_resources/evaluate/test_jobs.py | 259 ------ .../evaluate/test_question_answering.py | 100 -- tests/api_resources/inference/__init__.py | 1 - .../inference/test_embeddings.py | 108 --- tests/api_resources/memory/__init__.py | 1 - tests/api_resources/memory/test_documents.py | 218 ----- .../api_resources/post_training/test_jobs.py | 427 --------- tests/api_resources/test_batch_inferences.py | 675 -------------- tests/api_resources/test_eval_tasks.py | 246 ----- tests/api_resources/test_evaluate.py | 319 ------- tests/api_resources/test_evaluations.py | 100 -- tests/api_resources/test_memory.py | 252 ----- tests/api_resources/test_memory_banks.py | 406 -------- tests/api_resources/test_reward_scoring.py | 872 ------------------ 193 files changed, 17 insertions(+), 11310 deletions(-) delete mode 100644 src/llama_stack_client/resources/eval_tasks.py delete mode 100644 src/llama_stack_client/resources/memory.py delete mode 100644 src/llama_stack_client/resources/memory_banks.py delete mode 100644 src/llama_stack_client/resources/tool_runtime.py delete mode 100644 src/llama_stack_client/types/agent_delete_params.py delete mode 100644 src/llama_stack_client/types/agents/agents_step.py delete mode 100644 src/llama_stack_client/types/agents/agents_turn_stream_chunk.py delete mode 100644 src/llama_stack_client/types/agents/session_delete_params.py delete mode 100644 src/llama_stack_client/types/agents/step_retrieve_params.py delete mode 100644 src/llama_stack_client/types/agents/turn_create_response.py delete mode 100644 src/llama_stack_client/types/agents/turn_retrieve_params.py delete mode 100644 src/llama_stack_client/types/agents/turn_stream_event.py delete mode 100644 src/llama_stack_client/types/batch_chat_completion.py delete mode 100644 src/llama_stack_client/types/batch_inference_completion_response.py delete mode 100644 src/llama_stack_client/types/chat_completion_stream_chunk.py delete mode 100644 src/llama_stack_client/types/completion_stream_chunk.py delete mode 100644 src/llama_stack_client/types/dataset_create_params.py delete mode 100644 src/llama_stack_client/types/dataset_delete_params.py delete mode 100644 src/llama_stack_client/types/dataset_get_params.py delete mode 100644 
src/llama_stack_client/types/dataset_retrieve_params.py delete mode 100644 src/llama_stack_client/types/dataset_unregister_params.py delete mode 100644 src/llama_stack_client/types/eval/job_cancel_params.py delete mode 100644 src/llama_stack_client/types/eval/job_result_params.py delete mode 100644 src/llama_stack_client/types/eval/job_result_response.py delete mode 100644 src/llama_stack_client/types/eval/job_retrieve_params.py delete mode 100644 src/llama_stack_client/types/eval/job_status.py delete mode 100644 src/llama_stack_client/types/eval/job_status_params.py delete mode 100644 src/llama_stack_client/types/eval_evaluate_batch_params.py delete mode 100644 src/llama_stack_client/types/eval_evaluate_params.py delete mode 100644 src/llama_stack_client/types/eval_evaluate_response.py delete mode 100644 src/llama_stack_client/types/eval_task.py delete mode 100644 src/llama_stack_client/types/eval_task_config_param.py delete mode 100644 src/llama_stack_client/types/eval_task_list_response.py delete mode 100644 src/llama_stack_client/types/eval_task_register_params.py delete mode 100644 src/llama_stack_client/types/eval_task_retrieve_params.py delete mode 100644 src/llama_stack_client/types/evaluate/__init__.py delete mode 100644 src/llama_stack_client/types/evaluate/evaluate_response.py delete mode 100644 src/llama_stack_client/types/evaluate/evaluation_job_artifacts.py delete mode 100644 src/llama_stack_client/types/evaluate/evaluation_job_log_stream.py delete mode 100644 src/llama_stack_client/types/evaluate/evaluation_job_status.py delete mode 100644 src/llama_stack_client/types/evaluate/job_artifacts_params.py delete mode 100644 src/llama_stack_client/types/evaluate/job_artifacts_response.py delete mode 100644 src/llama_stack_client/types/evaluate/job_cancel_params.py delete mode 100644 src/llama_stack_client/types/evaluate/job_logs_params.py delete mode 100644 src/llama_stack_client/types/evaluate/job_logs_response.py delete mode 100644 src/llama_stack_client/types/evaluate/job_result_params.py delete mode 100644 src/llama_stack_client/types/evaluate/job_status.py delete mode 100644 src/llama_stack_client/types/evaluate/job_status_params.py delete mode 100644 src/llama_stack_client/types/evaluate/job_status_response.py delete mode 100644 src/llama_stack_client/types/evaluate/jobs/__init__.py delete mode 100644 src/llama_stack_client/types/evaluate/jobs/artifact_list_params.py delete mode 100644 src/llama_stack_client/types/evaluate/jobs/log_list_params.py delete mode 100644 src/llama_stack_client/types/evaluate/jobs/status_list_params.py delete mode 100644 src/llama_stack_client/types/evaluate/question_answering_create_params.py delete mode 100644 src/llama_stack_client/types/evaluate_evaluate_batch_params.py delete mode 100644 src/llama_stack_client/types/evaluate_evaluate_params.py delete mode 100644 src/llama_stack_client/types/evaluate_question_answering_params.py delete mode 100644 src/llama_stack_client/types/evaluate_summarization_params.py delete mode 100644 src/llama_stack_client/types/evaluation_job.py delete mode 100644 src/llama_stack_client/types/evaluation_summarization_params.py delete mode 100644 src/llama_stack_client/types/evaluation_text_generation_params.py delete mode 100644 src/llama_stack_client/types/inference/__init__.py delete mode 100644 src/llama_stack_client/types/inference/embedding_create_params.py delete mode 100644 src/llama_stack_client/types/inference/embeddings.py delete mode 100644 src/llama_stack_client/types/inference_chat_completion_response.py 
delete mode 100644 src/llama_stack_client/types/inference_completion_response.py delete mode 100644 src/llama_stack_client/types/list_eval_tasks_response.py delete mode 100644 src/llama_stack_client/types/list_memory_banks_response.py delete mode 100644 src/llama_stack_client/types/memory/__init__.py delete mode 100644 src/llama_stack_client/types/memory/document_delete_params.py delete mode 100644 src/llama_stack_client/types/memory/document_retrieve_params.py delete mode 100644 src/llama_stack_client/types/memory/document_retrieve_response.py delete mode 100644 src/llama_stack_client/types/memory_bank_get_params.py delete mode 100644 src/llama_stack_client/types/memory_bank_list_response.py delete mode 100644 src/llama_stack_client/types/memory_bank_register_params.py delete mode 100644 src/llama_stack_client/types/memory_bank_register_response.py delete mode 100644 src/llama_stack_client/types/memory_bank_retrieve_params.py delete mode 100644 src/llama_stack_client/types/memory_bank_retrieve_response.py delete mode 100644 src/llama_stack_client/types/memory_bank_spec.py delete mode 100644 src/llama_stack_client/types/memory_bank_unregister_params.py delete mode 100644 src/llama_stack_client/types/memory_create_params.py delete mode 100644 src/llama_stack_client/types/memory_create_response.py delete mode 100644 src/llama_stack_client/types/memory_drop_params.py delete mode 100644 src/llama_stack_client/types/memory_drop_response.py delete mode 100644 src/llama_stack_client/types/memory_insert_params.py delete mode 100644 src/llama_stack_client/types/memory_list_response.py delete mode 100644 src/llama_stack_client/types/memory_query_params.py delete mode 100644 src/llama_stack_client/types/memory_retrieve_params.py delete mode 100644 src/llama_stack_client/types/memory_retrieve_response.py delete mode 100644 src/llama_stack_client/types/memory_update_params.py delete mode 100644 src/llama_stack_client/types/model_def_with_provider.py delete mode 100644 src/llama_stack_client/types/model_def_with_provider_param.py delete mode 100644 src/llama_stack_client/types/model_delete_params.py delete mode 100644 src/llama_stack_client/types/model_get_params.py delete mode 100644 src/llama_stack_client/types/model_retrieve_params.py delete mode 100644 src/llama_stack_client/types/model_retrieve_response.py delete mode 100644 src/llama_stack_client/types/model_serving_spec.py delete mode 100644 src/llama_stack_client/types/model_unregister_params.py delete mode 100644 src/llama_stack_client/types/model_update_params.py delete mode 100644 src/llama_stack_client/types/post_training/job_logs_params.py delete mode 100644 src/llama_stack_client/types/post_training/job_logs_response.py delete mode 100644 src/llama_stack_client/types/post_training/post_training_job_artifacts.py delete mode 100644 src/llama_stack_client/types/post_training/post_training_job_log_stream.py delete mode 100644 src/llama_stack_client/types/post_training/post_training_job_status.py delete mode 100644 src/llama_stack_client/types/query_documents.py delete mode 100644 src/llama_stack_client/types/query_documents_response.py delete mode 100644 src/llama_stack_client/types/response_format_param.py delete mode 100644 src/llama_stack_client/types/rest_api_execution_config_param.py delete mode 100644 src/llama_stack_client/types/reward_scoring.py delete mode 100644 src/llama_stack_client/types/reward_scoring_response.py delete mode 100644 src/llama_stack_client/types/reward_scoring_score_params.py delete mode 100644 
src/llama_stack_client/types/run_sheid_response.py delete mode 100644 src/llama_stack_client/types/score_batch_response.py delete mode 100644 src/llama_stack_client/types/score_response.py delete mode 100644 src/llama_stack_client/types/scored_dialog_generations.py delete mode 100644 src/llama_stack_client/types/scoring_fn_def_with_provider.py delete mode 100644 src/llama_stack_client/types/scoring_fn_def_with_provider_param.py delete mode 100644 src/llama_stack_client/types/scoring_function_def_with_provider.py delete mode 100644 src/llama_stack_client/types/scoring_function_def_with_provider_param.py delete mode 100644 src/llama_stack_client/types/scoring_function_retrieve_params.py delete mode 100644 src/llama_stack_client/types/scoring_function_retrieve_response.py delete mode 100644 src/llama_stack_client/types/shared/attachment.py delete mode 100644 src/llama_stack_client/types/shared/code_interpreter_tool_definition.py delete mode 100644 src/llama_stack_client/types/shared/content_array.py delete mode 100644 src/llama_stack_client/types/shared/function_call_tool_definition.py delete mode 100644 src/llama_stack_client/types/shared/graph_memory_bank_def.py delete mode 100644 src/llama_stack_client/types/shared/image_media.py delete mode 100644 src/llama_stack_client/types/shared/key_value_memory_bank_def.py delete mode 100644 src/llama_stack_client/types/shared/keyword_memory_bank_def.py delete mode 100644 src/llama_stack_client/types/shared/memory_tool_definition.py delete mode 100644 src/llama_stack_client/types/shared/photogen_tool_definition.py delete mode 100644 src/llama_stack_client/types/shared/rest_api_execution_config.py delete mode 100644 src/llama_stack_client/types/shared/search_tool_definition.py delete mode 100644 src/llama_stack_client/types/shared/url.py delete mode 100644 src/llama_stack_client/types/shared/vector_memory_bank_def.py delete mode 100644 src/llama_stack_client/types/shared/wolfram_alpha_tool_definition.py delete mode 100644 src/llama_stack_client/types/shared_params/attachment.py delete mode 100644 src/llama_stack_client/types/shared_params/code_interpreter_tool_definition.py delete mode 100644 src/llama_stack_client/types/shared_params/content_array.py delete mode 100644 src/llama_stack_client/types/shared_params/function_call_tool_definition.py delete mode 100644 src/llama_stack_client/types/shared_params/graph_memory_bank_def.py delete mode 100644 src/llama_stack_client/types/shared_params/image_media.py delete mode 100644 src/llama_stack_client/types/shared_params/key_value_memory_bank_def.py delete mode 100644 src/llama_stack_client/types/shared_params/keyword_memory_bank_def.py delete mode 100644 src/llama_stack_client/types/shared_params/memory_tool_definition.py delete mode 100644 src/llama_stack_client/types/shared_params/photogen_tool_definition.py delete mode 100644 src/llama_stack_client/types/shared_params/rest_api_execution_config.py delete mode 100644 src/llama_stack_client/types/shared_params/search_tool_definition.py delete mode 100644 src/llama_stack_client/types/shared_params/url.py delete mode 100644 src/llama_stack_client/types/shared_params/vector_memory_bank_def.py delete mode 100644 src/llama_stack_client/types/shared_params/wolfram_alpha_tool_definition.py delete mode 100644 src/llama_stack_client/types/shield_def_with_provider.py delete mode 100644 src/llama_stack_client/types/shield_def_with_provider_param.py delete mode 100644 src/llama_stack_client/types/shield_get_params.py delete mode 100644 
src/llama_stack_client/types/shield_retrieve_params.py delete mode 100644 src/llama_stack_client/types/shield_retrieve_response.py delete mode 100644 src/llama_stack_client/types/shield_spec.py delete mode 100644 src/llama_stack_client/types/span_with_children.py delete mode 100644 src/llama_stack_client/types/synthetic_data_generation.py delete mode 100644 src/llama_stack_client/types/telemetry_get_trace_params.py delete mode 100644 src/llama_stack_client/types/telemetry_get_trace_response.py delete mode 100644 src/llama_stack_client/types/telemetry_log_params.py delete mode 100644 src/llama_stack_client/types/tool_get_params.py delete mode 100644 src/llama_stack_client/types/tool_param_definition_param.py delete mode 100644 src/llama_stack_client/types/tool_runtime/document_param.py delete mode 100644 src/llama_stack_client/types/tool_runtime/query_config_param.py delete mode 100644 src/llama_stack_client/types/tool_runtime/query_result.py delete mode 100644 src/llama_stack_client/types/toolgroup_get_params.py delete mode 100644 src/llama_stack_client/types/toolgroup_unregister_params.py delete mode 100644 src/llama_stack_client/types/train_eval_dataset.py delete mode 100644 src/llama_stack_client/types/train_eval_dataset_param.py delete mode 100644 tests/api_resources/eval/test_job.py delete mode 100644 tests/api_resources/evaluate/__init__.py delete mode 100755 tests/api_resources/evaluate/jobs/__init__.py delete mode 100755 tests/api_resources/evaluate/jobs/test_artifacts.py delete mode 100755 tests/api_resources/evaluate/jobs/test_logs.py delete mode 100755 tests/api_resources/evaluate/jobs/test_status.py delete mode 100644 tests/api_resources/evaluate/test_jobs.py delete mode 100644 tests/api_resources/evaluate/test_question_answering.py delete mode 100644 tests/api_resources/inference/__init__.py delete mode 100644 tests/api_resources/inference/test_embeddings.py delete mode 100644 tests/api_resources/memory/__init__.py delete mode 100644 tests/api_resources/memory/test_documents.py delete mode 100644 tests/api_resources/post_training/test_jobs.py delete mode 100644 tests/api_resources/test_batch_inferences.py delete mode 100644 tests/api_resources/test_eval_tasks.py delete mode 100644 tests/api_resources/test_evaluate.py delete mode 100644 tests/api_resources/test_evaluations.py delete mode 100644 tests/api_resources/test_memory.py delete mode 100644 tests/api_resources/test_memory_banks.py delete mode 100644 tests/api_resources/test_reward_scoring.py diff --git a/src/llama_stack_client/resources/agents/turn.py b/src/llama_stack_client/resources/agents/turn.py index 2b1e07f4..23c44677 100644 --- a/src/llama_stack_client/resources/agents/turn.py +++ b/src/llama_stack_client/resources/agents/turn.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, Iterable +from typing import List, Iterable from typing_extensions import Literal, overload import httpx @@ -26,7 +26,6 @@ from ...types.agents import turn_create_params, turn_resume_params from ...types.agents.turn import Turn from ...types.tool_response_param import ToolResponseParam -from ...types.shared_params.tool_response_message import ToolResponseMessage from ...types.agents.agent_turn_response_stream_chunk import AgentTurnResponseStreamChunk __all__ = ["TurnResource", "AsyncTurnResource"] @@ -275,7 +274,7 @@ def resume( *, agent_id: str, session_id: str, - tool_responses: Union[Iterable[ToolResponseParam], Iterable[ToolResponseMessage]], + tool_responses: Iterable[ToolResponseParam], stream: 
Literal[False] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -292,8 +291,7 @@ def resume( ready. Args: - tool_responses: The tool call responses to resume the turn with. NOTE: ToolResponseMessage will - be deprecated. Use ToolResponse. + tool_responses: The tool call responses to resume the turn with. stream: Whether to stream the response. @@ -315,7 +313,7 @@ def resume( agent_id: str, session_id: str, stream: Literal[True], - tool_responses: Union[Iterable[ToolResponseParam], Iterable[ToolResponseMessage]], + tool_responses: Iterable[ToolResponseParam], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -333,8 +331,7 @@ def resume( Args: stream: Whether to stream the response. - tool_responses: The tool call responses to resume the turn with. NOTE: ToolResponseMessage will - be deprecated. Use ToolResponse. + tool_responses: The tool call responses to resume the turn with. extra_headers: Send extra headers @@ -354,7 +351,7 @@ def resume( agent_id: str, session_id: str, stream: bool, - tool_responses: Union[Iterable[ToolResponseParam], Iterable[ToolResponseMessage]], + tool_responses: Iterable[ToolResponseParam], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -372,8 +369,7 @@ def resume( Args: stream: Whether to stream the response. - tool_responses: The tool call responses to resume the turn with. NOTE: ToolResponseMessage will - be deprecated. Use ToolResponse. + tool_responses: The tool call responses to resume the turn with. extra_headers: Send extra headers @@ -392,7 +388,7 @@ def resume( *, agent_id: str, session_id: str, - tool_responses: Union[Iterable[ToolResponseParam], Iterable[ToolResponseMessage]], + tool_responses: Iterable[ToolResponseParam], stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -668,7 +664,7 @@ async def resume( *, agent_id: str, session_id: str, - tool_responses: Union[Iterable[ToolResponseParam], Iterable[ToolResponseMessage]], + tool_responses: Iterable[ToolResponseParam], stream: Literal[False] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -685,8 +681,7 @@ async def resume( ready. Args: - tool_responses: The tool call responses to resume the turn with. NOTE: ToolResponseMessage will - be deprecated. Use ToolResponse. + tool_responses: The tool call responses to resume the turn with. stream: Whether to stream the response. 
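# --- Editor's note (hypothetical usage sketch, not part of the patch) ---
# The hunks above drop ToolResponseMessage from turn.resume(); tool_responses now
# accepts only ToolResponseParam entries. A minimal sketch of the updated call
# shape, assuming a locally running Llama Stack server and placeholder IDs and
# tool names (none of these values come from this patch):
#
# from llama_stack_client import LlamaStackClient
#
# client = LlamaStackClient(base_url="http://localhost:5000")  # assumed local server
#
# turn = client.agents.turn.resume(
#     "turn-123",                # turn_id (placeholder)
#     agent_id="agent-abc",      # placeholder
#     session_id="session-xyz",  # placeholder
#     tool_responses=[
#         {
#             "call_id": "call-1",
#             "tool_name": "get_weather",   # hypothetical tool name
#             "content": "72F and sunny",
#         }
#     ],
#     stream=False,
# )
# --- end editor's note ---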
@@ -708,7 +703,7 @@ async def resume( agent_id: str, session_id: str, stream: Literal[True], - tool_responses: Union[Iterable[ToolResponseParam], Iterable[ToolResponseMessage]], + tool_responses: Iterable[ToolResponseParam], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -726,8 +721,7 @@ async def resume( Args: stream: Whether to stream the response. - tool_responses: The tool call responses to resume the turn with. NOTE: ToolResponseMessage will - be deprecated. Use ToolResponse. + tool_responses: The tool call responses to resume the turn with. extra_headers: Send extra headers @@ -747,7 +741,7 @@ async def resume( agent_id: str, session_id: str, stream: bool, - tool_responses: Union[Iterable[ToolResponseParam], Iterable[ToolResponseMessage]], + tool_responses: Iterable[ToolResponseParam], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -765,8 +759,7 @@ async def resume( Args: stream: Whether to stream the response. - tool_responses: The tool call responses to resume the turn with. NOTE: ToolResponseMessage will - be deprecated. Use ToolResponse. + tool_responses: The tool call responses to resume the turn with. extra_headers: Send extra headers @@ -785,7 +778,7 @@ async def resume( *, agent_id: str, session_id: str, - tool_responses: Union[Iterable[ToolResponseParam], Iterable[ToolResponseMessage]], + tool_responses: Iterable[ToolResponseParam], stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. diff --git a/src/llama_stack_client/resources/eval_tasks.py b/src/llama_stack_client/resources/eval_tasks.py deleted file mode 100644 index 40dbe8f2..00000000 --- a/src/llama_stack_client/resources/eval_tasks.py +++ /dev/null @@ -1,328 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Type, Union, Iterable, Optional, cast - -import httpx - -from ..types import eval_task_register_params -from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from .._utils import ( - maybe_transform, - async_maybe_transform, -) -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._wrappers import DataWrapper -from .._base_client import make_request_options -from ..types.benchmark import Benchmark -from ..types.benchmark_list_response import BenchmarkListResponse - -__all__ = ["EvalTasksResource", "AsyncEvalTasksResource"] - - -class EvalTasksResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> EvalTasksResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#accessing-raw-response-data-eg-headers - """ - return EvalTasksResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> EvalTasksResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response - """ - return EvalTasksResourceWithStreamingResponse(self) - - def retrieve( - self, - eval_task_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Optional[Benchmark]: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not eval_task_id: - raise ValueError(f"Expected a non-empty value for `eval_task_id` but received {eval_task_id!r}") - return self._get( - f"/v1/eval-tasks/{eval_task_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Benchmark, - ) - - def list( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BenchmarkListResponse: - return self._get( - "/v1/eval-tasks", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - post_parser=DataWrapper[BenchmarkListResponse]._unwrapper, - ), - cast_to=cast(Type[BenchmarkListResponse], DataWrapper[BenchmarkListResponse]), - ) - - def register( - self, - *, - dataset_id: str, - eval_task_id: str, - scoring_functions: List[str], - metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | NotGiven = NOT_GIVEN, - provider_benchmark_id: str | NotGiven = NOT_GIVEN, - provider_id: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._post( - "/v1/eval-tasks", - body=maybe_transform( - { - "dataset_id": dataset_id, - "eval_task_id": eval_task_id, - "scoring_functions": scoring_functions, - "metadata": metadata, - "provider_benchmark_id": provider_benchmark_id, - "provider_id": provider_id, - }, - eval_task_register_params.EvalTaskRegisterParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncEvalTasksResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncEvalTasksResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#accessing-raw-response-data-eg-headers - """ - return AsyncEvalTasksResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncEvalTasksResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response - """ - return AsyncEvalTasksResourceWithStreamingResponse(self) - - async def retrieve( - self, - eval_task_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Optional[Benchmark]: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not eval_task_id: - raise ValueError(f"Expected a non-empty value for `eval_task_id` but received {eval_task_id!r}") - return await self._get( - f"/v1/eval-tasks/{eval_task_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Benchmark, - ) - - async def list( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BenchmarkListResponse: - return await self._get( - "/v1/eval-tasks", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - post_parser=DataWrapper[BenchmarkListResponse]._unwrapper, - ), - cast_to=cast(Type[BenchmarkListResponse], DataWrapper[BenchmarkListResponse]), - ) - - async def register( - self, - *, - dataset_id: str, - eval_task_id: str, - scoring_functions: List[str], - metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | NotGiven = NOT_GIVEN, - provider_benchmark_id: str | NotGiven = NOT_GIVEN, - provider_id: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._post( - "/v1/eval-tasks", - body=await async_maybe_transform( - { - "dataset_id": dataset_id, - "eval_task_id": eval_task_id, - "scoring_functions": scoring_functions, - "metadata": metadata, - "provider_benchmark_id": provider_benchmark_id, - "provider_id": provider_id, - }, - eval_task_register_params.EvalTaskRegisterParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class EvalTasksResourceWithRawResponse: - def __init__(self, eval_tasks: EvalTasksResource) -> None: - self._eval_tasks = eval_tasks - - self.retrieve = to_raw_response_wrapper( - eval_tasks.retrieve, - ) - self.list = to_raw_response_wrapper( - eval_tasks.list, - ) - self.register = to_raw_response_wrapper( - eval_tasks.register, - ) - - -class AsyncEvalTasksResourceWithRawResponse: - def __init__(self, eval_tasks: AsyncEvalTasksResource) -> None: - self._eval_tasks = eval_tasks - - self.retrieve = async_to_raw_response_wrapper( - eval_tasks.retrieve, - ) - self.list = async_to_raw_response_wrapper( - eval_tasks.list, - ) - self.register = async_to_raw_response_wrapper( - eval_tasks.register, - ) - - -class EvalTasksResourceWithStreamingResponse: - def __init__(self, eval_tasks: EvalTasksResource) -> None: - self._eval_tasks = eval_tasks - - self.retrieve = to_streamed_response_wrapper( - eval_tasks.retrieve, - ) - self.list = to_streamed_response_wrapper( - eval_tasks.list, - ) - self.register = to_streamed_response_wrapper( - eval_tasks.register, - ) - - -class AsyncEvalTasksResourceWithStreamingResponse: - def __init__(self, eval_tasks: AsyncEvalTasksResource) -> None: - self._eval_tasks = eval_tasks - - self.retrieve = async_to_streamed_response_wrapper( - eval_tasks.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - eval_tasks.list, - ) - self.register = async_to_streamed_response_wrapper( - 
eval_tasks.register, - ) diff --git a/src/llama_stack_client/resources/memory.py b/src/llama_stack_client/resources/memory.py deleted file mode 100644 index b5292704..00000000 --- a/src/llama_stack_client/resources/memory.py +++ /dev/null @@ -1,320 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable - -import httpx - -from ..types import memory_query_params, memory_insert_params -from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from .._utils import ( - maybe_transform, - strip_not_given, - async_maybe_transform, -) -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.query_documents_response import QueryDocumentsResponse -from ..types.shared_params.interleaved_content import InterleavedContent - -__all__ = ["MemoryResource", "AsyncMemoryResource"] - - -class MemoryResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> MemoryResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return the - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#accessing-raw-response-data-eg-headers - """ - return MemoryResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> MemoryResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response - """ - return MemoryResourceWithStreamingResponse(self) - - def insert( - self, - *, - bank_id: str, - documents: Iterable[memory_insert_params.Document], - ttl_seconds: int | NotGiven = NOT_GIVEN, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return self._post( - "/v1/memory/insert", - body=maybe_transform( - { - "bank_id": bank_id, - "documents": documents, - "ttl_seconds": ttl_seconds, - }, - memory_insert_params.MemoryInsertParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def query( - self, - *, - bank_id: str, - query: InterleavedContent, - params: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | NotGiven = NOT_GIVEN, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> QueryDocumentsResponse: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return self._post( - "/v1/memory/query", - body=maybe_transform( - { - "bank_id": bank_id, - "query": query, - "params": params, - }, - memory_query_params.MemoryQueryParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=QueryDocumentsResponse, - ) - - -class AsyncMemoryResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncMemoryResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return the - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#accessing-raw-response-data-eg-headers - """ - return AsyncMemoryResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncMemoryResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response - """ - return AsyncMemoryResourceWithStreamingResponse(self) - - async def insert( - self, - *, - bank_id: str, - documents: Iterable[memory_insert_params.Document], - ttl_seconds: int | NotGiven = NOT_GIVEN, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return await self._post( - "/v1/memory/insert", - body=await async_maybe_transform( - { - "bank_id": bank_id, - "documents": documents, - "ttl_seconds": ttl_seconds, - }, - memory_insert_params.MemoryInsertParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def query( - self, - *, - bank_id: str, - query: InterleavedContent, - params: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | NotGiven = NOT_GIVEN, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> QueryDocumentsResponse: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return await self._post( - "/v1/memory/query", - body=await async_maybe_transform( - { - "bank_id": bank_id, - "query": query, - "params": params, - }, - memory_query_params.MemoryQueryParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=QueryDocumentsResponse, - ) - - -class MemoryResourceWithRawResponse: - def __init__(self, memory: MemoryResource) -> None: - self._memory = memory - - self.insert = to_raw_response_wrapper( - memory.insert, - ) - self.query = to_raw_response_wrapper( - memory.query, - ) - - -class AsyncMemoryResourceWithRawResponse: - def __init__(self, memory: AsyncMemoryResource) -> None: - self._memory = memory - - self.insert = async_to_raw_response_wrapper( - memory.insert, - ) - self.query = async_to_raw_response_wrapper( - memory.query, - ) - - -class MemoryResourceWithStreamingResponse: - def __init__(self, memory: MemoryResource) -> None: - self._memory = memory - - self.insert = to_streamed_response_wrapper( - memory.insert, - ) - self.query = to_streamed_response_wrapper( - memory.query, - ) - - -class AsyncMemoryResourceWithStreamingResponse: - def __init__(self, memory: AsyncMemoryResource) -> None: - self._memory = memory - - self.insert = async_to_streamed_response_wrapper( - memory.insert, - ) - self.query = async_to_streamed_response_wrapper( - memory.query, - ) diff --git a/src/llama_stack_client/resources/memory_banks.py b/src/llama_stack_client/resources/memory_banks.py deleted file mode 100644 index 1d2fb197..00000000 --- a/src/llama_stack_client/resources/memory_banks.py +++ /dev/null @@ -1,524 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Any, Type, Optional, cast - -import httpx - -from ..types import memory_bank_register_params -from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from .._utils import ( - maybe_transform, - strip_not_given, - async_maybe_transform, -) -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._wrappers import DataWrapper -from .._base_client import make_request_options -from ..types.memory_bank_list_response import MemoryBankListResponse -from ..types.memory_bank_register_response import MemoryBankRegisterResponse -from ..types.memory_bank_retrieve_response import MemoryBankRetrieveResponse - -__all__ = ["MemoryBanksResource", "AsyncMemoryBanksResource"] - - -class MemoryBanksResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> MemoryBanksResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return the - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#accessing-raw-response-data-eg-headers - """ - return MemoryBanksResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> MemoryBanksResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response - """ - return MemoryBanksResourceWithStreamingResponse(self) - - def retrieve( - self, - memory_bank_id: str, - *, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Optional[MemoryBankRetrieveResponse]: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not memory_bank_id: - raise ValueError(f"Expected a non-empty value for `memory_bank_id` but received {memory_bank_id!r}") - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return cast( - Optional[MemoryBankRetrieveResponse], - self._get( - f"/v1/memory-banks/{memory_bank_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, MemoryBankRetrieveResponse - ), # Union types cannot be passed in as arguments in the type system - ), - ) - - def list( - self, - *, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MemoryBankListResponse: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return self._get( - "/v1/memory-banks", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - post_parser=DataWrapper[MemoryBankListResponse]._unwrapper, - ), - cast_to=cast(Type[MemoryBankListResponse], DataWrapper[MemoryBankListResponse]), - ) - - def register( - self, - *, - memory_bank_id: str, - params: memory_bank_register_params.Params, - provider_id: str | NotGiven = NOT_GIVEN, - provider_memory_bank_id: str | NotGiven = NOT_GIVEN, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MemoryBankRegisterResponse: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return cast( - MemoryBankRegisterResponse, - self._post( - "/v1/memory-banks", - body=maybe_transform( - { - "memory_bank_id": memory_bank_id, - "params": params, - "provider_id": provider_id, - "provider_memory_bank_id": provider_memory_bank_id, - }, - memory_bank_register_params.MemoryBankRegisterParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, MemoryBankRegisterResponse - ), # Union types cannot be passed in as arguments in the type system - ), - ) - - def unregister( - self, - memory_bank_id: str, - *, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not memory_bank_id: - raise ValueError(f"Expected a non-empty value for `memory_bank_id` but received {memory_bank_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return self._delete( - f"/v1/memory-banks/{memory_bank_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncMemoryBanksResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncMemoryBanksResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return the - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#accessing-raw-response-data-eg-headers - """ - return AsyncMemoryBanksResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncMemoryBanksResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response - """ - return AsyncMemoryBanksResourceWithStreamingResponse(self) - - async def retrieve( - self, - memory_bank_id: str, - *, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Optional[MemoryBankRetrieveResponse]: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not memory_bank_id: - raise ValueError(f"Expected a non-empty value for `memory_bank_id` but received {memory_bank_id!r}") - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return cast( - Optional[MemoryBankRetrieveResponse], - await self._get( - f"/v1/memory-banks/{memory_bank_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, MemoryBankRetrieveResponse - ), # Union types cannot be passed in as arguments in the type system - ), - ) - - async def list( - self, - *, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MemoryBankListResponse: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return await self._get( - "/v1/memory-banks", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - post_parser=DataWrapper[MemoryBankListResponse]._unwrapper, - ), - cast_to=cast(Type[MemoryBankListResponse], DataWrapper[MemoryBankListResponse]), - ) - - async def register( - self, - *, - memory_bank_id: str, - params: memory_bank_register_params.Params, - provider_id: str | NotGiven = NOT_GIVEN, - provider_memory_bank_id: str | NotGiven = NOT_GIVEN, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MemoryBankRegisterResponse: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return cast( - MemoryBankRegisterResponse, - await self._post( - "/v1/memory-banks", - body=await async_maybe_transform( - { - "memory_bank_id": memory_bank_id, - "params": params, - "provider_id": provider_id, - "provider_memory_bank_id": provider_memory_bank_id, - }, - memory_bank_register_params.MemoryBankRegisterParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, MemoryBankRegisterResponse - ), # Union types cannot be passed in as arguments in the type system - ), - ) - - async def unregister( - self, - memory_bank_id: str, - *, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not memory_bank_id: - raise ValueError(f"Expected a non-empty value for `memory_bank_id` but received {memory_bank_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return await self._delete( - f"/v1/memory-banks/{memory_bank_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class MemoryBanksResourceWithRawResponse: - def __init__(self, memory_banks: MemoryBanksResource) -> None: - self._memory_banks = memory_banks - - self.retrieve = to_raw_response_wrapper( - memory_banks.retrieve, - ) - self.list = to_raw_response_wrapper( - memory_banks.list, - ) - self.register = to_raw_response_wrapper( - memory_banks.register, - ) - self.unregister = to_raw_response_wrapper( - memory_banks.unregister, - ) - - -class AsyncMemoryBanksResourceWithRawResponse: - def __init__(self, memory_banks: AsyncMemoryBanksResource) -> None: - self._memory_banks = memory_banks - - self.retrieve = async_to_raw_response_wrapper( - memory_banks.retrieve, - ) - self.list = async_to_raw_response_wrapper( - memory_banks.list, - ) - self.register = async_to_raw_response_wrapper( - memory_banks.register, - ) - self.unregister = async_to_raw_response_wrapper( - memory_banks.unregister, - ) - - -class MemoryBanksResourceWithStreamingResponse: - def __init__(self, memory_banks: MemoryBanksResource) -> None: - self._memory_banks = memory_banks - - self.retrieve = to_streamed_response_wrapper( - memory_banks.retrieve, - ) - self.list = to_streamed_response_wrapper( - memory_banks.list, - ) - self.register = to_streamed_response_wrapper( - memory_banks.register, - ) - self.unregister = to_streamed_response_wrapper( - memory_banks.unregister, - ) - - -class AsyncMemoryBanksResourceWithStreamingResponse: - def __init__(self, memory_banks: AsyncMemoryBanksResource) -> None: - self._memory_banks = memory_banks - - self.retrieve = async_to_streamed_response_wrapper( - memory_banks.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - memory_banks.list, - ) - self.register = async_to_streamed_response_wrapper( - memory_banks.register, - ) - self.unregister = async_to_streamed_response_wrapper( - memory_banks.unregister, - ) diff --git a/src/llama_stack_client/resources/tool_runtime.py b/src/llama_stack_client/resources/tool_runtime.py deleted file mode 100644 index 16aa4b69..00000000 --- a/src/llama_stack_client/resources/tool_runtime.py +++ /dev/null @@ -1,323 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, Union, Iterable - -import httpx - -from ..types import tool_runtime_list_tools_params, tool_runtime_invoke_tool_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import ( - maybe_transform, - strip_not_given, - async_maybe_transform, -) -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.tool_def import ToolDef -from ..types.shared_params.url import URL -from ..types.tool_invocation_result import ToolInvocationResult - -__all__ = ["ToolRuntimeResource", "AsyncToolRuntimeResource"] - - -class ToolRuntimeResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ToolRuntimeResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return the - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#accessing-raw-response-data-eg-headers - """ - return ToolRuntimeResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ToolRuntimeResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response - """ - return ToolRuntimeResourceWithStreamingResponse(self) - - def invoke_tool( - self, - *, - args: Dict[str, Union[bool, float, str, Iterable[object], object, None]], - tool_name: str, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ToolInvocationResult: - """ - Run a tool with the given arguments - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return self._post( - "/v1/tool-runtime/invoke", - body=maybe_transform( - { - "args": args, - "tool_name": tool_name, - }, - tool_runtime_invoke_tool_params.ToolRuntimeInvokeToolParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ToolInvocationResult, - ) - - def list_tools( - self, - *, - mcp_endpoint: URL | NotGiven = NOT_GIVEN, - tool_group_id: str | NotGiven = NOT_GIVEN, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ToolDef: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return self._get( - "/v1/tool-runtime/list-tools", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "mcp_endpoint": mcp_endpoint, - "tool_group_id": tool_group_id, - }, - tool_runtime_list_tools_params.ToolRuntimeListToolsParams, - ), - ), - cast_to=ToolDef, - ) - - -class AsyncToolRuntimeResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncToolRuntimeResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return the - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#accessing-raw-response-data-eg-headers - """ - return AsyncToolRuntimeResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncToolRuntimeResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response - """ - return AsyncToolRuntimeResourceWithStreamingResponse(self) - - async def invoke_tool( - self, - *, - args: Dict[str, Union[bool, float, str, Iterable[object], object, None]], - tool_name: str, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ToolInvocationResult: - """ - Run a tool with the given arguments - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return await self._post( - "/v1/tool-runtime/invoke", - body=await async_maybe_transform( - { - "args": args, - "tool_name": tool_name, - }, - tool_runtime_invoke_tool_params.ToolRuntimeInvokeToolParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ToolInvocationResult, - ) - - async def list_tools( - self, - *, - mcp_endpoint: URL | NotGiven = NOT_GIVEN, - tool_group_id: str | NotGiven = NOT_GIVEN, - x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, - x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ToolDef: - """ - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} - extra_headers = { - **strip_not_given( - { - "X-LlamaStack-Client-Version": x_llama_stack_client_version, - "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, - } - ), - **(extra_headers or {}), - } - return await self._get( - "/v1/tool-runtime/list-tools", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "mcp_endpoint": mcp_endpoint, - "tool_group_id": tool_group_id, - }, - tool_runtime_list_tools_params.ToolRuntimeListToolsParams, - ), - ), - cast_to=ToolDef, - ) - - -class ToolRuntimeResourceWithRawResponse: - def __init__(self, tool_runtime: ToolRuntimeResource) -> None: - self._tool_runtime = tool_runtime - - self.invoke_tool = to_raw_response_wrapper( - tool_runtime.invoke_tool, - ) - self.list_tools = to_raw_response_wrapper( - tool_runtime.list_tools, - ) - - -class AsyncToolRuntimeResourceWithRawResponse: - def __init__(self, tool_runtime: AsyncToolRuntimeResource) -> None: - self._tool_runtime = tool_runtime - - self.invoke_tool = async_to_raw_response_wrapper( - tool_runtime.invoke_tool, - ) - self.list_tools = async_to_raw_response_wrapper( - tool_runtime.list_tools, - ) - - -class ToolRuntimeResourceWithStreamingResponse: - def __init__(self, tool_runtime: ToolRuntimeResource) -> None: - self._tool_runtime = tool_runtime - - self.invoke_tool = to_streamed_response_wrapper( - tool_runtime.invoke_tool, - ) - self.list_tools = to_streamed_response_wrapper( - tool_runtime.list_tools, - ) - - -class AsyncToolRuntimeResourceWithStreamingResponse: - def __init__(self, tool_runtime: AsyncToolRuntimeResource) -> None: - self._tool_runtime = tool_runtime - - self.invoke_tool = async_to_streamed_response_wrapper( - tool_runtime.invoke_tool, - ) - self.list_tools = async_to_streamed_response_wrapper( - tool_runtime.list_tools, - ) diff --git a/src/llama_stack_client/types/agent_delete_params.py b/src/llama_stack_client/types/agent_delete_params.py deleted file mode 100644 index 970f9dfc..00000000 --- a/src/llama_stack_client/types/agent_delete_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["AgentDeleteParams"] - - -class AgentDeleteParams(TypedDict, total=False): - agent_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/agents/agents_step.py b/src/llama_stack_client/types/agents/agents_step.py deleted file mode 100644 index 743890d6..00000000 --- a/src/llama_stack_client/types/agents/agents_step.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from typing import Union -from typing_extensions import TypeAlias - -from ..._models import BaseModel -from ..inference_step import InferenceStep -from ..shield_call_step import ShieldCallStep -from ..tool_execution_step import ToolExecutionStep -from ..memory_retrieval_step import MemoryRetrievalStep - -__all__ = ["AgentsStep", "Step"] - -Step: TypeAlias = Union[InferenceStep, ToolExecutionStep, ShieldCallStep, MemoryRetrievalStep] - - -class AgentsStep(BaseModel): - step: Step diff --git a/src/llama_stack_client/types/agents/agents_turn_stream_chunk.py b/src/llama_stack_client/types/agents/agents_turn_stream_chunk.py deleted file mode 100644 index 79fd2d3e..00000000 --- a/src/llama_stack_client/types/agents/agents_turn_stream_chunk.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - - - -from ..._models import BaseModel -from .turn_stream_event import TurnStreamEvent - -__all__ = ["AgentsTurnStreamChunk"] - - -class AgentsTurnStreamChunk(BaseModel): - event: TurnStreamEvent diff --git a/src/llama_stack_client/types/agents/session_delete_params.py b/src/llama_stack_client/types/agents/session_delete_params.py deleted file mode 100644 index 6b5f98ae..00000000 --- a/src/llama_stack_client/types/agents/session_delete_params.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["SessionDeleteParams"] - - -class SessionDeleteParams(TypedDict, total=False): - agent_id: Required[str] - - session_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/agents/step_retrieve_params.py b/src/llama_stack_client/types/agents/step_retrieve_params.py deleted file mode 100644 index a18d12bf..00000000 --- a/src/llama_stack_client/types/agents/step_retrieve_params.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["StepRetrieveParams"] - - -class StepRetrieveParams(TypedDict, total=False): - agent_id: Required[str] - - session_id: Required[str] - - step_id: Required[str] - - turn_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/agents/turn_create_response.py b/src/llama_stack_client/types/agents/turn_create_response.py deleted file mode 100644 index 8e12c94d..00000000 --- a/src/llama_stack_client/types/agents/turn_create_response.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Union -from typing_extensions import TypeAlias - -from .turn import Turn -from ..._models import BaseModel -from .turn_response_event import TurnResponseEvent - -__all__ = ["TurnCreateResponse", "AgentTurnResponseStreamChunk"] - - -class AgentTurnResponseStreamChunk(BaseModel): - event: TurnResponseEvent - - -TurnCreateResponse: TypeAlias = Union[Turn, AgentTurnResponseStreamChunk] diff --git a/src/llama_stack_client/types/agents/turn_resume_params.py b/src/llama_stack_client/types/agents/turn_resume_params.py index dc153d5e..554e3578 100644 --- a/src/llama_stack_client/types/agents/turn_resume_params.py +++ b/src/llama_stack_client/types/agents/turn_resume_params.py @@ -6,7 +6,6 @@ from typing_extensions import Literal, Required, TypedDict from ..tool_response_param import ToolResponseParam -from ..shared_params.tool_response_message import ToolResponseMessage __all__ = ["TurnResumeParamsBase", "TurnResumeParamsNonStreaming", "TurnResumeParamsStreaming"] @@ -16,11 +15,8 @@ class TurnResumeParamsBase(TypedDict, total=False): session_id: Required[str] - tool_responses: Required[Union[Iterable[ToolResponseParam], Iterable[ToolResponseMessage]]] - """The tool call responses to resume the turn with. - - NOTE: ToolResponseMessage will be deprecated. Use ToolResponse. - """ + tool_responses: Required[Iterable[ToolResponseParam]] + """The tool call responses to resume the turn with.""" class TurnResumeParamsNonStreaming(TurnResumeParamsBase, total=False): diff --git a/src/llama_stack_client/types/agents/turn_retrieve_params.py b/src/llama_stack_client/types/agents/turn_retrieve_params.py deleted file mode 100644 index 7b31d27c..00000000 --- a/src/llama_stack_client/types/agents/turn_retrieve_params.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["TurnRetrieveParams"] - - -class TurnRetrieveParams(TypedDict, total=False): - agent_id: Required[str] - - session_id: Required[str] - - turn_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/agents/turn_stream_event.py b/src/llama_stack_client/types/agents/turn_stream_event.py deleted file mode 100644 index 2d810d27..00000000 --- a/src/llama_stack_client/types/agents/turn_stream_event.py +++ /dev/null @@ -1,98 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Dict, List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from pydantic import Field as FieldInfo - -from .turn import Turn -from ..._models import BaseModel -from ..inference_step import InferenceStep -from ..shared.tool_call import ToolCall -from ..shield_call_step import ShieldCallStep -from ..tool_execution_step import ToolExecutionStep -from ..memory_retrieval_step import MemoryRetrievalStep - -__all__ = [ - "TurnStreamEvent", - "Payload", - "PayloadAgentTurnResponseStepStartPayload", - "PayloadAgentTurnResponseStepProgressPayload", - "PayloadAgentTurnResponseStepProgressPayloadToolCallDelta", - "PayloadAgentTurnResponseStepProgressPayloadToolCallDeltaContent", - "PayloadAgentTurnResponseStepCompletePayload", - "PayloadAgentTurnResponseStepCompletePayloadStepDetails", - "PayloadAgentTurnResponseTurnStartPayload", - "PayloadAgentTurnResponseTurnCompletePayload", -] - - -class PayloadAgentTurnResponseStepStartPayload(BaseModel): - event_type: Literal["step_start"] - - step_id: str - - step_type: Literal["inference", "tool_execution", "shield_call", "memory_retrieval"] - - metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None - - -PayloadAgentTurnResponseStepProgressPayloadToolCallDeltaContent: TypeAlias = Union[str, ToolCall] - - -class PayloadAgentTurnResponseStepProgressPayloadToolCallDelta(BaseModel): - content: PayloadAgentTurnResponseStepProgressPayloadToolCallDeltaContent - - parse_status: Literal["started", "in_progress", "failure", "success"] - - -class PayloadAgentTurnResponseStepProgressPayload(BaseModel): - event_type: Literal["step_progress"] - - step_id: str - - step_type: Literal["inference", "tool_execution", "shield_call", "memory_retrieval"] - - text_delta_model_response: Optional[str] = FieldInfo(alias="model_response_text_delta", default=None) - - tool_call_delta: Optional[PayloadAgentTurnResponseStepProgressPayloadToolCallDelta] = None - - tool_response_text_delta: Optional[str] = None - - -PayloadAgentTurnResponseStepCompletePayloadStepDetails: TypeAlias = Union[ - InferenceStep, ToolExecutionStep, ShieldCallStep, MemoryRetrievalStep -] - - -class PayloadAgentTurnResponseStepCompletePayload(BaseModel): - event_type: Literal["step_complete"] - - step_details: PayloadAgentTurnResponseStepCompletePayloadStepDetails - - step_type: Literal["inference", "tool_execution", "shield_call", "memory_retrieval"] - - -class PayloadAgentTurnResponseTurnStartPayload(BaseModel): - event_type: Literal["turn_start"] - - turn_id: str - - -class PayloadAgentTurnResponseTurnCompletePayload(BaseModel): - event_type: Literal["turn_complete"] - - turn: Turn - - -Payload: TypeAlias = Union[ - PayloadAgentTurnResponseStepStartPayload, - PayloadAgentTurnResponseStepProgressPayload, - PayloadAgentTurnResponseStepCompletePayload, - PayloadAgentTurnResponseTurnStartPayload, - PayloadAgentTurnResponseTurnCompletePayload, -] - - -class TurnStreamEvent(BaseModel): - payload: Payload diff --git a/src/llama_stack_client/types/batch_chat_completion.py b/src/llama_stack_client/types/batch_chat_completion.py deleted file mode 100644 index c07b492e..00000000 --- a/src/llama_stack_client/types/batch_chat_completion.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List - -from .._models import BaseModel -from .shared.completion_message import CompletionMessage - -__all__ = ["BatchChatCompletion"] - - -class BatchChatCompletion(BaseModel): - completion_message_batch: List[CompletionMessage] diff --git a/src/llama_stack_client/types/batch_inference_completion_response.py b/src/llama_stack_client/types/batch_inference_completion_response.py deleted file mode 100644 index 4a124a33..00000000 --- a/src/llama_stack_client/types/batch_inference_completion_response.py +++ /dev/null @@ -1,82 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel - -__all__ = [ - "BatchInferenceCompletionResponse", - "CompletionMessageBatch", - "CompletionMessageBatchContent", - "CompletionMessageBatchContentImageMedia", - "CompletionMessageBatchContentImageMediaImage", - "CompletionMessageBatchContentImageMediaImageThisClassRepresentsAnImageObjectToCreate", - "CompletionMessageBatchContentUnionMember2", - "CompletionMessageBatchContentUnionMember2ImageMedia", - "CompletionMessageBatchContentUnionMember2ImageMediaImage", - "CompletionMessageBatchContentUnionMember2ImageMediaImageThisClassRepresentsAnImageObjectToCreate", - "CompletionMessageBatchToolCall", -] - - -class CompletionMessageBatchContentImageMediaImageThisClassRepresentsAnImageObjectToCreate(BaseModel): - format: Optional[str] = None - - format_description: Optional[str] = None - - -CompletionMessageBatchContentImageMediaImage: TypeAlias = Union[ - CompletionMessageBatchContentImageMediaImageThisClassRepresentsAnImageObjectToCreate, str -] - - -class CompletionMessageBatchContentImageMedia(BaseModel): - image: CompletionMessageBatchContentImageMediaImage - - -class CompletionMessageBatchContentUnionMember2ImageMediaImageThisClassRepresentsAnImageObjectToCreate(BaseModel): - format: Optional[str] = None - - format_description: Optional[str] = None - - -CompletionMessageBatchContentUnionMember2ImageMediaImage: TypeAlias = Union[ - CompletionMessageBatchContentUnionMember2ImageMediaImageThisClassRepresentsAnImageObjectToCreate, str -] - - -class CompletionMessageBatchContentUnionMember2ImageMedia(BaseModel): - image: CompletionMessageBatchContentUnionMember2ImageMediaImage - - -CompletionMessageBatchContentUnionMember2: TypeAlias = Union[str, CompletionMessageBatchContentUnionMember2ImageMedia] - -CompletionMessageBatchContent: TypeAlias = Union[ - str, CompletionMessageBatchContentImageMedia, List[CompletionMessageBatchContentUnionMember2] -] - - -class CompletionMessageBatchToolCall(BaseModel): - arguments: Dict[ - str, - Union[str, float, bool, List[Union[str, float, bool, None]], Dict[str, Union[str, float, bool, None]], None], - ] - - call_id: str - - tool_name: Union[Literal["brave_search", "wolfram_alpha", "photogen", "code_interpreter"], str] - - -class CompletionMessageBatch(BaseModel): - content: CompletionMessageBatchContent - - role: Literal["assistant"] - - stop_reason: Literal["end_of_turn", "end_of_message", "out_of_tokens"] - - tool_calls: List[CompletionMessageBatchToolCall] - - -class BatchInferenceCompletionResponse(BaseModel): - completion_message_batch: List[CompletionMessageBatch] diff --git a/src/llama_stack_client/types/chat_completion_stream_chunk.py b/src/llama_stack_client/types/chat_completion_stream_chunk.py deleted file mode 100644 index 6a1d5c81..00000000 --- 
a/src/llama_stack_client/types/chat_completion_stream_chunk.py +++ /dev/null @@ -1,41 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel -from .token_log_probs import TokenLogProbs -from .shared.tool_call import ToolCall - -__all__ = [ - "ChatCompletionStreamChunk", - "Event", - "EventDelta", - "EventDeltaToolCallDelta", - "EventDeltaToolCallDeltaContent", -] - -EventDeltaToolCallDeltaContent: TypeAlias = Union[str, ToolCall] - - -class EventDeltaToolCallDelta(BaseModel): - content: EventDeltaToolCallDeltaContent - - parse_status: Literal["started", "in_progress", "failure", "success"] - - -EventDelta: TypeAlias = Union[str, EventDeltaToolCallDelta] - - -class Event(BaseModel): - delta: EventDelta - - event_type: Literal["start", "complete", "progress"] - - logprobs: Optional[List[TokenLogProbs]] = None - - stop_reason: Optional[Literal["end_of_turn", "end_of_message", "out_of_tokens"]] = None - - -class ChatCompletionStreamChunk(BaseModel): - event: Event diff --git a/src/llama_stack_client/types/completion_stream_chunk.py b/src/llama_stack_client/types/completion_stream_chunk.py deleted file mode 100644 index ff445dbd..00000000 --- a/src/llama_stack_client/types/completion_stream_chunk.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .token_log_probs import TokenLogProbs - -__all__ = ["CompletionStreamChunk"] - - -class CompletionStreamChunk(BaseModel): - delta: str - - logprobs: Optional[List[TokenLogProbs]] = None - - stop_reason: Optional[Literal["end_of_turn", "end_of_message", "out_of_tokens"]] = None diff --git a/src/llama_stack_client/types/dataset_create_params.py b/src/llama_stack_client/types/dataset_create_params.py deleted file mode 100644 index ec811755..00000000 --- a/src/llama_stack_client/types/dataset_create_params.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo -from .train_eval_dataset_param import TrainEvalDatasetParam - -__all__ = ["DatasetCreateParams"] - - -class DatasetCreateParams(TypedDict, total=False): - dataset: Required[TrainEvalDatasetParam] - - uuid: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/dataset_delete_params.py b/src/llama_stack_client/types/dataset_delete_params.py deleted file mode 100644 index 66d06704..00000000 --- a/src/llama_stack_client/types/dataset_delete_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["DatasetDeleteParams"] - - -class DatasetDeleteParams(TypedDict, total=False): - dataset_uuid: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/dataset_get_params.py b/src/llama_stack_client/types/dataset_get_params.py deleted file mode 100644 index d0d66952..00000000 --- a/src/llama_stack_client/types/dataset_get_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["DatasetGetParams"] - - -class DatasetGetParams(TypedDict, total=False): - dataset_uuid: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/dataset_retrieve_params.py b/src/llama_stack_client/types/dataset_retrieve_params.py deleted file mode 100644 index 5ea2502b..00000000 --- a/src/llama_stack_client/types/dataset_retrieve_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["DatasetRetrieveParams"] - - -class DatasetRetrieveParams(TypedDict, total=False): - dataset_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/dataset_unregister_params.py b/src/llama_stack_client/types/dataset_unregister_params.py deleted file mode 100644 index f2d73138..00000000 --- a/src/llama_stack_client/types/dataset_unregister_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["DatasetUnregisterParams"] - - -class DatasetUnregisterParams(TypedDict, total=False): - dataset_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/eval/job_cancel_params.py b/src/llama_stack_client/types/eval/job_cancel_params.py deleted file mode 100644 index 1c4e5206..00000000 --- a/src/llama_stack_client/types/eval/job_cancel_params.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["JobCancelParams"] - - -class JobCancelParams(TypedDict, total=False): - job_id: Required[str] - - task_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/eval/job_result_params.py b/src/llama_stack_client/types/eval/job_result_params.py deleted file mode 100644 index 694e12f8..00000000 --- a/src/llama_stack_client/types/eval/job_result_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["JobResultParams"] - - -class JobResultParams(TypedDict, total=False): - job_id: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/eval/job_result_response.py b/src/llama_stack_client/types/eval/job_result_response.py deleted file mode 100644 index 78c7620b..00000000 --- a/src/llama_stack_client/types/eval/job_result_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union - -from ..._models import BaseModel - -__all__ = ["JobResultResponse", "Scores"] - - -class Scores(BaseModel): - aggregated_results: Dict[str, Union[bool, float, str, List[object], object, None]] - - score_rows: List[Dict[str, Union[bool, float, str, List[object], object, None]]] - - -class JobResultResponse(BaseModel): - generations: List[Dict[str, Union[bool, float, str, List[object], object, None]]] - - scores: Dict[str, Scores] diff --git a/src/llama_stack_client/types/eval/job_retrieve_params.py b/src/llama_stack_client/types/eval/job_retrieve_params.py deleted file mode 100644 index 2278e423..00000000 --- a/src/llama_stack_client/types/eval/job_retrieve_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["JobRetrieveParams"] - - -class JobRetrieveParams(TypedDict, total=False): - task_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/eval/job_status.py b/src/llama_stack_client/types/eval/job_status.py deleted file mode 100644 index 22bc685c..00000000 --- a/src/llama_stack_client/types/eval/job_status.py +++ /dev/null @@ -1,7 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing_extensions import Literal, TypeAlias - -__all__ = ["JobStatus"] - -JobStatus: TypeAlias = Literal["completed", "in_progress"] diff --git a/src/llama_stack_client/types/eval/job_status_params.py b/src/llama_stack_client/types/eval/job_status_params.py deleted file mode 100644 index 7dc72242..00000000 --- a/src/llama_stack_client/types/eval/job_status_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["JobStatusParams"] - - -class JobStatusParams(TypedDict, total=False): - task_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/eval_evaluate_batch_params.py b/src/llama_stack_client/types/eval_evaluate_batch_params.py deleted file mode 100644 index ded8d391..00000000 --- a/src/llama_stack_client/types/eval_evaluate_batch_params.py +++ /dev/null @@ -1,259 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable -from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict - -from .._utils import PropertyInfo -from .shared_params.system_message import SystemMessage -from .shared_params.sampling_params import SamplingParams -from .rest_api_execution_config_param import RestAPIExecutionConfigParam - -__all__ = [ - "EvalEvaluateBatchParams", - "Candidate", - "CandidateModelCandidate", - "CandidateAgentCandidate", - "CandidateAgentCandidateConfig", - "CandidateAgentCandidateConfigTool", - "CandidateAgentCandidateConfigToolSearchToolDefinition", - "CandidateAgentCandidateConfigToolWolframAlphaToolDefinition", - "CandidateAgentCandidateConfigToolPhotogenToolDefinition", - "CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition", - "CandidateAgentCandidateConfigToolFunctionCallToolDefinition", - "CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters", - "CandidateAgentCandidateConfigToolMemoryToolDefinition", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType", -] - - -class EvalEvaluateBatchParams(TypedDict, total=False): - candidate: Required[Candidate] - - dataset_id: Required[str] - - scoring_functions: Required[List[str]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] - - -class CandidateModelCandidate(TypedDict, total=False): - model: Required[str] - - sampling_params: Required[SamplingParams] - - type: Required[Literal["model"]] - - 
system_message: SystemMessage - - -class CandidateAgentCandidateConfigToolSearchToolDefinition(TypedDict, total=False): - api_key: Required[str] - - engine: Required[Literal["bing", "brave"]] - - type: Required[Literal["brave_search"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolWolframAlphaToolDefinition(TypedDict, total=False): - api_key: Required[str] - - type: Required[Literal["wolfram_alpha"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolPhotogenToolDefinition(TypedDict, total=False): - type: Required[Literal["photogen"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition(TypedDict, total=False): - enable_inline_code_execution: Required[bool] - - type: Required[Literal["code_interpreter"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters(TypedDict, total=False): - param_type: Required[str] - - default: Union[bool, float, str, Iterable[object], object, None] - - description: str - - required: bool - - -class CandidateAgentCandidateConfigToolFunctionCallToolDefinition(TypedDict, total=False): - description: Required[str] - - function_name: Required[str] - - parameters: Required[Dict[str, CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters]] - - type: Required[Literal["function_call"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0(TypedDict, total=False): - bank_id: Required[str] - - type: Required[Literal["vector"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1(TypedDict, total=False): - bank_id: Required[str] - - keys: Required[List[str]] - - type: Required[Literal["keyvalue"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2(TypedDict, total=False): - bank_id: Required[str] - - type: Required[Literal["keyword"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3(TypedDict, total=False): - bank_id: Required[str] - - entities: Required[List[str]] - - type: Required[Literal["graph"]] - - -CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig: TypeAlias = Union[ - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3, -] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0(TypedDict, total=False): - sep: Required[str] - - type: Required[Literal["default"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1(TypedDict, total=False): - model: Required[str] - - template: Required[str] - - type: Required[Literal["llm"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType(TypedDict, 
total=False): - type: Required[Literal["custom"]] - - -CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig: TypeAlias = Union[ - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0, - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1, - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType, -] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinition(TypedDict, total=False): - max_chunks: Required[int] - - max_tokens_in_context: Required[int] - - memory_bank_configs: Required[Iterable[CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig]] - - query_generator_config: Required[CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig] - - type: Required[Literal["memory"]] - - input_shields: List[str] - - output_shields: List[str] - - -CandidateAgentCandidateConfigTool: TypeAlias = Union[ - CandidateAgentCandidateConfigToolSearchToolDefinition, - CandidateAgentCandidateConfigToolWolframAlphaToolDefinition, - CandidateAgentCandidateConfigToolPhotogenToolDefinition, - CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition, - CandidateAgentCandidateConfigToolFunctionCallToolDefinition, - CandidateAgentCandidateConfigToolMemoryToolDefinition, -] - - -class CandidateAgentCandidateConfig(TypedDict, total=False): - enable_session_persistence: Required[bool] - - instructions: Required[str] - - max_infer_iters: Required[int] - - model: Required[str] - - input_shields: List[str] - - output_shields: List[str] - - sampling_params: SamplingParams - - tool_choice: Literal["auto", "required"] - - tool_prompt_format: Literal["json", "function_tag", "python_list"] - """ - `json` -- Refers to the json format for calling tools. The json format takes the - form like { "type": "function", "function" : { "name": "function_name", - "description": "function_description", "parameters": {...} } } - - `function_tag` -- This is an example of how you could define your own user - defined format for making tool calls. The function_tag format looks like this, - (parameters) - - The detailed prompts for each of these formats are added to llama cli - """ - - tools: Iterable[CandidateAgentCandidateConfigTool] - - -class CandidateAgentCandidate(TypedDict, total=False): - config: Required[CandidateAgentCandidateConfig] - - type: Required[Literal["agent"]] - - -Candidate: TypeAlias = Union[CandidateModelCandidate, CandidateAgentCandidate] diff --git a/src/llama_stack_client/types/eval_evaluate_params.py b/src/llama_stack_client/types/eval_evaluate_params.py deleted file mode 100644 index 5f786316..00000000 --- a/src/llama_stack_client/types/eval_evaluate_params.py +++ /dev/null @@ -1,259 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, List, Union, Iterable -from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict - -from .._utils import PropertyInfo -from .shared_params.system_message import SystemMessage -from .shared_params.sampling_params import SamplingParams -from .rest_api_execution_config_param import RestAPIExecutionConfigParam - -__all__ = [ - "EvalEvaluateParams", - "Candidate", - "CandidateModelCandidate", - "CandidateAgentCandidate", - "CandidateAgentCandidateConfig", - "CandidateAgentCandidateConfigTool", - "CandidateAgentCandidateConfigToolSearchToolDefinition", - "CandidateAgentCandidateConfigToolWolframAlphaToolDefinition", - "CandidateAgentCandidateConfigToolPhotogenToolDefinition", - "CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition", - "CandidateAgentCandidateConfigToolFunctionCallToolDefinition", - "CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters", - "CandidateAgentCandidateConfigToolMemoryToolDefinition", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType", -] - - -class EvalEvaluateParams(TypedDict, total=False): - candidate: Required[Candidate] - - input_rows: Required[Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]] - - scoring_functions: Required[List[str]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] - - -class CandidateModelCandidate(TypedDict, total=False): - model: Required[str] - - sampling_params: Required[SamplingParams] - - type: Required[Literal["model"]] - - system_message: SystemMessage - - -class CandidateAgentCandidateConfigToolSearchToolDefinition(TypedDict, total=False): - api_key: Required[str] - - engine: Required[Literal["bing", "brave"]] - - type: Required[Literal["brave_search"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolWolframAlphaToolDefinition(TypedDict, total=False): - api_key: Required[str] - - type: Required[Literal["wolfram_alpha"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolPhotogenToolDefinition(TypedDict, total=False): - type: Required[Literal["photogen"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition(TypedDict, total=False): - enable_inline_code_execution: Required[bool] - - type: Required[Literal["code_interpreter"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class 
CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters(TypedDict, total=False): - param_type: Required[str] - - default: Union[bool, float, str, Iterable[object], object, None] - - description: str - - required: bool - - -class CandidateAgentCandidateConfigToolFunctionCallToolDefinition(TypedDict, total=False): - description: Required[str] - - function_name: Required[str] - - parameters: Required[Dict[str, CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters]] - - type: Required[Literal["function_call"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0(TypedDict, total=False): - bank_id: Required[str] - - type: Required[Literal["vector"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1(TypedDict, total=False): - bank_id: Required[str] - - keys: Required[List[str]] - - type: Required[Literal["keyvalue"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2(TypedDict, total=False): - bank_id: Required[str] - - type: Required[Literal["keyword"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3(TypedDict, total=False): - bank_id: Required[str] - - entities: Required[List[str]] - - type: Required[Literal["graph"]] - - -CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig: TypeAlias = Union[ - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3, -] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0(TypedDict, total=False): - sep: Required[str] - - type: Required[Literal["default"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1(TypedDict, total=False): - model: Required[str] - - template: Required[str] - - type: Required[Literal["llm"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType(TypedDict, total=False): - type: Required[Literal["custom"]] - - -CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig: TypeAlias = Union[ - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0, - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1, - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType, -] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinition(TypedDict, total=False): - max_chunks: Required[int] - - max_tokens_in_context: Required[int] - - memory_bank_configs: Required[Iterable[CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig]] - - query_generator_config: Required[CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig] - - type: Required[Literal["memory"]] - - input_shields: List[str] - - output_shields: List[str] - - -CandidateAgentCandidateConfigTool: TypeAlias = Union[ - CandidateAgentCandidateConfigToolSearchToolDefinition, - CandidateAgentCandidateConfigToolWolframAlphaToolDefinition, - CandidateAgentCandidateConfigToolPhotogenToolDefinition, - 
CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition, - CandidateAgentCandidateConfigToolFunctionCallToolDefinition, - CandidateAgentCandidateConfigToolMemoryToolDefinition, -] - - -class CandidateAgentCandidateConfig(TypedDict, total=False): - enable_session_persistence: Required[bool] - - instructions: Required[str] - - max_infer_iters: Required[int] - - model: Required[str] - - input_shields: List[str] - - output_shields: List[str] - - sampling_params: SamplingParams - - tool_choice: Literal["auto", "required"] - - tool_prompt_format: Literal["json", "function_tag", "python_list"] - """ - `json` -- Refers to the json format for calling tools. The json format takes the - form like { "type": "function", "function" : { "name": "function_name", - "description": "function_description", "parameters": {...} } } - - `function_tag` -- This is an example of how you could define your own user - defined format for making tool calls. The function_tag format looks like this, - (parameters) - - The detailed prompts for each of these formats are added to llama cli - """ - - tools: Iterable[CandidateAgentCandidateConfigTool] - - -class CandidateAgentCandidate(TypedDict, total=False): - config: Required[CandidateAgentCandidateConfig] - - type: Required[Literal["agent"]] - - -Candidate: TypeAlias = Union[CandidateModelCandidate, CandidateAgentCandidate] diff --git a/src/llama_stack_client/types/eval_evaluate_response.py b/src/llama_stack_client/types/eval_evaluate_response.py deleted file mode 100644 index d5734ed2..00000000 --- a/src/llama_stack_client/types/eval_evaluate_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union - -from .._models import BaseModel - -__all__ = ["EvalEvaluateResponse", "Scores"] - - -class Scores(BaseModel): - aggregated_results: Dict[str, Union[bool, float, str, List[object], object, None]] - - score_rows: List[Dict[str, Union[bool, float, str, List[object], object, None]]] - - -class EvalEvaluateResponse(BaseModel): - generations: List[Dict[str, Union[bool, float, str, List[object], object, None]]] - - scores: Dict[str, Scores] diff --git a/src/llama_stack_client/types/eval_task.py b/src/llama_stack_client/types/eval_task.py deleted file mode 100644 index 7fbdb12c..00000000 --- a/src/llama_stack_client/types/eval_task.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["EvalTask"] - - -class EvalTask(BaseModel): - dataset_id: str - - identifier: str - - metadata: Dict[str, Union[bool, float, str, List[object], object, None]] - - provider_id: str - - provider_resource_id: str - - scoring_functions: List[str] - - type: Literal["eval_task"] diff --git a/src/llama_stack_client/types/eval_task_config_param.py b/src/llama_stack_client/types/eval_task_config_param.py deleted file mode 100644 index 9ec5e29d..00000000 --- a/src/llama_stack_client/types/eval_task_config_param.py +++ /dev/null @@ -1,32 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
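The removed EvalEvaluateParams TypedDict above (candidate, input_rows, scoring_functions) described a row-level eval request. A rough sketch of such a body, with placeholder row keys, a placeholder scoring-function id, and an assumed SamplingParams shape:

eval_request = {
    "candidate": {
        "type": "model",
        "model": "my-model",                        # placeholder model id
        "sampling_params": {"strategy": "greedy"},  # assumed SamplingParams shape
    },
    "input_rows": [
        {"input_query": "What is 2 + 2?", "expected_answer": "4"},
    ],
    "scoring_functions": ["basic::equality"],       # placeholder scoring-function id
}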
- -from __future__ import annotations - -from typing import Dict, Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .eval_candidate_param import EvalCandidateParam -from .scoring_fn_params_param import ScoringFnParamsParam - -__all__ = ["EvalTaskConfigParam", "BenchmarkEvalTaskConfig", "AppEvalTaskConfig"] - - -class BenchmarkEvalTaskConfig(TypedDict, total=False): - eval_candidate: Required[EvalCandidateParam] - - type: Required[Literal["benchmark"]] - - num_examples: int - - -class AppEvalTaskConfig(TypedDict, total=False): - eval_candidate: Required[EvalCandidateParam] - - scoring_params: Required[Dict[str, ScoringFnParamsParam]] - - type: Required[Literal["app"]] - - num_examples: int - - -EvalTaskConfigParam: TypeAlias = Union[BenchmarkEvalTaskConfig, AppEvalTaskConfig] diff --git a/src/llama_stack_client/types/eval_task_list_response.py b/src/llama_stack_client/types/eval_task_list_response.py deleted file mode 100644 index c1260673..00000000 --- a/src/llama_stack_client/types/eval_task_list_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from typing_extensions import TypeAlias - -from .benchmark import Benchmark - -__all__ = ["EvalTaskListResponse"] - -EvalTaskListResponse: TypeAlias = List[Benchmark] diff --git a/src/llama_stack_client/types/eval_task_register_params.py b/src/llama_stack_client/types/eval_task_register_params.py deleted file mode 100644 index 26934c67..00000000 --- a/src/llama_stack_client/types/eval_task_register_params.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable -from typing_extensions import Required, TypedDict - -__all__ = ["EvalTaskRegisterParams"] - - -class EvalTaskRegisterParams(TypedDict, total=False): - dataset_id: Required[str] - - eval_task_id: Required[str] - - scoring_functions: Required[List[str]] - - metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] - - provider_benchmark_id: str - - provider_id: str diff --git a/src/llama_stack_client/types/eval_task_retrieve_params.py b/src/llama_stack_client/types/eval_task_retrieve_params.py deleted file mode 100644 index 4922b0fe..00000000 --- a/src/llama_stack_client/types/eval_task_retrieve_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["EvalTaskRetrieveParams"] - - -class EvalTaskRetrieveParams(TypedDict, total=False): - name: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/evaluate/__init__.py b/src/llama_stack_client/types/evaluate/__init__.py deleted file mode 100644 index c7dfea29..00000000 --- a/src/llama_stack_client/types/evaluate/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
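EvalTaskConfigParam, also removed here, was a union of a benchmark config and an app config. Sketches of each shape; the eval_candidate and scoring_params contents are placeholders, not values from this patch:

benchmark_config = {
    "type": "benchmark",
    "eval_candidate": {"type": "model", "model": "my-model", "sampling_params": {"strategy": "greedy"}},
    "num_examples": 10,            # optional
}

app_config = {
    "type": "app",
    "eval_candidate": benchmark_config["eval_candidate"],
    "scoring_params": {},          # ScoringFnParamsParam entries keyed by scoring-function id
}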
- -from __future__ import annotations - -from .job_status import JobStatus as JobStatus -from .evaluate_response import EvaluateResponse as EvaluateResponse -from .job_cancel_params import JobCancelParams as JobCancelParams -from .job_result_params import JobResultParams as JobResultParams -from .job_status_params import JobStatusParams as JobStatusParams diff --git a/src/llama_stack_client/types/evaluate/evaluate_response.py b/src/llama_stack_client/types/evaluate/evaluate_response.py deleted file mode 100644 index 11aa6820..00000000 --- a/src/llama_stack_client/types/evaluate/evaluate_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union - -from ..._models import BaseModel - -__all__ = ["EvaluateResponse", "Scores"] - - -class Scores(BaseModel): - aggregated_results: Dict[str, Union[bool, float, str, List[object], object, None]] - - score_rows: List[Dict[str, Union[bool, float, str, List[object], object, None]]] - - -class EvaluateResponse(BaseModel): - generations: List[Dict[str, Union[bool, float, str, List[object], object, None]]] - - scores: Dict[str, Scores] diff --git a/src/llama_stack_client/types/evaluate/evaluation_job_artifacts.py b/src/llama_stack_client/types/evaluate/evaluation_job_artifacts.py deleted file mode 100644 index 6642fe37..00000000 --- a/src/llama_stack_client/types/evaluate/evaluation_job_artifacts.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - - - -from ..._models import BaseModel - -__all__ = ["EvaluationJobArtifacts"] - - -class EvaluationJobArtifacts(BaseModel): - job_uuid: str diff --git a/src/llama_stack_client/types/evaluate/evaluation_job_log_stream.py b/src/llama_stack_client/types/evaluate/evaluation_job_log_stream.py deleted file mode 100644 index ec9b7356..00000000 --- a/src/llama_stack_client/types/evaluate/evaluation_job_log_stream.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - - - -from ..._models import BaseModel - -__all__ = ["EvaluationJobLogStream"] - - -class EvaluationJobLogStream(BaseModel): - job_uuid: str diff --git a/src/llama_stack_client/types/evaluate/evaluation_job_status.py b/src/llama_stack_client/types/evaluate/evaluation_job_status.py deleted file mode 100644 index dfc9498f..00000000 --- a/src/llama_stack_client/types/evaluate/evaluation_job_status.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - - - -from ..._models import BaseModel - -__all__ = ["EvaluationJobStatus"] - - -class EvaluationJobStatus(BaseModel): - job_uuid: str diff --git a/src/llama_stack_client/types/evaluate/job_artifacts_params.py b/src/llama_stack_client/types/evaluate/job_artifacts_params.py deleted file mode 100644 index 1f7ae657..00000000 --- a/src/llama_stack_client/types/evaluate/job_artifacts_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
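The removed EvaluateResponse carried generations plus a per-scoring-function Scores object (aggregated_results and score_rows). A hypothetical payload of that shape and a minimal way to walk it; the scoring-function id and values are placeholders:

evaluate_response = {
    "generations": [{"generated_answer": "4"}],
    "scores": {
        "basic::equality": {                        # placeholder scoring-function id
            "aggregated_results": {"accuracy": 1.0},
            "score_rows": [{"score": 1.0}],
        },
    },
}

for fn_id, result in evaluate_response["scores"].items():
    print(fn_id, "->", result["aggregated_results"])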
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["JobArtifactsParams"] - - -class JobArtifactsParams(TypedDict, total=False): - job_uuid: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluate/job_artifacts_response.py b/src/llama_stack_client/types/evaluate/job_artifacts_response.py deleted file mode 100644 index e39404cf..00000000 --- a/src/llama_stack_client/types/evaluate/job_artifacts_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - - -from ..._models import BaseModel - -__all__ = ["JobArtifactsResponse"] - - -class JobArtifactsResponse(BaseModel): - job_uuid: str diff --git a/src/llama_stack_client/types/evaluate/job_cancel_params.py b/src/llama_stack_client/types/evaluate/job_cancel_params.py deleted file mode 100644 index 337f6803..00000000 --- a/src/llama_stack_client/types/evaluate/job_cancel_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["JobCancelParams"] - - -class JobCancelParams(TypedDict, total=False): - job_id: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluate/job_logs_params.py b/src/llama_stack_client/types/evaluate/job_logs_params.py deleted file mode 100644 index 42f7e071..00000000 --- a/src/llama_stack_client/types/evaluate/job_logs_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["JobLogsParams"] - - -class JobLogsParams(TypedDict, total=False): - job_uuid: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluate/job_logs_response.py b/src/llama_stack_client/types/evaluate/job_logs_response.py deleted file mode 100644 index ec036719..00000000 --- a/src/llama_stack_client/types/evaluate/job_logs_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - - -from ..._models import BaseModel - -__all__ = ["JobLogsResponse"] - - -class JobLogsResponse(BaseModel): - job_uuid: str diff --git a/src/llama_stack_client/types/evaluate/job_result_params.py b/src/llama_stack_client/types/evaluate/job_result_params.py deleted file mode 100644 index 694e12f8..00000000 --- a/src/llama_stack_client/types/evaluate/job_result_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["JobResultParams"] - - -class JobResultParams(TypedDict, total=False): - job_id: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluate/job_status.py b/src/llama_stack_client/types/evaluate/job_status.py deleted file mode 100644 index 22bc685c..00000000 --- a/src/llama_stack_client/types/evaluate/job_status.py +++ /dev/null @@ -1,7 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal, TypeAlias - -__all__ = ["JobStatus"] - -JobStatus: TypeAlias = Literal["completed", "in_progress"] diff --git a/src/llama_stack_client/types/evaluate/job_status_params.py b/src/llama_stack_client/types/evaluate/job_status_params.py deleted file mode 100644 index 01070e2a..00000000 --- a/src/llama_stack_client/types/evaluate/job_status_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["JobStatusParams"] - - -class JobStatusParams(TypedDict, total=False): - job_id: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluate/job_status_response.py b/src/llama_stack_client/types/evaluate/job_status_response.py deleted file mode 100644 index 9405d17e..00000000 --- a/src/llama_stack_client/types/evaluate/job_status_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - - -from ..._models import BaseModel - -__all__ = ["JobStatusResponse"] - - -class JobStatusResponse(BaseModel): - job_uuid: str diff --git a/src/llama_stack_client/types/evaluate/jobs/__init__.py b/src/llama_stack_client/types/evaluate/jobs/__init__.py deleted file mode 100644 index c7ba741f..00000000 --- a/src/llama_stack_client/types/evaluate/jobs/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .log_list_params import LogListParams as LogListParams -from .status_list_params import StatusListParams as StatusListParams -from .artifact_list_params import ArtifactListParams as ArtifactListParams diff --git a/src/llama_stack_client/types/evaluate/jobs/artifact_list_params.py b/src/llama_stack_client/types/evaluate/jobs/artifact_list_params.py deleted file mode 100644 index 579033e9..00000000 --- a/src/llama_stack_client/types/evaluate/jobs/artifact_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
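The removed evaluate JobStatus alias only distinguishes "completed" from "in_progress", which is enough for a simple polling loop. A sketch under the assumption that some callable returns that status for a job id; the callable itself is not part of this patch:

import time
from typing import Callable, Literal

JobStatus = Literal["completed", "in_progress"]

def wait_for_job(get_status: Callable[[str], JobStatus], job_id: str, poll_seconds: float = 2.0) -> None:
    # get_status stands in for whatever lookup returns the job's JobStatus value.
    while get_status(job_id) == "in_progress":
        time.sleep(poll_seconds)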
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["ArtifactListParams"] - - -class ArtifactListParams(TypedDict, total=False): - job_uuid: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluate/jobs/log_list_params.py b/src/llama_stack_client/types/evaluate/jobs/log_list_params.py deleted file mode 100644 index 4b2df452..00000000 --- a/src/llama_stack_client/types/evaluate/jobs/log_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["LogListParams"] - - -class LogListParams(TypedDict, total=False): - job_uuid: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluate/jobs/status_list_params.py b/src/llama_stack_client/types/evaluate/jobs/status_list_params.py deleted file mode 100644 index a7d51652..00000000 --- a/src/llama_stack_client/types/evaluate/jobs/status_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["StatusListParams"] - - -class StatusListParams(TypedDict, total=False): - job_uuid: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluate/question_answering_create_params.py b/src/llama_stack_client/types/evaluate/question_answering_create_params.py deleted file mode 100644 index de8caa05..00000000 --- a/src/llama_stack_client/types/evaluate/question_answering_create_params.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["QuestionAnsweringCreateParams"] - - -class QuestionAnsweringCreateParams(TypedDict, total=False): - metrics: Required[List[Literal["em", "f1"]]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluate_evaluate_batch_params.py b/src/llama_stack_client/types/evaluate_evaluate_batch_params.py deleted file mode 100644 index f729a91a..00000000 --- a/src/llama_stack_client/types/evaluate_evaluate_batch_params.py +++ /dev/null @@ -1,259 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, List, Union, Iterable -from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict - -from .._utils import PropertyInfo -from .shared_params.system_message import SystemMessage -from .shared_params.sampling_params import SamplingParams -from .rest_api_execution_config_param import RestAPIExecutionConfigParam - -__all__ = [ - "EvaluateEvaluateBatchParams", - "Candidate", - "CandidateModelCandidate", - "CandidateAgentCandidate", - "CandidateAgentCandidateConfig", - "CandidateAgentCandidateConfigTool", - "CandidateAgentCandidateConfigToolSearchToolDefinition", - "CandidateAgentCandidateConfigToolWolframAlphaToolDefinition", - "CandidateAgentCandidateConfigToolPhotogenToolDefinition", - "CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition", - "CandidateAgentCandidateConfigToolFunctionCallToolDefinition", - "CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters", - "CandidateAgentCandidateConfigToolMemoryToolDefinition", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType", -] - - -class EvaluateEvaluateBatchParams(TypedDict, total=False): - candidate: Required[Candidate] - - dataset_id: Required[str] - - scoring_functions: Required[List[str]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] - - -class CandidateModelCandidate(TypedDict, total=False): - model: Required[str] - - sampling_params: Required[SamplingParams] - - type: Required[Literal["model"]] - - system_message: SystemMessage - - -class CandidateAgentCandidateConfigToolSearchToolDefinition(TypedDict, total=False): - api_key: Required[str] - - engine: Required[Literal["bing", "brave"]] - - type: Required[Literal["brave_search"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolWolframAlphaToolDefinition(TypedDict, total=False): - api_key: Required[str] - - type: Required[Literal["wolfram_alpha"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolPhotogenToolDefinition(TypedDict, total=False): - type: Required[Literal["photogen"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition(TypedDict, total=False): - enable_inline_code_execution: Required[bool] - - type: Required[Literal["code_interpreter"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters(TypedDict, total=False): - param_type: Required[str] - - 
default: Union[bool, float, str, Iterable[object], object, None] - - description: str - - required: bool - - -class CandidateAgentCandidateConfigToolFunctionCallToolDefinition(TypedDict, total=False): - description: Required[str] - - function_name: Required[str] - - parameters: Required[Dict[str, CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters]] - - type: Required[Literal["function_call"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0(TypedDict, total=False): - bank_id: Required[str] - - type: Required[Literal["vector"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1(TypedDict, total=False): - bank_id: Required[str] - - keys: Required[List[str]] - - type: Required[Literal["keyvalue"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2(TypedDict, total=False): - bank_id: Required[str] - - type: Required[Literal["keyword"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3(TypedDict, total=False): - bank_id: Required[str] - - entities: Required[List[str]] - - type: Required[Literal["graph"]] - - -CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig: TypeAlias = Union[ - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3, -] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0(TypedDict, total=False): - sep: Required[str] - - type: Required[Literal["default"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1(TypedDict, total=False): - model: Required[str] - - template: Required[str] - - type: Required[Literal["llm"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType(TypedDict, total=False): - type: Required[Literal["custom"]] - - -CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig: TypeAlias = Union[ - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0, - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1, - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType, -] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinition(TypedDict, total=False): - max_chunks: Required[int] - - max_tokens_in_context: Required[int] - - memory_bank_configs: Required[Iterable[CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig]] - - query_generator_config: Required[CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig] - - type: Required[Literal["memory"]] - - input_shields: List[str] - - output_shields: List[str] - - -CandidateAgentCandidateConfigTool: TypeAlias = Union[ - CandidateAgentCandidateConfigToolSearchToolDefinition, - CandidateAgentCandidateConfigToolWolframAlphaToolDefinition, - CandidateAgentCandidateConfigToolPhotogenToolDefinition, - CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition, - CandidateAgentCandidateConfigToolFunctionCallToolDefinition, - 
CandidateAgentCandidateConfigToolMemoryToolDefinition, -] - - -class CandidateAgentCandidateConfig(TypedDict, total=False): - enable_session_persistence: Required[bool] - - instructions: Required[str] - - max_infer_iters: Required[int] - - model: Required[str] - - input_shields: List[str] - - output_shields: List[str] - - sampling_params: SamplingParams - - tool_choice: Literal["auto", "required"] - - tool_prompt_format: Literal["json", "function_tag", "python_list"] - """ - `json` -- Refers to the json format for calling tools. The json format takes the - form like { "type": "function", "function" : { "name": "function_name", - "description": "function_description", "parameters": {...} } } - - `function_tag` -- This is an example of how you could define your own user - defined format for making tool calls. The function_tag format looks like this, - (parameters) - - The detailed prompts for each of these formats are added to llama cli - """ - - tools: Iterable[CandidateAgentCandidateConfigTool] - - -class CandidateAgentCandidate(TypedDict, total=False): - config: Required[CandidateAgentCandidateConfig] - - type: Required[Literal["agent"]] - - -Candidate: TypeAlias = Union[CandidateModelCandidate, CandidateAgentCandidate] diff --git a/src/llama_stack_client/types/evaluate_evaluate_params.py b/src/llama_stack_client/types/evaluate_evaluate_params.py deleted file mode 100644 index e2daff58..00000000 --- a/src/llama_stack_client/types/evaluate_evaluate_params.py +++ /dev/null @@ -1,259 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable -from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict - -from .._utils import PropertyInfo -from .shared_params.system_message import SystemMessage -from .shared_params.sampling_params import SamplingParams -from .rest_api_execution_config_param import RestAPIExecutionConfigParam - -__all__ = [ - "EvaluateEvaluateParams", - "Candidate", - "CandidateModelCandidate", - "CandidateAgentCandidate", - "CandidateAgentCandidateConfig", - "CandidateAgentCandidateConfigTool", - "CandidateAgentCandidateConfigToolSearchToolDefinition", - "CandidateAgentCandidateConfigToolWolframAlphaToolDefinition", - "CandidateAgentCandidateConfigToolPhotogenToolDefinition", - "CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition", - "CandidateAgentCandidateConfigToolFunctionCallToolDefinition", - "CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters", - "CandidateAgentCandidateConfigToolMemoryToolDefinition", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1", - "CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType", -] - - -class EvaluateEvaluateParams(TypedDict, total=False): - candidate: Required[Candidate] - - input_rows: Required[Iterable[Dict[str, 
Union[bool, float, str, Iterable[object], object, None]]]] - - scoring_functions: Required[List[str]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] - - -class CandidateModelCandidate(TypedDict, total=False): - model: Required[str] - - sampling_params: Required[SamplingParams] - - type: Required[Literal["model"]] - - system_message: SystemMessage - - -class CandidateAgentCandidateConfigToolSearchToolDefinition(TypedDict, total=False): - api_key: Required[str] - - engine: Required[Literal["bing", "brave"]] - - type: Required[Literal["brave_search"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolWolframAlphaToolDefinition(TypedDict, total=False): - api_key: Required[str] - - type: Required[Literal["wolfram_alpha"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolPhotogenToolDefinition(TypedDict, total=False): - type: Required[Literal["photogen"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition(TypedDict, total=False): - enable_inline_code_execution: Required[bool] - - type: Required[Literal["code_interpreter"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters(TypedDict, total=False): - param_type: Required[str] - - default: Union[bool, float, str, Iterable[object], object, None] - - description: str - - required: bool - - -class CandidateAgentCandidateConfigToolFunctionCallToolDefinition(TypedDict, total=False): - description: Required[str] - - function_name: Required[str] - - parameters: Required[Dict[str, CandidateAgentCandidateConfigToolFunctionCallToolDefinitionParameters]] - - type: Required[Literal["function_call"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfigParam - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0(TypedDict, total=False): - bank_id: Required[str] - - type: Required[Literal["vector"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1(TypedDict, total=False): - bank_id: Required[str] - - keys: Required[List[str]] - - type: Required[Literal["keyvalue"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2(TypedDict, total=False): - bank_id: Required[str] - - type: Required[Literal["keyword"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3(TypedDict, total=False): - bank_id: Required[str] - - entities: Required[List[str]] - - type: Required[Literal["graph"]] - - -CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig: TypeAlias = Union[ - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember0, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember1, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember2, - CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfigUnionMember3, -] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0(TypedDict, total=False): - sep: 
Required[str] - - type: Required[Literal["default"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1(TypedDict, total=False): - model: Required[str] - - template: Required[str] - - type: Required[Literal["llm"]] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType(TypedDict, total=False): - type: Required[Literal["custom"]] - - -CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig: TypeAlias = Union[ - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember0, - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigUnionMember1, - CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfigType, -] - - -class CandidateAgentCandidateConfigToolMemoryToolDefinition(TypedDict, total=False): - max_chunks: Required[int] - - max_tokens_in_context: Required[int] - - memory_bank_configs: Required[Iterable[CandidateAgentCandidateConfigToolMemoryToolDefinitionMemoryBankConfig]] - - query_generator_config: Required[CandidateAgentCandidateConfigToolMemoryToolDefinitionQueryGeneratorConfig] - - type: Required[Literal["memory"]] - - input_shields: List[str] - - output_shields: List[str] - - -CandidateAgentCandidateConfigTool: TypeAlias = Union[ - CandidateAgentCandidateConfigToolSearchToolDefinition, - CandidateAgentCandidateConfigToolWolframAlphaToolDefinition, - CandidateAgentCandidateConfigToolPhotogenToolDefinition, - CandidateAgentCandidateConfigToolCodeInterpreterToolDefinition, - CandidateAgentCandidateConfigToolFunctionCallToolDefinition, - CandidateAgentCandidateConfigToolMemoryToolDefinition, -] - - -class CandidateAgentCandidateConfig(TypedDict, total=False): - enable_session_persistence: Required[bool] - - instructions: Required[str] - - max_infer_iters: Required[int] - - model: Required[str] - - input_shields: List[str] - - output_shields: List[str] - - sampling_params: SamplingParams - - tool_choice: Literal["auto", "required"] - - tool_prompt_format: Literal["json", "function_tag", "python_list"] - """ - `json` -- Refers to the json format for calling tools. The json format takes the - form like { "type": "function", "function" : { "name": "function_name", - "description": "function_description", "parameters": {...} } } - - `function_tag` -- This is an example of how you could define your own user - defined format for making tool calls. The function_tag format looks like this, - (parameters) - - The detailed prompts for each of these formats are added to llama cli - """ - - tools: Iterable[CandidateAgentCandidateConfigTool] - - -class CandidateAgentCandidate(TypedDict, total=False): - config: Required[CandidateAgentCandidateConfig] - - type: Required[Literal["agent"]] - - -Candidate: TypeAlias = Union[CandidateModelCandidate, CandidateAgentCandidate] diff --git a/src/llama_stack_client/types/evaluate_question_answering_params.py b/src/llama_stack_client/types/evaluate_question_answering_params.py deleted file mode 100644 index 6a757058..00000000 --- a/src/llama_stack_client/types/evaluate_question_answering_params.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
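The agent arm of the Candidate union wraps a config whose tools list accepted the search, wolfram_alpha, photogen, code_interpreter, function_call and memory tool TypedDicts removed above. A sketch of an agent candidate with a single brave_search tool; all ids and keys below are placeholders:

agent_candidate = {
    "type": "agent",
    "config": {
        "model": "my-model",                    # placeholder model id
        "instructions": "You are a helpful assistant.",
        "enable_session_persistence": False,
        "max_infer_iters": 10,
        "tool_choice": "auto",
        "tool_prompt_format": "json",
        "tools": [
            {
                "type": "brave_search",
                "engine": "brave",
                "api_key": "<SEARCH_API_KEY>",  # placeholder
            },
        ],
    },
}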
- -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["EvaluateQuestionAnsweringParams"] - - -class EvaluateQuestionAnsweringParams(TypedDict, total=False): - metrics: Required[List[Literal["em", "f1"]]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluate_summarization_params.py b/src/llama_stack_client/types/evaluate_summarization_params.py deleted file mode 100644 index a7746963..00000000 --- a/src/llama_stack_client/types/evaluate_summarization_params.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["EvaluateSummarizationParams"] - - -class EvaluateSummarizationParams(TypedDict, total=False): - metrics: Required[List[Literal["rouge", "bleu"]]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluation_job.py b/src/llama_stack_client/types/evaluation_job.py deleted file mode 100644 index 5c0b51f7..00000000 --- a/src/llama_stack_client/types/evaluation_job.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - - -from .._models import BaseModel - -__all__ = ["EvaluationJob"] - - -class EvaluationJob(BaseModel): - job_uuid: str diff --git a/src/llama_stack_client/types/evaluation_summarization_params.py b/src/llama_stack_client/types/evaluation_summarization_params.py deleted file mode 100644 index 80dd8f57..00000000 --- a/src/llama_stack_client/types/evaluation_summarization_params.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["EvaluationSummarizationParams"] - - -class EvaluationSummarizationParams(TypedDict, total=False): - metrics: Required[List[Literal["rouge", "bleu"]]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/evaluation_text_generation_params.py b/src/llama_stack_client/types/evaluation_text_generation_params.py deleted file mode 100644 index 1cd3a568..00000000 --- a/src/llama_stack_client/types/evaluation_text_generation_params.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
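The removed question-answering and summarization param modules only carried a metrics list restricted to the literal values shown above. Minimal sketches of the corresponding bodies:

qa_params = {"metrics": ["em", "f1"]}
summarization_params = {"metrics": ["rouge", "bleu"]}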
- -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["EvaluationTextGenerationParams"] - - -class EvaluationTextGenerationParams(TypedDict, total=False): - metrics: Required[List[Literal["perplexity", "rouge", "bleu"]]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/inference/__init__.py b/src/llama_stack_client/types/inference/__init__.py deleted file mode 100644 index 43ef90c4..00000000 --- a/src/llama_stack_client/types/inference/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .embeddings import Embeddings as Embeddings -from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams diff --git a/src/llama_stack_client/types/inference/embedding_create_params.py b/src/llama_stack_client/types/inference/embedding_create_params.py deleted file mode 100644 index c744099e..00000000 --- a/src/llama_stack_client/types/inference/embedding_create_params.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union -from typing_extensions import Required, Annotated, TypeAlias, TypedDict - -from ..._utils import PropertyInfo -from ..shared_params.image_media import ImageMedia -from ..shared_params.content_array import ContentArray - -__all__ = ["EmbeddingCreateParams", "Content"] - - -class EmbeddingCreateParams(TypedDict, total=False): - contents: Required[List[Content]] - - model: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] - - -Content: TypeAlias = Union[str, ImageMedia, ContentArray] diff --git a/src/llama_stack_client/types/inference/embeddings.py b/src/llama_stack_client/types/inference/embeddings.py deleted file mode 100644 index 73ea5574..00000000 --- a/src/llama_stack_client/types/inference/embeddings.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from ..._models import BaseModel - -__all__ = ["Embeddings"] - - -class Embeddings(BaseModel): - embeddings: List[List[float]] diff --git a/src/llama_stack_client/types/inference_chat_completion_response.py b/src/llama_stack_client/types/inference_chat_completion_response.py deleted file mode 100644 index 1e59952d..00000000 --- a/src/llama_stack_client/types/inference_chat_completion_response.py +++ /dev/null @@ -1,49 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
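The removed inference embedding types paired an EmbeddingCreateParams request (model plus a list of contents) with an Embeddings response holding one vector per input. A hypothetical round trip; the model id and vector values are placeholders:

embedding_request = {
    "model": "all-MiniLM-L6-v2",                     # placeholder embedding model id
    "contents": ["The capital of France is Paris."],
}

embedding_response = {"embeddings": [[0.12, -0.03, 0.57]]}   # truncated example vector

assert len(embedding_response["embeddings"]) == len(embedding_request["contents"])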
- -from typing import List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel -from .token_log_probs import TokenLogProbs -from .shared.content_delta import ContentDelta -from .shared.completion_message import CompletionMessage - -__all__ = [ - "InferenceChatCompletionResponse", - "ChatCompletionResponse", - "ChatCompletionResponseStreamChunk", - "ChatCompletionResponseStreamChunkEvent", -] - - -class ChatCompletionResponse(BaseModel): - completion_message: CompletionMessage - """The complete response message""" - - logprobs: Optional[List[TokenLogProbs]] = None - """Optional log probabilities for generated tokens""" - - -class ChatCompletionResponseStreamChunkEvent(BaseModel): - delta: ContentDelta - """Content generated since last event. - - This can be one or more tokens, or a tool call. - """ - - event_type: Literal["start", "complete", "progress"] - """Type of the event""" - - logprobs: Optional[List[TokenLogProbs]] = None - """Optional log probabilities for generated tokens""" - - stop_reason: Optional[Literal["end_of_turn", "end_of_message", "out_of_tokens"]] = None - """Optional reason why generation stopped, if complete""" - - -class ChatCompletionResponseStreamChunk(BaseModel): - event: ChatCompletionResponseStreamChunkEvent - """The event containing the new content""" - - -InferenceChatCompletionResponse: TypeAlias = Union[ChatCompletionResponse, ChatCompletionResponseStreamChunk] diff --git a/src/llama_stack_client/types/inference_completion_response.py b/src/llama_stack_client/types/inference_completion_response.py deleted file mode 100644 index eccf0e7c..00000000 --- a/src/llama_stack_client/types/inference_completion_response.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel -from .token_log_probs import TokenLogProbs -from .completion_response import CompletionResponse - -__all__ = ["InferenceCompletionResponse", "CompletionResponseStreamChunk"] - - -class CompletionResponseStreamChunk(BaseModel): - delta: str - """New content generated since last chunk. This can be one or more tokens.""" - - logprobs: Optional[List[TokenLogProbs]] = None - """Optional log probabilities for generated tokens""" - - stop_reason: Optional[Literal["end_of_turn", "end_of_message", "out_of_tokens"]] = None - """Optional reason why generation stopped, if complete""" - - -InferenceCompletionResponse: TypeAlias = Union[CompletionResponse, CompletionResponseStreamChunk] diff --git a/src/llama_stack_client/types/list_eval_tasks_response.py b/src/llama_stack_client/types/list_eval_tasks_response.py deleted file mode 100644 index 4037c0dc..00000000 --- a/src/llama_stack_client/types/list_eval_tasks_response.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - - -from .._models import BaseModel -from .eval_task_list_response import EvalTaskListResponse - -__all__ = ["ListEvalTasksResponse"] - - -class ListEvalTasksResponse(BaseModel): - data: EvalTaskListResponse diff --git a/src/llama_stack_client/types/list_memory_banks_response.py b/src/llama_stack_client/types/list_memory_banks_response.py deleted file mode 100644 index e11b9026..00000000 --- a/src/llama_stack_client/types/list_memory_banks_response.py +++ /dev/null @@ -1,78 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from typing import List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel - -__all__ = [ - "ListMemoryBanksResponse", - "Data", - "DataVectorMemoryBank", - "DataKeyValueMemoryBank", - "DataKeywordMemoryBank", - "DataGraphMemoryBank", -] - - -class DataVectorMemoryBank(BaseModel): - chunk_size_in_tokens: int - - embedding_model: str - - identifier: str - - memory_bank_type: Literal["vector"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - embedding_dimension: Optional[int] = None - - overlap_size_in_tokens: Optional[int] = None - - -class DataKeyValueMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["keyvalue"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -class DataKeywordMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["keyword"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -class DataGraphMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["graph"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -Data: TypeAlias = Union[DataVectorMemoryBank, DataKeyValueMemoryBank, DataKeywordMemoryBank, DataGraphMemoryBank] - - -class ListMemoryBanksResponse(BaseModel): - data: List[Data] diff --git a/src/llama_stack_client/types/memory/__init__.py b/src/llama_stack_client/types/memory/__init__.py deleted file mode 100644 index c37360d3..00000000 --- a/src/llama_stack_client/types/memory/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .document_delete_params import DocumentDeleteParams as DocumentDeleteParams -from .document_retrieve_params import DocumentRetrieveParams as DocumentRetrieveParams -from .document_retrieve_response import DocumentRetrieveResponse as DocumentRetrieveResponse diff --git a/src/llama_stack_client/types/memory/document_delete_params.py b/src/llama_stack_client/types/memory/document_delete_params.py deleted file mode 100644 index 9ec4bf19..00000000 --- a/src/llama_stack_client/types/memory/document_delete_params.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["DocumentDeleteParams"] - - -class DocumentDeleteParams(TypedDict, total=False): - bank_id: Required[str] - - document_ids: Required[List[str]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/memory/document_retrieve_params.py b/src/llama_stack_client/types/memory/document_retrieve_params.py deleted file mode 100644 index 3f30f9bc..00000000 --- a/src/llama_stack_client/types/memory/document_retrieve_params.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
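InferenceChatCompletionResponse, removed above, was a union of a full response (completion_message) and a stream chunk (event). A sketch of telling the two apart by attribute, which is an assumption about usage rather than anything this patch prescribes:

def is_stream_chunk(response) -> bool:
    # Stream chunks carry an `event`; full responses carry a `completion_message`.
    return hasattr(response, "event")

def stream_finished(chunk) -> bool:
    # Per the removed event model, event_type is "start", "progress" or "complete".
    return chunk.event.event_type == "complete" or chunk.event.stop_reason is not None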
- -from __future__ import annotations - -from typing import List -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["DocumentRetrieveParams"] - - -class DocumentRetrieveParams(TypedDict, total=False): - bank_id: Required[str] - - document_ids: Required[List[str]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/memory/document_retrieve_response.py b/src/llama_stack_client/types/memory/document_retrieve_response.py deleted file mode 100644 index 0249b4df..00000000 --- a/src/llama_stack_client/types/memory/document_retrieve_response.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union, Optional -from typing_extensions import TypeAlias - -from ..._models import BaseModel -from ..shared.image_media import ImageMedia -from ..shared.content_array import ContentArray - -__all__ = ["DocumentRetrieveResponse", "Content"] - -Content: TypeAlias = Union[str, ImageMedia, ContentArray] - - -class DocumentRetrieveResponse(BaseModel): - content: Content - - document_id: str - - metadata: Dict[str, Union[bool, float, str, List[object], object, None]] - - mime_type: Optional[str] = None diff --git a/src/llama_stack_client/types/memory_bank_get_params.py b/src/llama_stack_client/types/memory_bank_get_params.py deleted file mode 100644 index de5b43e6..00000000 --- a/src/llama_stack_client/types/memory_bank_get_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["MemoryBankGetParams"] - - -class MemoryBankGetParams(TypedDict, total=False): - bank_type: Required[Literal["vector", "keyvalue", "keyword", "graph"]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/memory_bank_list_response.py b/src/llama_stack_client/types/memory_bank_list_response.py deleted file mode 100644 index 27d646f5..00000000 --- a/src/llama_stack_client/types/memory_bank_list_response.py +++ /dev/null @@ -1,81 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel - -__all__ = [ - "MemoryBankListResponse", - "MemoryBankListResponseItem", - "MemoryBankListResponseItemVectorMemoryBank", - "MemoryBankListResponseItemKeyValueMemoryBank", - "MemoryBankListResponseItemKeywordMemoryBank", - "MemoryBankListResponseItemGraphMemoryBank", -] - - -class MemoryBankListResponseItemVectorMemoryBank(BaseModel): - chunk_size_in_tokens: int - - embedding_model: str - - identifier: str - - memory_bank_type: Literal["vector"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - embedding_dimension: Optional[int] = None - - overlap_size_in_tokens: Optional[int] = None - - -class MemoryBankListResponseItemKeyValueMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["keyvalue"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -class MemoryBankListResponseItemKeywordMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["keyword"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -class MemoryBankListResponseItemGraphMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["graph"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -MemoryBankListResponseItem: TypeAlias = Union[ - MemoryBankListResponseItemVectorMemoryBank, - MemoryBankListResponseItemKeyValueMemoryBank, - MemoryBankListResponseItemKeywordMemoryBank, - MemoryBankListResponseItemGraphMemoryBank, -] - -MemoryBankListResponse: TypeAlias = List[MemoryBankListResponseItem] diff --git a/src/llama_stack_client/types/memory_bank_register_params.py b/src/llama_stack_client/types/memory_bank_register_params.py deleted file mode 100644 index a74b5215..00000000 --- a/src/llama_stack_client/types/memory_bank_register_params.py +++ /dev/null @@ -1,61 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
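The memory-bank list and retrieve unions removed here are discriminated by memory_bank_type ("vector", "keyvalue", "keyword", "graph"), with only the vector variant carrying embedding fields. A sketch of dispatching on that discriminator; the attribute names follow the removed models and nothing beyond that is implied:

def describe_bank(bank) -> str:
    # Only the vector variant has embedding_model / chunk_size_in_tokens.
    if bank.memory_bank_type == "vector":
        return f"{bank.identifier}: vector ({bank.embedding_model}, {bank.chunk_size_in_tokens} tokens/chunk)"
    return f"{bank.identifier}: {bank.memory_bank_type}"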
- -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict - -from .._utils import PropertyInfo - -__all__ = [ - "MemoryBankRegisterParams", - "Params", - "ParamsVectorMemoryBankParams", - "ParamsKeyValueMemoryBankParams", - "ParamsKeywordMemoryBankParams", - "ParamsGraphMemoryBankParams", -] - - -class MemoryBankRegisterParams(TypedDict, total=False): - memory_bank_id: Required[str] - - params: Required[Params] - - provider_id: str - - provider_memory_bank_id: str - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] - - -class ParamsVectorMemoryBankParams(TypedDict, total=False): - chunk_size_in_tokens: Required[int] - - embedding_model: Required[str] - - memory_bank_type: Required[Literal["vector"]] - - overlap_size_in_tokens: int - - -class ParamsKeyValueMemoryBankParams(TypedDict, total=False): - memory_bank_type: Required[Literal["keyvalue"]] - - -class ParamsKeywordMemoryBankParams(TypedDict, total=False): - memory_bank_type: Required[Literal["keyword"]] - - -class ParamsGraphMemoryBankParams(TypedDict, total=False): - memory_bank_type: Required[Literal["graph"]] - - -Params: TypeAlias = Union[ - ParamsVectorMemoryBankParams, - ParamsKeyValueMemoryBankParams, - ParamsKeywordMemoryBankParams, - ParamsGraphMemoryBankParams, -] diff --git a/src/llama_stack_client/types/memory_bank_register_response.py b/src/llama_stack_client/types/memory_bank_register_response.py deleted file mode 100644 index 391a5f28..00000000 --- a/src/llama_stack_client/types/memory_bank_register_response.py +++ /dev/null @@ -1,73 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel - -__all__ = [ - "MemoryBankRegisterResponse", - "VectorMemoryBank", - "KeyValueMemoryBank", - "KeywordMemoryBank", - "GraphMemoryBank", -] - - -class VectorMemoryBank(BaseModel): - chunk_size_in_tokens: int - - embedding_model: str - - identifier: str - - memory_bank_type: Literal["vector"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - embedding_dimension: Optional[int] = None - - overlap_size_in_tokens: Optional[int] = None - - -class KeyValueMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["keyvalue"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -class KeywordMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["keyword"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -class GraphMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["graph"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -MemoryBankRegisterResponse: TypeAlias = Union[VectorMemoryBank, KeyValueMemoryBank, KeywordMemoryBank, GraphMemoryBank] diff --git a/src/llama_stack_client/types/memory_bank_retrieve_params.py b/src/llama_stack_client/types/memory_bank_retrieve_params.py deleted file mode 100644 index 7d32dc77..00000000 --- a/src/llama_stack_client/types/memory_bank_retrieve_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["MemoryBankRetrieveParams"] - - -class MemoryBankRetrieveParams(TypedDict, total=False): - memory_bank_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/memory_bank_retrieve_response.py b/src/llama_stack_client/types/memory_bank_retrieve_response.py deleted file mode 100644 index fb592e21..00000000 --- a/src/llama_stack_client/types/memory_bank_retrieve_response.py +++ /dev/null @@ -1,75 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel - -__all__ = [ - "MemoryBankRetrieveResponse", - "VectorMemoryBank", - "KeyValueMemoryBank", - "KeywordMemoryBank", - "GraphMemoryBank", -] - - -class VectorMemoryBank(BaseModel): - chunk_size_in_tokens: int - - embedding_model: str - - identifier: str - - memory_bank_type: Literal["vector"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - embedding_dimension: Optional[int] = None - - overlap_size_in_tokens: Optional[int] = None - - -class KeyValueMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["keyvalue"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -class KeywordMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["keyword"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -class GraphMemoryBank(BaseModel): - identifier: str - - memory_bank_type: Literal["graph"] - - provider_id: str - - provider_resource_id: str - - type: Literal["memory_bank"] - - -MemoryBankRetrieveResponse: TypeAlias = Union[ - VectorMemoryBank, KeyValueMemoryBank, KeywordMemoryBank, GraphMemoryBank, None -] diff --git a/src/llama_stack_client/types/memory_bank_spec.py b/src/llama_stack_client/types/memory_bank_spec.py deleted file mode 100644 index 0191c08c..00000000 --- a/src/llama_stack_client/types/memory_bank_spec.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["MemoryBankSpec", "ProviderConfig"] - - -class ProviderConfig(BaseModel): - config: Dict[str, Union[bool, float, str, List[object], object, None]] - - provider_type: str - - -class MemoryBankSpec(BaseModel): - bank_type: Literal["vector", "keyvalue", "keyword", "graph"] - - provider_config: ProviderConfig diff --git a/src/llama_stack_client/types/memory_bank_unregister_params.py b/src/llama_stack_client/types/memory_bank_unregister_params.py deleted file mode 100644 index 9a69d75e..00000000 --- a/src/llama_stack_client/types/memory_bank_unregister_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["MemoryBankUnregisterParams"] - - -class MemoryBankUnregisterParams(TypedDict, total=False): - memory_bank_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/memory_create_params.py b/src/llama_stack_client/types/memory_create_params.py deleted file mode 100644 index 4cd1f041..00000000 --- a/src/llama_stack_client/types/memory_create_params.py +++ /dev/null @@ -1,57 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict - -from .._utils import PropertyInfo - -__all__ = [ - "MemoryCreateParams", - "Config", - "ConfigMemoryBankConfigVectorType", - "ConfigMemoryBankConfigKeyValueType", - "ConfigMemoryBankConfigKeywordType", - "ConfigMemoryBankConfigGraphType", -] - - -class MemoryCreateParams(TypedDict, total=False): - config: Required[Config] - - name: Required[str] - - url: str - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] - - -class ConfigMemoryBankConfigVectorType(TypedDict, total=False): - chunk_size_in_tokens: Required[int] - - embedding_model: Required[str] - - type: Required[Literal["vector"]] - - overlap_size_in_tokens: int - - -class ConfigMemoryBankConfigKeyValueType(TypedDict, total=False): - type: Required[Literal["keyvalue"]] - - -class ConfigMemoryBankConfigKeywordType(TypedDict, total=False): - type: Required[Literal["keyword"]] - - -class ConfigMemoryBankConfigGraphType(TypedDict, total=False): - type: Required[Literal["graph"]] - - -Config: TypeAlias = Union[ - ConfigMemoryBankConfigVectorType, - ConfigMemoryBankConfigKeyValueType, - ConfigMemoryBankConfigKeywordType, - ConfigMemoryBankConfigGraphType, -] diff --git a/src/llama_stack_client/types/memory_create_response.py b/src/llama_stack_client/types/memory_create_response.py deleted file mode 100644 index 6894da31..00000000 --- a/src/llama_stack_client/types/memory_create_response.py +++ /dev/null @@ -1,59 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Union, Optional -from typing_extensions import Literal, Annotated, TypeAlias - -from .._utils import PropertyInfo -from .._models import BaseModel - -__all__ = [ - "MemoryCreateResponse", - "Config", - "ConfigMemoryBankConfigVectorType", - "ConfigMemoryBankConfigKeyValueType", - "ConfigMemoryBankConfigKeywordType", - "ConfigMemoryBankConfigGraphType", -] - - -class ConfigMemoryBankConfigVectorType(BaseModel): - chunk_size_in_tokens: int - - embedding_model: str - - type: Literal["vector"] - - overlap_size_in_tokens: Optional[int] = None - - -class ConfigMemoryBankConfigKeyValueType(BaseModel): - type: Literal["keyvalue"] - - -class ConfigMemoryBankConfigKeywordType(BaseModel): - type: Literal["keyword"] - - -class ConfigMemoryBankConfigGraphType(BaseModel): - type: Literal["graph"] - - -Config: TypeAlias = Annotated[ - Union[ - ConfigMemoryBankConfigVectorType, - ConfigMemoryBankConfigKeyValueType, - ConfigMemoryBankConfigKeywordType, - ConfigMemoryBankConfigGraphType, - ], - PropertyInfo(discriminator="type"), -] - - -class MemoryCreateResponse(BaseModel): - bank_id: str - - config: Config - - name: str - - url: Optional[str] = None diff --git a/src/llama_stack_client/types/memory_drop_params.py b/src/llama_stack_client/types/memory_drop_params.py deleted file mode 100644 index b15ec345..00000000 --- a/src/llama_stack_client/types/memory_drop_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["MemoryDropParams"] - - -class MemoryDropParams(TypedDict, total=False): - bank_id: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/memory_drop_response.py b/src/llama_stack_client/types/memory_drop_response.py deleted file mode 100644 index f032e04b..00000000 --- a/src/llama_stack_client/types/memory_drop_response.py +++ /dev/null @@ -1,7 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import TypeAlias - -__all__ = ["MemoryDropResponse"] - -MemoryDropResponse: TypeAlias = str diff --git a/src/llama_stack_client/types/memory_insert_params.py b/src/llama_stack_client/types/memory_insert_params.py deleted file mode 100644 index 8f242975..00000000 --- a/src/llama_stack_client/types/memory_insert_params.py +++ /dev/null @@ -1,59 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict - -from .._utils import PropertyInfo -from .shared_params.url import URL -from .shared_params.interleaved_content_item import InterleavedContentItem - -__all__ = [ - "MemoryInsertParams", - "Document", - "DocumentContent", - "DocumentContentImageContentItem", - "DocumentContentTextContentItem", -] - - -class MemoryInsertParams(TypedDict, total=False): - bank_id: Required[str] - - documents: Required[Iterable[Document]] - - ttl_seconds: int - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] - - -class DocumentContentImageContentItem(TypedDict, total=False): - type: Required[Literal["image"]] - - data: str - - url: URL - - -class DocumentContentTextContentItem(TypedDict, total=False): - text: Required[str] - - type: Required[Literal["text"]] - - -DocumentContent: TypeAlias = Union[ - str, DocumentContentImageContentItem, DocumentContentTextContentItem, Iterable[InterleavedContentItem], URL -] - - -class Document(TypedDict, total=False): - content: Required[DocumentContent] - - document_id: Required[str] - - metadata: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] - - mime_type: str diff --git a/src/llama_stack_client/types/memory_list_response.py b/src/llama_stack_client/types/memory_list_response.py deleted file mode 100644 index c0b2eb83..00000000 --- a/src/llama_stack_client/types/memory_list_response.py +++ /dev/null @@ -1,59 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union, Optional -from typing_extensions import Literal, Annotated, TypeAlias - -from .._utils import PropertyInfo -from .._models import BaseModel - -__all__ = [ - "MemoryListResponse", - "Config", - "ConfigMemoryBankConfigVectorType", - "ConfigMemoryBankConfigKeyValueType", - "ConfigMemoryBankConfigKeywordType", - "ConfigMemoryBankConfigGraphType", -] - - -class ConfigMemoryBankConfigVectorType(BaseModel): - chunk_size_in_tokens: int - - embedding_model: str - - type: Literal["vector"] - - overlap_size_in_tokens: Optional[int] = None - - -class ConfigMemoryBankConfigKeyValueType(BaseModel): - type: Literal["keyvalue"] - - -class ConfigMemoryBankConfigKeywordType(BaseModel): - type: Literal["keyword"] - - -class ConfigMemoryBankConfigGraphType(BaseModel): - type: Literal["graph"] - - -Config: TypeAlias = Annotated[ - Union[ - ConfigMemoryBankConfigVectorType, - ConfigMemoryBankConfigKeyValueType, - ConfigMemoryBankConfigKeywordType, - ConfigMemoryBankConfigGraphType, - ], - PropertyInfo(discriminator="type"), -] - - -class MemoryListResponse(BaseModel): - bank_id: str - - config: Config - - name: str - - url: Optional[str] = None diff --git a/src/llama_stack_client/types/memory_query_params.py b/src/llama_stack_client/types/memory_query_params.py deleted file mode 100644 index 55d45f47..00000000 --- a/src/llama_stack_client/types/memory_query_params.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo -from .shared_params.interleaved_content import InterleavedContent - -__all__ = ["MemoryQueryParams"] - - -class MemoryQueryParams(TypedDict, total=False): - bank_id: Required[str] - - query: Required[InterleavedContent] - - params: Dict[str, Union[bool, float, str, Iterable[object], object, None]] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/memory_retrieve_params.py b/src/llama_stack_client/types/memory_retrieve_params.py deleted file mode 100644 index 62f6496b..00000000 --- a/src/llama_stack_client/types/memory_retrieve_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["MemoryRetrieveParams"] - - -class MemoryRetrieveParams(TypedDict, total=False): - bank_id: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/memory_retrieve_response.py b/src/llama_stack_client/types/memory_retrieve_response.py deleted file mode 100644 index 661ac505..00000000 --- a/src/llama_stack_client/types/memory_retrieve_response.py +++ /dev/null @@ -1,59 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union, Optional -from typing_extensions import Literal, Annotated, TypeAlias - -from .._utils import PropertyInfo -from .._models import BaseModel - -__all__ = [ - "MemoryRetrieveResponse", - "Config", - "ConfigMemoryBankConfigVectorType", - "ConfigMemoryBankConfigKeyValueType", - "ConfigMemoryBankConfigKeywordType", - "ConfigMemoryBankConfigGraphType", -] - - -class ConfigMemoryBankConfigVectorType(BaseModel): - chunk_size_in_tokens: int - - embedding_model: str - - type: Literal["vector"] - - overlap_size_in_tokens: Optional[int] = None - - -class ConfigMemoryBankConfigKeyValueType(BaseModel): - type: Literal["keyvalue"] - - -class ConfigMemoryBankConfigKeywordType(BaseModel): - type: Literal["keyword"] - - -class ConfigMemoryBankConfigGraphType(BaseModel): - type: Literal["graph"] - - -Config: TypeAlias = Annotated[ - Union[ - ConfigMemoryBankConfigVectorType, - ConfigMemoryBankConfigKeyValueType, - ConfigMemoryBankConfigKeywordType, - ConfigMemoryBankConfigGraphType, - ], - PropertyInfo(discriminator="type"), -] - - -class MemoryRetrieveResponse(BaseModel): - bank_id: str - - config: Config - - name: str - - url: Optional[str] = None diff --git a/src/llama_stack_client/types/memory_update_params.py b/src/llama_stack_client/types/memory_update_params.py deleted file mode 100644 index a702bae4..00000000 --- a/src/llama_stack_client/types/memory_update_params.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Required, Annotated, TypeAlias, TypedDict - -from .._utils import PropertyInfo -from .shared_params.image_media import ImageMedia -from .shared_params.content_array import ContentArray - -__all__ = ["MemoryUpdateParams", "Document", "DocumentContent"] - - -class MemoryUpdateParams(TypedDict, total=False): - bank_id: Required[str] - - documents: Required[Iterable[Document]] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] - - -DocumentContent: TypeAlias = Union[str, ImageMedia, ContentArray] - - -class Document(TypedDict, total=False): - content: Required[DocumentContent] - - document_id: Required[str] - - metadata: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] - - mime_type: str diff --git a/src/llama_stack_client/types/model_def_with_provider.py b/src/llama_stack_client/types/model_def_with_provider.py deleted file mode 100644 index 45771b92..00000000 --- a/src/llama_stack_client/types/model_def_with_provider.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union - -from .._models import BaseModel - -__all__ = ["ModelDefWithProvider"] - - -class ModelDefWithProvider(BaseModel): - identifier: str - - llama_model: str - - metadata: Dict[str, Union[bool, float, str, List[object], object, None]] - - provider_id: str diff --git a/src/llama_stack_client/types/model_def_with_provider_param.py b/src/llama_stack_client/types/model_def_with_provider_param.py deleted file mode 100644 index 718a9c48..00000000 --- a/src/llama_stack_client/types/model_def_with_provider_param.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Required, TypedDict - -__all__ = ["ModelDefWithProviderParam"] - - -class ModelDefWithProviderParam(TypedDict, total=False): - identifier: Required[str] - - llama_model: Required[str] - - metadata: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] - - provider_id: Required[str] diff --git a/src/llama_stack_client/types/model_delete_params.py b/src/llama_stack_client/types/model_delete_params.py deleted file mode 100644 index 1a19147c..00000000 --- a/src/llama_stack_client/types/model_delete_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["ModelDeleteParams"] - - -class ModelDeleteParams(TypedDict, total=False): - model_id: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/model_get_params.py b/src/llama_stack_client/types/model_get_params.py deleted file mode 100644 index f3dc87d4..00000000 --- a/src/llama_stack_client/types/model_get_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["ModelGetParams"] - - -class ModelGetParams(TypedDict, total=False): - core_model_id: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/model_retrieve_params.py b/src/llama_stack_client/types/model_retrieve_params.py deleted file mode 100644 index 562709f3..00000000 --- a/src/llama_stack_client/types/model_retrieve_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["ModelRetrieveParams"] - - -class ModelRetrieveParams(TypedDict, total=False): - identifier: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/model_retrieve_response.py b/src/llama_stack_client/types/model_retrieve_response.py deleted file mode 100644 index 48c54d01..00000000 --- a/src/llama_stack_client/types/model_retrieve_response.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union - -from .._models import BaseModel - -__all__ = ["ModelRetrieveResponse", "ProviderConfig"] - - -class ProviderConfig(BaseModel): - config: Dict[str, Union[bool, float, str, List[object], object, None]] - - provider_type: str - - -class ModelRetrieveResponse(BaseModel): - llama_model: object - """ - The model family and SKU of the model along with other parameters corresponding - to the model. - """ - - provider_config: ProviderConfig diff --git a/src/llama_stack_client/types/model_serving_spec.py b/src/llama_stack_client/types/model_serving_spec.py deleted file mode 100644 index 2e5eb7b9..00000000 --- a/src/llama_stack_client/types/model_serving_spec.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union - -from .._models import BaseModel - -__all__ = ["ModelServingSpec", "ProviderConfig"] - - -class ProviderConfig(BaseModel): - config: Dict[str, Union[bool, float, str, List[object], object, None]] - - provider_type: str - - -class ModelServingSpec(BaseModel): - llama_model: object - """ - The model family and SKU of the model along with other parameters corresponding - to the model. - """ - - provider_config: ProviderConfig diff --git a/src/llama_stack_client/types/model_unregister_params.py b/src/llama_stack_client/types/model_unregister_params.py deleted file mode 100644 index 3ab6f5a7..00000000 --- a/src/llama_stack_client/types/model_unregister_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["ModelUnregisterParams"] - - -class ModelUnregisterParams(TypedDict, total=False): - model_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/model_update_params.py b/src/llama_stack_client/types/model_update_params.py deleted file mode 100644 index 2aa9e8a2..00000000 --- a/src/llama_stack_client/types/model_update_params.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["ModelUpdateParams"] - - -class ModelUpdateParams(TypedDict, total=False): - model_id: Required[str] - - metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] - - provider_id: str - - provider_model_id: str - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/post_training/job_logs_params.py b/src/llama_stack_client/types/post_training/job_logs_params.py deleted file mode 100644 index 42f7e071..00000000 --- a/src/llama_stack_client/types/post_training/job_logs_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["JobLogsParams"] - - -class JobLogsParams(TypedDict, total=False): - job_uuid: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/post_training/job_logs_response.py b/src/llama_stack_client/types/post_training/job_logs_response.py deleted file mode 100644 index cfd6650d..00000000 --- a/src/llama_stack_client/types/post_training/job_logs_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from ..._models import BaseModel - -__all__ = ["JobLogsResponse"] - - -class JobLogsResponse(BaseModel): - job_uuid: str - - log_lines: List[str] diff --git a/src/llama_stack_client/types/post_training/post_training_job_artifacts.py b/src/llama_stack_client/types/post_training/post_training_job_artifacts.py deleted file mode 100644 index 57c2155c..00000000 --- a/src/llama_stack_client/types/post_training/post_training_job_artifacts.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List - -from ..._models import BaseModel - -__all__ = ["PostTrainingJobArtifacts"] - - -class PostTrainingJobArtifacts(BaseModel): - checkpoints: List[object] - - job_uuid: str diff --git a/src/llama_stack_client/types/post_training/post_training_job_log_stream.py b/src/llama_stack_client/types/post_training/post_training_job_log_stream.py deleted file mode 100644 index 232fca29..00000000 --- a/src/llama_stack_client/types/post_training/post_training_job_log_stream.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from ..._models import BaseModel - -__all__ = ["PostTrainingJobLogStream"] - - -class PostTrainingJobLogStream(BaseModel): - job_uuid: str - - log_lines: List[str] diff --git a/src/llama_stack_client/types/post_training/post_training_job_status.py b/src/llama_stack_client/types/post_training/post_training_job_status.py deleted file mode 100644 index 81de2e0b..00000000 --- a/src/llama_stack_client/types/post_training/post_training_job_status.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union, Optional -from datetime import datetime -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["PostTrainingJobStatus"] - - -class PostTrainingJobStatus(BaseModel): - checkpoints: List[object] - - job_uuid: str - - status: Literal["running", "completed", "failed", "scheduled"] - - completed_at: Optional[datetime] = None - - resources_allocated: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None - - scheduled_at: Optional[datetime] = None - - started_at: Optional[datetime] = None diff --git a/src/llama_stack_client/types/query_documents.py b/src/llama_stack_client/types/query_documents.py deleted file mode 100644 index fd40a49b..00000000 --- a/src/llama_stack_client/types/query_documents.py +++ /dev/null @@ -1,26 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union -from typing_extensions import TypeAlias - -from .._models import BaseModel -from .shared.image_media import ImageMedia -from .shared.content_array import ContentArray - -__all__ = ["QueryDocuments", "Chunk", "ChunkContent"] - -ChunkContent: TypeAlias = Union[str, ImageMedia, ContentArray] - - -class Chunk(BaseModel): - content: ChunkContent - - document_id: str - - token_count: int - - -class QueryDocuments(BaseModel): - chunks: List[Chunk] - - scores: List[float] diff --git a/src/llama_stack_client/types/query_documents_response.py b/src/llama_stack_client/types/query_documents_response.py deleted file mode 100644 index 66259528..00000000 --- a/src/llama_stack_client/types/query_documents_response.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List - -from .._models import BaseModel -from .shared.interleaved_content import InterleavedContent - -__all__ = ["QueryDocumentsResponse", "Chunk"] - - -class Chunk(BaseModel): - content: InterleavedContent - - document_id: str - - token_count: int - - -class QueryDocumentsResponse(BaseModel): - chunks: List[Chunk] - - scores: List[float] diff --git a/src/llama_stack_client/types/response_format_param.py b/src/llama_stack_client/types/response_format_param.py deleted file mode 100644 index bc8fdefc..00000000 --- a/src/llama_stack_client/types/response_format_param.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = ["ResponseFormatParam", "JsonSchemaResponseFormat", "GrammarResponseFormat"] - - -class JsonSchemaResponseFormat(TypedDict, total=False): - json_schema: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] - - type: Required[Literal["json_schema"]] - - -class GrammarResponseFormat(TypedDict, total=False): - bnf: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] - - type: Required[Literal["grammar"]] - - -ResponseFormatParam: TypeAlias = Union[JsonSchemaResponseFormat, GrammarResponseFormat] diff --git a/src/llama_stack_client/types/rest_api_execution_config_param.py b/src/llama_stack_client/types/rest_api_execution_config_param.py deleted file mode 100644 index 27bc260d..00000000 --- a/src/llama_stack_client/types/rest_api_execution_config_param.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["RestAPIExecutionConfigParam"] - - -class RestAPIExecutionConfigParam(TypedDict, total=False): - method: Required[Literal["GET", "POST", "PUT", "DELETE"]] - - url: Required[str] - - body: Dict[str, Union[bool, float, str, Iterable[object], object, None]] - - headers: Dict[str, Union[bool, float, str, Iterable[object], object, None]] - - params: Dict[str, Union[bool, float, str, Iterable[object], object, None]] diff --git a/src/llama_stack_client/types/reward_scoring.py b/src/llama_stack_client/types/reward_scoring.py deleted file mode 100644 index 068b2ece..00000000 --- a/src/llama_stack_client/types/reward_scoring.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from .._models import BaseModel -from .scored_dialog_generations import ScoredDialogGenerations - -__all__ = ["RewardScoring"] - - -class RewardScoring(BaseModel): - scored_generations: List[ScoredDialogGenerations] diff --git a/src/llama_stack_client/types/reward_scoring_response.py b/src/llama_stack_client/types/reward_scoring_response.py deleted file mode 100644 index 8972cae3..00000000 --- a/src/llama_stack_client/types/reward_scoring_response.py +++ /dev/null @@ -1,40 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Union -from typing_extensions import TypeAlias - -from .._models import BaseModel -from .shared.user_message import UserMessage -from .shared.system_message import SystemMessage -from .shared.completion_message import CompletionMessage -from .shared.tool_response_message import ToolResponseMessage - -__all__ = [ - "RewardScoringResponse", - "ScoredGeneration", - "ScoredGenerationDialog", - "ScoredGenerationScoredGeneration", - "ScoredGenerationScoredGenerationMessage", -] - -ScoredGenerationDialog: TypeAlias = Union[UserMessage, SystemMessage, ToolResponseMessage, CompletionMessage] - -ScoredGenerationScoredGenerationMessage: TypeAlias = Union[ - UserMessage, SystemMessage, ToolResponseMessage, CompletionMessage -] - - -class ScoredGenerationScoredGeneration(BaseModel): - message: ScoredGenerationScoredGenerationMessage - - score: float - - -class ScoredGeneration(BaseModel): - dialog: List[ScoredGenerationDialog] - - scored_generations: List[ScoredGenerationScoredGeneration] - - -class RewardScoringResponse(BaseModel): - scored_generations: List[ScoredGeneration] diff --git a/src/llama_stack_client/types/reward_scoring_score_params.py b/src/llama_stack_client/types/reward_scoring_score_params.py deleted file mode 100644 index bb7bfb65..00000000 --- a/src/llama_stack_client/types/reward_scoring_score_params.py +++ /dev/null @@ -1,38 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union, Iterable -from typing_extensions import Required, Annotated, TypeAlias, TypedDict - -from .._utils import PropertyInfo -from .shared_params.user_message import UserMessage -from .shared_params.system_message import SystemMessage -from .shared_params.completion_message import CompletionMessage -from .shared_params.tool_response_message import ToolResponseMessage - -__all__ = [ - "RewardScoringScoreParams", - "DialogGeneration", - "DialogGenerationDialog", - "DialogGenerationSampledGeneration", -] - - -class RewardScoringScoreParams(TypedDict, total=False): - dialog_generations: Required[Iterable[DialogGeneration]] - - model: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] - - -DialogGenerationDialog: TypeAlias = Union[UserMessage, SystemMessage, ToolResponseMessage, CompletionMessage] - -DialogGenerationSampledGeneration: TypeAlias = Union[UserMessage, SystemMessage, ToolResponseMessage, CompletionMessage] - - -class DialogGeneration(TypedDict, total=False): - dialog: Required[Iterable[DialogGenerationDialog]] - - sampled_generations: Required[Iterable[DialogGenerationSampledGeneration]] diff --git a/src/llama_stack_client/types/run_sheid_response.py b/src/llama_stack_client/types/run_sheid_response.py deleted file mode 100644 index 478b023a..00000000 --- a/src/llama_stack_client/types/run_sheid_response.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Dict, List, Union, Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["RunSheidResponse", "Violation"] - - -class Violation(BaseModel): - metadata: Dict[str, Union[bool, float, str, List[object], object, None]] - - violation_level: Literal["info", "warn", "error"] - - user_message: Optional[str] = None - - -class RunSheidResponse(BaseModel): - violation: Optional[Violation] = None diff --git a/src/llama_stack_client/types/score_batch_response.py b/src/llama_stack_client/types/score_batch_response.py deleted file mode 100644 index 876bf062..00000000 --- a/src/llama_stack_client/types/score_batch_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union, Optional - -from .._models import BaseModel - -__all__ = ["ScoreBatchResponse", "Results"] - - -class Results(BaseModel): - aggregated_results: Dict[str, Union[bool, float, str, List[object], object, None]] - - score_rows: List[Dict[str, Union[bool, float, str, List[object], object, None]]] - - -class ScoreBatchResponse(BaseModel): - results: Dict[str, Results] - - dataset_id: Optional[str] = None diff --git a/src/llama_stack_client/types/score_response.py b/src/llama_stack_client/types/score_response.py deleted file mode 100644 index 967cc623..00000000 --- a/src/llama_stack_client/types/score_response.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union - -from .._models import BaseModel - -__all__ = ["ScoreResponse", "Results"] - - -class Results(BaseModel): - aggregated_results: Dict[str, Union[bool, float, str, List[object], object, None]] - - score_rows: List[Dict[str, Union[bool, float, str, List[object], object, None]]] - - -class ScoreResponse(BaseModel): - results: Dict[str, Results] diff --git a/src/llama_stack_client/types/scored_dialog_generations.py b/src/llama_stack_client/types/scored_dialog_generations.py deleted file mode 100644 index 34d726cf..00000000 --- a/src/llama_stack_client/types/scored_dialog_generations.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union -from typing_extensions import TypeAlias - -from .._models import BaseModel -from .shared.user_message import UserMessage -from .shared.system_message import SystemMessage -from .shared.completion_message import CompletionMessage -from .shared.tool_response_message import ToolResponseMessage - -__all__ = ["ScoredDialogGenerations", "Dialog", "ScoredGeneration", "ScoredGenerationMessage"] - -Dialog: TypeAlias = Union[UserMessage, SystemMessage, ToolResponseMessage, CompletionMessage] - -ScoredGenerationMessage: TypeAlias = Union[UserMessage, SystemMessage, ToolResponseMessage, CompletionMessage] - - -class ScoredGeneration(BaseModel): - message: ScoredGenerationMessage - - score: float - - -class ScoredDialogGenerations(BaseModel): - dialog: List[Dialog] - - scored_generations: List[ScoredGeneration] diff --git a/src/llama_stack_client/types/scoring_fn_def_with_provider.py b/src/llama_stack_client/types/scoring_fn_def_with_provider.py deleted file mode 100644 index dd8b1c1e..00000000 --- a/src/llama_stack_client/types/scoring_fn_def_with_provider.py +++ /dev/null @@ -1,84 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Dict, List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel - -__all__ = [ - "ScoringFnDefWithProvider", - "Parameter", - "ParameterType", - "ParameterTypeType", - "ReturnType", - "ReturnTypeType", - "Context", -] - - -class ParameterTypeType(BaseModel): - type: Literal["string"] - - -ParameterType: TypeAlias = Union[ - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, -] - - -class Parameter(BaseModel): - name: str - - type: ParameterType - - description: Optional[str] = None - - -class ReturnTypeType(BaseModel): - type: Literal["string"] - - -ReturnType: TypeAlias = Union[ - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, -] - - -class Context(BaseModel): - judge_model: str - - judge_score_regex: Optional[List[str]] = None - - prompt_template: Optional[str] = None - - -class ScoringFnDefWithProvider(BaseModel): - identifier: str - - metadata: Dict[str, Union[bool, float, str, List[object], object, None]] - - parameters: List[Parameter] - - provider_id: str - - return_type: ReturnType - - context: Optional[Context] = None - - description: Optional[str] = None diff --git a/src/llama_stack_client/types/scoring_fn_def_with_provider_param.py b/src/llama_stack_client/types/scoring_fn_def_with_provider_param.py deleted file mode 100644 index e6f8d4fa..00000000 --- a/src/llama_stack_client/types/scoring_fn_def_with_provider_param.py +++ /dev/null @@ -1,84 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, List, Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = [ - "ScoringFnDefWithProviderParam", - "Parameter", - "ParameterType", - "ParameterTypeType", - "ReturnType", - "ReturnTypeType", - "Context", -] - - -class ParameterTypeType(TypedDict, total=False): - type: Required[Literal["string"]] - - -ParameterType: TypeAlias = Union[ - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, -] - - -class Parameter(TypedDict, total=False): - name: Required[str] - - type: Required[ParameterType] - - description: str - - -class ReturnTypeType(TypedDict, total=False): - type: Required[Literal["string"]] - - -ReturnType: TypeAlias = Union[ - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, -] - - -class Context(TypedDict, total=False): - judge_model: Required[str] - - judge_score_regex: List[str] - - prompt_template: str - - -class ScoringFnDefWithProviderParam(TypedDict, total=False): - identifier: Required[str] - - metadata: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] - - parameters: Required[Iterable[Parameter]] - - provider_id: Required[str] - - return_type: Required[ReturnType] - - context: Context - - description: str diff --git a/src/llama_stack_client/types/scoring_function_def_with_provider.py b/src/llama_stack_client/types/scoring_function_def_with_provider.py deleted file mode 100644 index 42d1d2be..00000000 --- a/src/llama_stack_client/types/scoring_function_def_with_provider.py +++ /dev/null @@ -1,98 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Dict, List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel - -__all__ = [ - "ScoringFunctionDefWithProvider", - "Parameter", - "ParameterType", - "ParameterTypeType", - "ParameterTypeUnionMember7", - "ReturnType", - "ReturnTypeType", - "ReturnTypeUnionMember7", - "Context", -] - - -class ParameterTypeType(BaseModel): - type: Literal["string"] - - -class ParameterTypeUnionMember7(BaseModel): - type: Literal["custom"] - - validator_class: str - - -ParameterType: TypeAlias = Union[ - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeUnionMember7, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, -] - - -class Parameter(BaseModel): - name: str - - type: ParameterType - - description: Optional[str] = None - - -class ReturnTypeType(BaseModel): - type: Literal["string"] - - -class ReturnTypeUnionMember7(BaseModel): - type: Literal["custom"] - - validator_class: str - - -ReturnType: TypeAlias = Union[ - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeUnionMember7, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, -] - - -class Context(BaseModel): - judge_model: str - - prompt_template: Optional[str] = None - - -class ScoringFunctionDefWithProvider(BaseModel): - identifier: str - - metadata: Dict[str, Union[bool, float, str, List[object], object, None]] - - parameters: List[Parameter] - - provider_id: str - - return_type: ReturnType - - context: Optional[Context] = None - - description: Optional[str] = None diff --git a/src/llama_stack_client/types/scoring_function_def_with_provider_param.py b/src/llama_stack_client/types/scoring_function_def_with_provider_param.py deleted file mode 100644 index 93bdee51..00000000 --- a/src/llama_stack_client/types/scoring_function_def_with_provider_param.py +++ /dev/null @@ -1,98 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = [ - "ScoringFunctionDefWithProviderParam", - "Parameter", - "ParameterType", - "ParameterTypeType", - "ParameterTypeUnionMember7", - "ReturnType", - "ReturnTypeType", - "ReturnTypeUnionMember7", - "Context", -] - - -class ParameterTypeType(TypedDict, total=False): - type: Required[Literal["string"]] - - -class ParameterTypeUnionMember7(TypedDict, total=False): - type: Required[Literal["custom"]] - - validator_class: Required[str] - - -ParameterType: TypeAlias = Union[ - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeUnionMember7, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, -] - - -class Parameter(TypedDict, total=False): - name: Required[str] - - type: Required[ParameterType] - - description: str - - -class ReturnTypeType(TypedDict, total=False): - type: Required[Literal["string"]] - - -class ReturnTypeUnionMember7(TypedDict, total=False): - type: Required[Literal["custom"]] - - validator_class: Required[str] - - -ReturnType: TypeAlias = Union[ - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeUnionMember7, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, -] - - -class Context(TypedDict, total=False): - judge_model: Required[str] - - prompt_template: str - - -class ScoringFunctionDefWithProviderParam(TypedDict, total=False): - identifier: Required[str] - - metadata: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] - - parameters: Required[Iterable[Parameter]] - - provider_id: Required[str] - - return_type: Required[ReturnType] - - context: Context - - description: str diff --git a/src/llama_stack_client/types/scoring_function_retrieve_params.py b/src/llama_stack_client/types/scoring_function_retrieve_params.py deleted file mode 100644 index 374907f1..00000000 --- a/src/llama_stack_client/types/scoring_function_retrieve_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["ScoringFunctionRetrieveParams"] - - -class ScoringFunctionRetrieveParams(TypedDict, total=False): - scoring_fn_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/scoring_function_retrieve_response.py b/src/llama_stack_client/types/scoring_function_retrieve_response.py deleted file mode 100644 index a99c1fb7..00000000 --- a/src/llama_stack_client/types/scoring_function_retrieve_response.py +++ /dev/null @@ -1,84 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Dict, List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel - -__all__ = [ - "ScoringFunctionRetrieveResponse", - "Parameter", - "ParameterType", - "ParameterTypeType", - "ReturnType", - "ReturnTypeType", - "Context", -] - - -class ParameterTypeType(BaseModel): - type: Literal["string"] - - -ParameterType: TypeAlias = Union[ - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, -] - - -class Parameter(BaseModel): - name: str - - type: ParameterType - - description: Optional[str] = None - - -class ReturnTypeType(BaseModel): - type: Literal["string"] - - -ReturnType: TypeAlias = Union[ - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, -] - - -class Context(BaseModel): - judge_model: str - - judge_score_regex: Optional[List[str]] = None - - prompt_template: Optional[str] = None - - -class ScoringFunctionRetrieveResponse(BaseModel): - identifier: str - - metadata: Dict[str, Union[bool, float, str, List[object], object, None]] - - parameters: List[Parameter] - - provider_id: str - - return_type: ReturnType - - context: Optional[Context] = None - - description: Optional[str] = None diff --git a/src/llama_stack_client/types/shared/attachment.py b/src/llama_stack_client/types/shared/attachment.py deleted file mode 100644 index df185a27..00000000 --- a/src/llama_stack_client/types/shared/attachment.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from .url import URL -from ..._models import BaseModel -from .interleaved_content_item import InterleavedContentItem - -__all__ = ["Attachment", "Content", "ContentImageContentItem", "ContentTextContentItem"] - - -class ContentImageContentItem(BaseModel): - type: Literal["image"] - - data: Optional[str] = None - - url: Optional[URL] = None - - -class ContentTextContentItem(BaseModel): - text: str - - type: Literal["text"] - - -Content: TypeAlias = Union[str, ContentImageContentItem, ContentTextContentItem, List[InterleavedContentItem], URL] - - -class Attachment(BaseModel): - content: Content - - mime_type: str diff --git a/src/llama_stack_client/types/shared/code_interpreter_tool_definition.py b/src/llama_stack_client/types/shared/code_interpreter_tool_definition.py deleted file mode 100644 index 6885cc27..00000000 --- a/src/llama_stack_client/types/shared/code_interpreter_tool_definition.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .rest_api_execution_config import RestAPIExecutionConfig
-
-__all__ = ["CodeInterpreterToolDefinition"]
-
-
-class CodeInterpreterToolDefinition(BaseModel):
-    enable_inline_code_execution: bool
-
-    type: Literal["code_interpreter"]
-
-    input_shields: Optional[List[str]] = None
-
-    output_shields: Optional[List[str]] = None
-
-    remote_execution: Optional[RestAPIExecutionConfig] = None
diff --git a/src/llama_stack_client/types/shared/content_array.py b/src/llama_stack_client/types/shared/content_array.py
deleted file mode 100644
index c21e61e6..00000000
--- a/src/llama_stack_client/types/shared/content_array.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union
-from typing_extensions import TypeAlias
-
-from .image_media import ImageMedia
-
-__all__ = ["ContentArray", "ContentArrayItem"]
-
-ContentArrayItem: TypeAlias = Union[str, ImageMedia]
-
-ContentArray: TypeAlias = List[ContentArrayItem]
diff --git a/src/llama_stack_client/types/shared/function_call_tool_definition.py b/src/llama_stack_client/types/shared/function_call_tool_definition.py
deleted file mode 100644
index c01fc68c..00000000
--- a/src/llama_stack_client/types/shared/function_call_tool_definition.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .tool_param_definition import ToolParamDefinition
-from .rest_api_execution_config import RestAPIExecutionConfig
-
-__all__ = ["FunctionCallToolDefinition"]
-
-
-class FunctionCallToolDefinition(BaseModel):
-    description: str
-
-    function_name: str
-
-    parameters: Dict[str, ToolParamDefinition]
-
-    type: Literal["function_call"]
-
-    input_shields: Optional[List[str]] = None
-
-    output_shields: Optional[List[str]] = None
-
-    remote_execution: Optional[RestAPIExecutionConfig] = None
diff --git a/src/llama_stack_client/types/shared/graph_memory_bank_def.py b/src/llama_stack_client/types/shared/graph_memory_bank_def.py
deleted file mode 100644
index 22353e6c..00000000
--- a/src/llama_stack_client/types/shared/graph_memory_bank_def.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["GraphMemoryBankDef"]
-
-
-class GraphMemoryBankDef(BaseModel):
-    identifier: str
-
-    provider_id: str
-
-    type: Literal["graph"]
diff --git a/src/llama_stack_client/types/shared/image_media.py b/src/llama_stack_client/types/shared/image_media.py
deleted file mode 100644
index 620d81db..00000000
--- a/src/llama_stack_client/types/shared/image_media.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union, Optional
-from typing_extensions import TypeAlias
-
-from ..._models import BaseModel
-
-__all__ = ["ImageMedia", "Image", "ImageThisClassRepresentsAnImageObjectToCreate"]
-
-
-class ImageThisClassRepresentsAnImageObjectToCreate(BaseModel):
-    format: Optional[str] = None
-
-    format_description: Optional[str] = None
-
-
-Image: TypeAlias = Union[ImageThisClassRepresentsAnImageObjectToCreate, str]
-
-
-class ImageMedia(BaseModel):
-    image: Image
diff --git a/src/llama_stack_client/types/shared/key_value_memory_bank_def.py b/src/llama_stack_client/types/shared/key_value_memory_bank_def.py
deleted file mode 100644
index 2a328d38..00000000
--- a/src/llama_stack_client/types/shared/key_value_memory_bank_def.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["KeyValueMemoryBankDef"]
-
-
-class KeyValueMemoryBankDef(BaseModel):
-    identifier: str
-
-    provider_id: str
-
-    type: Literal["keyvalue"]
diff --git a/src/llama_stack_client/types/shared/keyword_memory_bank_def.py b/src/llama_stack_client/types/shared/keyword_memory_bank_def.py
deleted file mode 100644
index e1637af5..00000000
--- a/src/llama_stack_client/types/shared/keyword_memory_bank_def.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["KeywordMemoryBankDef"]
-
-
-class KeywordMemoryBankDef(BaseModel):
-    identifier: str
-
-    provider_id: str
-
-    type: Literal["keyword"]
diff --git a/src/llama_stack_client/types/shared/memory_tool_definition.py b/src/llama_stack_client/types/shared/memory_tool_definition.py
deleted file mode 100644
index 019f9373..00000000
--- a/src/llama_stack_client/types/shared/memory_tool_definition.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ..._models import BaseModel
-
-__all__ = [
-    "MemoryToolDefinition",
-    "MemoryBankConfig",
-    "MemoryBankConfigVector",
-    "MemoryBankConfigKeyValue",
-    "MemoryBankConfigKeyword",
-    "MemoryBankConfigGraph",
-    "QueryGeneratorConfig",
-    "QueryGeneratorConfigDefault",
-    "QueryGeneratorConfigLlm",
-    "QueryGeneratorConfigCustom",
-]
-
-
-class MemoryBankConfigVector(BaseModel):
-    bank_id: str
-
-    type: Literal["vector"]
-
-
-class MemoryBankConfigKeyValue(BaseModel):
-    bank_id: str
-
-    keys: List[str]
-
-    type: Literal["keyvalue"]
-
-
-class MemoryBankConfigKeyword(BaseModel):
-    bank_id: str
-
-    type: Literal["keyword"]
-
-
-class MemoryBankConfigGraph(BaseModel):
-    bank_id: str
-
-    entities: List[str]
-
-    type: Literal["graph"]
-
-
-MemoryBankConfig: TypeAlias = Union[
-    MemoryBankConfigVector, MemoryBankConfigKeyValue, MemoryBankConfigKeyword, MemoryBankConfigGraph
-]
-
-
-class QueryGeneratorConfigDefault(BaseModel):
-    sep: str
-
-    type: Literal["default"]
-
-
-class QueryGeneratorConfigLlm(BaseModel):
-    model: str
-
-    template: str
-
-    type: Literal["llm"]
-
-
-class QueryGeneratorConfigCustom(BaseModel):
-    type: Literal["custom"]
-
-
-QueryGeneratorConfig: TypeAlias = Union[
-    QueryGeneratorConfigDefault, QueryGeneratorConfigLlm, QueryGeneratorConfigCustom
-]
-
-
-class MemoryToolDefinition(BaseModel):
-    max_chunks: int
-
-    max_tokens_in_context: int
-
-    memory_bank_configs: List[MemoryBankConfig]
-
-    query_generator_config: QueryGeneratorConfig
-
-    type: Literal["memory"]
-
-    input_shields: Optional[List[str]] = None
-
-    output_shields: Optional[List[str]] = None
diff --git a/src/llama_stack_client/types/shared/photogen_tool_definition.py b/src/llama_stack_client/types/shared/photogen_tool_definition.py
deleted file mode 100644
index 9a03edee..00000000
--- a/src/llama_stack_client/types/shared/photogen_tool_definition.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .rest_api_execution_config import RestAPIExecutionConfig
-
-__all__ = ["PhotogenToolDefinition"]
-
-
-class PhotogenToolDefinition(BaseModel):
-    type: Literal["photogen"]
-
-    input_shields: Optional[List[str]] = None
-
-    output_shields: Optional[List[str]] = None
-
-    remote_execution: Optional[RestAPIExecutionConfig] = None
diff --git a/src/llama_stack_client/types/shared/rest_api_execution_config.py b/src/llama_stack_client/types/shared/rest_api_execution_config.py
deleted file mode 100644
index f08b2a53..00000000
--- a/src/llama_stack_client/types/shared/rest_api_execution_config.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
- -from typing import Dict, List, Union, Optional -from typing_extensions import Literal - -from .url import URL -from ..._models import BaseModel - -__all__ = ["RestAPIExecutionConfig"] - - -class RestAPIExecutionConfig(BaseModel): - method: Literal["GET", "POST", "PUT", "DELETE"] - - url: URL - - body: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None - - headers: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None - - params: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None diff --git a/src/llama_stack_client/types/shared/search_tool_definition.py b/src/llama_stack_client/types/shared/search_tool_definition.py deleted file mode 100644 index 46dd1e08..00000000 --- a/src/llama_stack_client/types/shared/search_tool_definition.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from ..._models import BaseModel -from .rest_api_execution_config import RestAPIExecutionConfig - -__all__ = ["SearchToolDefinition"] - - -class SearchToolDefinition(BaseModel): - api_key: str - - engine: Literal["bing", "brave", "tavily"] - - type: Literal["brave_search"] - - input_shields: Optional[List[str]] = None - - output_shields: Optional[List[str]] = None - - remote_execution: Optional[RestAPIExecutionConfig] = None diff --git a/src/llama_stack_client/types/shared/url.py b/src/llama_stack_client/types/shared/url.py deleted file mode 100644 index a333bf7e..00000000 --- a/src/llama_stack_client/types/shared/url.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - - -from ..._models import BaseModel - -__all__ = ["URL"] - - -class URL(BaseModel): - uri: str diff --git a/src/llama_stack_client/types/shared/vector_memory_bank_def.py b/src/llama_stack_client/types/shared/vector_memory_bank_def.py deleted file mode 100644 index 04297526..00000000 --- a/src/llama_stack_client/types/shared/vector_memory_bank_def.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["VectorMemoryBankDef"] - - -class VectorMemoryBankDef(BaseModel): - chunk_size_in_tokens: int - - embedding_model: str - - identifier: str - - provider_id: str - - type: Literal["vector"] - - overlap_size_in_tokens: Optional[int] = None diff --git a/src/llama_stack_client/types/shared/wolfram_alpha_tool_definition.py b/src/llama_stack_client/types/shared/wolfram_alpha_tool_definition.py deleted file mode 100644 index bfad5620..00000000 --- a/src/llama_stack_client/types/shared/wolfram_alpha_tool_definition.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from typing_extensions import Literal - -from ..._models import BaseModel -from .rest_api_execution_config import RestAPIExecutionConfig - -__all__ = ["WolframAlphaToolDefinition"] - - -class WolframAlphaToolDefinition(BaseModel): - api_key: str - - type: Literal["wolfram_alpha"] - - input_shields: Optional[List[str]] = None - - output_shields: Optional[List[str]] = None - - remote_execution: Optional[RestAPIExecutionConfig] = None diff --git a/src/llama_stack_client/types/shared_params/attachment.py b/src/llama_stack_client/types/shared_params/attachment.py deleted file mode 100644 index 170acff5..00000000 --- a/src/llama_stack_client/types/shared_params/attachment.py +++ /dev/null @@ -1,34 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .url import URL -from .interleaved_content_item import InterleavedContentItem - -__all__ = ["Attachment", "Content", "ContentImageContentItem", "ContentTextContentItem"] - - -class ContentImageContentItem(TypedDict, total=False): - type: Required[Literal["image"]] - - data: str - - url: URL - - -class ContentTextContentItem(TypedDict, total=False): - text: Required[str] - - type: Required[Literal["text"]] - - -Content: TypeAlias = Union[str, ContentImageContentItem, ContentTextContentItem, Iterable[InterleavedContentItem], URL] - - -class Attachment(TypedDict, total=False): - content: Required[Content] - - mime_type: Required[str] diff --git a/src/llama_stack_client/types/shared_params/code_interpreter_tool_definition.py b/src/llama_stack_client/types/shared_params/code_interpreter_tool_definition.py deleted file mode 100644 index 8b979ea1..00000000 --- a/src/llama_stack_client/types/shared_params/code_interpreter_tool_definition.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -from .rest_api_execution_config import RestAPIExecutionConfig - -__all__ = ["CodeInterpreterToolDefinition"] - - -class CodeInterpreterToolDefinition(TypedDict, total=False): - enable_inline_code_execution: Required[bool] - - type: Required[Literal["code_interpreter"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfig diff --git a/src/llama_stack_client/types/shared_params/content_array.py b/src/llama_stack_client/types/shared_params/content_array.py deleted file mode 100644 index 6ff775b8..00000000 --- a/src/llama_stack_client/types/shared_params/content_array.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List, Union -from typing_extensions import TypeAlias - -from .image_media import ImageMedia - -__all__ = ["ContentArray", "ContentArrayItem"] - -ContentArrayItem: TypeAlias = Union[str, ImageMedia] - -ContentArray: TypeAlias = List[ContentArrayItem] diff --git a/src/llama_stack_client/types/shared_params/function_call_tool_definition.py b/src/llama_stack_client/types/shared_params/function_call_tool_definition.py deleted file mode 100644 index b355dcef..00000000 --- a/src/llama_stack_client/types/shared_params/function_call_tool_definition.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List -from typing_extensions import Literal, Required, TypedDict - -from .tool_param_definition import ToolParamDefinition -from .rest_api_execution_config import RestAPIExecutionConfig - -__all__ = ["FunctionCallToolDefinition"] - - -class FunctionCallToolDefinition(TypedDict, total=False): - description: Required[str] - - function_name: Required[str] - - parameters: Required[Dict[str, ToolParamDefinition]] - - type: Required[Literal["function_call"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfig diff --git a/src/llama_stack_client/types/shared_params/graph_memory_bank_def.py b/src/llama_stack_client/types/shared_params/graph_memory_bank_def.py deleted file mode 100644 index d9858622..00000000 --- a/src/llama_stack_client/types/shared_params/graph_memory_bank_def.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["GraphMemoryBankDef"] - - -class GraphMemoryBankDef(TypedDict, total=False): - identifier: Required[str] - - provider_id: Required[str] - - type: Required[Literal["graph"]] diff --git a/src/llama_stack_client/types/shared_params/image_media.py b/src/llama_stack_client/types/shared_params/image_media.py deleted file mode 100644 index d87d4c6c..00000000 --- a/src/llama_stack_client/types/shared_params/image_media.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Required, TypeAlias, TypedDict - -__all__ = ["ImageMedia", "Image", "ImageThisClassRepresentsAnImageObjectToCreate"] - - -class ImageThisClassRepresentsAnImageObjectToCreate(TypedDict, total=False): - format: str - - format_description: str - - -Image: TypeAlias = Union[ImageThisClassRepresentsAnImageObjectToCreate, str] - - -class ImageMedia(TypedDict, total=False): - image: Required[Image] diff --git a/src/llama_stack_client/types/shared_params/key_value_memory_bank_def.py b/src/llama_stack_client/types/shared_params/key_value_memory_bank_def.py deleted file mode 100644 index c6e2999b..00000000 --- a/src/llama_stack_client/types/shared_params/key_value_memory_bank_def.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["KeyValueMemoryBankDef"] - - -class KeyValueMemoryBankDef(TypedDict, total=False): - identifier: Required[str] - - provider_id: Required[str] - - type: Required[Literal["keyvalue"]] diff --git a/src/llama_stack_client/types/shared_params/keyword_memory_bank_def.py b/src/llama_stack_client/types/shared_params/keyword_memory_bank_def.py deleted file mode 100644 index d71ca72d..00000000 --- a/src/llama_stack_client/types/shared_params/keyword_memory_bank_def.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["KeywordMemoryBankDef"] - - -class KeywordMemoryBankDef(TypedDict, total=False): - identifier: Required[str] - - provider_id: Required[str] - - type: Required[Literal["keyword"]] diff --git a/src/llama_stack_client/types/shared_params/memory_tool_definition.py b/src/llama_stack_client/types/shared_params/memory_tool_definition.py deleted file mode 100644 index 442597ee..00000000 --- a/src/llama_stack_client/types/shared_params/memory_tool_definition.py +++ /dev/null @@ -1,91 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = [ - "MemoryToolDefinition", - "MemoryBankConfig", - "MemoryBankConfigVector", - "MemoryBankConfigKeyValue", - "MemoryBankConfigKeyword", - "MemoryBankConfigGraph", - "QueryGeneratorConfig", - "QueryGeneratorConfigDefault", - "QueryGeneratorConfigLlm", - "QueryGeneratorConfigCustom", -] - - -class MemoryBankConfigVector(TypedDict, total=False): - bank_id: Required[str] - - type: Required[Literal["vector"]] - - -class MemoryBankConfigKeyValue(TypedDict, total=False): - bank_id: Required[str] - - keys: Required[List[str]] - - type: Required[Literal["keyvalue"]] - - -class MemoryBankConfigKeyword(TypedDict, total=False): - bank_id: Required[str] - - type: Required[Literal["keyword"]] - - -class MemoryBankConfigGraph(TypedDict, total=False): - bank_id: Required[str] - - entities: Required[List[str]] - - type: Required[Literal["graph"]] - - -MemoryBankConfig: TypeAlias = Union[ - MemoryBankConfigVector, MemoryBankConfigKeyValue, MemoryBankConfigKeyword, MemoryBankConfigGraph -] - - -class QueryGeneratorConfigDefault(TypedDict, total=False): - sep: Required[str] - - type: Required[Literal["default"]] - - -class QueryGeneratorConfigLlm(TypedDict, total=False): - model: Required[str] - - template: Required[str] - - type: Required[Literal["llm"]] - - -class QueryGeneratorConfigCustom(TypedDict, total=False): - type: Required[Literal["custom"]] - - -QueryGeneratorConfig: TypeAlias = Union[ - QueryGeneratorConfigDefault, QueryGeneratorConfigLlm, QueryGeneratorConfigCustom -] - - -class MemoryToolDefinition(TypedDict, total=False): - max_chunks: Required[int] - - max_tokens_in_context: Required[int] - - memory_bank_configs: Required[Iterable[MemoryBankConfig]] - - query_generator_config: Required[QueryGeneratorConfig] - - type: Required[Literal["memory"]] - - input_shields: List[str] - - output_shields: List[str] diff --git a/src/llama_stack_client/types/shared_params/photogen_tool_definition.py b/src/llama_stack_client/types/shared_params/photogen_tool_definition.py deleted file 
mode 100644 index 38e7f47f..00000000 --- a/src/llama_stack_client/types/shared_params/photogen_tool_definition.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -from .rest_api_execution_config import RestAPIExecutionConfig - -__all__ = ["PhotogenToolDefinition"] - - -class PhotogenToolDefinition(TypedDict, total=False): - type: Required[Literal["photogen"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfig diff --git a/src/llama_stack_client/types/shared_params/rest_api_execution_config.py b/src/llama_stack_client/types/shared_params/rest_api_execution_config.py deleted file mode 100644 index 3d07e53e..00000000 --- a/src/llama_stack_client/types/shared_params/rest_api_execution_config.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Literal, Required, TypedDict - -from .url import URL - -__all__ = ["RestAPIExecutionConfig"] - - -class RestAPIExecutionConfig(TypedDict, total=False): - method: Required[Literal["GET", "POST", "PUT", "DELETE"]] - - url: Required[URL] - - body: Dict[str, Union[bool, float, str, Iterable[object], object, None]] - - headers: Dict[str, Union[bool, float, str, Iterable[object], object, None]] - - params: Dict[str, Union[bool, float, str, Iterable[object], object, None]] diff --git a/src/llama_stack_client/types/shared_params/search_tool_definition.py b/src/llama_stack_client/types/shared_params/search_tool_definition.py deleted file mode 100644 index dceeec56..00000000 --- a/src/llama_stack_client/types/shared_params/search_tool_definition.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -from .rest_api_execution_config import RestAPIExecutionConfig - -__all__ = ["SearchToolDefinition"] - - -class SearchToolDefinition(TypedDict, total=False): - api_key: Required[str] - - engine: Required[Literal["bing", "brave", "tavily"]] - - type: Required[Literal["brave_search"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfig diff --git a/src/llama_stack_client/types/shared_params/url.py b/src/llama_stack_client/types/shared_params/url.py deleted file mode 100644 index 8b4418aa..00000000 --- a/src/llama_stack_client/types/shared_params/url.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["URL"] - - -class URL(TypedDict, total=False): - uri: Required[str] diff --git a/src/llama_stack_client/types/shared_params/vector_memory_bank_def.py b/src/llama_stack_client/types/shared_params/vector_memory_bank_def.py deleted file mode 100644 index 50428659..00000000 --- a/src/llama_stack_client/types/shared_params/vector_memory_bank_def.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["VectorMemoryBankDef"] - - -class VectorMemoryBankDef(TypedDict, total=False): - chunk_size_in_tokens: Required[int] - - embedding_model: Required[str] - - identifier: Required[str] - - provider_id: Required[str] - - type: Required[Literal["vector"]] - - overlap_size_in_tokens: int diff --git a/src/llama_stack_client/types/shared_params/wolfram_alpha_tool_definition.py b/src/llama_stack_client/types/shared_params/wolfram_alpha_tool_definition.py deleted file mode 100644 index 13fd62fa..00000000 --- a/src/llama_stack_client/types/shared_params/wolfram_alpha_tool_definition.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -from .rest_api_execution_config import RestAPIExecutionConfig - -__all__ = ["WolframAlphaToolDefinition"] - - -class WolframAlphaToolDefinition(TypedDict, total=False): - api_key: Required[str] - - type: Required[Literal["wolfram_alpha"]] - - input_shields: List[str] - - output_shields: List[str] - - remote_execution: RestAPIExecutionConfig diff --git a/src/llama_stack_client/types/shield_def_with_provider.py b/src/llama_stack_client/types/shield_def_with_provider.py deleted file mode 100644 index 7adee59a..00000000 --- a/src/llama_stack_client/types/shield_def_with_provider.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union - -from .._models import BaseModel - -__all__ = ["ShieldDefWithProvider"] - - -class ShieldDefWithProvider(BaseModel): - identifier: str - - params: Dict[str, Union[bool, float, str, List[object], object, None]] - - provider_id: str - - type: str diff --git a/src/llama_stack_client/types/shield_def_with_provider_param.py b/src/llama_stack_client/types/shield_def_with_provider_param.py deleted file mode 100644 index 0d4df4ce..00000000 --- a/src/llama_stack_client/types/shield_def_with_provider_param.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Required, TypedDict - -__all__ = ["ShieldDefWithProviderParam"] - - -class ShieldDefWithProviderParam(TypedDict, total=False): - identifier: Required[str] - - params: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] - - provider_id: Required[str] - - type: Required[str] diff --git a/src/llama_stack_client/types/shield_get_params.py b/src/llama_stack_client/types/shield_get_params.py deleted file mode 100644 index cb9ce90f..00000000 --- a/src/llama_stack_client/types/shield_get_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["ShieldGetParams"] - - -class ShieldGetParams(TypedDict, total=False): - shield_type: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/shield_retrieve_params.py b/src/llama_stack_client/types/shield_retrieve_params.py deleted file mode 100644 index 7ce7cb7e..00000000 --- a/src/llama_stack_client/types/shield_retrieve_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["ShieldRetrieveParams"] - - -class ShieldRetrieveParams(TypedDict, total=False): - identifier: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/shield_retrieve_response.py b/src/llama_stack_client/types/shield_retrieve_response.py deleted file mode 100644 index d552342f..00000000 --- a/src/llama_stack_client/types/shield_retrieve_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union - -from .._models import BaseModel - -__all__ = ["ShieldRetrieveResponse", "ProviderConfig"] - - -class ProviderConfig(BaseModel): - config: Dict[str, Union[bool, float, str, List[object], object, None]] - - provider_type: str - - -class ShieldRetrieveResponse(BaseModel): - provider_config: ProviderConfig - - shield_type: str diff --git a/src/llama_stack_client/types/shield_spec.py b/src/llama_stack_client/types/shield_spec.py deleted file mode 100644 index ae6ffdb9..00000000 --- a/src/llama_stack_client/types/shield_spec.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union - -from .._models import BaseModel - -__all__ = ["ShieldSpec", "ProviderConfig"] - - -class ProviderConfig(BaseModel): - config: Dict[str, Union[bool, float, str, List[object], object, None]] - - provider_type: str - - -class ShieldSpec(BaseModel): - provider_config: ProviderConfig - - shield_type: str diff --git a/src/llama_stack_client/types/span_with_children.py b/src/llama_stack_client/types/span_with_children.py deleted file mode 100644 index 24eb6d7b..00000000 --- a/src/llama_stack_client/types/span_with_children.py +++ /dev/null @@ -1,38 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, List, Union, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._compat import PYDANTIC_V2 -from .._models import BaseModel - -__all__ = ["SpanWithChildren"] - - -class SpanWithChildren(BaseModel): - children: List[SpanWithChildren] - - name: str - - span_id: str - - start_time: datetime - - trace_id: str - - attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None - - end_time: Optional[datetime] = None - - parent_span_id: Optional[str] = None - - status: Optional[Literal["ok", "error"]] = None - - -if PYDANTIC_V2: - SpanWithChildren.model_rebuild() -else: - SpanWithChildren.update_forward_refs() # type: ignore diff --git a/src/llama_stack_client/types/synthetic_data_generation.py b/src/llama_stack_client/types/synthetic_data_generation.py deleted file mode 100644 index eea06e6f..00000000 --- a/src/llama_stack_client/types/synthetic_data_generation.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union, Optional - -from .._models import BaseModel -from .scored_dialog_generations import ScoredDialogGenerations - -__all__ = ["SyntheticDataGeneration"] - - -class SyntheticDataGeneration(BaseModel): - synthetic_data: List[ScoredDialogGenerations] - - statistics: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None diff --git a/src/llama_stack_client/types/telemetry_get_trace_params.py b/src/llama_stack_client/types/telemetry_get_trace_params.py deleted file mode 100644 index dbee6988..00000000 --- a/src/llama_stack_client/types/telemetry_get_trace_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["TelemetryGetTraceParams"] - - -class TelemetryGetTraceParams(TypedDict, total=False): - trace_id: Required[str] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] diff --git a/src/llama_stack_client/types/telemetry_get_trace_response.py b/src/llama_stack_client/types/telemetry_get_trace_response.py deleted file mode 100644 index c1fa453e..00000000 --- a/src/llama_stack_client/types/telemetry_get_trace_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from datetime import datetime - -from .._models import BaseModel - -__all__ = ["TelemetryGetTraceResponse"] - - -class TelemetryGetTraceResponse(BaseModel): - root_span_id: str - - start_time: datetime - - trace_id: str - - end_time: Optional[datetime] = None diff --git a/src/llama_stack_client/types/telemetry_log_params.py b/src/llama_stack_client/types/telemetry_log_params.py deleted file mode 100644 index a2e4d9b0..00000000 --- a/src/llama_stack_client/types/telemetry_log_params.py +++ /dev/null @@ -1,96 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, Union, Iterable -from datetime import datetime -from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict - -from .._utils import PropertyInfo - -__all__ = [ - "TelemetryLogParams", - "Event", - "EventUnstructuredLogEvent", - "EventMetricEvent", - "EventStructuredLogEvent", - "EventStructuredLogEventPayload", - "EventStructuredLogEventPayloadSpanStartPayload", - "EventStructuredLogEventPayloadSpanEndPayload", -] - - -class TelemetryLogParams(TypedDict, total=False): - event: Required[Event] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-ProviderData")] - - -class EventUnstructuredLogEvent(TypedDict, total=False): - message: Required[str] - - severity: Required[Literal["verbose", "debug", "info", "warn", "error", "critical"]] - - span_id: Required[str] - - timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]] - - trace_id: Required[str] - - type: Required[Literal["unstructured_log"]] - - attributes: Dict[str, Union[bool, float, str, Iterable[object], object, None]] - - -class EventMetricEvent(TypedDict, total=False): - metric: Required[str] - - span_id: Required[str] - - timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]] - - trace_id: Required[str] - - type: Required[Literal["metric"]] - - unit: Required[str] - - value: Required[float] - - attributes: Dict[str, Union[bool, float, str, Iterable[object], object, None]] - - -class EventStructuredLogEventPayloadSpanStartPayload(TypedDict, total=False): - name: Required[str] - - type: Required[Literal["span_start"]] - - parent_span_id: str - - -class EventStructuredLogEventPayloadSpanEndPayload(TypedDict, total=False): - status: Required[Literal["ok", "error"]] - - type: Required[Literal["span_end"]] - - -EventStructuredLogEventPayload: TypeAlias = Union[ - EventStructuredLogEventPayloadSpanStartPayload, EventStructuredLogEventPayloadSpanEndPayload -] - - -class EventStructuredLogEvent(TypedDict, total=False): - payload: Required[EventStructuredLogEventPayload] - - span_id: Required[str] - - timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]] - - trace_id: Required[str] - - type: Required[Literal["structured_log"]] - - attributes: Dict[str, Union[bool, float, str, Iterable[object], object, None]] - - -Event: TypeAlias = Union[EventUnstructuredLogEvent, EventMetricEvent, EventStructuredLogEvent] diff --git a/src/llama_stack_client/types/tool_get_params.py b/src/llama_stack_client/types/tool_get_params.py deleted file mode 100644 index ba1c6e81..00000000 --- a/src/llama_stack_client/types/tool_get_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["ToolGetParams"] - - -class ToolGetParams(TypedDict, total=False): - tool_name: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/tool_param_definition_param.py b/src/llama_stack_client/types/tool_param_definition_param.py deleted file mode 100644 index b76d4f50..00000000 --- a/src/llama_stack_client/types/tool_param_definition_param.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union, Iterable -from typing_extensions import Required, TypedDict - -__all__ = ["ToolParamDefinitionParam"] - - -class ToolParamDefinitionParam(TypedDict, total=False): - param_type: Required[str] - - default: Union[bool, float, str, Iterable[object], object, None] - - description: str - - required: bool diff --git a/src/llama_stack_client/types/tool_runtime/document_param.py b/src/llama_stack_client/types/tool_runtime/document_param.py deleted file mode 100644 index 9e2e5bd7..00000000 --- a/src/llama_stack_client/types/tool_runtime/document_param.py +++ /dev/null @@ -1,48 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from ..shared_params.url import URL -from ..shared_params.interleaved_content_item import InterleavedContentItem - -__all__ = [ - "DocumentParam", - "Content", - "ContentImageContentItem", - "ContentImageContentItemImage", - "ContentTextContentItem", -] - - -class ContentImageContentItemImage(TypedDict, total=False): - data: str - - url: URL - - -class ContentImageContentItem(TypedDict, total=False): - image: Required[ContentImageContentItemImage] - - type: Required[Literal["image"]] - - -class ContentTextContentItem(TypedDict, total=False): - text: Required[str] - - type: Required[Literal["text"]] - - -Content: TypeAlias = Union[str, ContentImageContentItem, ContentTextContentItem, Iterable[InterleavedContentItem], URL] - - -class DocumentParam(TypedDict, total=False): - content: Required[Content] - - document_id: Required[str] - - metadata: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] - - mime_type: str diff --git a/src/llama_stack_client/types/tool_runtime/query_config_param.py b/src/llama_stack_client/types/tool_runtime/query_config_param.py deleted file mode 100644 index 6b106e50..00000000 --- a/src/llama_stack_client/types/tool_runtime/query_config_param.py +++ /dev/null @@ -1,40 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = [ - "QueryConfigParam", - "QueryGeneratorConfig", - "QueryGeneratorConfigDefaultRagQueryGeneratorConfig", - "QueryGeneratorConfigLlmragQueryGeneratorConfig", -] - - -class QueryGeneratorConfigDefaultRagQueryGeneratorConfig(TypedDict, total=False): - separator: Required[str] - - type: Required[Literal["default"]] - - -class QueryGeneratorConfigLlmragQueryGeneratorConfig(TypedDict, total=False): - model: Required[str] - - template: Required[str] - - type: Required[Literal["llm"]] - - -QueryGeneratorConfig: TypeAlias = Union[ - QueryGeneratorConfigDefaultRagQueryGeneratorConfig, QueryGeneratorConfigLlmragQueryGeneratorConfig -] - - -class QueryConfigParam(TypedDict, total=False): - max_chunks: Required[int] - - max_tokens_in_context: Required[int] - - query_generator_config: Required[QueryGeneratorConfig] diff --git a/src/llama_stack_client/types/tool_runtime/query_result.py b/src/llama_stack_client/types/tool_runtime/query_result.py deleted file mode 100644 index 4486763b..00000000 --- a/src/llama_stack_client/types/tool_runtime/query_result.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from ..shared.interleaved_content import InterleavedContent - -__all__ = ["QueryResult"] - - -class QueryResult(BaseModel): - content: Optional[InterleavedContent] = None diff --git a/src/llama_stack_client/types/toolgroup_get_params.py b/src/llama_stack_client/types/toolgroup_get_params.py deleted file mode 100644 index daffe283..00000000 --- a/src/llama_stack_client/types/toolgroup_get_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["ToolgroupGetParams"] - - -class ToolgroupGetParams(TypedDict, total=False): - toolgroup_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/toolgroup_unregister_params.py b/src/llama_stack_client/types/toolgroup_unregister_params.py deleted file mode 100644 index 7f64607d..00000000 --- a/src/llama_stack_client/types/toolgroup_unregister_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["ToolgroupUnregisterParams"] - - -class ToolgroupUnregisterParams(TypedDict, total=False): - tool_group_id: Required[str] - - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] - - x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/train_eval_dataset.py b/src/llama_stack_client/types/train_eval_dataset.py deleted file mode 100644 index 2b6494bd..00000000 --- a/src/llama_stack_client/types/train_eval_dataset.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from typing import Dict, List, Union, Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["TrainEvalDataset"] - - -class TrainEvalDataset(BaseModel): - columns: Dict[str, Literal["dialog", "text", "media", "number", "json"]] - - content_url: str - - metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None diff --git a/src/llama_stack_client/types/train_eval_dataset_param.py b/src/llama_stack_client/types/train_eval_dataset_param.py deleted file mode 100644 index 311b3fd9..00000000 --- a/src/llama_stack_client/types/train_eval_dataset_param.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["TrainEvalDatasetParam"] - - -class TrainEvalDatasetParam(TypedDict, total=False): - columns: Required[Dict[str, Literal["dialog", "text", "media", "number", "json"]]] - - content_url: Required[str] - - metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] diff --git a/tests/api_resources/eval/test_job.py b/tests/api_resources/eval/test_job.py deleted file mode 100644 index ffffca71..00000000 --- a/tests/api_resources/eval/test_job.py +++ /dev/null @@ -1,259 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, Optional, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types.eval import ( - JobStatus, - JobResultResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestJob: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_cancel(self, client: LlamaStackClient) -> None: - job = client.eval.job.cancel( - job_id="job_id", - ) - assert job is None - - @parametrize - def test_method_cancel_with_all_params(self, client: LlamaStackClient) -> None: - job = client.eval.job.cancel( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert job is None - - @parametrize - def test_raw_response_cancel(self, client: LlamaStackClient) -> None: - response = client.eval.job.with_raw_response.cancel( - job_id="job_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert job is None - - @parametrize - def test_streaming_response_cancel(self, client: LlamaStackClient) -> None: - with client.eval.job.with_streaming_response.cancel( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert job is None - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_result(self, client: LlamaStackClient) -> None: - job = client.eval.job.result( - job_id="job_id", - ) - assert_matches_type(JobResultResponse, job, path=["response"]) - - @parametrize - def test_method_result_with_all_params(self, client: LlamaStackClient) -> None: - job = client.eval.job.result( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - 
assert_matches_type(JobResultResponse, job, path=["response"]) - - @parametrize - def test_raw_response_result(self, client: LlamaStackClient) -> None: - response = client.eval.job.with_raw_response.result( - job_id="job_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(JobResultResponse, job, path=["response"]) - - @parametrize - def test_streaming_response_result(self, client: LlamaStackClient) -> None: - with client.eval.job.with_streaming_response.result( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(JobResultResponse, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_status(self, client: LlamaStackClient) -> None: - job = client.eval.job.status( - job_id="job_id", - ) - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - def test_method_status_with_all_params(self, client: LlamaStackClient) -> None: - job = client.eval.job.status( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - def test_raw_response_status(self, client: LlamaStackClient) -> None: - response = client.eval.job.with_raw_response.status( - job_id="job_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - def test_streaming_response_status(self, client: LlamaStackClient) -> None: - with client.eval.job.with_streaming_response.status( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncJob: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_cancel(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.eval.job.cancel( - job_id="job_id", - ) - assert job is None - - @parametrize - async def test_method_cancel_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.eval.job.cancel( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert job is None - - @parametrize - async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.eval.job.with_raw_response.cancel( - job_id="job_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert job is None - - @parametrize - async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.eval.job.with_streaming_response.cancel( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert job is None - - assert cast(Any, response.is_closed) is True - - @parametrize - 
async def test_method_result(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.eval.job.result( - job_id="job_id", - ) - assert_matches_type(JobResultResponse, job, path=["response"]) - - @parametrize - async def test_method_result_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.eval.job.result( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(JobResultResponse, job, path=["response"]) - - @parametrize - async def test_raw_response_result(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.eval.job.with_raw_response.result( - job_id="job_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(JobResultResponse, job, path=["response"]) - - @parametrize - async def test_streaming_response_result(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.eval.job.with_streaming_response.result( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(JobResultResponse, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_status(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.eval.job.status( - job_id="job_id", - ) - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - async def test_method_status_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.eval.job.status( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - async def test_raw_response_status(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.eval.job.with_raw_response.status( - job_id="job_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - async def test_streaming_response_status(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.eval.job.with_streaming_response.status( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/evaluate/__init__.py b/tests/api_resources/evaluate/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/evaluate/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/evaluate/jobs/__init__.py b/tests/api_resources/evaluate/jobs/__init__.py deleted file mode 100755 index fd8019a9..00000000 --- a/tests/api_resources/evaluate/jobs/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/evaluate/jobs/test_artifacts.py b/tests/api_resources/evaluate/jobs/test_artifacts.py deleted file mode 100755 index 52a7e372..00000000 --- a/tests/api_resources/evaluate/jobs/test_artifacts.py +++ /dev/null @@ -1,100 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types.evaluate import EvaluationJobArtifacts - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestArtifacts: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_list(self, client: LlamaStackClient) -> None: - artifact = client.evaluate.jobs.artifacts.list( - job_uuid="job_uuid", - ) - assert_matches_type(EvaluationJobArtifacts, artifact, path=["response"]) - - @parametrize - def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: - artifact = client.evaluate.jobs.artifacts.list( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluationJobArtifacts, artifact, path=["response"]) - - @parametrize - def test_raw_response_list(self, client: LlamaStackClient) -> None: - response = client.evaluate.jobs.artifacts.with_raw_response.list( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - artifact = response.parse() - assert_matches_type(EvaluationJobArtifacts, artifact, path=["response"]) - - @parametrize - def test_streaming_response_list(self, client: LlamaStackClient) -> None: - with client.evaluate.jobs.artifacts.with_streaming_response.list( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - artifact = response.parse() - assert_matches_type(EvaluationJobArtifacts, artifact, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncArtifacts: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: - artifact = await async_client.evaluate.jobs.artifacts.list( - job_uuid="job_uuid", - ) - assert_matches_type(EvaluationJobArtifacts, artifact, path=["response"]) - - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - artifact = await async_client.evaluate.jobs.artifacts.list( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluationJobArtifacts, artifact, path=["response"]) - - @parametrize - async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.evaluate.jobs.artifacts.with_raw_response.list( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - artifact = await response.parse() - assert_matches_type(EvaluationJobArtifacts, artifact, path=["response"]) - - @parametrize - async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: - async with 
async_client.evaluate.jobs.artifacts.with_streaming_response.list( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - artifact = await response.parse() - assert_matches_type(EvaluationJobArtifacts, artifact, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/evaluate/jobs/test_logs.py b/tests/api_resources/evaluate/jobs/test_logs.py deleted file mode 100755 index 018412dd..00000000 --- a/tests/api_resources/evaluate/jobs/test_logs.py +++ /dev/null @@ -1,100 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types.evaluate import EvaluationJobLogStream - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestLogs: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_list(self, client: LlamaStackClient) -> None: - log = client.evaluate.jobs.logs.list( - job_uuid="job_uuid", - ) - assert_matches_type(EvaluationJobLogStream, log, path=["response"]) - - @parametrize - def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: - log = client.evaluate.jobs.logs.list( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluationJobLogStream, log, path=["response"]) - - @parametrize - def test_raw_response_list(self, client: LlamaStackClient) -> None: - response = client.evaluate.jobs.logs.with_raw_response.list( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - log = response.parse() - assert_matches_type(EvaluationJobLogStream, log, path=["response"]) - - @parametrize - def test_streaming_response_list(self, client: LlamaStackClient) -> None: - with client.evaluate.jobs.logs.with_streaming_response.list( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - log = response.parse() - assert_matches_type(EvaluationJobLogStream, log, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncLogs: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: - log = await async_client.evaluate.jobs.logs.list( - job_uuid="job_uuid", - ) - assert_matches_type(EvaluationJobLogStream, log, path=["response"]) - - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - log = await async_client.evaluate.jobs.logs.list( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluationJobLogStream, log, path=["response"]) - - @parametrize - async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.evaluate.jobs.logs.with_raw_response.list( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") 
== "python" - log = await response.parse() - assert_matches_type(EvaluationJobLogStream, log, path=["response"]) - - @parametrize - async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.evaluate.jobs.logs.with_streaming_response.list( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - log = await response.parse() - assert_matches_type(EvaluationJobLogStream, log, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/evaluate/jobs/test_status.py b/tests/api_resources/evaluate/jobs/test_status.py deleted file mode 100755 index f11f67c8..00000000 --- a/tests/api_resources/evaluate/jobs/test_status.py +++ /dev/null @@ -1,100 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types.evaluate import EvaluationJobStatus - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestStatus: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_list(self, client: LlamaStackClient) -> None: - status = client.evaluate.jobs.status.list( - job_uuid="job_uuid", - ) - assert_matches_type(EvaluationJobStatus, status, path=["response"]) - - @parametrize - def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: - status = client.evaluate.jobs.status.list( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluationJobStatus, status, path=["response"]) - - @parametrize - def test_raw_response_list(self, client: LlamaStackClient) -> None: - response = client.evaluate.jobs.status.with_raw_response.list( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - status = response.parse() - assert_matches_type(EvaluationJobStatus, status, path=["response"]) - - @parametrize - def test_streaming_response_list(self, client: LlamaStackClient) -> None: - with client.evaluate.jobs.status.with_streaming_response.list( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - status = response.parse() - assert_matches_type(EvaluationJobStatus, status, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncStatus: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: - status = await async_client.evaluate.jobs.status.list( - job_uuid="job_uuid", - ) - assert_matches_type(EvaluationJobStatus, status, path=["response"]) - - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - status = await async_client.evaluate.jobs.status.list( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluationJobStatus, status, path=["response"]) - - @parametrize - async def 
test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.evaluate.jobs.status.with_raw_response.list( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - status = await response.parse() - assert_matches_type(EvaluationJobStatus, status, path=["response"]) - - @parametrize - async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.evaluate.jobs.status.with_streaming_response.list( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - status = await response.parse() - assert_matches_type(EvaluationJobStatus, status, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/evaluate/test_jobs.py b/tests/api_resources/evaluate/test_jobs.py deleted file mode 100644 index 6a60f944..00000000 --- a/tests/api_resources/evaluate/test_jobs.py +++ /dev/null @@ -1,259 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, Optional, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types.evaluate import ( - JobStatus, - EvaluateResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestJobs: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_cancel(self, client: LlamaStackClient) -> None: - job = client.evaluate.jobs.cancel( - job_id="job_id", - ) - assert job is None - - @parametrize - def test_method_cancel_with_all_params(self, client: LlamaStackClient) -> None: - job = client.evaluate.jobs.cancel( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert job is None - - @parametrize - def test_raw_response_cancel(self, client: LlamaStackClient) -> None: - response = client.evaluate.jobs.with_raw_response.cancel( - job_id="job_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert job is None - - @parametrize - def test_streaming_response_cancel(self, client: LlamaStackClient) -> None: - with client.evaluate.jobs.with_streaming_response.cancel( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert job is None - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_result(self, client: LlamaStackClient) -> None: - job = client.evaluate.jobs.result( - job_id="job_id", - ) - assert_matches_type(EvaluateResponse, job, path=["response"]) - - @parametrize - def test_method_result_with_all_params(self, client: LlamaStackClient) -> None: - job = client.evaluate.jobs.result( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluateResponse, job, path=["response"]) - - @parametrize - def test_raw_response_result(self, client: LlamaStackClient) -> None: - response = client.evaluate.jobs.with_raw_response.result( - job_id="job_id", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(EvaluateResponse, job, path=["response"]) - - @parametrize - def test_streaming_response_result(self, client: LlamaStackClient) -> None: - with client.evaluate.jobs.with_streaming_response.result( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(EvaluateResponse, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_status(self, client: LlamaStackClient) -> None: - job = client.evaluate.jobs.status( - job_id="job_id", - ) - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - def test_method_status_with_all_params(self, client: LlamaStackClient) -> None: - job = client.evaluate.jobs.status( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - def test_raw_response_status(self, client: LlamaStackClient) -> None: - response = client.evaluate.jobs.with_raw_response.status( - job_id="job_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - def test_streaming_response_status(self, client: LlamaStackClient) -> None: - with client.evaluate.jobs.with_streaming_response.status( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncJobs: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_cancel(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.evaluate.jobs.cancel( - job_id="job_id", - ) - assert job is None - - @parametrize - async def test_method_cancel_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.evaluate.jobs.cancel( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert job is None - - @parametrize - async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.evaluate.jobs.with_raw_response.cancel( - job_id="job_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert job is None - - @parametrize - async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.evaluate.jobs.with_streaming_response.cancel( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert job is None - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_result(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.evaluate.jobs.result( - job_id="job_id", - ) - assert_matches_type(EvaluateResponse, job, path=["response"]) - - @parametrize - 
async def test_method_result_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.evaluate.jobs.result( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluateResponse, job, path=["response"]) - - @parametrize - async def test_raw_response_result(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.evaluate.jobs.with_raw_response.result( - job_id="job_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(EvaluateResponse, job, path=["response"]) - - @parametrize - async def test_streaming_response_result(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.evaluate.jobs.with_streaming_response.result( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(EvaluateResponse, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_status(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.evaluate.jobs.status( - job_id="job_id", - ) - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - async def test_method_status_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.evaluate.jobs.status( - job_id="job_id", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - async def test_raw_response_status(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.evaluate.jobs.with_raw_response.status( - job_id="job_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - @parametrize - async def test_streaming_response_status(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.evaluate.jobs.with_streaming_response.status( - job_id="job_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(Optional[JobStatus], job, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/evaluate/test_question_answering.py b/tests/api_resources/evaluate/test_question_answering.py deleted file mode 100644 index 4b5e88e1..00000000 --- a/tests/api_resources/evaluate/test_question_answering.py +++ /dev/null @@ -1,100 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import EvaluationJob - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestQuestionAnswering: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_create(self, client: LlamaStackClient) -> None: - question_answering = client.evaluate.question_answering.create( - metrics=["em", "f1"], - ) - assert_matches_type(EvaluationJob, question_answering, path=["response"]) - - @parametrize - def test_method_create_with_all_params(self, client: LlamaStackClient) -> None: - question_answering = client.evaluate.question_answering.create( - metrics=["em", "f1"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluationJob, question_answering, path=["response"]) - - @parametrize - def test_raw_response_create(self, client: LlamaStackClient) -> None: - response = client.evaluate.question_answering.with_raw_response.create( - metrics=["em", "f1"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - question_answering = response.parse() - assert_matches_type(EvaluationJob, question_answering, path=["response"]) - - @parametrize - def test_streaming_response_create(self, client: LlamaStackClient) -> None: - with client.evaluate.question_answering.with_streaming_response.create( - metrics=["em", "f1"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - question_answering = response.parse() - assert_matches_type(EvaluationJob, question_answering, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncQuestionAnswering: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None: - question_answering = await async_client.evaluate.question_answering.create( - metrics=["em", "f1"], - ) - assert_matches_type(EvaluationJob, question_answering, path=["response"]) - - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - question_answering = await async_client.evaluate.question_answering.create( - metrics=["em", "f1"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluationJob, question_answering, path=["response"]) - - @parametrize - async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.evaluate.question_answering.with_raw_response.create( - metrics=["em", "f1"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - question_answering = await response.parse() - assert_matches_type(EvaluationJob, question_answering, path=["response"]) - - @parametrize - async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.evaluate.question_answering.with_streaming_response.create( - metrics=["em", "f1"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" - - question_answering = await response.parse() - assert_matches_type(EvaluationJob, question_answering, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/inference/__init__.py b/tests/api_resources/inference/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/inference/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/inference/test_embeddings.py b/tests/api_resources/inference/test_embeddings.py deleted file mode 100644 index dac6d391..00000000 --- a/tests/api_resources/inference/test_embeddings.py +++ /dev/null @@ -1,108 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types.inference import Embeddings - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestEmbeddings: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_create(self, client: LlamaStackClient) -> None: - embedding = client.inference.embeddings.create( - contents=["string", "string", "string"], - model="model", - ) - assert_matches_type(Embeddings, embedding, path=["response"]) - - @parametrize - def test_method_create_with_all_params(self, client: LlamaStackClient) -> None: - embedding = client.inference.embeddings.create( - contents=["string", "string", "string"], - model="model", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(Embeddings, embedding, path=["response"]) - - @parametrize - def test_raw_response_create(self, client: LlamaStackClient) -> None: - response = client.inference.embeddings.with_raw_response.create( - contents=["string", "string", "string"], - model="model", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - embedding = response.parse() - assert_matches_type(Embeddings, embedding, path=["response"]) - - @parametrize - def test_streaming_response_create(self, client: LlamaStackClient) -> None: - with client.inference.embeddings.with_streaming_response.create( - contents=["string", "string", "string"], - model="model", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - embedding = response.parse() - assert_matches_type(Embeddings, embedding, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncEmbeddings: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None: - embedding = await async_client.inference.embeddings.create( - contents=["string", "string", "string"], - model="model", - ) - assert_matches_type(Embeddings, embedding, path=["response"]) - - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - embedding = await async_client.inference.embeddings.create( - contents=["string", "string", "string"], - model="model", - 
x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(Embeddings, embedding, path=["response"]) - - @parametrize - async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.inference.embeddings.with_raw_response.create( - contents=["string", "string", "string"], - model="model", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - embedding = await response.parse() - assert_matches_type(Embeddings, embedding, path=["response"]) - - @parametrize - async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.inference.embeddings.with_streaming_response.create( - contents=["string", "string", "string"], - model="model", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - embedding = await response.parse() - assert_matches_type(Embeddings, embedding, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/memory/__init__.py b/tests/api_resources/memory/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/memory/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/memory/test_documents.py b/tests/api_resources/memory/test_documents.py deleted file mode 100644 index 765aa1a2..00000000 --- a/tests/api_resources/memory/test_documents.py +++ /dev/null @@ -1,218 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types.memory import DocumentRetrieveResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestDocuments: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - def test_method_retrieve(self, client: LlamaStackClient) -> None: - document = client.memory.documents.retrieve( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) - assert_matches_type(DocumentRetrieveResponse, document, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None: - document = client.memory.documents.retrieve( - bank_id="bank_id", - document_ids=["string", "string", "string"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(DocumentRetrieveResponse, document, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - def test_raw_response_retrieve(self, client: LlamaStackClient) -> None: - response = client.memory.documents.with_raw_response.retrieve( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) - - assert response.is_closed 
is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - document = response.parse() - assert_matches_type(DocumentRetrieveResponse, document, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: - with client.memory.documents.with_streaming_response.retrieve( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - document = response.parse() - assert_matches_type(DocumentRetrieveResponse, document, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_delete(self, client: LlamaStackClient) -> None: - document = client.memory.documents.delete( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) - assert document is None - - @parametrize - def test_method_delete_with_all_params(self, client: LlamaStackClient) -> None: - document = client.memory.documents.delete( - bank_id="bank_id", - document_ids=["string", "string", "string"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert document is None - - @parametrize - def test_raw_response_delete(self, client: LlamaStackClient) -> None: - response = client.memory.documents.with_raw_response.delete( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - document = response.parse() - assert document is None - - @parametrize - def test_streaming_response_delete(self, client: LlamaStackClient) -> None: - with client.memory.documents.with_streaming_response.delete( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - document = response.parse() - assert document is None - - assert cast(Any, response.is_closed) is True - - -class TestAsyncDocuments: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None: - document = await async_client.memory.documents.retrieve( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) - assert_matches_type(DocumentRetrieveResponse, document, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - document = await async_client.memory.documents.retrieve( - bank_id="bank_id", - document_ids=["string", "string", "string"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(DocumentRetrieveResponse, document, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - async def test_raw_response_retrieve(self, 
async_client: AsyncLlamaStackClient) -> None: - response = await async_client.memory.documents.with_raw_response.retrieve( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - document = await response.parse() - assert_matches_type(DocumentRetrieveResponse, document, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.memory.documents.with_streaming_response.retrieve( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - document = await response.parse() - assert_matches_type(DocumentRetrieveResponse, document, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None: - document = await async_client.memory.documents.delete( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) - assert document is None - - @parametrize - async def test_method_delete_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - document = await async_client.memory.documents.delete( - bank_id="bank_id", - document_ids=["string", "string", "string"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert document is None - - @parametrize - async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.memory.documents.with_raw_response.delete( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - document = await response.parse() - assert document is None - - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.memory.documents.with_streaming_response.delete( - bank_id="bank_id", - document_ids=["string", "string", "string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - document = await response.parse() - assert document is None - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/post_training/test_jobs.py b/tests/api_resources/post_training/test_jobs.py deleted file mode 100644 index 400f5aa4..00000000 --- a/tests/api_resources/post_training/test_jobs.py +++ /dev/null @@ -1,427 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import PostTrainingJob -from llama_stack_client.types.post_training import ( - PostTrainingJobStatus, - PostTrainingJobArtifacts, - PostTrainingJobLogStream, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestJobs: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - def test_method_list(self, client: LlamaStackClient) -> None: - job = client.post_training.jobs.list() - assert_matches_type(PostTrainingJob, job, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: - job = client.post_training.jobs.list( - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(PostTrainingJob, job, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - def test_raw_response_list(self, client: LlamaStackClient) -> None: - response = client.post_training.jobs.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(PostTrainingJob, job, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - def test_streaming_response_list(self, client: LlamaStackClient) -> None: - with client.post_training.jobs.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(PostTrainingJob, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_artifacts(self, client: LlamaStackClient) -> None: - job = client.post_training.jobs.artifacts( - job_uuid="job_uuid", - ) - assert_matches_type(PostTrainingJobArtifacts, job, path=["response"]) - - @parametrize - def test_method_artifacts_with_all_params(self, client: LlamaStackClient) -> None: - job = client.post_training.jobs.artifacts( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(PostTrainingJobArtifacts, job, path=["response"]) - - @parametrize - def test_raw_response_artifacts(self, client: LlamaStackClient) -> None: - response = client.post_training.jobs.with_raw_response.artifacts( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(PostTrainingJobArtifacts, job, path=["response"]) - - @parametrize - def test_streaming_response_artifacts(self, client: LlamaStackClient) -> None: - with client.post_training.jobs.with_streaming_response.artifacts( - job_uuid="job_uuid", - ) as response: - assert not 
response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(PostTrainingJobArtifacts, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_cancel(self, client: LlamaStackClient) -> None: - job = client.post_training.jobs.cancel( - job_uuid="job_uuid", - ) - assert job is None - - @parametrize - def test_method_cancel_with_all_params(self, client: LlamaStackClient) -> None: - job = client.post_training.jobs.cancel( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert job is None - - @parametrize - def test_raw_response_cancel(self, client: LlamaStackClient) -> None: - response = client.post_training.jobs.with_raw_response.cancel( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert job is None - - @parametrize - def test_streaming_response_cancel(self, client: LlamaStackClient) -> None: - with client.post_training.jobs.with_streaming_response.cancel( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert job is None - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_logs(self, client: LlamaStackClient) -> None: - job = client.post_training.jobs.logs( - job_uuid="job_uuid", - ) - assert_matches_type(PostTrainingJobLogStream, job, path=["response"]) - - @parametrize - def test_method_logs_with_all_params(self, client: LlamaStackClient) -> None: - job = client.post_training.jobs.logs( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(PostTrainingJobLogStream, job, path=["response"]) - - @parametrize - def test_raw_response_logs(self, client: LlamaStackClient) -> None: - response = client.post_training.jobs.with_raw_response.logs( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(PostTrainingJobLogStream, job, path=["response"]) - - @parametrize - def test_streaming_response_logs(self, client: LlamaStackClient) -> None: - with client.post_training.jobs.with_streaming_response.logs( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(PostTrainingJobLogStream, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_status(self, client: LlamaStackClient) -> None: - job = client.post_training.jobs.status( - job_uuid="job_uuid", - ) - assert_matches_type(PostTrainingJobStatus, job, path=["response"]) - - @parametrize - def test_method_status_with_all_params(self, client: LlamaStackClient) -> None: - job = client.post_training.jobs.status( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(PostTrainingJobStatus, job, path=["response"]) - - @parametrize - def test_raw_response_status(self, client: LlamaStackClient) -> None: - response = client.post_training.jobs.with_raw_response.status( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(PostTrainingJobStatus, job, path=["response"]) - - @parametrize - def test_streaming_response_status(self, client: LlamaStackClient) -> None: - with client.post_training.jobs.with_streaming_response.status( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(PostTrainingJobStatus, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncJobs: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.post_training.jobs.list() - assert_matches_type(PostTrainingJob, job, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.post_training.jobs.list( - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(PostTrainingJob, job, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.post_training.jobs.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(PostTrainingJob, job, path=["response"]) - - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) - @parametrize - async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.post_training.jobs.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(PostTrainingJob, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_artifacts(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.post_training.jobs.artifacts( - job_uuid="job_uuid", - ) - assert_matches_type(PostTrainingJobArtifacts, job, path=["response"]) - - @parametrize - async def test_method_artifacts_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.post_training.jobs.artifacts( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(PostTrainingJobArtifacts, job, path=["response"]) - - @parametrize - async def test_raw_response_artifacts(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.post_training.jobs.with_raw_response.artifacts( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(PostTrainingJobArtifacts, job, path=["response"]) - - @parametrize - async def test_streaming_response_artifacts(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.post_training.jobs.with_streaming_response.artifacts( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(PostTrainingJobArtifacts, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_cancel(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.post_training.jobs.cancel( - job_uuid="job_uuid", - ) - assert job is None - - @parametrize - async def test_method_cancel_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.post_training.jobs.cancel( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert job is None - - @parametrize - async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.post_training.jobs.with_raw_response.cancel( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert job is None - - @parametrize - async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.post_training.jobs.with_streaming_response.cancel( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert job is None - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_logs(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.post_training.jobs.logs( - job_uuid="job_uuid", - ) - assert_matches_type(PostTrainingJobLogStream, job, path=["response"]) - - @parametrize - async def test_method_logs_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.post_training.jobs.logs( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(PostTrainingJobLogStream, job, path=["response"]) - - @parametrize - async def test_raw_response_logs(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.post_training.jobs.with_raw_response.logs( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(PostTrainingJobLogStream, job, path=["response"]) - - @parametrize - async def test_streaming_response_logs(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.post_training.jobs.with_streaming_response.logs( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(PostTrainingJobLogStream, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_status(self, async_client: AsyncLlamaStackClient) -> None: - job = await 
async_client.post_training.jobs.status( - job_uuid="job_uuid", - ) - assert_matches_type(PostTrainingJobStatus, job, path=["response"]) - - @parametrize - async def test_method_status_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - job = await async_client.post_training.jobs.status( - job_uuid="job_uuid", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(PostTrainingJobStatus, job, path=["response"]) - - @parametrize - async def test_raw_response_status(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.post_training.jobs.with_raw_response.status( - job_uuid="job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(PostTrainingJobStatus, job, path=["response"]) - - @parametrize - async def test_streaming_response_status(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.post_training.jobs.with_streaming_response.status( - job_uuid="job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(PostTrainingJobStatus, job, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_batch_inferences.py b/tests/api_resources/test_batch_inferences.py deleted file mode 100644 index d323e62a..00000000 --- a/tests/api_resources/test_batch_inferences.py +++ /dev/null @@ -1,675 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import ( - BatchInferenceChatCompletionResponse, -) -from llama_stack_client.types.shared import BatchCompletion - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestBatchInferences: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_chat_completion(self, client: LlamaStackClient) -> None: - batch_inference = client.batch_inferences.chat_completion( - messages_batch=[ - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - ], - model="model", - ) - assert_matches_type(BatchInferenceChatCompletionResponse, batch_inference, path=["response"]) - - @parametrize - def test_method_chat_completion_with_all_params(self, client: LlamaStackClient) -> None: - batch_inference = client.batch_inferences.chat_completion( - messages_batch=[ - [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": 
"string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - ], - model="model", - logprobs={"top_k": 0}, - sampling_params={ - "strategy": "greedy", - "max_tokens": 0, - "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, - }, - tool_choice="auto", - tool_prompt_format="json", - tools=[ - { - "tool_name": "brave_search", - "description": "description", - "parameters": { - "foo": { - "param_type": "param_type", - "default": True, - "description": "description", - "required": True, - } - }, - }, - { - "tool_name": "brave_search", - "description": "description", - "parameters": { - "foo": { - "param_type": "param_type", - "default": True, - "description": "description", - "required": True, - } - }, - }, - { - "tool_name": "brave_search", - "description": "description", - "parameters": { - "foo": { - "param_type": "param_type", - "default": True, - "description": "description", - "required": True, - } - }, - }, - ], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(BatchInferenceChatCompletionResponse, batch_inference, path=["response"]) - - @parametrize - def test_raw_response_chat_completion(self, client: LlamaStackClient) -> None: - response = client.batch_inferences.with_raw_response.chat_completion( - messages_batch=[ - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - ], - model="model", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch_inference = response.parse() - assert_matches_type(BatchInferenceChatCompletionResponse, batch_inference, path=["response"]) - - @parametrize - def test_streaming_response_chat_completion(self, client: LlamaStackClient) -> None: - with client.batch_inferences.with_streaming_response.chat_completion( - messages_batch=[ - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - ], - model="model", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch_inference = response.parse() - assert_matches_type(BatchInferenceChatCompletionResponse, batch_inference, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_completion(self, client: LlamaStackClient) -> None: - batch_inference = client.batch_inferences.completion( - content_batch=["string", "string", "string"], - 
model="model", - ) - assert_matches_type(BatchCompletion, batch_inference, path=["response"]) - - @parametrize - def test_method_completion_with_all_params(self, client: LlamaStackClient) -> None: - batch_inference = client.batch_inferences.completion( - content_batch=["string", "string", "string"], - model="model", - logprobs={"top_k": 0}, - sampling_params={ - "strategy": "greedy", - "max_tokens": 0, - "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, - }, - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(BatchCompletion, batch_inference, path=["response"]) - - @parametrize - def test_raw_response_completion(self, client: LlamaStackClient) -> None: - response = client.batch_inferences.with_raw_response.completion( - content_batch=["string", "string", "string"], - model="model", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch_inference = response.parse() - assert_matches_type(BatchCompletion, batch_inference, path=["response"]) - - @parametrize - def test_streaming_response_completion(self, client: LlamaStackClient) -> None: - with client.batch_inferences.with_streaming_response.completion( - content_batch=["string", "string", "string"], - model="model", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch_inference = response.parse() - assert_matches_type(BatchCompletion, batch_inference, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncBatchInferences: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_chat_completion(self, async_client: AsyncLlamaStackClient) -> None: - batch_inference = await async_client.batch_inferences.chat_completion( - messages_batch=[ - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - ], - model="model", - ) - assert_matches_type(BatchInferenceChatCompletionResponse, batch_inference, path=["response"]) - - @parametrize - async def test_method_chat_completion_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - batch_inference = await async_client.batch_inferences.chat_completion( - messages_batch=[ - [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - ], - model="model", - logprobs={"top_k": 0}, - sampling_params={ - "strategy": "greedy", - "max_tokens": 0, 
- "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, - }, - tool_choice="auto", - tool_prompt_format="json", - tools=[ - { - "tool_name": "brave_search", - "description": "description", - "parameters": { - "foo": { - "param_type": "param_type", - "default": True, - "description": "description", - "required": True, - } - }, - }, - { - "tool_name": "brave_search", - "description": "description", - "parameters": { - "foo": { - "param_type": "param_type", - "default": True, - "description": "description", - "required": True, - } - }, - }, - { - "tool_name": "brave_search", - "description": "description", - "parameters": { - "foo": { - "param_type": "param_type", - "default": True, - "description": "description", - "required": True, - } - }, - }, - ], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(BatchInferenceChatCompletionResponse, batch_inference, path=["response"]) - - @parametrize - async def test_raw_response_chat_completion(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.batch_inferences.with_raw_response.chat_completion( - messages_batch=[ - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - ], - model="model", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch_inference = await response.parse() - assert_matches_type(BatchInferenceChatCompletionResponse, batch_inference, path=["response"]) - - @parametrize - async def test_streaming_response_chat_completion(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.batch_inferences.with_streaming_response.chat_completion( - messages_batch=[ - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - ], - model="model", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch_inference = await response.parse() - assert_matches_type(BatchInferenceChatCompletionResponse, batch_inference, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_completion(self, async_client: AsyncLlamaStackClient) -> None: - batch_inference = await async_client.batch_inferences.completion( - content_batch=["string", "string", "string"], - model="model", - ) - assert_matches_type(BatchCompletion, batch_inference, path=["response"]) - - @parametrize - async def test_method_completion_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - batch_inference = await async_client.batch_inferences.completion( - content_batch=["string", "string", "string"], - model="model", - logprobs={"top_k": 0}, - 
sampling_params={ - "strategy": "greedy", - "max_tokens": 0, - "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, - }, - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(BatchCompletion, batch_inference, path=["response"]) - - @parametrize - async def test_raw_response_completion(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.batch_inferences.with_raw_response.completion( - content_batch=["string", "string", "string"], - model="model", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch_inference = await response.parse() - assert_matches_type(BatchCompletion, batch_inference, path=["response"]) - - @parametrize - async def test_streaming_response_completion(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.batch_inferences.with_streaming_response.completion( - content_batch=["string", "string", "string"], - model="model", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch_inference = await response.parse() - assert_matches_type(BatchCompletion, batch_inference, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_eval_tasks.py b/tests/api_resources/test_eval_tasks.py deleted file mode 100644 index 6ca2f2c4..00000000 --- a/tests/api_resources/test_eval_tasks.py +++ /dev/null @@ -1,246 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, Optional, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import Benchmark, BenchmarkListResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestEvalTasks: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_retrieve(self, client: LlamaStackClient) -> None: - eval_task = client.eval_tasks.retrieve( - "eval_task_id", - ) - assert_matches_type(Optional[Benchmark], eval_task, path=["response"]) - - @parametrize - def test_raw_response_retrieve(self, client: LlamaStackClient) -> None: - response = client.eval_tasks.with_raw_response.retrieve( - "eval_task_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - eval_task = response.parse() - assert_matches_type(Optional[Benchmark], eval_task, path=["response"]) - - @parametrize - def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: - with client.eval_tasks.with_streaming_response.retrieve( - "eval_task_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - eval_task = response.parse() - assert_matches_type(Optional[Benchmark], eval_task, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_retrieve(self, client: LlamaStackClient) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `eval_task_id` but received ''"): - client.eval_tasks.with_raw_response.retrieve( - "", - ) - - @parametrize - def test_method_list(self, client: LlamaStackClient) -> None: - eval_task = 
client.eval_tasks.list() - assert_matches_type(BenchmarkListResponse, eval_task, path=["response"]) - - @parametrize - def test_raw_response_list(self, client: LlamaStackClient) -> None: - response = client.eval_tasks.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - eval_task = response.parse() - assert_matches_type(BenchmarkListResponse, eval_task, path=["response"]) - - @parametrize - def test_streaming_response_list(self, client: LlamaStackClient) -> None: - with client.eval_tasks.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - eval_task = response.parse() - assert_matches_type(BenchmarkListResponse, eval_task, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_register(self, client: LlamaStackClient) -> None: - eval_task = client.eval_tasks.register( - dataset_id="dataset_id", - eval_task_id="eval_task_id", - scoring_functions=["string"], - ) - assert eval_task is None - - @parametrize - def test_method_register_with_all_params(self, client: LlamaStackClient) -> None: - eval_task = client.eval_tasks.register( - dataset_id="dataset_id", - eval_task_id="eval_task_id", - scoring_functions=["string"], - metadata={"foo": True}, - provider_benchmark_id="provider_benchmark_id", - provider_id="provider_id", - ) - assert eval_task is None - - @parametrize - def test_raw_response_register(self, client: LlamaStackClient) -> None: - response = client.eval_tasks.with_raw_response.register( - dataset_id="dataset_id", - eval_task_id="eval_task_id", - scoring_functions=["string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - eval_task = response.parse() - assert eval_task is None - - @parametrize - def test_streaming_response_register(self, client: LlamaStackClient) -> None: - with client.eval_tasks.with_streaming_response.register( - dataset_id="dataset_id", - eval_task_id="eval_task_id", - scoring_functions=["string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - eval_task = response.parse() - assert eval_task is None - - assert cast(Any, response.is_closed) is True - - -class TestAsyncEvalTasks: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None: - eval_task = await async_client.eval_tasks.retrieve( - "eval_task_id", - ) - assert_matches_type(Optional[Benchmark], eval_task, path=["response"]) - - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.eval_tasks.with_raw_response.retrieve( - "eval_task_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - eval_task = await response.parse() - assert_matches_type(Optional[Benchmark], eval_task, path=["response"]) - - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.eval_tasks.with_streaming_response.retrieve( - "eval_task_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - 
eval_task = await response.parse() - assert_matches_type(Optional[Benchmark], eval_task, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `eval_task_id` but received ''"): - await async_client.eval_tasks.with_raw_response.retrieve( - "", - ) - - @parametrize - async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: - eval_task = await async_client.eval_tasks.list() - assert_matches_type(BenchmarkListResponse, eval_task, path=["response"]) - - @parametrize - async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.eval_tasks.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - eval_task = await response.parse() - assert_matches_type(BenchmarkListResponse, eval_task, path=["response"]) - - @parametrize - async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.eval_tasks.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - eval_task = await response.parse() - assert_matches_type(BenchmarkListResponse, eval_task, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None: - eval_task = await async_client.eval_tasks.register( - dataset_id="dataset_id", - eval_task_id="eval_task_id", - scoring_functions=["string"], - ) - assert eval_task is None - - @parametrize - async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - eval_task = await async_client.eval_tasks.register( - dataset_id="dataset_id", - eval_task_id="eval_task_id", - scoring_functions=["string"], - metadata={"foo": True}, - provider_benchmark_id="provider_benchmark_id", - provider_id="provider_id", - ) - assert eval_task is None - - @parametrize - async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.eval_tasks.with_raw_response.register( - dataset_id="dataset_id", - eval_task_id="eval_task_id", - scoring_functions=["string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - eval_task = await response.parse() - assert eval_task is None - - @parametrize - async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.eval_tasks.with_streaming_response.register( - dataset_id="dataset_id", - eval_task_id="eval_task_id", - scoring_functions=["string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - eval_task = await response.parse() - assert eval_task is None - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_evaluate.py b/tests/api_resources/test_evaluate.py deleted file mode 100644 index 0c8283f5..00000000 --- a/tests/api_resources/test_evaluate.py +++ /dev/null @@ -1,319 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import Job -from llama_stack_client.types.evaluate import EvaluateResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestEvaluate: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_evaluate(self, client: LlamaStackClient) -> None: - evaluate = client.evaluate.evaluate( - candidate={ - "model": "model", - "sampling_params": {"strategy": "greedy"}, - "type": "model", - }, - input_rows=[{"foo": True}, {"foo": True}, {"foo": True}], - scoring_functions=["string", "string", "string"], - ) - assert_matches_type(EvaluateResponse, evaluate, path=["response"]) - - @parametrize - def test_method_evaluate_with_all_params(self, client: LlamaStackClient) -> None: - evaluate = client.evaluate.evaluate( - candidate={ - "model": "model", - "sampling_params": { - "strategy": "greedy", - "max_tokens": 0, - "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, - }, - "type": "model", - "system_message": { - "content": "string", - "role": "system", - }, - }, - input_rows=[{"foo": True}, {"foo": True}, {"foo": True}], - scoring_functions=["string", "string", "string"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluateResponse, evaluate, path=["response"]) - - @parametrize - def test_raw_response_evaluate(self, client: LlamaStackClient) -> None: - response = client.evaluate.with_raw_response.evaluate( - candidate={ - "model": "model", - "sampling_params": {"strategy": "greedy"}, - "type": "model", - }, - input_rows=[{"foo": True}, {"foo": True}, {"foo": True}], - scoring_functions=["string", "string", "string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = response.parse() - assert_matches_type(EvaluateResponse, evaluate, path=["response"]) - - @parametrize - def test_streaming_response_evaluate(self, client: LlamaStackClient) -> None: - with client.evaluate.with_streaming_response.evaluate( - candidate={ - "model": "model", - "sampling_params": {"strategy": "greedy"}, - "type": "model", - }, - input_rows=[{"foo": True}, {"foo": True}, {"foo": True}], - scoring_functions=["string", "string", "string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = response.parse() - assert_matches_type(EvaluateResponse, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_evaluate_batch(self, client: LlamaStackClient) -> None: - evaluate = client.evaluate.evaluate_batch( - candidate={ - "model": "model", - "sampling_params": {"strategy": "greedy"}, - "type": "model", - }, - dataset_id="dataset_id", - scoring_functions=["string", "string", "string"], - ) - assert_matches_type(Job, evaluate, path=["response"]) - - @parametrize - def test_method_evaluate_batch_with_all_params(self, client: LlamaStackClient) -> None: - evaluate = client.evaluate.evaluate_batch( - candidate={ - "model": "model", - "sampling_params": { - "strategy": "greedy", - "max_tokens": 0, - "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, - }, - "type": "model", - 
"system_message": { - "content": "string", - "role": "system", - }, - }, - dataset_id="dataset_id", - scoring_functions=["string", "string", "string"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(Job, evaluate, path=["response"]) - - @parametrize - def test_raw_response_evaluate_batch(self, client: LlamaStackClient) -> None: - response = client.evaluate.with_raw_response.evaluate_batch( - candidate={ - "model": "model", - "sampling_params": {"strategy": "greedy"}, - "type": "model", - }, - dataset_id="dataset_id", - scoring_functions=["string", "string", "string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = response.parse() - assert_matches_type(Job, evaluate, path=["response"]) - - @parametrize - def test_streaming_response_evaluate_batch(self, client: LlamaStackClient) -> None: - with client.evaluate.with_streaming_response.evaluate_batch( - candidate={ - "model": "model", - "sampling_params": {"strategy": "greedy"}, - "type": "model", - }, - dataset_id="dataset_id", - scoring_functions=["string", "string", "string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = response.parse() - assert_matches_type(Job, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncEvaluate: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_evaluate(self, async_client: AsyncLlamaStackClient) -> None: - evaluate = await async_client.evaluate.evaluate( - candidate={ - "model": "model", - "sampling_params": {"strategy": "greedy"}, - "type": "model", - }, - input_rows=[{"foo": True}, {"foo": True}, {"foo": True}], - scoring_functions=["string", "string", "string"], - ) - assert_matches_type(EvaluateResponse, evaluate, path=["response"]) - - @parametrize - async def test_method_evaluate_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - evaluate = await async_client.evaluate.evaluate( - candidate={ - "model": "model", - "sampling_params": { - "strategy": "greedy", - "max_tokens": 0, - "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, - }, - "type": "model", - "system_message": { - "content": "string", - "role": "system", - }, - }, - input_rows=[{"foo": True}, {"foo": True}, {"foo": True}], - scoring_functions=["string", "string", "string"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluateResponse, evaluate, path=["response"]) - - @parametrize - async def test_raw_response_evaluate(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.evaluate.with_raw_response.evaluate( - candidate={ - "model": "model", - "sampling_params": {"strategy": "greedy"}, - "type": "model", - }, - input_rows=[{"foo": True}, {"foo": True}, {"foo": True}], - scoring_functions=["string", "string", "string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = await response.parse() - assert_matches_type(EvaluateResponse, evaluate, path=["response"]) - - @parametrize - async def test_streaming_response_evaluate(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.evaluate.with_streaming_response.evaluate( - candidate={ - "model": "model", - "sampling_params": 
{"strategy": "greedy"}, - "type": "model", - }, - input_rows=[{"foo": True}, {"foo": True}, {"foo": True}], - scoring_functions=["string", "string", "string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = await response.parse() - assert_matches_type(EvaluateResponse, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_evaluate_batch(self, async_client: AsyncLlamaStackClient) -> None: - evaluate = await async_client.evaluate.evaluate_batch( - candidate={ - "model": "model", - "sampling_params": {"strategy": "greedy"}, - "type": "model", - }, - dataset_id="dataset_id", - scoring_functions=["string", "string", "string"], - ) - assert_matches_type(Job, evaluate, path=["response"]) - - @parametrize - async def test_method_evaluate_batch_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - evaluate = await async_client.evaluate.evaluate_batch( - candidate={ - "model": "model", - "sampling_params": { - "strategy": "greedy", - "max_tokens": 0, - "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, - }, - "type": "model", - "system_message": { - "content": "string", - "role": "system", - }, - }, - dataset_id="dataset_id", - scoring_functions=["string", "string", "string"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(Job, evaluate, path=["response"]) - - @parametrize - async def test_raw_response_evaluate_batch(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.evaluate.with_raw_response.evaluate_batch( - candidate={ - "model": "model", - "sampling_params": {"strategy": "greedy"}, - "type": "model", - }, - dataset_id="dataset_id", - scoring_functions=["string", "string", "string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = await response.parse() - assert_matches_type(Job, evaluate, path=["response"]) - - @parametrize - async def test_streaming_response_evaluate_batch(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.evaluate.with_streaming_response.evaluate_batch( - candidate={ - "model": "model", - "sampling_params": {"strategy": "greedy"}, - "type": "model", - }, - dataset_id="dataset_id", - scoring_functions=["string", "string", "string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = await response.parse() - assert_matches_type(Job, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_evaluations.py b/tests/api_resources/test_evaluations.py deleted file mode 100644 index 8291396c..00000000 --- a/tests/api_resources/test_evaluations.py +++ /dev/null @@ -1,100 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import EvaluationJob - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestEvaluations: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_text_generation(self, client: LlamaStackClient) -> None: - evaluation = client.evaluations.text_generation( - metrics=["perplexity", "rouge", "bleu"], - ) - assert_matches_type(EvaluationJob, evaluation, path=["response"]) - - @parametrize - def test_method_text_generation_with_all_params(self, client: LlamaStackClient) -> None: - evaluation = client.evaluations.text_generation( - metrics=["perplexity", "rouge", "bleu"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluationJob, evaluation, path=["response"]) - - @parametrize - def test_raw_response_text_generation(self, client: LlamaStackClient) -> None: - response = client.evaluations.with_raw_response.text_generation( - metrics=["perplexity", "rouge", "bleu"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluation = response.parse() - assert_matches_type(EvaluationJob, evaluation, path=["response"]) - - @parametrize - def test_streaming_response_text_generation(self, client: LlamaStackClient) -> None: - with client.evaluations.with_streaming_response.text_generation( - metrics=["perplexity", "rouge", "bleu"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluation = response.parse() - assert_matches_type(EvaluationJob, evaluation, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncEvaluations: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_text_generation(self, async_client: AsyncLlamaStackClient) -> None: - evaluation = await async_client.evaluations.text_generation( - metrics=["perplexity", "rouge", "bleu"], - ) - assert_matches_type(EvaluationJob, evaluation, path=["response"]) - - @parametrize - async def test_method_text_generation_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - evaluation = await async_client.evaluations.text_generation( - metrics=["perplexity", "rouge", "bleu"], - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(EvaluationJob, evaluation, path=["response"]) - - @parametrize - async def test_raw_response_text_generation(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.evaluations.with_raw_response.text_generation( - metrics=["perplexity", "rouge", "bleu"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluation = await response.parse() - assert_matches_type(EvaluationJob, evaluation, path=["response"]) - - @parametrize - async def test_streaming_response_text_generation(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.evaluations.with_streaming_response.text_generation( - metrics=["perplexity", "rouge", "bleu"], - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluation = await response.parse() - assert_matches_type(EvaluationJob, evaluation, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_memory.py b/tests/api_resources/test_memory.py deleted file mode 100644 index b403620b..00000000 --- a/tests/api_resources/test_memory.py +++ /dev/null @@ -1,252 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import QueryDocumentsResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestMemory: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_insert(self, client: LlamaStackClient) -> None: - memory = client.memory.insert( - bank_id="bank_id", - documents=[ - { - "content": "string", - "document_id": "document_id", - "metadata": {"foo": True}, - } - ], - ) - assert memory is None - - @parametrize - def test_method_insert_with_all_params(self, client: LlamaStackClient) -> None: - memory = client.memory.insert( - bank_id="bank_id", - documents=[ - { - "content": "string", - "document_id": "document_id", - "metadata": {"foo": True}, - "mime_type": "mime_type", - } - ], - ttl_seconds=0, - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert memory is None - - @parametrize - def test_raw_response_insert(self, client: LlamaStackClient) -> None: - response = client.memory.with_raw_response.insert( - bank_id="bank_id", - documents=[ - { - "content": "string", - "document_id": "document_id", - "metadata": {"foo": True}, - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - memory = response.parse() - assert memory is None - - @parametrize - def test_streaming_response_insert(self, client: LlamaStackClient) -> None: - with client.memory.with_streaming_response.insert( - bank_id="bank_id", - documents=[ - { - "content": "string", - "document_id": "document_id", - "metadata": {"foo": True}, - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory = response.parse() - assert memory is None - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_query(self, client: LlamaStackClient) -> None: - memory = client.memory.query( - bank_id="bank_id", - query="string", - ) - assert_matches_type(QueryDocumentsResponse, memory, path=["response"]) - - @parametrize - def test_method_query_with_all_params(self, client: LlamaStackClient) -> None: - memory = client.memory.query( - bank_id="bank_id", - query="string", - params={"foo": True}, - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert_matches_type(QueryDocumentsResponse, memory, path=["response"]) - - @parametrize - def test_raw_response_query(self, client: LlamaStackClient) -> None: - response = client.memory.with_raw_response.query( - bank_id="bank_id", - query="string", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - memory = response.parse() - assert_matches_type(QueryDocumentsResponse, memory, path=["response"]) - - @parametrize - def test_streaming_response_query(self, client: LlamaStackClient) -> None: - with client.memory.with_streaming_response.query( - bank_id="bank_id", - query="string", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory = response.parse() - assert_matches_type(QueryDocumentsResponse, memory, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncMemory: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_insert(self, async_client: AsyncLlamaStackClient) -> None: - memory = await async_client.memory.insert( - bank_id="bank_id", - documents=[ - { - "content": "string", - "document_id": "document_id", - "metadata": {"foo": True}, - } - ], - ) - assert memory is None - - @parametrize - async def test_method_insert_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - memory = await async_client.memory.insert( - bank_id="bank_id", - documents=[ - { - "content": "string", - "document_id": "document_id", - "metadata": {"foo": True}, - "mime_type": "mime_type", - } - ], - ttl_seconds=0, - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert memory is None - - @parametrize - async def test_raw_response_insert(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.memory.with_raw_response.insert( - bank_id="bank_id", - documents=[ - { - "content": "string", - "document_id": "document_id", - "metadata": {"foo": True}, - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - memory = await response.parse() - assert memory is None - - @parametrize - async def test_streaming_response_insert(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.memory.with_streaming_response.insert( - bank_id="bank_id", - documents=[ - { - "content": "string", - "document_id": "document_id", - "metadata": {"foo": True}, - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory = await response.parse() - assert memory is None - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_query(self, async_client: AsyncLlamaStackClient) -> None: - memory = await async_client.memory.query( - bank_id="bank_id", - query="string", - ) - assert_matches_type(QueryDocumentsResponse, memory, path=["response"]) - - @parametrize - async def test_method_query_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - memory = await async_client.memory.query( - bank_id="bank_id", - query="string", - params={"foo": True}, - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert_matches_type(QueryDocumentsResponse, memory, path=["response"]) - - @parametrize - async def test_raw_response_query(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.memory.with_raw_response.query( - bank_id="bank_id", - query="string", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - memory = await response.parse() - assert_matches_type(QueryDocumentsResponse, memory, path=["response"]) - - @parametrize - async def test_streaming_response_query(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.memory.with_streaming_response.query( - bank_id="bank_id", - query="string", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory = await response.parse() - assert_matches_type(QueryDocumentsResponse, memory, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_memory_banks.py b/tests/api_resources/test_memory_banks.py deleted file mode 100644 index 764f59cc..00000000 --- a/tests/api_resources/test_memory_banks.py +++ /dev/null @@ -1,406 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, Optional, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import ( - MemoryBankListResponse, - MemoryBankRegisterResponse, - MemoryBankRetrieveResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestMemoryBanks: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_retrieve(self, client: LlamaStackClient) -> None: - memory_bank = client.memory_banks.retrieve( - memory_bank_id="memory_bank_id", - ) - assert_matches_type(Optional[MemoryBankRetrieveResponse], memory_bank, path=["response"]) - - @parametrize - def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None: - memory_bank = client.memory_banks.retrieve( - memory_bank_id="memory_bank_id", - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert_matches_type(Optional[MemoryBankRetrieveResponse], memory_bank, path=["response"]) - - @parametrize - def test_raw_response_retrieve(self, client: LlamaStackClient) -> None: - response = client.memory_banks.with_raw_response.retrieve( - memory_bank_id="memory_bank_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - memory_bank = response.parse() - assert_matches_type(Optional[MemoryBankRetrieveResponse], memory_bank, path=["response"]) - - @parametrize - def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: - with client.memory_banks.with_streaming_response.retrieve( - memory_bank_id="memory_bank_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory_bank = response.parse() - assert_matches_type(Optional[MemoryBankRetrieveResponse], memory_bank, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_retrieve(self, client: LlamaStackClient) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `memory_bank_id` but received ''"): - client.memory_banks.with_raw_response.retrieve( - memory_bank_id="", - ) - - @parametrize - def test_method_list(self, client: LlamaStackClient) -> None: - memory_bank = client.memory_banks.list() - 
assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"]) - - @parametrize - def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: - memory_bank = client.memory_banks.list( - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"]) - - @parametrize - def test_raw_response_list(self, client: LlamaStackClient) -> None: - response = client.memory_banks.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - memory_bank = response.parse() - assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"]) - - @parametrize - def test_streaming_response_list(self, client: LlamaStackClient) -> None: - with client.memory_banks.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory_bank = response.parse() - assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_register(self, client: LlamaStackClient) -> None: - memory_bank = client.memory_banks.register( - memory_bank_id="memory_bank_id", - params={ - "chunk_size_in_tokens": 0, - "embedding_model": "embedding_model", - "memory_bank_type": "vector", - }, - ) - assert_matches_type(MemoryBankRegisterResponse, memory_bank, path=["response"]) - - @parametrize - def test_method_register_with_all_params(self, client: LlamaStackClient) -> None: - memory_bank = client.memory_banks.register( - memory_bank_id="memory_bank_id", - params={ - "chunk_size_in_tokens": 0, - "embedding_model": "embedding_model", - "memory_bank_type": "vector", - "overlap_size_in_tokens": 0, - }, - provider_id="provider_id", - provider_memory_bank_id="provider_memory_bank_id", - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert_matches_type(MemoryBankRegisterResponse, memory_bank, path=["response"]) - - @parametrize - def test_raw_response_register(self, client: LlamaStackClient) -> None: - response = client.memory_banks.with_raw_response.register( - memory_bank_id="memory_bank_id", - params={ - "chunk_size_in_tokens": 0, - "embedding_model": "embedding_model", - "memory_bank_type": "vector", - }, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - memory_bank = response.parse() - assert_matches_type(MemoryBankRegisterResponse, memory_bank, path=["response"]) - - @parametrize - def test_streaming_response_register(self, client: LlamaStackClient) -> None: - with client.memory_banks.with_streaming_response.register( - memory_bank_id="memory_bank_id", - params={ - "chunk_size_in_tokens": 0, - "embedding_model": "embedding_model", - "memory_bank_type": "vector", - }, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory_bank = response.parse() - assert_matches_type(MemoryBankRegisterResponse, memory_bank, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_method_unregister(self, client: LlamaStackClient) -> None: - memory_bank = client.memory_banks.unregister( - memory_bank_id="memory_bank_id", - ) - assert memory_bank is None - - 
@parametrize - def test_method_unregister_with_all_params(self, client: LlamaStackClient) -> None: - memory_bank = client.memory_banks.unregister( - memory_bank_id="memory_bank_id", - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert memory_bank is None - - @parametrize - def test_raw_response_unregister(self, client: LlamaStackClient) -> None: - response = client.memory_banks.with_raw_response.unregister( - memory_bank_id="memory_bank_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - memory_bank = response.parse() - assert memory_bank is None - - @parametrize - def test_streaming_response_unregister(self, client: LlamaStackClient) -> None: - with client.memory_banks.with_streaming_response.unregister( - memory_bank_id="memory_bank_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory_bank = response.parse() - assert memory_bank is None - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_unregister(self, client: LlamaStackClient) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `memory_bank_id` but received ''"): - client.memory_banks.with_raw_response.unregister( - memory_bank_id="", - ) - - -class TestAsyncMemoryBanks: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None: - memory_bank = await async_client.memory_banks.retrieve( - memory_bank_id="memory_bank_id", - ) - assert_matches_type(Optional[MemoryBankRetrieveResponse], memory_bank, path=["response"]) - - @parametrize - async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - memory_bank = await async_client.memory_banks.retrieve( - memory_bank_id="memory_bank_id", - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert_matches_type(Optional[MemoryBankRetrieveResponse], memory_bank, path=["response"]) - - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.memory_banks.with_raw_response.retrieve( - memory_bank_id="memory_bank_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - memory_bank = await response.parse() - assert_matches_type(Optional[MemoryBankRetrieveResponse], memory_bank, path=["response"]) - - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.memory_banks.with_streaming_response.retrieve( - memory_bank_id="memory_bank_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory_bank = await response.parse() - assert_matches_type(Optional[MemoryBankRetrieveResponse], memory_bank, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `memory_bank_id` but received ''"): - await async_client.memory_banks.with_raw_response.retrieve( - 
memory_bank_id="", - ) - - @parametrize - async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: - memory_bank = await async_client.memory_banks.list() - assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"]) - - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - memory_bank = await async_client.memory_banks.list( - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"]) - - @parametrize - async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.memory_banks.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - memory_bank = await response.parse() - assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"]) - - @parametrize - async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.memory_banks.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory_bank = await response.parse() - assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None: - memory_bank = await async_client.memory_banks.register( - memory_bank_id="memory_bank_id", - params={ - "chunk_size_in_tokens": 0, - "embedding_model": "embedding_model", - "memory_bank_type": "vector", - }, - ) - assert_matches_type(MemoryBankRegisterResponse, memory_bank, path=["response"]) - - @parametrize - async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - memory_bank = await async_client.memory_banks.register( - memory_bank_id="memory_bank_id", - params={ - "chunk_size_in_tokens": 0, - "embedding_model": "embedding_model", - "memory_bank_type": "vector", - "overlap_size_in_tokens": 0, - }, - provider_id="provider_id", - provider_memory_bank_id="provider_memory_bank_id", - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert_matches_type(MemoryBankRegisterResponse, memory_bank, path=["response"]) - - @parametrize - async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.memory_banks.with_raw_response.register( - memory_bank_id="memory_bank_id", - params={ - "chunk_size_in_tokens": 0, - "embedding_model": "embedding_model", - "memory_bank_type": "vector", - }, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - memory_bank = await response.parse() - assert_matches_type(MemoryBankRegisterResponse, memory_bank, path=["response"]) - - @parametrize - async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.memory_banks.with_streaming_response.register( - memory_bank_id="memory_bank_id", - params={ - "chunk_size_in_tokens": 0, - "embedding_model": "embedding_model", - "memory_bank_type": "vector", - }, - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory_bank = await response.parse() - assert_matches_type(MemoryBankRegisterResponse, memory_bank, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_method_unregister(self, async_client: AsyncLlamaStackClient) -> None: - memory_bank = await async_client.memory_banks.unregister( - memory_bank_id="memory_bank_id", - ) - assert memory_bank is None - - @parametrize - async def test_method_unregister_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - memory_bank = await async_client.memory_banks.unregister( - memory_bank_id="memory_bank_id", - x_llama_stack_client_version="X-LlamaStack-Client-Version", - x_llama_stack_provider_data="X-LlamaStack-Provider-Data", - ) - assert memory_bank is None - - @parametrize - async def test_raw_response_unregister(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.memory_banks.with_raw_response.unregister( - memory_bank_id="memory_bank_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - memory_bank = await response.parse() - assert memory_bank is None - - @parametrize - async def test_streaming_response_unregister(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.memory_banks.with_streaming_response.unregister( - memory_bank_id="memory_bank_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - memory_bank = await response.parse() - assert memory_bank is None - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_unregister(self, async_client: AsyncLlamaStackClient) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `memory_bank_id` but received ''"): - await async_client.memory_banks.with_raw_response.unregister( - memory_bank_id="", - ) diff --git a/tests/api_resources/test_reward_scoring.py b/tests/api_resources/test_reward_scoring.py deleted file mode 100644 index 12823fbd..00000000 --- a/tests/api_resources/test_reward_scoring.py +++ /dev/null @@ -1,872 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import RewardScoringResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestRewardScoring: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_score(self, client: LlamaStackClient) -> None: - reward_scoring = client.reward_scoring.score( - dialog_generations=[ - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - ], - model="model", - ) - assert_matches_type(RewardScoringResponse, reward_scoring, path=["response"]) - - @parametrize - def test_method_score_with_all_params(self, client: LlamaStackClient) -> None: - reward_scoring = client.reward_scoring.score( - dialog_generations=[ - { - "dialog": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - }, - ], - model="model", - 
x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(RewardScoringResponse, reward_scoring, path=["response"]) - - @parametrize - def test_raw_response_score(self, client: LlamaStackClient) -> None: - response = client.reward_scoring.with_raw_response.score( - dialog_generations=[ - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - ], - model="model", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - reward_scoring = response.parse() - assert_matches_type(RewardScoringResponse, reward_scoring, path=["response"]) - - @parametrize - def test_streaming_response_score(self, client: LlamaStackClient) -> None: - with client.reward_scoring.with_streaming_response.score( - dialog_generations=[ - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - ], - model="model", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - reward_scoring = response.parse() - assert_matches_type(RewardScoringResponse, reward_scoring, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncRewardScoring: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_score(self, async_client: AsyncLlamaStackClient) -> None: - reward_scoring = await async_client.reward_scoring.score( - dialog_generations=[ - { - 
"dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - ], - model="model", - ) - assert_matches_type(RewardScoringResponse, reward_scoring, path=["response"]) - - @parametrize - async def test_method_score_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - reward_scoring = await async_client.reward_scoring.score( - dialog_generations=[ - { - "dialog": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - { - "content": "string", - "role": "user", - "context": "string", - }, - ], - }, - ], - model="model", - x_llama_stack_provider_data="X-LlamaStack-ProviderData", - ) - assert_matches_type(RewardScoringResponse, reward_scoring, path=["response"]) - - @parametrize - async def test_raw_response_score(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.reward_scoring.with_raw_response.score( - dialog_generations=[ - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": 
"string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - ], - model="model", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - reward_scoring = await response.parse() - assert_matches_type(RewardScoringResponse, reward_scoring, path=["response"]) - - @parametrize - async def test_streaming_response_score(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.reward_scoring.with_streaming_response.score( - dialog_generations=[ - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - { - "dialog": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - "sampled_generations": [ - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - { - "content": "string", - "role": "user", - }, - ], - }, - ], - model="model", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - reward_scoring = await response.parse() - assert_matches_type(RewardScoringResponse, reward_scoring, path=["response"]) - - assert cast(Any, response.is_closed) is True From de50d3166147c8459c029c9a7175f84f8a85e901 Mon Sep 17 00:00:00 2001 From: Xi Yan Date: Thu, 13 Mar 2025 00:24:52 -0700 Subject: [PATCH 2/2] fix imports --- src/llama_stack_client/lib/agents/agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama_stack_client/lib/agents/agent.py b/src/llama_stack_client/lib/agents/agent.py index 76841e27..9ccc5063 100644 --- a/src/llama_stack_client/lib/agents/agent.py +++ b/src/llama_stack_client/lib/agents/agent.py @@ -11,7 +11,7 @@ from llama_stack_client.types.agent_create_params import AgentConfig from llama_stack_client.types.agents.turn import CompletionMessage, Turn from llama_stack_client.types.agents.turn_create_params import Document, Toolgroup -from llama_stack_client.types.agents.turn_create_response import AgentTurnResponseStreamChunk +from llama_stack_client.types.agents.agent_turn_response_stream_chunk import 
AgentTurnResponseStreamChunk from llama_stack_client.types.shared.tool_call import ToolCall from llama_stack_client.types.shared_params.agent_config import ToolConfig from llama_stack_client.types.shared_params.response_format import ResponseFormat
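
Note on the import fix above: the AgentTurnResponseStreamChunk class itself is unchanged; only its module path moves. Below is a minimal, illustrative sketch of how downstream code might narrow a stream of these chunks to the final Turn. The consume_chunks helper and the chunk.event.payload layout are assumptions made for illustration only and are not part of this patch.

    from typing import Iterable, Optional

    from llama_stack_client.types.agents.turn import Turn
    from llama_stack_client.types.agents.agent_turn_response_stream_chunk import AgentTurnResponseStreamChunk


    def consume_chunks(chunks: Iterable[AgentTurnResponseStreamChunk]) -> Optional[Turn]:
        # Illustrative helper (not from this patch): walk the streamed chunks and keep
        # the last payload that carries a completed Turn, if any. The event/payload
        # attribute layout is an assumption about the generated chunk model.
        final_turn: Optional[Turn] = None
        for chunk in chunks:
            event = getattr(chunk, "event", None)
            payload = getattr(event, "payload", None)
            turn = getattr(payload, "turn", None)
            if turn is not None:
                final_turn = turn
        return final_turn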