Skip to content

Commit 8afaf2a

Browse files
committed
feat: POC automation test for AI using python
JIRA: QA-23855 risk: nonprod
1 parent 40b351c commit 8afaf2a

16 files changed

+673
-0
lines changed
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
# (C) 2024 GoodData Corporation
2+
HOST=
3+
TOKEN=
4+
DATASOURCE_ID=
5+
WORKSPACE_ID=
6+
LLM_TOKEN=
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
# (C) 2021 GoodData Corporation
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
# (C) 2024 GoodData Corporation
2+
# filepath: /Users/tubui/Documents/CODE/gooddata-python-sdk-1/gooddata-sdk/integration_tests/scripts/conftest.py
3+
import os
4+
5+
import pytest
6+
from dotenv import load_dotenv
7+
8+
# Load the .env file from the current directory
9+
load_dotenv()
10+
11+
12+
@pytest.fixture(scope="session", autouse=True)
def setup_env():
    """Populate and validate the environment variables used by the tests.

    Runs once per session before any test. HOST and TOKEN are mandatory
    (an OSError is raised when missing); the remaining variables only
    produce a warning so a partial configuration can still run a subset
    of the tests.
    """
    # Copy each variable back into os.environ with a fallback so that
    # downstream code can rely on every key being present.
    defaults = {
        "HOST": "https://checklist.staging.stg11.panther.intgdc.com",
        "TOKEN": "",
        "DATASOURCE_ID": "",
        "WORKSPACE_ID": "",
        "DATASOURCE_TYPE": "",
        "DATASOURCE_PASSWORD": "",
    }
    for name, fallback in defaults.items():
        os.environ[name] = os.getenv(name, fallback)

    # Hard requirements: without these nothing can talk to the backend.
    if not os.environ["HOST"]:
        raise OSError("\nHOST environment variable is not set.")
    if not os.environ["TOKEN"]:
        raise OSError("\nTOKEN environment variable is not set.")

    # Soft requirements: warn only. (The old message said "DATA_SOURCE_ID",
    # which does not match the actual variable name DATASOURCE_ID — fixed.)
    for name in ("DATASOURCE_ID", "WORKSPACE_ID", "DATASOURCE_TYPE", "DATASOURCE_PASSWORD"):
        if not os.environ[name]:
            print(f"\nWarning: {name} environment variable is not set.")
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
{
2+
"id": "total_returns_per_month",
3+
"title": "Total Returns per Month",
4+
"visualizationType": "COLUMN",
5+
"metrics": [
6+
{
7+
"id": "total_returns",
8+
"type": "metric",
9+
"title": "Total Returns"
10+
}
11+
],
12+
"dimensionality": [
13+
{
14+
"id": "return_date.month",
15+
"type": "attribute",
16+
"title": "Return date - Month/Year"
17+
}
18+
],
19+
"filters": [],
20+
"suggestions": [
21+
{
22+
"query": "Switch to a line chart to better visualize the trend of total returns over the months.",
23+
"label": "Line Chart for Trends"
24+
},
25+
{
26+
"query": "Filter the data to show total returns for this year only.",
27+
"label": "This Year's Returns"
28+
}
29+
]
30+
}
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
{
2+
"id": "number_of_order_ids",
3+
"title": "Number of Order IDs",
4+
"visualizationType": "HEADLINE",
5+
"metrics": [
6+
{
7+
"id": "order_id",
8+
"type": "attribute",
9+
"title": "Number of Order IDs",
10+
"aggFunction": "COUNT"
11+
}
12+
],
13+
"dimensionality": [],
14+
"filters": [],
15+
"suggestions": [
16+
{
17+
"query": "Show the number of orders by year",
18+
"label": "Show by Year"
19+
}
20+
]
21+
}
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
[
2+
{
3+
"question": "What is number of order id, show as HEADLINE chart?",
4+
"expected_objects_file": "headline_count_of_order.json"
5+
},
6+
{
7+
"question": "What is total returns per month? show as COLUMN chart",
8+
"expected_objects_file": "column_total_returns_by_month.json"
9+
}
10+
]
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
# (C) 2021 GoodData Corporation
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
# (C) 2024 GoodData Corporation
2+
import os
3+
import sys
4+
5+
import pytest
6+
from dotenv import load_dotenv
7+
from gooddata_sdk import GoodDataSdk
8+
9+
# Make sibling modules importable when pytest is invoked from another directory.
SCRIPTS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPTS_DIR)


# Load environment variables from the .env file
load_dotenv()

# Connection settings for the GoodData backend, taken from the environment.
test_config = {"host": os.getenv("HOST"), "token": os.getenv("TOKEN")}
workspace_id = os.getenv("WORKSPACE_ID")

# Questions fed to the AI chat endpoint — one parametrized test case each.
questions = ["What is number of order line id ?"]
# NOTE: the module-level `sdk = GoodDataSdk.create(...)` was removed — it was
# never used (test_ask_ai builds its own client) and constructed an SDK client
# as an import-time side effect.
24+
@pytest.mark.parametrize("question", questions)
def test_ask_ai(question):
    """Send one question to the AI chat endpoint and expect any non-None answer."""
    # A fresh SDK client per test keeps the parametrized cases independent.
    client = GoodDataSdk.create(host_=test_config["host"], token_=test_config["token"])
    answer = client.compute.ai_chat(workspace_id, question=question)

    print(f"Chat AI response: {answer}")
    assert answer is not None, "Response should not be None"
31+
32+
# Allow running this module directly with the default pytest options.
# (A leftover debug `print("before test")` was removed here.)
if __name__ == "__main__":
    pytest.main()
Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
1+
# (C) 2024 GoodData Corporation
2+
3+
import os
4+
import sys
5+
from pathlib import Path
6+
from pprint import pprint
7+
8+
import gooddata_api_client
9+
import pytest
10+
from dotenv import load_dotenv
11+
from gooddata_api_client.api import smart_functions_api
12+
from gooddata_api_client.model.chat_history_request import ChatHistoryRequest
13+
from gooddata_api_client.model.chat_request import ChatRequest
14+
15+
# Make the sibling `utils` module importable regardless of pytest's cwd.
SCRIPTS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPTS_DIR)
# Imported after the sys.path tweak above on purpose — `utils` lives next to
# this script, not on the default import path.
from utils import compare_and_print_diff, load_json, normalize_metrics

# Fixture locations, resolved relative to this script's parent directory.
_current_dir = Path(__file__).parent.absolute()
parent_dir = _current_dir.parent
expected_object_dir = parent_dir / "expected"
questions_list_dir = parent_dir / "fixtures" / "ai_questions.json"

# Load environment variables from the .env file
load_dotenv()
26+
27+
28+
@pytest.fixture(scope="module")
def test_config():
    """Collect backend connection settings from the environment, once per module."""
    # Each config key maps to the same name upper-cased in the environment.
    return {key: os.getenv(key.upper()) for key in ("host", "token", "workspace_id", "llm_token")}
36+
37+
38+
@pytest.fixture(scope="module")
def api_client(test_config):
    """Build a low-level API client authenticated via a bearer token header."""
    client = gooddata_api_client.ApiClient(
        gooddata_api_client.Configuration(host=test_config["host"])
    )
    client.default_headers["Authorization"] = f"Bearer {test_config['token']}"
    return client
44+
45+
46+
def validate_response(actual_response, expected_response):
    """Compare the first AI-created visualization against the expected fixture.

    Checks metrics (titles excluded), visualization type, dimensionality and
    filters using compare_and_print_diff, which is expected to report/fail on
    any mismatch.

    :param actual_response: dict form of the chat API response
    :param expected_response: dict loaded from the expected-objects JSON file
    """
    # Every compared field lives on the first created visualization —
    # look it up once instead of repeating the chain four times.
    actual = actual_response["created_visualizations"]["objects"][0]

    actual_metrics = normalize_metrics(actual["metrics"], exclude_keys=["title"])
    expected_metrics = normalize_metrics(expected_response["metrics"], exclude_keys=["title"])
    compare_and_print_diff(actual_metrics, expected_metrics, "Metrics")

    # Note the key spelling differs between sides: snake_case in the API
    # response, camelCase in the fixture file.
    compare_and_print_diff(actual["visualization_type"], expected_response["visualizationType"], "Visualization type")
    compare_and_print_diff(actual["dimensionality"], expected_response["dimensionality"], "Dimensionality")
    compare_and_print_diff(actual["filters"], expected_response["filters"], "Filters")
61+
62+
63+
def test_ai_chat_history_reset(api_client, test_config):
    """Reset the workspace's AI chat history so later tests start clean."""
    smart_api = smart_functions_api.SmartFunctionsApi(api_client)
    reset_request = ChatHistoryRequest(reset=True)
    try:
        pprint(smart_api.ai_chat_history(test_config["workspace_id"], reset_request))
    except gooddata_api_client.ApiException as e:
        pytest.fail(f"API exception: {e}")
    except Exception as e:
        pytest.fail(f"Unexpected error: {e}")
73+
74+
75+
# Question / expected-object pairs driving the parametrized chat test.
questions_list = load_json(questions_list_dir)
_case_params = [(case["question"], case["expected_objects_file"]) for case in questions_list]
_case_ids = [case["question"] for case in questions_list]


@pytest.mark.parametrize("question, expected_file", _case_params, ids=_case_ids)
def test_ai_chat(api_client, test_config, question, expected_file):
    """Ask the AI one question and diff the produced visualization against a fixture."""
    expected_objects = load_json(os.path.join(expected_object_dir, expected_file))
    smart_api = smart_functions_api.SmartFunctionsApi(api_client)
    try:
        api_response = smart_api.ai_chat(test_config["workspace_id"], ChatRequest(question=question))
        print("\napi_response", api_response.created_visualizations.objects[0])
        print("\nexpected_file", expected_objects)

        validate_response(api_response.to_dict(), expected_objects)

    except gooddata_api_client.ApiException as e:
        pytest.fail(f"API exception: {e}")
    except Exception as e:
        pytest.fail(f"Unexpected error: {e}")


if __name__ == "__main__":
    pytest.main(["-s", __file__])
Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,116 @@
1+
# (C) 2024 GoodData Corporation
2+
3+
import os
4+
import sys
5+
import time
6+
import uuid
7+
from pprint import pprint
8+
9+
import gooddata_api_client
10+
import pytest
11+
from dotenv import load_dotenv
12+
from gooddata_api_client.api import entities_api, llm_endpoints_api, metadata_sync_api
13+
from gooddata_api_client.model.json_api_llm_endpoint_in import JsonApiLlmEndpointIn
14+
from gooddata_api_client.model.json_api_llm_endpoint_in_attributes import JsonApiLlmEndpointInAttributes
15+
from gooddata_api_client.model.json_api_llm_endpoint_in_document import JsonApiLlmEndpointInDocument
16+
from gooddata_api_client.model.json_api_workspace_in import JsonApiWorkspaceIn
17+
from gooddata_api_client.model.json_api_workspace_in_attributes import JsonApiWorkspaceInAttributes
18+
from gooddata_api_client.model.json_api_workspace_in_document import JsonApiWorkspaceInDocument
19+
20+
# Make sibling modules importable regardless of pytest's working directory.
SCRIPTS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPTS_DIR)


# Load environment variables from the .env file
load_dotenv()
26+
27+
28+
@pytest.fixture(scope="module")
def test_config():
    """Collect backend connection settings from the environment, once per module."""
    # Each config key maps to the same name upper-cased in the environment.
    return {key: os.getenv(key.upper()) for key in ("host", "token", "workspace_id", "llm_token")}
36+
37+
38+
@pytest.fixture(scope="module")
def api_client(test_config):
    """Build a low-level API client authenticated via a bearer token header."""
    client = gooddata_api_client.ApiClient(
        gooddata_api_client.Configuration(host=test_config["host"])
    )
    client.default_headers["Authorization"] = f"Bearer {test_config['token']}"
    return client
44+
45+
46+
def test_create_llm_endpoint(api_client, test_config):
    """Register a new OpenAI LLM endpoint and assert the API accepts it."""
    # Timestamped title so repeated runs do not collide on the same name.
    llm_title = f"python_sdk_test_{int(time.time())}"
    api_instance = llm_endpoints_api.LLMEndpointsApi(api_client)
    json_api_llm_endpoint_in_document = JsonApiLlmEndpointInDocument(
        data=JsonApiLlmEndpointIn(
            attributes=JsonApiLlmEndpointInAttributes(
                llm_model="gpt-4o",
                provider="OPENAI",
                title=llm_title,
                token=test_config["llm_token"],
                # NOTE(review): camelCase kwarg while the siblings are
                # snake_case (llm_model, provider) — confirm the generated
                # model really accepts `workspaceIds`; OpenAPI-generated
                # Python clients usually expect `workspace_ids`.
                workspaceIds=[test_config["workspace_id"]],
            ),
            # Random hex id: endpoint ids must be unique per organization.
            id=uuid.uuid4().hex,
            type="llmEndpoint",
        ),
    )

    print("json_api_llm_endpoint_in_document", json_api_llm_endpoint_in_document)
    try:
        # Post LLM endpoint entities
        api_response = api_instance.create_entity_llm_endpoints(json_api_llm_endpoint_in_document)
        pprint(api_response)
        assert api_response is not None, "API response should not be None"
    except gooddata_api_client.ApiException as e:
        pytest.fail(f"API exception: {e}")
    except Exception as e:
        pytest.fail(f"Unexpected error: {e}")
73+
74+
75+
def enable_early_access_per_workspace(api_client, test_config, feature_flag_name):
    """Turn on one early-access feature flag for the configured workspace.

    Helper shared by the feature-flag tests; any API failure is reported
    through pytest.fail so the calling test fails with the error details.
    """
    entities = entities_api.EntitiesApi(api_client)
    payload = JsonApiWorkspaceInDocument(
        data=JsonApiWorkspaceIn(
            attributes=JsonApiWorkspaceInAttributes(early_access_values=[feature_flag_name]),
            id=test_config["workspace_id"],
            type="workspace",
        ),
    )
    try:
        print("Attempting to enable early access feature flag...")
        print("workspace_id", test_config["workspace_id"])

        pprint(entities.update_entity_workspaces(test_config["workspace_id"], payload))
    except gooddata_api_client.ApiException as e:
        pytest.fail(f"API exception: {e}")
    except Exception as e:
        pytest.fail(f"Unexpected error: {e}")
96+
97+
98+
def test_enable_early_access_per_workspace(api_client, test_config):
    """Enable the experimental GenAI chat flag required by the chat tests."""
    enable_early_access_per_workspace(api_client, test_config, "experimental-genai-chat")
100+
101+
102+
def test_metadata_sync(api_client, test_config):
    """Trigger a metadata sync for the workspace; any API error fails the test."""
    sync_api = metadata_sync_api.MetadataSyncApi(api_client)
    try:
        print("Attempting to sync metadata...")
        print("workspace_id", test_config["workspace_id"])

        sync_api.metadata_sync(test_config["workspace_id"])
    except gooddata_api_client.ApiException as e:
        pytest.fail(f"API exception: {e}")
    except Exception as e:
        pytest.fail(f"Unexpected error: {e}")
113+
114+
115+
# Allow running this module directly; `-s` disables pytest's output capture
# so the diagnostic prints above are visible.
if __name__ == "__main__":
    pytest.main(["-s", __file__])

0 commit comments

Comments
 (0)