
Commit d0c33bc

Merge remote-tracking branch 'origin/release/8.0' into feature/dss50-analysis-cleanup
2 parents: bd7dfcc + 211ef21


44 files changed: +7552 / -976 lines

.gitignore

Lines changed: 2 additions & 1 deletion

@@ -1,2 +1,3 @@
 *.pyc
-.idea
+.idea
+*.iml

HISTORY.txt

Lines changed: 10 additions & 0 deletions

@@ -1,6 +1,16 @@
 Changelog
 ==========
 
+5.1.0 (2019-03-01)
+------------------
+
+* Initial release for DSS 5.1
+
+5.0.0 (2018-07-26)
+------------------
+
+* Initial release for DSS 5.0
+
 4.3.0 (2018-06-01)
 ------------------
 
README

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
 API client for Dataiku Data Science Studio
 
 For more information, see:
-https://doc.dataiku.com/dss/latest/api/public/client-python/index.html
+https://doc.dataiku.com/dss/latest/python-api/rest-api-client/index.html

dataikuapi/apinode_client.py

Lines changed: 30 additions & 5 deletions

@@ -16,7 +16,8 @@ def __init__(self, uri, service_id, api_key=None):
         """
         DSSBaseClient.__init__(self, "%s/%s" % (uri, "public/api/v1/%s" % service_id), api_key)
 
-    def predict_record(self, endpoint_id, features, forced_generation=None, dispatch_key=None, context=None):
+    def predict_record(self, endpoint_id, features, forced_generation=None, dispatch_key=None, context=None,
+                       with_explanations=None, explanation_method=None, n_explanations=None, n_explanations_mc_steps=None):
         """
         Predicts a single record on a DSS API node endpoint (standard or custom prediction)
 
@@ -25,12 +26,24 @@ def predict_record(self, endpoint_id, features, forced_generation=None, dispatch
         :param forced_generation: See documentation about multi-version prediction
         :param dispatch_key: See documentation about multi-version prediction
         :param context: Optional, Python dictionary of additional context information. The context information is logged, but not directly used.
+        :param with_explanations: Optional, whether individual explanations should be computed for each record. The prediction endpoint must be compatible. If None, will use the value configured in the endpoint.
+        :param explanation_method: Optional, method to compute explanations. Valid values are 'SHAPLEY' or 'ICE'. If None, will use the value configured in the endpoint.
+        :param n_explanations: Optional, number of explanations to output per prediction. If None, will use the value configured in the endpoint.
+        :param n_explanations_mc_steps: Optional, precision parameter for SHAPLEY method, higher means more precise but slower (between 25 and 1000).
+            If None, will use the value configured in the endpoint.
 
         :return: a Python dict of the API answer. The answer contains a "result" key (itself a dict)
         """
-        obj = {
-            "features" :features
+        obj = {
+            "features": features,
+            "explanations": {
+                "enabled": with_explanations,
+                "method": explanation_method,
+                "nExplanations": n_explanations,
+                "nMonteCarloSteps": n_explanations_mc_steps
+            }
         }
+
         if context is not None:
             obj["context"] = context
         if forced_generation is not None:
@@ -40,14 +53,20 @@ def predict_record(self, endpoint_id, features, forced_generation=None, dispatch
 
         return self._perform_json("POST", "%s/predict" % endpoint_id, body = obj)
 
-    def predict_records(self, endpoint_id, records, forced_generation=None, dispatch_key=None):
+    def predict_records(self, endpoint_id, records, forced_generation=None, dispatch_key=None, with_explanations=None,
+                        explanation_method=None, n_explanations=None, n_explanations_mc_steps=None):
         """
         Predicts a batch of records on a DSS API node endpoint (standard or custom prediction)
 
         :param str endpoint_id: Identifier of the endpoint to query
         :param records: Python list of records. Each record must be a Python dict. Each record must contain a "features" dict (see predict_record) and optionally a "context" dict.
         :param forced_generation: See documentation about multi-version prediction
        :param dispatch_key: See documentation about multi-version prediction
+        :param with_explanations: Optional, whether individual explanations should be computed for each record. The prediction endpoint must be compatible. If None, will use the value configured in the endpoint.
+        :param explanation_method: Optional, method to compute explanations. Valid values are 'SHAPLEY' or 'ICE'. If None, will use the value configured in the endpoint.
+        :param n_explanations: Optional, number of explanations to output per prediction. If None, will use the value configured in the endpoint.
+        :param n_explanations_mc_steps: Optional, precision parameter for SHAPLEY method, higher means more precise but slower (between 25 and 1000).
+            If None, will use the value configured in the endpoint.
 
         :return: a Python dict of the API answer. The answer contains a "results" key (which is an array of result objects)
         """
@@ -57,7 +76,13 @@ def predict_records(self, endpoint_id, records, forced_generation=None, dispatch
                 raise ValueError("Each record must contain a 'features' dict")
 
         obj = {
-            "items" : records
+            "items": records,
+            "explanations": {
+                "enabled": with_explanations,
+                "method": explanation_method,
+                "nExplanations": n_explanations,
+                "nMonteCarloSteps": n_explanations_mc_steps
+            }
         }
 
         if forced_generation is not None:
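Below is a minimal usage sketch (not part of the commit) showing how the new explanation parameters could be passed. It assumes the class exposing these methods is dataikuapi.apinode_client.APINodeClient (the class name is not visible in this diff), plus a hypothetical API node URL, service id, endpoint id, API key, and feature names; the payload keys and return structure follow the docstrings above.

# Minimal usage sketch, not part of the commit. Hypothetical assumptions:
# the client class name (APINodeClient), the API node URL, the service id
# "fraud", the endpoint id "score", and the feature names/values.
from dataikuapi.apinode_client import APINodeClient

client = APINodeClient("http://localhost:12000", "fraud", api_key="my-api-key")

# Single record: request 3 SHAPLEY explanations alongside the prediction.
single = client.predict_record(
    "score",
    {"amount": 120.5, "country": "FR"},
    with_explanations=True,
    explanation_method="SHAPLEY",
    n_explanations=3,
    n_explanations_mc_steps=100)
print(single["result"])          # per the docstring, the answer carries a "result" dict

# Batch of records: the same explanation settings apply to every item.
batch = client.predict_records(
    "score",
    [{"features": {"amount": 120.5, "country": "FR"}},
     {"features": {"amount": 8.0, "country": "DE"}}],
    with_explanations=True,
    explanation_method="ICE",
    n_explanations=3)
for result in batch["results"]:  # "results" is an array of result objects
    print(result)

Note that when the four explanation arguments are left at their default of None, the request body still includes an "explanations" object whose fields are None, so the values configured in the endpoint apply, as the docstrings state.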
