Skip to content

Commit 8d48fab

Browse files
committed
Refactor significance to sensitivity_indices
1 parent 138a56b commit 8d48fab

File tree

9 files changed

+48
-99
lines changed

9 files changed

+48
-99
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ When public
1414
analysis method, which is based on Monte Carlo simulation. SimDec consists of
1515
three major parts:
1616

17-
1. computing significance indices,
17+
1. computing sensitivity indices,
1818
2. creating multi-variable scenarios and mapping the output values to them, and
1919
3. visualizing the scenarios on the output distribution by color-coding its segments.
2020

app.py

Lines changed: 20 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -56,12 +56,12 @@ def filtered_output(data, output_name):
5656

5757

5858
@pn.cache
59-
def significance(inputs, output):
60-
si = sd.significance(inputs=inputs, output=output).si
59+
def sensitivity_indices(inputs, output):
60+
si = sd.sensitivity_indices(inputs=inputs, output=output).si
6161
return si
6262

6363

64-
def significance_table(si, inputs):
64+
def sensitivity_indices_table(si, inputs):
6565
var_names = inputs.columns
6666
var_order = np.argsort(si)[::-1]
6767
var_names = var_names[var_order].tolist()
@@ -98,30 +98,30 @@ def explained_variance(si):
9898
return sum(si) + np.finfo(np.float64).eps
9999

100100

101-
def filtered_si(significance_table, input_names):
102-
df = significance_table.value
101+
def filtered_si(sensitivity_indices_table, input_names):
102+
df = sensitivity_indices_table.value
103103
si = []
104104
for input_name in input_names:
105105
si.append(df.loc[df["Inputs"] == input_name, "Indices"])
106106
return np.asarray(si).flatten()
107107

108108

109-
def explained_variance_80(significance_table):
110-
si = significance_table.value["Indices"]
109+
def explained_variance_80(sensitivity_indices_table):
110+
si = sensitivity_indices_table.value["Indices"]
111111
pos_80 = bisect.bisect_right(np.cumsum(si), 0.8)
112112

113113
# pos_80 = max(2, pos_80)
114114
# pos_80 = min(len(si), pos_80)
115115

116-
input_names = significance_table.value["Inputs"]
116+
input_names = sensitivity_indices_table.value["Inputs"]
117117
return input_names.to_list()[: pos_80 + 1]
118118

119119

120120
def decomposition(dec_limit, si, inputs, output):
121121
return sd.decomposition(
122122
inputs=inputs,
123123
output=output,
124-
significance=si,
124+
sensitivity_indices=si,
125125
dec_limit=dec_limit,
126126
auto_ordering=False,
127127
)
@@ -203,15 +203,19 @@ def tableau_states(res, states):
203203
filtered_output, interactive_file, selector_inputs_sensitivity
204204
)
205205

206-
interactive_significance = pn.bind(significance, interactive_inputs, interactive_output)
207-
interactive_explained_variance = pn.bind(explained_variance, interactive_significance)
206+
interactive_sensitivity_indices = pn.bind(
207+
sensitivity_indices, interactive_inputs, interactive_output
208+
)
209+
interactive_explained_variance = pn.bind(
210+
explained_variance, interactive_sensitivity_indices
211+
)
208212

209-
interactive_significance_table = pn.bind(
210-
significance_table, interactive_significance, interactive_inputs
213+
interactive_sensitivity_indices_table = pn.bind(
214+
sensitivity_indices_table, interactive_sensitivity_indices, interactive_inputs
211215
)
212216

213217
interactive_explained_variance_80 = pn.bind(
214-
explained_variance_80, interactive_significance_table
218+
explained_variance_80, interactive_sensitivity_indices_table
215219
)
216220
selector_inputs_decomposition = pn.widgets.MultiChoice(
217221
name="Select inputs for decomposition",
@@ -224,7 +228,7 @@ def tableau_states(res, states):
224228
)
225229

226230
interactive_filtered_si = pn.bind(
227-
filtered_si, interactive_significance_table, selector_inputs_decomposition
231+
filtered_si, interactive_sensitivity_indices_table, selector_inputs_decomposition
228232
)
229233
interactive_filtered_explained_variance = pn.bind(
230234
explained_variance, interactive_filtered_si
@@ -339,7 +343,7 @@ def tableau_states(res, states):
339343
),
340344
pn.Spacer(height=50),
341345
pn.pane.Markdown(si_description, styles={"color": "#0072b5"}),
342-
pn.Column(interactive_significance_table, width=400),
346+
pn.Column(interactive_sensitivity_indices_table, width=400),
343347
),
344348
pn.Column(
345349
pn.pane.Markdown(table_description, styles={"color": "#0072b5"}),

docs/index.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ Welcome to SimDec's documentation!
55
analysis method, which is based on Monte Carlo simulation. SimDec consists of
66
three major parts:
77

8-
1. computing significance indices,
8+
1. computing sensitivity indices,
99
2. creating multi-variable scenarios and mapping the output values to them, and
1010
3. visualizing the scenarios on the output distribution by color-coding its segments.
1111

src/simdec/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
11
"""SimDec main namespace."""
22
from simdec.decomposition import *
3-
from simdec.significance import *
3+
from simdec.sensitivity_indices import *
44
from simdec.visualization import *
55

66
__all__ = [
7-
"significance",
7+
"sensitivity_indices",
88
"states_expansion",
99
"decomposition",
1010
"visualization",

src/simdec/decomposition.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ def decomposition(
5151
inputs: pd.DataFrame,
5252
output: pd.DataFrame,
5353
*,
54-
significance: np.ndarray,
54+
sensitivity_indices: np.ndarray,
5555
dec_limit: float = 1,
5656
auto_ordering: bool = True,
5757
states: list[int] | None = None,
@@ -65,12 +65,12 @@ def decomposition(
6565
Input variables.
6666
output : DataFrame of shape (n_runs, 1)
6767
Target variable.
68-
significance : ndarray of shape (n_factors, 1)
69-
Significance index, combined effect of each input.
68+
sensitivity_indices : ndarray of shape (n_factors, 1)
69+
Sensitivity indices, combined effect of each input.
7070
dec_limit : float
7171
Explained variance ratio to filter the number input variables.
7272
auto_ordering : bool
73-
Automatically order input columns based on the relative significance
73+
Automatically order input columns based on the relative sensitivity indices
7474
or use the provided order.
7575
states : list of int, optional
7676
List of possible states for the considered parameter.
@@ -105,11 +105,11 @@ def decomposition(
105105
output = output.to_numpy()
106106

107107
# 1. variables for decomposition
108-
var_order = np.argsort(significance)[::-1]
108+
var_order = np.argsort(sensitivity_indices)[::-1]
109109

110110
# only keep the explained variance corresponding to `dec_limit`
111-
significance = significance[var_order]
112-
n_var_dec = np.where(np.cumsum(significance) < dec_limit)[0].size
111+
sensitivity_indices = sensitivity_indices[var_order]
112+
n_var_dec = np.where(np.cumsum(sensitivity_indices) < dec_limit)[0].size
113113
n_var_dec = max(1, n_var_dec) # keep at least one variable
114114
n_var_dec = min(5, n_var_dec) # use at most 5 variables
115115

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,11 @@
66
from scipy import stats
77

88

9-
__all__ = ["significance"]
9+
__all__ = ["sensitivity_indices"]
1010

1111

1212
def number_of_bins(n_runs: int, n_factors: int) -> tuple[int, int]:
13-
"""Optimal number of bins for first & second-order significance indices.
13+
"""Optimal number of bins for first & second-order sensitivity indices.
1414
1515
Linear approximation of experimental results from (Marzban & Lahmer, 2016).
1616
"""
@@ -31,18 +31,18 @@ def _weighted_var(x: np.ndarray, weights: np.ndarray) -> np.ndarray:
3131

3232

3333
@dataclass
34-
class SignificanceResult:
34+
class SensitivityAnalysisResult:
3535
si: np.ndarray
3636
first_order: np.ndarray
3737
second_order: np.ndarray
3838

3939

40-
def significance(
40+
def sensitivity_indices(
4141
inputs: pd.DataFrame | np.ndarray, output: pd.DataFrame | np.ndarray
42-
) -> SignificanceResult:
43-
"""Significance indices.
42+
) -> SensitivityAnalysisResult:
43+
"""Sensitivity indices.
4444
45-
The significance express how much variability of the output is
45+
The sensitivity indices express how much variability of the output is
4646
explained by the inputs.
4747
4848
Parameters
@@ -54,11 +54,11 @@ def significance(
5454
5555
Returns
5656
-------
57-
res : SignificanceResult
57+
res : SensitivityAnalysisResult
5858
An object with attributes:
5959
6060
si : ndarray of shape (n_factors, 1)
61-
Significance index, combined effect of each input.
61+
Sensitivity indices, combined effect of each input.
6262
foe : ndarray of shape (n_factors, 1)
6363
First-order effects (also called 'main' or 'individual').
6464
soe : ndarray of shape (n_factors, 1)
@@ -89,9 +89,9 @@ def significance(
8989
... )
9090
>>> output = f_ishigami(inputs.T)
9191
92-
We can now pass our inputs and outputs to the `significance` function:
92+
We can now pass our inputs and outputs to the `sensitivity_indices` function:
9393
94-
>>> res = sd.significance(inputs=inputs, output=output)
94+
>>> res = sd.sensitivity_indices(inputs=inputs, output=output)
9595
>>> res.si
9696
array([0.43157591, 0.44241433, 0.11767249])
9797
@@ -168,4 +168,4 @@ def significance(
168168
soe = np.where(soe == 0, soe.T, soe)
169169
si[i] = foe[i] + soe[:, i].sum() / 2
170170

171-
return SignificanceResult(si, foe, soe)
171+
return SensitivityAnalysisResult(si, foe, soe)

src/simdec/workflow.py

Lines changed: 0 additions & 55 deletions
This file was deleted.

tests/test_decomposition.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ def test_decomposition():
1616
output_name, *v_names = list(data.columns)
1717
inputs, output = data[v_names], data[output_name]
1818
si = np.array([0.04, 0.50, 0.11, 0.28])
19-
res = sd.decomposition(inputs=inputs, output=output, significance=si)
19+
res = sd.decomposition(inputs=inputs, output=output, sensitivity_indices=si)
2020

2121
assert res.var_names == ["sigma_res", "R", "Rp0.2", "Kf"]
2222
assert res.states == [2, 2, 2, 2]
Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ def ishigami_ref_indices():
4747
return s_first, s_second, s_total
4848

4949

50-
def test_significance(ishigami_ref_indices):
50+
def test_sensitivity_indices(ishigami_ref_indices):
5151
rng = np.random.default_rng(1655943881803900660874135192647741156)
5252
n_dim = 3
5353

@@ -57,7 +57,7 @@ def test_significance(ishigami_ref_indices):
5757
)
5858
output = f_ishigami(inputs.T)
5959

60-
res = sd.significance(inputs=inputs, output=output)
60+
res = sd.sensitivity_indices(inputs=inputs, output=output)
6161

6262
assert res.si.shape == (3,)
6363
assert res.first_order.shape == (3,)
@@ -83,12 +83,12 @@ def test_significance(ishigami_ref_indices):
8383
),
8484
],
8585
)
86-
def test_significance_dataset(fname, foe_ref, si_ref):
86+
def test_sensitivity_indices_dataset(fname, foe_ref, si_ref):
8787
data = pd.read_csv(fname)
8888
output_name, *v_names = list(data.columns)
8989
inputs, output = data[v_names], data[output_name]
9090

91-
res = sd.significance(inputs=inputs, output=output)
91+
res = sd.sensitivity_indices(inputs=inputs, output=output)
9292

9393
npt.assert_allclose(res.first_order, foe_ref, atol=5e-3)
9494
npt.assert_allclose(res.si, si_ref, atol=5e-2)

0 commit comments

Comments
 (0)