Skip to content

Commit f10c19b

Browse files
Merge pull request #1298 from gooddata/snapshot-master-3b046b5d-to-rel/dev
[bot] Merge master/3b046b5d into rel/dev
2 parents cf230d5 + 3b046b5 commit f10c19b

File tree

3 files changed

+46
-9
lines changed

3 files changed

+46
-9
lines changed

packages/gooddata-pandas/src/gooddata_pandas/dataframe.py

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
# (C) 2021 GoodData Corporation
22
from __future__ import annotations
33

4-
from typing import Callable, Optional, Union
4+
from typing import Callable, Literal, Optional, Union
55

66
import pandas
77
from gooddata_api_client import models
@@ -259,6 +259,7 @@ def for_created_visualization(
259259
on_execution_submitted: Optional[Callable[[Execution], None]] = None,
260260
is_cancellable: bool = False,
261261
optimized: bool = False,
262+
grand_totals_position: Optional[Literal["pinnedBottom", "pinnedTop", "bottom", "top"]] = "bottom",
262263
) -> tuple[pandas.DataFrame, DataFrameMetadata]:
263264
"""
264265
Creates a data frame using a created visualization.
@@ -272,6 +273,9 @@ def for_created_visualization(
272273
headers in memory as lists of dicts, which can consume a lot of memory for large results.
273274
Optimized accumulator stores only unique values and stores only references to them in the list,
274275
which can significantly reduce memory usage.
276+
grand_totals_position (Literal["pinnedBottom", "pinnedTop", "bottom", "top"], optional):
277+
Position where grand totals should be placed. "pinnedBottom" and "bottom" append totals,
278+
"pinnedTop" and "top" prepend totals. Defaults to "bottom".
275279
276280
Returns:
277281
pandas.DataFrame: A DataFrame instance.
@@ -283,6 +287,7 @@ def for_created_visualization(
283287
exec_def=execution_definition,
284288
on_execution_submitted=on_execution_submitted,
285289
optimized=optimized,
290+
grand_totals_position=grand_totals_position,
286291
)
287292

288293
def result_cache_metadata_for_exec_result_id(self, result_id: str) -> ResultCacheMetadata:
@@ -306,6 +311,7 @@ def for_exec_def(
306311
page_size: int = _DEFAULT_PAGE_SIZE,
307312
on_execution_submitted: Optional[Callable[[Execution], None]] = None,
308313
optimized: bool = False,
314+
grand_totals_position: Optional[Literal["pinnedBottom", "pinnedTop", "bottom", "top"]] = "bottom",
309315
) -> tuple[pandas.DataFrame, DataFrameMetadata]:
310316
"""
311317
Creates a data frame using an execution definition.
@@ -342,6 +348,9 @@ def for_exec_def(
342348
headers in memory as lists of dicts, which can consume a lot of memory for large results.
343349
Optimized accumulator stores only unique values and stores only references to them in the list,
344350
which can significantly reduce memory usage.
351+
grand_totals_position (Literal["pinnedBottom", "pinnedTop", "bottom", "top"], optional):
352+
Position where grand totals should be placed. "pinnedBottom" and "bottom" append totals,
353+
"pinnedTop" and "top" prepend totals. Defaults to "bottom".
345354
346355
Returns:
347356
Tuple[pandas.DataFrame, DataFrameMetadata]: Tuple holding DataFrame and DataFrame metadata.
@@ -363,6 +372,7 @@ def for_exec_def(
363372
result_size_bytes_limit=result_size_bytes_limit,
364373
page_size=page_size,
365374
optimized=optimized,
375+
grand_totals_position=grand_totals_position,
366376
)
367377

368378
def for_exec_result_id(
@@ -376,6 +386,7 @@ def for_exec_result_id(
376386
use_primary_labels_in_attributes: bool = False,
377387
page_size: int = _DEFAULT_PAGE_SIZE,
378388
optimized: bool = False,
389+
grand_totals_position: Optional[Literal["pinnedBottom", "pinnedTop", "bottom", "top"]] = "bottom",
379390
) -> tuple[pandas.DataFrame, DataFrameMetadata]:
380391
"""
381392
Retrieves a DataFrame and DataFrame metadata for a given execution result identifier.
@@ -410,6 +421,9 @@ def for_exec_result_id(
410421
headers in memory as lists of dicts, which can consume a lot of memory for large results.
411422
Optimized accumulator stores only unique values and stores only references to them in the list,
412423
which can significantly reduce memory usage.
424+
grand_totals_position (Literal["pinnedBottom", "pinnedTop", "bottom", "top"], optional):
425+
Position where grand totals should be placed. "pinnedBottom" and "bottom" append totals,
426+
"pinnedTop" and "top" prepend totals. Defaults to "bottom".
413427
414428
Returns:
415429
Tuple[pandas.DataFrame, DataFrameMetadata]: Tuple holding DataFrame and DataFrame metadata.
@@ -436,4 +450,5 @@ def for_exec_result_id(
436450
use_primary_labels_in_attributes=use_primary_labels_in_attributes,
437451
page_size=page_size,
438452
optimized=optimized,
453+
grand_totals_position=grand_totals_position,
439454
)

packages/gooddata-pandas/src/gooddata_pandas/result_convertor.py

Lines changed: 25 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
from abc import ABC, abstractmethod
33
from collections.abc import Iterator
44
from functools import cached_property
5-
from typing import Any, Callable, Optional, Union, cast
5+
from typing import Any, Callable, Literal, Optional, Union, cast
66

77
import pandas
88
from attrs import define, field, frozen
@@ -698,31 +698,46 @@ def _headers_to_index(
698698
), primary_attribute_labels_mapping
699699

700700

701-
def _merge_grand_totals_into_data(extract: _DataWithHeaders) -> Union[_DataArray, list[_DataArray]]:
701+
def _merge_grand_totals_into_data(
702+
extract: _DataWithHeaders,
703+
grand_totals_position: Optional[Literal["pinnedBottom", "pinnedTop", "bottom", "top"]] = "bottom",
704+
) -> Union[_DataArray, list[_DataArray]]:
702705
"""
703706
Merges grand totals into the extracted data. This function will mutate the extracted data,
704707
extending the rows and columns with grand totals. Going with mutation here so as not to copy arrays around.
705708
706709
Args:
707710
extract (_DataWithHeaders): Extracted data with headers and grand totals.
711+
grand_totals_position (Literal["pinnedBottom", "pinnedTop", "bottom", "top"], optional):
712+
Position where grand totals should be placed. "pinnedBottom" and "bottom" append totals,
713+
"pinnedTop" and "top" prepend totals. Defaults to "bottom".
708714
709715
Returns:
710716
Union[_DataArray, List[_DataArray]]: Mutated data with rows and columns extended with grand totals.
711717
"""
712718
data: list[_DataArray] = extract.data
719+
# Treat None as "bottom" as a fallback
720+
if grand_totals_position is None:
721+
grand_totals_position = "bottom"
722+
# Determine if grand totals should be prepended or appended
723+
should_prepend = grand_totals_position in ("pinnedTop", "top")
713724

714725
if extract.grand_totals[0] is not None:
715726
# column totals are computed into extra rows, one row per column total
716-
# add those rows at the end of the data rows
717-
data.extend(extract.grand_totals[0])
727+
# add those rows at the beginning or end of the data rows based on position
728+
if should_prepend:
729+
data[:0] = extract.grand_totals[0]
730+
else:
731+
data.extend(extract.grand_totals[0])
718732

719733
if extract.grand_totals[1] is not None:
720734
# row totals are computed into extra columns that should be appended to
721-
# existing data rows
735+
# existing data rows (column position doesn't change for row totals)
722736
for row_idx, cols_to_append in enumerate(extract.grand_totals[1]):
723737
data[row_idx].extend(cols_to_append)
724738

725739
return data
740+
return data
726741

727742

728743
def _merge_grand_total_headers_into_headers(extract: _DataWithHeaders) -> _HeadersByAxis:
@@ -757,6 +772,7 @@ def convert_execution_response_to_dataframe(
757772
use_primary_labels_in_attributes: bool = False,
758773
page_size: int = _DEFAULT_PAGE_SIZE,
759774
optimized: bool = False,
775+
grand_totals_position: Optional[Literal["pinnedBottom", "pinnedTop", "bottom", "top"]] = "bottom",
760776
) -> tuple[pandas.DataFrame, DataFrameMetadata]:
761777
"""
762778
Converts execution result to a pandas dataframe, maintaining the dimensionality of the result.
@@ -776,6 +792,9 @@ def convert_execution_response_to_dataframe(
776792
headers in memory as lists of dicts, which can consume a lot of memory for large results.
777793
Optimized accumulator stores only unique values and stores only references to them in the list,
778794
which can significantly reduce memory usage.
795+
grand_totals_position (Literal["pinnedBottom", "pinnedTop", "bottom", "top"], optional):
796+
Position where grand totals should be placed. "pinnedBottom" and "bottom" append totals,
797+
"pinnedTop" and "top" prepend totals. Defaults to "bottom".
779798
780799
Returns:
781800
Tuple[pandas.DataFrame, DataFrameMetadata]: A tuple containing the created dataframe and its metadata.
@@ -789,7 +808,7 @@ def convert_execution_response_to_dataframe(
789808
optimized=optimized,
790809
)
791810

792-
full_data = _merge_grand_totals_into_data(extract)
811+
full_data = _merge_grand_totals_into_data(extract=extract, grand_totals_position=grand_totals_position)
793812
full_headers = _merge_grand_total_headers_into_headers(extract)
794813

795814
index, primary_labels_from_index = _headers_to_index(

packages/gooddata-pandas/tests/dataframe/test_dataframe_for_exec_def.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# (C) 2022 GoodData Corporation
22
from pathlib import Path
3-
from typing import Optional
3+
from typing import Literal, Optional
44

55
import pytest
66
from gooddata_pandas import DataFrameFactory
@@ -31,9 +31,12 @@ def _run_and_validate_results(
3131
expected_column_totals: Optional[list[list[int]]] = None,
3232
page_size: int = 100,
3333
optimized: bool = False,
34+
grand_totals_position: Optional[Literal["pinnedBottom", "pinnedTop", "bottom", "top"]] = "bottom",
3435
) -> str:
3536
# generate dataframe from exec_def
36-
result, result_metadata = gdf.for_exec_def(exec_def=exec_def, page_size=page_size)
37+
result, result_metadata = gdf.for_exec_def(
38+
exec_def=exec_def, page_size=page_size, grand_totals_position=grand_totals_position
39+
)
3740
assert result.values.shape == expected
3841

3942
# use result ID from computation above and generate dataframe just from it

0 commit comments

Comments
 (0)