From 2edfee4e736eb77821b052353787850103584374 Mon Sep 17 00:00:00 2001 From: Tenzin Choedon Date: Thu, 8 Jan 2026 09:31:36 -0700 Subject: [PATCH 1/9] CoDICE: getting non-DE production ready --- imap_processing/codice/codice_l1a.py | 13 +- .../codice_packet_definition.xml | 247 ++++++++++++------ 2 files changed, 172 insertions(+), 88 deletions(-) diff --git a/imap_processing/codice/codice_l1a.py b/imap_processing/codice/codice_l1a.py index 5bc0a3a9a3..732efc3875 100644 --- a/imap_processing/codice/codice_l1a.py +++ b/imap_processing/codice/codice_l1a.py @@ -7,7 +7,6 @@ from imap_processing import imap_module_directory from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes -from imap_processing.codice.codice_l1a_de import l1a_direct_event from imap_processing.codice.codice_l1a_hi_counters_aggregated import ( l1a_hi_counters_aggregated, ) @@ -87,11 +86,15 @@ def process_l1a( # noqa: PLR0912 logger.info("Processing Hi Sectored Species Counts") datasets.append(l1a_hi_sectored(datasets_by_apid[apid], lut_file)) elif apid == CODICEAPID.COD_HI_PHA: - logger.info("Processing Direct Events for Hi") - datasets.append(l1a_direct_event(datasets_by_apid[apid], apid=apid)) + logger.info("Skip processing Direct Events for Hi") + continue + # TODO: undo this in coming DE segmented work + # datasets.append(l1a_direct_event(datasets_by_apid[apid], apid=apid)) elif apid == CODICEAPID.COD_LO_PHA: - logger.info("Processing Direct Events for Lo") - datasets.append(l1a_direct_event(datasets_by_apid[apid], apid=apid)) + logger.info("Skip processing Direct Events for Lo") + continue + # TODO: undo this in coming DE segmented work + # datasets.append(l1a_direct_event(datasets_by_apid[apid], apid=apid)) elif apid in [ CODICEAPID.COD_LO_SW_PRIORITY_COUNTS, CODICEAPID.COD_LO_NSW_PRIORITY_COUNTS, diff --git a/imap_processing/codice/packet_definitions/codice_packet_definition.xml b/imap_processing/codice/packet_definitions/codice_packet_definition.xml index a50eb3d4b6..ea88c7a756 100644 --- a/imap_processing/codice/packet_definitions/codice_packet_definition.xml +++ b/imap_processing/codice/packet_definitions/codice_packet_definition.xml @@ -1,6 +1,6 @@ - + @@ -45,7 +45,7 @@ - + @@ -297,8 +297,17 @@ + + + + + + + + + - + @@ -358,7 +367,7 @@ - + @@ -439,7 +448,8 @@ - + + @@ -567,46 +577,28 @@ - - - - - - - + - - - - - - - + - - - - - - - + - - - - - - - + - + + + + + + + @@ -615,7 +607,8 @@ - + + @@ -624,7 +617,8 @@ - + + @@ -633,7 +627,8 @@ - + + @@ -688,7 +683,7 @@ - + @@ -697,8 +692,7 @@ - - + @@ -707,8 +701,7 @@ - - + @@ -717,7 +710,7 @@ - + @@ -726,7 +719,7 @@ - + @@ -796,19 +789,19 @@ - + - + - + - + - + @@ -835,15 +828,55 @@ - + - + + + + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -1115,7 +1148,7 @@ - + @@ -1145,7 +1178,13 @@ - + + + + + + + @@ -1154,7 +1193,13 @@ - + + + + + + + @@ -1163,7 +1208,13 @@ - + + + + + + + @@ -1172,7 +1223,13 @@ - + + + + + + + @@ -1181,7 +1238,13 @@ - + + + + + + + @@ -1737,7 +1800,6 @@ - @@ -3104,6 +3166,12 @@ + + INDICATES THE CURRENT OPERATIONAL STATE OF THE LO ESA SWEEP: +- NORMAL - BOTH ESAS ARE TRACKING TOGETHER +- RGFO - REDUCED GAIN FACTOR OPERATION; ESA-A IS REDUCED IN ORDER TO REDUCE THE GAIN FACTOR AND ALLOW FEWER IONS INTO THE DETECTOR +- NSO - NO SCAN OPERATION; BOTH ESAS ARE RETURNED TO A HIGH-ENERGY SETTING AND NO SCANNING IS DONE FOR THE REMAINDER OF THE ESA SWEEP + @@ -3127,17 +3195,27 @@ - + - - - - - - - - - + + ALARM PERSISTENCE = 3 IN OASIS + + + ALARM PERSISTENCE = 3 IN OASIS + + + ALARM PERSISTENCE = 3 IN 
OASIS + + + ALARM PERSISTENCE = 3 IN OASIS + + + ALARM PERSISTENCE = 3 IN OASIS + + + + + @@ -3159,11 +3237,13 @@ - - - - - + + + + + + EACH BIT INDICATES WHETHER THE CORRESPONDING MACRO IS CURRENTLY RUNNING (E.G. BIT 1 WILL BE SET IF MACRO 1 IS RUNNING) + INDICATES WHETHER ANY CATEGORY 1 LIMITS HAVE TRIGGERED. @@ -3194,7 +3274,7 @@ INDICATES WHETHER THE MOST RECENT TRIGGER WAS A MINIMUM OR MAXIMUM LIMIT - INDICATES THE ID OF THE MOST RECENT FDC TRIGGER + INDICATES THE TABLE INDEX OF THE MOST RECENT FDC TRIGGER INDICATES THE ACTION THAT WAS TAKEN FOR THE MOST RECENT FDC TRIGGER @@ -3208,7 +3288,7 @@ INDICATES WHETHER FSW CONTROL OF THE OPERATIONAL HEATER IS ENABLED - + INDICATES THE CURRENT STATE OF THE PHYSICAL HEATER OUTPUT @@ -4491,6 +4571,7 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE + @@ -4546,11 +4627,11 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE - - - - - + + + + + @@ -4565,7 +4646,7 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE - + From 083d3a1d55faffd2356fe15a9d319bb4a789509d Mon Sep 17 00:00:00 2001 From: Tenzin Choedon Date: Thu, 8 Jan 2026 09:52:32 -0700 Subject: [PATCH 2/9] DE changes --- .../codice_packet_definition.xml | 28 ++----------------- 1 file changed, 2 insertions(+), 26 deletions(-) diff --git a/imap_processing/codice/packet_definitions/codice_packet_definition.xml b/imap_processing/codice/packet_definitions/codice_packet_definition.xml index ea88c7a756..153476ea88 100644 --- a/imap_processing/codice/packet_definitions/codice_packet_definition.xml +++ b/imap_processing/codice/packet_definitions/codice_packet_definition.xml @@ -1604,7 +1604,7 @@ - + @@ -2551,7 +2551,7 @@ - + @@ -4713,18 +4713,6 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE - - - - - - - - - - - - @@ -4989,18 +4977,6 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE - - - - - - - - - - - - From 6e14f32701dc524e5162a054d2a9a382a7ead0f6 Mon Sep 17 00:00:00 2001 From: Tenzin Choedon Date: Thu, 8 Jan 2026 10:55:11 -0700 Subject: [PATCH 3/9] bring back remaining changes --- .../codice_packet_definition.xml | 256 +++--------------- 1 file changed, 34 insertions(+), 222 deletions(-) diff --git a/imap_processing/codice/packet_definitions/codice_packet_definition.xml b/imap_processing/codice/packet_definitions/codice_packet_definition.xml index 153476ea88..47bf7415a5 100644 --- a/imap_processing/codice/packet_definitions/codice_packet_definition.xml +++ b/imap_processing/codice/packet_definitions/codice_packet_definition.xml @@ -1537,68 +1537,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -2484,68 +2422,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -3393,60 +3269,28 @@ SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK - - PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGESCOD_LO_PHA. - - - SPIN PERIOD REPORTED BY THE SPACECRAFT IN THE TIME AND STATUS MESSAGE. REPORTED PERIOD IS THE PERIOD THAT WAS ACTIVE WHEN THE 16-SPIN ACQUISITION CYCLE STARTED. 
- - - FULL-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED - - - SUB-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED - - - SPARE FOR ALIGNMENT - - - BIAS GAIN MODE FOR THE SUPRATHERMAL SECTOR - - - BIAS GAIN MODE FOR THE SOLARWIND SECTOR - - - - INDICATES THAT THERE WAS SOME ERROR DETECTED DURING ACQUISITION OR PROCESSING OF THE DATA. ERRORS COULD INCLUDE CORRUPTED ACQUISITION MEMORY (I.E. EDAC ERRORS), TIMING VIOLATIONS, OR OTHER EVENTS THAT INTERRUPTED OR OTHERWISE AFFECTED DATA COLLECTION. - - - WHETHER THE EVENT DATA IS COMPRESSED. IF 1/YES, EVENT_DATA ARRAY IS COMPRESSED USING THE LZMA COMPRESSION ALGORITHM. - - - NUMBER OF EVENTS SELECTED FOR DOWNLINK (I.E. NUMBER OF EVENTS IN THE EVENT_DATA ARRAY) - - - NUMBER OF BYTES IN THE EVENT_DATA ARRAY. IF COMPRESSED, THIS VALUE REPRESENTS THE LENGTH OF THE COMPRESSED DATA. - - OPTIONALLY COMPRESSED ARRAY OF EVENT DATA + + OPTIONALLY COMPRESSED ARRAY OF EVENT DATA -FORMAT IS TBD; SOME CONSIDERATIONS/OPTIONS: -- FULL EVENTS HAVE A LOT OF REDUNDANT DATA (E.G. WILL HAVE MANY EVENTS WITH THE SAME PRIORITY/E-STEP/SPIN PHASE INFORMATION). HOW WELL DOES COMPRESSION TO DEAL WITH THE REDUNDANCY? -- COULD INCLUDE MINI-HEADERS FOR EACH (PRIORITY,E-STEP, SPIN-PHASE) GROUP AND STRIP THE REDUNDANT DATA FROM THE EVENTS -- SHOULD EVENTS BE TIGHTLY PACKED, OR CAN WE PAD OUT TO 64-BIT WORD BOUNDARIES? HOW WELL DOES COMPRESSION COMPENSATE FOR THE EXTRA BITS? + FORMAT IS TBD; SOME CONSIDERATIONS/OPTIONS: + - FULL EVENTS HAVE A LOT OF REDUNDANT DATA (E.G. WILL HAVE MANY EVENTS WITH THE SAME PRIORITY/E-STEP/SPIN PHASE INFORMATION). HOW WELL DOES COMPRESSION TO DEAL WITH THE REDUNDANCY? + - COULD INCLUDE MINI-HEADERS FOR EACH (PRIORITY,E-STEP, SPIN-PHASE) GROUP AND STRIP THE REDUNDANT DATA FROM THE EVENTS + - SHOULD EVENTS BE TIGHTLY PACKED, OR CAN WE PAD OUT TO 64-BIT WORD BOUNDARIES? HOW WELL DOES COMPRESSION COMPENSATE FOR THE EXTRA BITS? -EACH EVENT CONSISTS OF: -- 7-BIT E-STEP -- 10-BIT TOF -- 9-BIT APD ENERGY -- 7-BIT SPIN ANGLE -- 5-BIT POSITION -- 5-BIT APD-ID -- 1-BIT APD-GAIN -- 2-BIT PHA TYPE -- 3-BIT PRIORITY RANGE + EACH EVENT CONSISTS OF: + - 7-BIT E-STEP + - 10-BIT TOF + - 9-BIT APD ENERGY + - 7-BIT SPIN ANGLE + - 5-BIT POSITION + - 5-BIT APD-ID + - 1-BIT APD-GAIN + - 2-BIT PHA TYPE + - 3-BIT PRIORITY RANGE -TBD: EVENTS MAY BE TIGHTLY PACKED, OR MAY HAVE SPARES ADDED TO KEEP EACH EVENT BYTE-ALIGNED. IN EITHER CASE, THERE MAY BE UP TO 1 BYTE OF PADDING TO KEEP THE TOTAL SIZE OF THE PACKET EVEN. + TBD: EVENTS MAY BE TIGHTLY PACKED, OR MAY HAVE SPARES ADDED TO KEEP EACH EVENT BYTE-ALIGNED. IN EITHER CASE, THERE MAY BE UP TO 1 BYTE OF PADDING TO KEEP THE TOTAL SIZE OF THE PACKET EVEN. + PACKET CHECKSUM @@ -4079,58 +3923,26 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK - - PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGESCOD_LO_PHA. - - - SPIN PERIOD REPORTED BY THE SPACECRAFT IN THE TIME AND STATUS MESSAGE. REPORTED PERIOD IS THE PERIOD THAT WAS ACTIVE WHEN THE 16-SPIN ACQUISITION CYCLE STARTED. - - - FULL-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED - - - SUB-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED - - - SPARE FOR ALIGNMENT - - - BIAS GAIN MODE FOR THE SUPRATHERMAL SECTOR - - - BIAS GAIN MODE FOR THE SOLARWIND SECTOR - - - - INDICATES THAT THERE WAS SOME ERROR DETECTED DURING ACQUISITION OR PROCESSING OF THE DATA. 
ERRORS COULD INCLUDE CORRUPTED ACQUISITION MEMORY (I.E. EDAC ERRORS), TIMING VIOLATIONS, OR OTHER EVENTS THAT INTERRUPTED OR OTHERWISE AFFECTED DATA COLLECTION. - - - WHETHER THE EVENT DATA IS COMPRESSED. IF 1/YES, EVENT_DATA ARRAY IS COMPRESSED USING THE RICE COMPRESSION ALGORITHM. - - - NUMBER OF EVENTS SELECTED FOR DOWNLINK (I.E. NUMBER OF EVENTS IN THE EVENT_DATA ARRAY) - - - NUMBER OF BYTES IN THE EVENT_DATA ARRAY. IF COMPRESSED, THIS VALUE REPRESENTS THE LENGTH OF THE COMPRESSED DATA. - - OPTIONALLY COMPRESSED ARRAY OF EVENT DATA + + OPTIONALLY COMPRESSED ARRAY OF EVENT DATA -FORMAT IS TBD; SOME CONSIDERATIONS/OPTIONS: -- FULL EVENTS HAVE A LOT OF REDUNDANT DATA (E.G. WILL HAVE MANY EVENTS WITH THE SAME PRIORITY/SPIN/SPIN PHASE INFORMATION). HOW WELL DOES COMPRESSION TO DEAL WITH THE REDUNDANCY? -- COULD INCLUDE MINI-HEADERS FOR EACH (PRIORITY,SPIN, SPIN-PHASE) GROUP AND STRIP THE REDUNDANT DATA FROM THE EVENTS -- SHOULD EVENTS BE TIGHTLY PACKED, OR CAN WE PAD OUT TO 64-BIT WORD BOUNDARIES? HOW WELL DOES COMPRESSION COMPENSATE FOR THE EXTRA BITS? + FORMAT IS TBD; SOME CONSIDERATIONS/OPTIONS: + - FULL EVENTS HAVE A LOT OF REDUNDANT DATA (E.G. WILL HAVE MANY EVENTS WITH THE SAME PRIORITY/SPIN/SPIN PHASE INFORMATION). HOW WELL DOES COMPRESSION TO DEAL WITH THE REDUNDANCY? + - COULD INCLUDE MINI-HEADERS FOR EACH (PRIORITY,SPIN, SPIN-PHASE) GROUP AND STRIP THE REDUNDANT DATA FROM THE EVENTS + - SHOULD EVENTS BE TIGHTLY PACKED, OR CAN WE PAD OUT TO 64-BIT WORD BOUNDARIES? HOW WELL DOES COMPRESSION COMPENSATE FOR THE EXTRA BITS? -EACH EVENT CONSISTS OF: -- 10-BIT TOF -- 9-BIT SSD ENERGY -- 2-BIT ENERGY RANGE -- 7-BIT SPIN ANGLE -- 4-BIT SSD POSITION -- 4-BIT SPIN NUMBER -- 2-BIT PHA TYPE + EACH EVENT CONSISTS OF: + - 10-BIT TOF + - 9-BIT SSD ENERGY + - 2-BIT ENERGY RANGE + - 7-BIT SPIN ANGLE + - 4-BIT SSD POSITION + - 4-BIT SPIN NUMBER + - 2-BIT PHA TYPE -TBD: EVENTS MAY BE TIGHTLY PACKED, OR MAY HAVE SPARES ADDED TO KEEP EACH EVENT BYTE-ALIGNED. IN EITHER CASE, THERE MAY BE UP TO 1 BYTE OF PADDING TO KEEP THE TOTAL SIZE OF THE PACKET EVEN. + TBD: EVENTS MAY BE TIGHTLY PACKED, OR MAY HAVE SPARES ADDED TO KEEP EACH EVENT BYTE-ALIGNED. IN EITHER CASE, THERE MAY BE UP TO 1 BYTE OF PADDING TO KEEP THE TOTAL SIZE OF THE PACKET EVEN. 
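
The field widths above pin down the event contents, but the packing itself is explicitly TBD. As a minimal sketch, assuming the tightly-packed option with the fields in the listed order, MSB first (38 bits per event, from summing the widths), one Hi event could be decoded as follows; the field names and layout are illustrative, not the flight format:

    # Sketch only: assumes the tightly-packed, MSB-first layout described above;
    # the flight format is TBD, so field order and alignment may differ.
    HI_EVENT_FIELDS = [  # (name, bit width) -- 38 bits total
        ("tof", 10),
        ("ssd_energy", 9),
        ("energy_range", 2),
        ("spin_angle", 7),
        ("ssd_position", 4),
        ("spin_number", 4),
        ("pha_type", 2),
    ]

    def unpack_hi_event(event_bits: int, total_bits: int = 38) -> dict:
        """Split one packed event, given as an integer, into its named fields."""
        fields = {}
        remaining = total_bits
        for name, width in HI_EVENT_FIELDS:
            remaining -= width
            fields[name] = (event_bits >> remaining) & ((1 << width) - 1)
        return fields

If the byte-aligned option is chosen instead, only total_bits and the pad handling would change; the same loop applies.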
+ PACKET CHECKSUM From 807e7a9821e71b38a8a8ce55f2b699ff38d12ee9 Mon Sep 17 00:00:00 2001 From: Luisa Date: Thu, 8 Jan 2026 14:05:12 -0700 Subject: [PATCH 4/9] mark direct event validation tests with xfail --- imap_processing/tests/codice/test_codice_l1a.py | 2 ++ imap_processing/tests/codice/test_codice_l2.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/imap_processing/tests/codice/test_codice_l1a.py b/imap_processing/tests/codice/test_codice_l1a.py index 03bc6fc436..e96096d912 100644 --- a/imap_processing/tests/codice/test_codice_l1a.py +++ b/imap_processing/tests/codice/test_codice_l1a.py @@ -654,6 +654,7 @@ def test_hi_priority(mock_get_file_paths, codice_lut_path): ) +@pytest.mark.xfail(reason="Known issue with l1a HI Direct Event processing") @patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths") def test_lo_direct_events(mock_get_file_paths, codice_lut_path): """Tests lo-direct-events.""" @@ -704,6 +705,7 @@ def test_lo_direct_events(mock_get_file_paths, codice_lut_path): ) +@pytest.mark.xfail(reason="Known issue with l1a HI Direct Event processing") @patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths") def test_hi_direct_events(mock_get_file_paths, codice_lut_path): """Tests hi-direct-events.""" diff --git a/imap_processing/tests/codice/test_codice_l2.py b/imap_processing/tests/codice/test_codice_l2.py index 01ae6227ec..becc34cb39 100644 --- a/imap_processing/tests/codice/test_codice_l2.py +++ b/imap_processing/tests/codice/test_codice_l2.py @@ -524,6 +524,7 @@ def test_codice_l2_sw_angular_intensity(mock_get_file_paths, codice_lut_path): write_cdf(processed_2_ds) +@pytest.mark.xfail(reason="Known issue with l1a HI Direct Event processing") @patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths") def test_codice_l2_lo_de(mock_get_file_paths, codice_lut_path): mock_get_file_paths.side_effect = [ @@ -586,6 +587,7 @@ def test_codice_l2_lo_de(mock_get_file_paths, codice_lut_path): load_cdf(file) +@pytest.mark.xfail(reason="Known issue with l1a HI Direct Event processing") @patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths") def test_codice_l2_hi_de(mock_get_file_paths, codice_lut_path): mock_get_file_paths.side_effect = [ From 264ef86e4ff0815a10b7dcbfe986e5ba2466ded8 Mon Sep 17 00:00:00 2001 From: Luisa Date: Fri, 9 Jan 2026 16:17:08 -0700 Subject: [PATCH 5/9] initialize nans in array --- imap_processing/codice/utils.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/imap_processing/codice/utils.py b/imap_processing/codice/utils.py index 581044f1e8..45ebc29282 100644 --- a/imap_processing/codice/utils.py +++ b/imap_processing/codice/utils.py @@ -360,7 +360,9 @@ def get_codice_epoch_time( return center_times_seconds, delta_times -def calculate_acq_time_per_step(low_stepping_tab: dict) -> np.ndarray: +def calculate_acq_time_per_step( + low_stepping_tab: dict, esa_step_dim: int = 128 +) -> np.ndarray: """ Calculate acquisition time per step from low stepping table. @@ -368,6 +370,8 @@ def calculate_acq_time_per_step(low_stepping_tab: dict) -> np.ndarray: ---------- low_stepping_tab : dict The low stepping table from the SCI-LUT JSON. + esa_step_dim : int + The ESA step dimension size. 
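+        Defaults to 128; indices beyond the stepping table's entries remain NaN.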
Returns ------- @@ -397,10 +401,11 @@ def calculate_acq_time_per_step(low_stepping_tab: dict) -> np.ndarray: hv_settle_per_step = np.minimum( np.maximum(non_adjusted_hv_settle_per_step, min_hv_settle_ms), max_hv_settle_ms ) - + # initialize array of nans for acquisition time per step + acq_time_per_step = np.full(esa_step_dim, np.nan, dtype=np.float64) # acquisition time per step in milliseconds # sector_time - sector_margin_ms / num_steps - hv_settle_per_step - acq_time_per_step = ( + acq_time_per_step[: len(num_steps_data)] = ( (sector_time - sector_margin_ms) / num_steps_data ) - hv_settle_per_step # Convert to seconds From b6a90a9e3ce1f1b045f3e5ccf77ef3029dc6be8e Mon Sep 17 00:00:00 2001 From: Luisa Date: Tue, 13 Jan 2026 16:15:49 -0700 Subject: [PATCH 6/9] rename var and add fillvals where half spin is greater than nso_half_spin --- .../imap_codice_l1a_variable_attrs.yaml | 3 ++- .../codice/codice_l1a_lo_angular.py | 15 ++++++++--- .../codice_l1a_lo_counters_aggregated.py | 15 ++++++++--- .../codice/codice_l1a_lo_counters_singles.py | 15 ++++++++--- .../codice/codice_l1a_lo_priority.py | 15 ++++++++--- .../codice/codice_l1a_lo_species.py | 26 ++++++++++++++++--- imap_processing/codice/codice_l1b.py | 14 +++++----- imap_processing/codice/utils.py | 10 +++++++ imap_processing/tests/codice/conftest.py | 2 +- .../tests/ialirt/unit/test_process_codice.py | 4 +-- 10 files changed, 88 insertions(+), 31 deletions(-) diff --git a/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml b/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml index 5df4b9f553..e79c4ac751 100644 --- a/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +++ b/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml @@ -172,8 +172,9 @@ energy_species_label: VAR_TYPE: metadata # <=== Dataset Variable Attributes ===> -acquisition_time_per_step: +acquisition_time_per_esa_step: CATDESC: Acquisition time for each step of energy + DEPEND_0: epoch DEPEND_1: esa_step FIELDNAM: Acquisition Time FILLVAL: *real_fillval diff --git a/imap_processing/codice/codice_l1a_lo_angular.py b/imap_processing/codice/codice_l1a_lo_angular.py index f9e0e6cd4e..dfeefbae63 100644 --- a/imap_processing/codice/codice_l1a_lo_angular.py +++ b/imap_processing/codice/codice_l1a_lo_angular.py @@ -309,11 +309,18 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - l1a_dataset["acquisition_time_per_step"] = xr.DataArray( - calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"]), - dims=("esa_step",), + # TODO: Handle epoch dependent acquisition time per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( + np.tile( + np.asarray(calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"])), + (len(epoch_center), 1), + ), + dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( - "acquisition_time_per_step", check_schema=False + "acquisition_time_per_esa_step", check_schema=False ), ) diff --git a/imap_processing/codice/codice_l1a_lo_counters_aggregated.py b/imap_processing/codice/codice_l1a_lo_counters_aggregated.py index 0e84b2e184..d98d97856b 100644 --- a/imap_processing/codice/codice_l1a_lo_counters_aggregated.py +++ b/imap_processing/codice/codice_l1a_lo_counters_aggregated.py @@ -207,11 +207,18 @@ def l1a_lo_counters_aggregated( dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - l1a_dataset["acquisition_time_per_step"] = xr.DataArray( - calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"]), - dims=("esa_step",), + # TODO: Handle epoch dependent acquisition time per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. If this is the + # case, we need to split the data by epoch and assign different acquisition times + l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( + np.tile( + np.asarray(calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"])), + (len(epoch_center), 1), + ), + dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( - "acquisition_time_per_step", check_schema=False + "acquisition_time_per_esa_step", check_schema=False ), ) diff --git a/imap_processing/codice/codice_l1a_lo_counters_singles.py b/imap_processing/codice/codice_l1a_lo_counters_singles.py index 624465ef01..13b5a7fe6f 100644 --- a/imap_processing/codice/codice_l1a_lo_counters_singles.py +++ b/imap_processing/codice/codice_l1a_lo_counters_singles.py @@ -216,11 +216,18 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - l1a_dataset["acquisition_time_per_step"] = xr.DataArray( - calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"]), - dims=("esa_step",), + # TODO: Handle epoch dependent acquisition time per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( + np.tile( + np.asarray(calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"])), + (len(epoch_center), 1), + ), + dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( - "acquisition_time_per_step", check_schema=False + "acquisition_time_per_esa_step", check_schema=False ), ) diff --git a/imap_processing/codice/codice_l1a_lo_priority.py b/imap_processing/codice/codice_l1a_lo_priority.py index 9b28d86a96..16ac69105a 100644 --- a/imap_processing/codice/codice_l1a_lo_priority.py +++ b/imap_processing/codice/codice_l1a_lo_priority.py @@ -223,11 +223,18 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - l1a_dataset["acquisition_time_per_step"] = xr.DataArray( - calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"]), - dims=("esa_step",), + # TODO: Handle epoch dependent acquisition time per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. If this is the + # case, we need to split the data by epoch and assign different acquisition times + l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( + np.tile( + np.asarray(calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"])), + (len(epoch_center), 1), + ), + dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( - "acquisition_time_per_step", check_schema=False + "acquisition_time_per_esa_step", check_schema=False ), ) diff --git a/imap_processing/codice/codice_l1a_lo_species.py b/imap_processing/codice/codice_l1a_lo_species.py index 72333c5030..27fac6db1f 100644 --- a/imap_processing/codice/codice_l1a_lo_species.py +++ b/imap_processing/codice/codice_l1a_lo_species.py @@ -22,6 +22,8 @@ logger = logging.getLogger(__name__) +UINT32_FILLVAL = 4294967294 + def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: """ @@ -120,6 +122,15 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: species_data = np.array(decompressed_data, dtype=np.uint32).reshape( num_packets, num_species, esa_steps, *collapsed_shape ) + row_numbers = np.array( + sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), dtype=np.int64 + ) + # For every energy after nso_half_spin, set data to nan + nso_half_spin = unpacked_dataset["nso_half_spin"].values + mask = row_numbers > nso_half_spin[:, np.newaxis] + mask = mask[:, np.newaxis, :, np.newaxis] + mask = np.repeat(mask, num_species, 1) + species_data[mask] = UINT32_FILLVAL # ========== Get Voltage Data from LUT =========== # Use plan id and plan step to get voltage data's table_number in ESA sweep table. @@ -228,11 +239,18 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - l1a_dataset["acquisition_time_per_step"] = xr.DataArray( - calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"]), - dims=("esa_step",), + # TODO: Handle epoch dependent acquisition time per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( + np.tile( + np.asarray(calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"])), + (len(epoch_center), 1), + ), + dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( - "acquisition_time_per_step", check_schema=False + "acquisition_time_per_esa_step", check_schema=False ), ) diff --git a/imap_processing/codice/codice_l1b.py b/imap_processing/codice/codice_l1b.py index 6cdc44a1a4..d32c470122 100644 --- a/imap_processing/codice/codice_l1b.py +++ b/imap_processing/codice/codice_l1b.py @@ -75,7 +75,7 @@ def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray: ]: # Denominator to convert counts to rates denominator = ( - dataset.acquisition_time_per_step + dataset.acquisition_time_per_esa_step * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"] ) @@ -88,7 +88,7 @@ def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray: "spin_period", "voltage_table", # TODO: undo this when I get new validation file from Joey - # "acquisition_time_per_step", + # "acquisition_time_per_esa_step", ] dataset = dataset.drop_vars(drop_variables) elif descriptor in [ @@ -97,16 +97,16 @@ def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray: "lo-ialirt", ]: # Create n_sector with 'esa_step' dimension. This is done by xr.full_like - # with input dataset.acquisition_time_per_step. This ensures that the resulting - # n_sector has the same dimensions as acquisition_time_per_step. + # with input dataset.acquisition_time_per_esa_step. This ensures that the + # resulting n_sector has the same dimensions as acquisition_time_per_esa_step. # Per CoDICE, fill first 127 with default value of 12. Then fill last with 11. n_sector = xr.full_like( - dataset.acquisition_time_per_step, 12.0, dtype=np.float64 + dataset.acquisition_time_per_esa_step, 12.0, dtype=np.float64 ) n_sector[-1] = 11.0 # Denominator to convert counts to rates - denominator = dataset.acquisition_time_per_step * n_sector + denominator = dataset.acquisition_time_per_esa_step * n_sector # Do not carry these variable attributes from L1a to L1b for above products drop_variables = [ "k_factor", @@ -116,7 +116,7 @@ def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray: "spin_period", "voltage_table", # TODO: undo this when I get new validation file from Joey - # "acquisition_time_per_step", + # "acquisition_time_per_esa_step", ] dataset = dataset.drop_vars(drop_variables) diff --git a/imap_processing/codice/utils.py b/imap_processing/codice/utils.py index 45ebc29282..d801c6df84 100644 --- a/imap_processing/codice/utils.py +++ b/imap_processing/codice/utils.py @@ -378,6 +378,16 @@ def calculate_acq_time_per_step( np.ndarray Array of acquisition times per step of shape (num_esa_steps,). """ + # TODO: Handle time-varying num_steps_data length + # The num_steps_data length can change over time (e.g., 6 → 3 steps) and is not + # constant. E.g. at a day where the LUT changes we need to handle that. 
Update the + # computation to: + # Use the actual length of num_steps_data at each point in time instead of assuming + # a constant value + # - Make the calculation time-varying with epoch dependency + # - Ensure values are divided by their corresponding epoch in L1B processing + # - These tunable values are used to calculate acquisition time per step + # These tunable values are used to calculate acquisition time per step tunable_values = low_stepping_tab["tunable_values"] diff --git a/imap_processing/tests/codice/conftest.py b/imap_processing/tests/codice/conftest.py index e4ca7d5abe..f49c29857f 100644 --- a/imap_processing/tests/codice/conftest.py +++ b/imap_processing/tests/codice/conftest.py @@ -9,7 +9,7 @@ TEST_L0_FILE = TEST_DATA_L0_PATH / "imap_codice_l0_raw_20241110_v001.pkts" VALIDATION_FILE_DATE = "20250814" -VALIDATION_FILE_VERSION = "v013" +VALIDATION_FILE_VERSION = "v015" @pytest.fixture(scope="session") diff --git a/imap_processing/tests/ialirt/unit/test_process_codice.py b/imap_processing/tests/ialirt/unit/test_process_codice.py index 476376475b..2217614312 100644 --- a/imap_processing/tests/ialirt/unit/test_process_codice.py +++ b/imap_processing/tests/ialirt/unit/test_process_codice.py @@ -195,9 +195,9 @@ def make_codice_lo_ialirt_dataset(cod_lo_l1a_test_data, descriptor): "k_factor": ("dim0", cod_lo_l1a_test_data["k_factor"].data), "voltage_table": ("esa_step", cod_lo_l1a_test_data["voltage_table"].data), "data_quality": ("epoch", cod_lo_l1a_test_data["data_quality"].data), - "acquisition_time_per_step": ( + "acquisition_time_per_esa_step": ( "esa_step", - cod_lo_l1a_test_data["acquisition_time_per_step"].data, + cod_lo_l1a_test_data["acquisition_time_per_esa_step"].data, ), "epoch_delta_minus": ("epoch", cod_lo_l1a_test_data["epoch_delta_minus"].data), "epoch_delta_plus": ("epoch", cod_lo_l1a_test_data["epoch_delta_plus"].data), From 8ee7fb2e6ba78f779d9934a33b3282b653712942 Mon Sep 17 00:00:00 2001 From: Luisa Date: Thu, 15 Jan 2026 07:35:43 -0700 Subject: [PATCH 7/9] mask out values where energy is below nso_half_spin --- .../imap_codice_l1a_variable_attrs.yaml | 1 + .../codice/codice_l1a_lo_angular.py | 65 +++++++++++++------ .../codice_l1a_lo_counters_aggregated.py | 45 ++++++++++--- .../codice/codice_l1a_lo_counters_singles.py | 45 ++++++++++--- .../codice/codice_l1a_lo_priority.py | 45 ++++++++++--- .../codice/codice_l1a_lo_species.py | 56 ++++++++++------ imap_processing/codice/constants.py | 2 + imap_processing/codice/utils.py | 18 ++--- 8 files changed, 200 insertions(+), 77 deletions(-) diff --git a/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml b/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml index e79c4ac751..61faff9ab6 100644 --- a/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +++ b/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml @@ -188,6 +188,7 @@ acquisition_time_per_esa_step: half_spin_per_esa_step: CATDESC: Half spin number for each step of energy + DEPEND_0: epoch DEPEND_1: esa_step FIELDNAM: Half Spin Number FILLVAL: 255 diff --git a/imap_processing/codice/codice_l1a_lo_angular.py b/imap_processing/codice/codice_l1a_lo_angular.py index dfeefbae63..c053d248c2 100644 --- a/imap_processing/codice/codice_l1a_lo_angular.py +++ b/imap_processing/codice/codice_l1a_lo_angular.py @@ -8,6 +8,7 @@ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes from imap_processing.codice import constants +from imap_processing.codice.constants import UINT32_FILLVAL from 
imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( CODICEAPID, @@ -67,28 +68,28 @@ def _despin_species_data( # index_to_position gets the position from collapse table. Eg. # [1, 2, 3, 23, 24] for SW angular angular_position = index_to_position(sci_lut_data, 0, view_tab_obj.collapse_table) - orientation_a = pixel_orientation == "A" - orientation_b = pixel_orientation == "B" + orientation_a_indices = np.where(pixel_orientation == "A")[0] + orientation_b_indices = np.where(pixel_orientation == "B")[0] # Despin data based on orientation and angular position for pos_idx, position in enumerate(angular_position): if position <= 12: # Case 1: position 0-12, orientation A, append to first half - despun_data[:, :, orientation_a, :12, pos_idx] = species_data[ - :, :, orientation_a, :, pos_idx + despun_data[:, :, orientation_a_indices, :12, pos_idx] = species_data[ + :, :, orientation_a_indices, :, pos_idx ] # Case 2: position 13-24, orientation B, append to second half - despun_data[:, :, orientation_b, 12:, pos_idx] = species_data[ - :, :, orientation_b, :, pos_idx + despun_data[:, :, orientation_b_indices, 12:, pos_idx] = species_data[ + :, :, orientation_b_indices, :, pos_idx ] else: # Case 3: position 13-24, orientation A, append to second half - despun_data[:, :, orientation_a, 12:, pos_idx] = species_data[ - :, :, orientation_a, :, pos_idx + despun_data[:, :, orientation_a_indices, 12:, pos_idx] = species_data[ + :, :, orientation_a_indices, :, pos_idx ] # Case 4: position 0-12, orientation B, append to first half - despun_data[:, :, orientation_b, :12, pos_idx] = species_data[ - :, :, orientation_b, :, pos_idx + despun_data[:, :, orientation_b_indices, :12, pos_idx] = species_data[ + :, :, orientation_b_indices, :, pos_idx ] return despun_data @@ -198,6 +199,34 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: ] voltage_data = sci_lut_data["esa_sweep_tab"][f"{esa_table_number}"] + half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") + # TODO: Handle epoch dependent acquisition time and half spin per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + half_spin_per_esa_step = np.tile( + np.asarray(half_spin_per_esa_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # Get acquisition time per esa step + acquisition_time_per_step = calculate_acq_time_per_step( + sci_lut_data["lo_stepping_tab"] + ) + acquisition_time_per_step = np.tile( + np.asarray(acquisition_time_per_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # For every energy after nso_half_spin, set data to fill values + nso_half_spin = unpacked_dataset["nso_half_spin"].values + nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis] + species_mask = nso_mask[:, np.newaxis, :, np.newaxis, np.newaxis] + species_mask = np.broadcast_to(species_mask, species_data.shape) + species_data[species_mask] = UINT32_FILLVAL + # Set half_spin_per_esa_step to 63 which is the fill value + # half_spin_per_esa_step[nso_mask] = 63 + # # Set acquisition time per esa step to nan where nso_mask is True + # acquisition_time_per_step[nso_mask] = np.nan + # ========= Get Epoch Time Data =========== # Epoch center time and delta epoch_center, deltas = get_codice_epoch_time( @@ -239,8 +268,11 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: attrs=cdf_attrs.get_variable_attributes("esa_step", check_schema=False), ), "half_spin_per_esa_step": xr.DataArray( - sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), - dims=("esa_step",), + half_spin_per_esa_step, + dims=( + "epoch", + "esa_step", + ), attrs=cdf_attrs.get_variable_attributes( "half_spin_per_esa_step", check_schema=False ), @@ -309,15 +341,8 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - # TODO: Handle epoch dependent acquisition time per esa step - # For now, just tile the same array for all epochs. - # Eventually we may have data from a day where the LUT changed. If this is the - # case, we need to split the data by epoch and assign different acquisition times l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( - np.tile( - np.asarray(calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"])), - (len(epoch_center), 1), - ), + acquisition_time_per_step, dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( "acquisition_time_per_esa_step", check_schema=False diff --git a/imap_processing/codice/codice_l1a_lo_counters_aggregated.py b/imap_processing/codice/codice_l1a_lo_counters_aggregated.py index d98d97856b..dafcbdb4a7 100644 --- a/imap_processing/codice/codice_l1a_lo_counters_aggregated.py +++ b/imap_processing/codice/codice_l1a_lo_counters_aggregated.py @@ -8,6 +8,7 @@ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes from imap_processing.codice import constants +from imap_processing.codice.constants import UINT32_FILLVAL from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( ViewTabInfo, @@ -114,6 +115,34 @@ def l1a_lo_counters_aggregated( -1, esa_step, num_variables, spin_sector_pairs ) + half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") + # TODO: Handle epoch dependent acquisition time and half spin per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + half_spin_per_esa_step = np.tile( + np.asarray(half_spin_per_esa_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # Get acquisition time per esa step + acquisition_time_per_step = calculate_acq_time_per_step( + sci_lut_data["lo_stepping_tab"] + ) + acquisition_time_per_step = np.tile( + np.asarray(acquisition_time_per_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # For every energy after nso_half_spin, set data to fill values + nso_half_spin = unpacked_dataset["nso_half_spin"].values + nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis] + counters_mask = nso_mask[:, :, np.newaxis, np.newaxis] + counters_mask = np.broadcast_to(counters_mask, counters_data.shape) + counters_data[counters_mask] = UINT32_FILLVAL + # Set half_spin_per_esa_step to 63 which is the fill value + # half_spin_per_esa_step[nso_mask] = 63 + # # Set acquisition time per esa step to nan where nso_mask is True + # acquisition_time_per_step[nso_mask] = np.nan + # ========= Get Epoch Time Data =========== # Epoch center time and delta epoch_center, deltas = get_codice_epoch_time( @@ -155,8 +184,11 @@ def l1a_lo_counters_aggregated( attrs=cdf_attrs.get_variable_attributes("esa_step", check_schema=False), ), "half_spin_per_esa_step": xr.DataArray( - sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), - dims=("esa_step",), + half_spin_per_esa_step, + dims=( + "epoch", + "esa_step", + ), attrs=cdf_attrs.get_variable_attributes( "half_spin_per_esa_step", check_schema=False ), @@ -207,15 +239,8 @@ def l1a_lo_counters_aggregated( dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - # TODO: Handle epoch dependent acquisition time per esa step - # For now, just tile the same array for all epochs. - # Eventually we may have data from a day where the LUT changed. If this is the - # case, we need to split the data by epoch and assign different acquisition times l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( - np.tile( - np.asarray(calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"])), - (len(epoch_center), 1), - ), + acquisition_time_per_step, dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( "acquisition_time_per_esa_step", check_schema=False diff --git a/imap_processing/codice/codice_l1a_lo_counters_singles.py b/imap_processing/codice/codice_l1a_lo_counters_singles.py index 13b5a7fe6f..b19ee6e0c9 100644 --- a/imap_processing/codice/codice_l1a_lo_counters_singles.py +++ b/imap_processing/codice/codice_l1a_lo_counters_singles.py @@ -8,6 +8,7 @@ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes from imap_processing.codice import constants +from imap_processing.codice.constants import UINT32_FILLVAL from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( ViewTabInfo, @@ -111,6 +112,34 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. .transpose(0, 1, 3, 2) ) + half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") + # TODO: Handle epoch dependent acquisition time and half spin per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + half_spin_per_esa_step = np.tile( + np.asarray(half_spin_per_esa_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # Get acquisition time per esa step + acquisition_time_per_step = calculate_acq_time_per_step( + sci_lut_data["lo_stepping_tab"] + ) + acquisition_time_per_step = np.tile( + np.asarray(acquisition_time_per_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # For every energy after nso_half_spin, set data to fill values + nso_half_spin = unpacked_dataset["nso_half_spin"].values + nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis] + counters_mask = nso_mask[:, :, np.newaxis, np.newaxis] + counters_mask = np.broadcast_to(counters_mask, counters_data.shape) + counters_data[counters_mask] = UINT32_FILLVAL + # Set half_spin_per_esa_step to 63 which is the fill value + # half_spin_per_esa_step[nso_mask] = 63 + # # Set acquisition time per esa step to nan where nso_mask is True + # acquisition_time_per_step[nso_mask] = np.nan + # ========= Get Epoch Time Data =========== # Epoch center time and delta epoch_center, deltas = get_codice_epoch_time( @@ -152,8 +181,11 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. attrs=cdf_attrs.get_variable_attributes("esa_step", check_schema=False), ), "half_spin_per_esa_step": xr.DataArray( - sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), - dims=("esa_step",), + half_spin_per_esa_step, + dims=( + "epoch", + "esa_step", + ), attrs=cdf_attrs.get_variable_attributes( "half_spin_per_esa_step", check_schema=False ), @@ -216,15 +248,8 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - # TODO: Handle epoch dependent acquisition time per esa step - # For now, just tile the same array for all epochs. - # Eventually we may have data from a day where the LUT changed. If this is the - # case, we need to split the data by epoch and assign different acquisition times l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( - np.tile( - np.asarray(calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"])), - (len(epoch_center), 1), - ), + acquisition_time_per_step, dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( "acquisition_time_per_esa_step", check_schema=False diff --git a/imap_processing/codice/codice_l1a_lo_priority.py b/imap_processing/codice/codice_l1a_lo_priority.py index 16ac69105a..f85e033028 100644 --- a/imap_processing/codice/codice_l1a_lo_priority.py +++ b/imap_processing/codice/codice_l1a_lo_priority.py @@ -8,6 +8,7 @@ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes from imap_processing.codice import constants +from imap_processing.codice.constants import UINT32_FILLVAL from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( CODICEAPID, @@ -133,6 +134,34 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: num_packets, num_species, esa_steps, collapse_shape[0] ) + half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") + # TODO: Handle epoch dependent acquisition time and half spin per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + half_spin_per_esa_step = np.tile( + np.asarray(half_spin_per_esa_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # Get acquisition time per esa step + acquisition_time_per_step = calculate_acq_time_per_step( + sci_lut_data["lo_stepping_tab"] + ) + acquisition_time_per_step = np.tile( + np.asarray(acquisition_time_per_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # For every energy after nso_half_spin, set data to fill values + nso_half_spin = unpacked_dataset["nso_half_spin"].values + nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis] + species_mask = nso_mask[:, np.newaxis, :, np.newaxis] + species_mask = np.broadcast_to(species_mask, species_data.shape) + species_data[species_mask] = UINT32_FILLVAL + # Set half_spin_per_esa_step to 63 which is the fill value + # half_spin_per_esa_step[nso_mask] = 63 + # # Set acquisition time per esa step to nan where nso_mask is True + # acquisition_time_per_step[nso_mask] = np.nan + # ========== Create CDF Dataset with Metadata =========== cdf_attrs = ImapCdfAttributes() cdf_attrs.add_instrument_global_attrs("codice") @@ -165,8 +194,11 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: attrs=cdf_attrs.get_variable_attributes("esa_step", check_schema=False), ), "half_spin_per_esa_step": xr.DataArray( - sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), - dims=("esa_step",), + half_spin_per_esa_step, + dims=( + "epoch", + "esa_step", + ), attrs=cdf_attrs.get_variable_attributes( "half_spin_per_esa_step", check_schema=False ), @@ -223,15 +255,8 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - # TODO: Handle epoch dependent acquisition time per esa step - # For now, just tile the same array for all epochs. - # Eventually we may have data from a day where the LUT changed. 
If this is the - # case, we need to split the data by epoch and assign different acquisition times l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( - np.tile( - np.asarray(calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"])), - (len(epoch_center), 1), - ), + acquisition_time_per_step, dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( "acquisition_time_per_esa_step", check_schema=False diff --git a/imap_processing/codice/codice_l1a_lo_species.py b/imap_processing/codice/codice_l1a_lo_species.py index 27fac6db1f..7091bbb757 100644 --- a/imap_processing/codice/codice_l1a_lo_species.py +++ b/imap_processing/codice/codice_l1a_lo_species.py @@ -8,6 +8,7 @@ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes from imap_processing.codice import constants +from imap_processing.codice.constants import UINT32_FILLVAL from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( CODICEAPID, @@ -22,8 +23,6 @@ logger = logging.getLogger(__name__) -UINT32_FILLVAL = 4294967294 - def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: """ @@ -122,15 +121,38 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: species_data = np.array(decompressed_data, dtype=np.uint32).reshape( num_packets, num_species, esa_steps, *collapsed_shape ) - row_numbers = np.array( - sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), dtype=np.int64 + + half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") + acquisition_time_per_step = calculate_acq_time_per_step( + sci_lut_data["lo_stepping_tab"] + ) + # Get acquisition time per esa step + # TODO: Handle epoch dependent acquisition time and half spin per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. If this is the + # case, we need to split the data by epoch and assign different acquisition times + half_spin_per_esa_step = np.tile( + np.asarray( + half_spin_per_esa_step, + ), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + acquisition_time_per_step = np.tile( + np.asarray(acquisition_time_per_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), ) - # For every energy after nso_half_spin, set data to nan + + # For every energy after nso_half_spin, set data to fill values nso_half_spin = unpacked_dataset["nso_half_spin"].values - mask = row_numbers > nso_half_spin[:, np.newaxis] - mask = mask[:, np.newaxis, :, np.newaxis] - mask = np.repeat(mask, num_species, 1) - species_data[mask] = UINT32_FILLVAL + nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis] + species_mask = nso_mask[:, np.newaxis, :, np.newaxis] + species_mask = np.repeat(species_mask, num_species, 1) + species_data[species_mask] = UINT32_FILLVAL + + # # Set half_spin_per_esa_step to 63 which is the fill value + # half_spin_per_esa_step[nso_mask] = 63 + # # Set acquisition time per esa step to nan where nso_mask is True + # acquisition_time_per_step[nso_mask] = np.nan # ========== Get Voltage Data from LUT =========== # Use plan id and plan step to get voltage data's table_number in ESA sweep table. 
@@ -181,8 +203,11 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: attrs=cdf_attrs.get_variable_attributes("esa_step", check_schema=False), ), "half_spin_per_esa_step": xr.DataArray( - sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), - dims=("esa_step",), + half_spin_per_esa_step, + dims=( + "epoch", + "esa_step", + ), attrs=cdf_attrs.get_variable_attributes( "half_spin_per_esa_step", check_schema=False ), @@ -239,15 +264,8 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - # TODO: Handle epoch dependent acquisition time per esa step - # For now, just tile the same array for all epochs. - # Eventually we may have data from a day where the LUT changed. If this is the - # case, we need to split the data by epoch and assign different acquisition times l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( - np.tile( - np.asarray(calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"])), - (len(epoch_center), 1), - ), + acquisition_time_per_step, dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( "acquisition_time_per_esa_step", check_schema=False diff --git a/imap_processing/codice/constants.py b/imap_processing/codice/constants.py index b89feb3671..647120f70f 100644 --- a/imap_processing/codice/constants.py +++ b/imap_processing/codice/constants.py @@ -1168,3 +1168,5 @@ 318.39, ] ) + +UINT32_FILLVAL = 4294967294 diff --git a/imap_processing/codice/utils.py b/imap_processing/codice/utils.py index d801c6df84..35485acd6d 100644 --- a/imap_processing/codice/utils.py +++ b/imap_processing/codice/utils.py @@ -379,14 +379,14 @@ def calculate_acq_time_per_step( Array of acquisition times per step of shape (num_esa_steps,). """ # TODO: Handle time-varying num_steps_data length - # The num_steps_data length can change over time (e.g., 6 → 3 steps) and is not - # constant. E.g. at a day where the LUT changes we need to handle that. Update the - # computation to: - # Use the actual length of num_steps_data at each point in time instead of assuming - # a constant value - # - Make the calculation time-varying with epoch dependency - # - Ensure values are divided by their corresponding epoch in L1B processing - # - These tunable values are used to calculate acquisition time per step + # The num_steps_data length can change over time (e.g., 6 → 3 steps) and is not + # constant. E.g. at a day where the LUT changes we need to handle that. 
Update the + # computation to: + # Use the actual length of num_steps_data at each point in time instead of + # assuming a constant value + # - Make the calculation time-varying with epoch dependency + # - Ensure values are divided by their corresponding epoch in L1B processing + # - These tunable values are used to calculate acquisition time per step # These tunable values are used to calculate acquisition time per step tunable_values = low_stepping_tab["tunable_values"] @@ -400,6 +400,8 @@ def calculate_acq_time_per_step( num_steps_data = np.array( low_stepping_tab["num_steps"].get("data"), dtype=np.float64 ) + print(sector_time) + print(float(sector_time)) # Total non-acquisition time is in column (BD) of science LUT dwell_fraction_percentage = float(sector_time) * (100.0 - dwell_fraction) / 100.0 From ba4b33e66ab502fd736ad453b9051f5202c65d7d Mon Sep 17 00:00:00 2001 From: Luisa Date: Thu, 15 Jan 2026 07:39:44 -0700 Subject: [PATCH 8/9] remove print statements --- imap_processing/codice/utils.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/imap_processing/codice/utils.py b/imap_processing/codice/utils.py index 35485acd6d..1727268529 100644 --- a/imap_processing/codice/utils.py +++ b/imap_processing/codice/utils.py @@ -400,8 +400,6 @@ def calculate_acq_time_per_step( num_steps_data = np.array( low_stepping_tab["num_steps"].get("data"), dtype=np.float64 ) - print(sector_time) - print(float(sector_time)) # Total non-acquisition time is in column (BD) of science LUT dwell_fraction_percentage = float(sector_time) * (100.0 - dwell_fraction) / 100.0 From 9dee1a4bca7555182a026c750bf54dcce18e5cd5 Mon Sep 17 00:00:00 2001 From: Luisa Date: Thu, 15 Jan 2026 15:56:24 -0700 Subject: [PATCH 9/9] fillval fixes for l1a and l1b --- .../imap_codice_l1a_variable_attrs.yaml | 48 +++++++++---------- .../codice_l1a_hi_counters_aggregated.py | 2 + .../codice/codice_l1a_hi_counters_singles.py | 3 +- imap_processing/codice/codice_l1a_hi_omni.py | 2 + .../codice/codice_l1a_hi_priority.py | 2 +- .../codice/codice_l1a_hi_sectored.py | 1 + .../codice/codice_l1a_lo_angular.py | 4 +- .../codice_l1a_lo_counters_aggregated.py | 4 +- .../codice/codice_l1a_lo_counters_singles.py | 4 +- .../codice/codice_l1a_lo_priority.py | 4 +- .../codice/codice_l1a_lo_species.py | 4 +- imap_processing/codice/codice_l1b.py | 11 +++-- imap_processing/codice/constants.py | 2 - 13 files changed, 48 insertions(+), 43 deletions(-) diff --git a/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml b/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml index 61faff9ab6..3e2ea8a6db 100644 --- a/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +++ b/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml @@ -1,7 +1,7 @@ # <=== Useful Variables ===> int_fillval: &int_fillval -9223372036854775808 uint32_fillval: &uint32_fillval 4294967295 -real_fillval: &real_fillval -1.0e+31 +real_fillval: &real_fillval -1.0E+31 min_int: &min_int -9223372036854775808 min_epoch: &min_epoch -315575942816000000 @@ -98,7 +98,7 @@ spin_sector_pairs: priority: CATDESC: Priority Level FIELDNAM: Priority Level - FILLVAL: -1 + FILLVAL: *uint32_fillval FORMAT: I2 LABLAXIS: " " SCALETYP: linear @@ -317,8 +317,8 @@ sw_bias_gain_mode: counters_base: &counters_base DEPEND_0: epoch DISPLAY_TYPE: time_series - FILLVAL: *uint32_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 SCALETYP: linear UNITS: counts VALIDMIN: 0 @@ -412,8 +412,8 @@ hi-species-attrs: DEPEND_1: energy_{species} DISPLAY_TYPE: time_series FIELDNAM: 
Species {species} - FILLVAL: *uint32_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: energy_{species}_label SCALETYP: linear UNITS: counts @@ -427,13 +427,13 @@ hi-species-unc-attrs: DEPEND_1: energy_{species} DISPLAY_TYPE: time_series FIELDNAM: Species {species} - FILLVAL: *uint32_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: energy_{species}_label SCALETYP: linear UNITS: counts VALIDMAX: *max_uint32 - VALIDMIN: 0 + VALIDMIN: 0.0 VAR_TYPE: data hi-energy-attrs: @@ -470,11 +470,11 @@ hi-energy-delta-attrs: hi_priorities_attrs: &hi_priorities_default DEPEND_0: epoch DISPLAY_TYPE: time_series - FILLVAL: *uint32_fillval - FORMAT: I5 + FILLVAL: *real_fillval + FORMAT: F32.9 LABLAXIS: "events" UNITS: events - VALIDMAX: *max_uint32 + VALIDMAX: *real_fillval VALIDMIN: 0 VAR_TYPE: data @@ -655,8 +655,8 @@ lo_counters_singles: DEPEND_3: inst_az DISPLAY_TYPE: time_series FIELDNAM: Rates - Single (APD) - FILLVAL: *uint32_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_pairs_label LABL_PTR_3: inst_az_label @@ -675,8 +675,8 @@ lo-angular-attrs: DEPEND_3: inst_az DISPLAY_TYPE: time_series FIELDNAM: "SW - {species}" - FILLVAL: *int_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_label LABL_PTR_3: inst_az_label @@ -693,8 +693,8 @@ lo-angular-unc-attrs: DEPEND_3: inst_az DISPLAY_TYPE: time_series FIELDNAM: "NSW - {species}" - FILLVAL: *int_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F19 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_label LABL_PTR_3: inst_az_label @@ -709,8 +709,8 @@ lo_priority_base: &lo_priority_base DEPEND_1: esa_step DEPEND_2: spin_sector DISPLAY_TYPE: time_series - FILLVAL: *uint32_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_label SCALETYP: linear @@ -764,8 +764,8 @@ lo-species-attrs: DEPEND_2: spin_sector DISPLAY_TYPE: time_series FIELDNAM: "{direction} - {species}" - FILLVAL: *int_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_label UNITS: counts @@ -780,8 +780,8 @@ lo-pui-species-attrs: DEPEND_2: spin_sector DISPLAY_TYPE: time_series FIELDNAM: "{direction} - {species}" - FILLVAL: *int_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_label UNITS: counts diff --git a/imap_processing/codice/codice_l1a_hi_counters_aggregated.py b/imap_processing/codice/codice_l1a_hi_counters_aggregated.py index ffe3c33267..cfe50c6195 100644 --- a/imap_processing/codice/codice_l1a_hi_counters_aggregated.py +++ b/imap_processing/codice/codice_l1a_hi_counters_aggregated.py @@ -101,6 +101,8 @@ def l1a_hi_counters_aggregated( counters_data = np.array(decompressed_data, dtype=np.uint32).reshape( -1, num_variables ) + # Convert counters data to float + counters_data = counters_data.astype(np.float64) # ========= Get Epoch Time Data =========== # Epoch center time and delta diff --git a/imap_processing/codice/codice_l1a_hi_counters_singles.py b/imap_processing/codice/codice_l1a_hi_counters_singles.py index 16f3d16d63..93e450373d 100644 --- a/imap_processing/codice/codice_l1a_hi_counters_singles.py +++ b/imap_processing/codice/codice_l1a_hi_counters_singles.py @@ -96,7 +96,8 @@ def l1a_hi_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. 
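    # Decompressed counts arrive as a flat uint32 stream; reshape to
    # (packets, counters, azimuth) before the float cast below.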
diff --git a/imap_processing/codice/codice_l1a_hi_counters_aggregated.py b/imap_processing/codice/codice_l1a_hi_counters_aggregated.py
index ffe3c33267..cfe50c6195 100644
--- a/imap_processing/codice/codice_l1a_hi_counters_aggregated.py
+++ b/imap_processing/codice/codice_l1a_hi_counters_aggregated.py
@@ -101,6 +101,8 @@ def l1a_hi_counters_aggregated(
     counters_data = np.array(decompressed_data, dtype=np.uint32).reshape(
         -1, num_variables
     )
+    # Convert counters data to float
+    counters_data = counters_data.astype(np.float64)
 
     # ========= Get Epoch Time Data ===========
     # Epoch center time and delta
diff --git a/imap_processing/codice/codice_l1a_hi_counters_singles.py b/imap_processing/codice/codice_l1a_hi_counters_singles.py
index 16f3d16d63..93e450373d 100644
--- a/imap_processing/codice/codice_l1a_hi_counters_singles.py
+++ b/imap_processing/codice/codice_l1a_hi_counters_singles.py
@@ -96,7 +96,8 @@ def l1a_hi_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.
     counters_data = np.array(decompressed_data, dtype=np.uint32).reshape(
         -1, len(variable_names), inst_az
     )
-
+    # Convert counters data to float
+    counters_data = counters_data.astype(np.float64)
     # ========= Get Epoch Time Data ===========
     # Epoch center time and delta
     epoch_center, deltas = get_codice_epoch_time(
diff --git a/imap_processing/codice/codice_l1a_hi_omni.py b/imap_processing/codice/codice_l1a_hi_omni.py
index 9698e9a580..656380c6a0 100644
--- a/imap_processing/codice/codice_l1a_hi_omni.py
+++ b/imap_processing/codice/codice_l1a_hi_omni.py
@@ -237,6 +237,8 @@ def l1a_hi_omni(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
         species_attrs = apply_replacements_to_attrs(
             species_attrs, {"species": species_name}
         )
+        # Convert to float
+        species_data = species_data.astype(np.float64)
         l1a_dataset[species_name] = xr.DataArray(
             species_data,
             dims=("epoch", f"energy_{species_name}"),
diff --git a/imap_processing/codice/codice_l1a_hi_priority.py b/imap_processing/codice/codice_l1a_hi_priority.py
index e481a040c4..7de581978d 100644
--- a/imap_processing/codice/codice_l1a_hi_priority.py
+++ b/imap_processing/codice/codice_l1a_hi_priority.py
@@ -109,7 +109,7 @@ def l1a_hi_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
     species_data = np.array(decompressed_data, dtype=np.uint32).reshape(
         num_packets, collapse_shape[1]
     )
-
+    species_data = species_data.astype(np.float64)
     # ========== Create CDF Dataset with Metadata ===========
     cdf_attrs = ImapCdfAttributes()
     cdf_attrs.add_instrument_global_attrs("codice")
diff --git a/imap_processing/codice/codice_l1a_hi_sectored.py b/imap_processing/codice/codice_l1a_hi_sectored.py
index 6615eae720..1a147e2b40 100644
--- a/imap_processing/codice/codice_l1a_hi_sectored.py
+++ b/imap_processing/codice/codice_l1a_hi_sectored.py
@@ -237,6 +237,7 @@ def l1a_hi_sectored(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
         species_attrs = apply_replacements_to_attrs(
             species_attrs, {"species": species_name}
         )
+        species_data = species_data.astype(np.float64)
         # Add DEPEND_2, DEPEND_3
         species_attrs["DEPEND_2"] = "spin_sector"
         species_attrs["LABL_PTR_2"] = "spin_sector_label"
diff --git a/imap_processing/codice/codice_l1a_lo_angular.py b/imap_processing/codice/codice_l1a_lo_angular.py
index c053d248c2..776fe80563 100644
--- a/imap_processing/codice/codice_l1a_lo_angular.py
+++ b/imap_processing/codice/codice_l1a_lo_angular.py
@@ -8,7 +8,6 @@
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.codice import constants
-from imap_processing.codice.constants import UINT32_FILLVAL
 from imap_processing.codice.decompress import decompress
 from imap_processing.codice.utils import (
     CODICEAPID,
@@ -221,7 +220,7 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
     nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]
     species_mask = nso_mask[:, np.newaxis, :, np.newaxis, np.newaxis]
     species_mask = np.broadcast_to(species_mask, species_data.shape)
-    species_data[species_mask] = UINT32_FILLVAL
+    species_data = species_data.astype(np.float64)
+    species_data[species_mask] = np.nan
     # Set half_spin_per_esa_step to 63 which is the fill value
     # half_spin_per_esa_step[nso_mask] = 63
     # # Set acquisition time per esa step to nan where nso_mask is True
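
Each of the lo-* modules below repeats the masking idiom the lo_angular hunk just introduced. A standalone sketch with made-up shapes (2 epochs, 3 species, 4 ESA steps, 5 spin sectors), not the SDC arrays themselves:

    import numpy as np

    rng = np.random.default_rng(0)
    species_data = rng.integers(0, 100, size=(2, 3, 4, 5)).astype(np.float64)
    half_spin_per_esa_step = np.array([[0, 1, 2, 3], [0, 1, 2, 3]])
    nso_half_spin = np.array([2, 1])  # per-epoch NSO cutoff

    # Flag every ESA step past the cutoff, then broadcast the (epoch, esa_step)
    # mask across the species and spin-sector axes before writing NaN.
    nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]
    species_mask = np.broadcast_to(
        nso_mask[:, np.newaxis, :, np.newaxis], species_data.shape
    )
    species_data[species_mask] = np.nan

The cast to float64 has to happen before the assignment; writing np.nan into a uint32 array raises a ValueError, which is why every module in this commit casts first.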
a/imap_processing/codice/codice_l1a_lo_counters_aggregated.py
+++ b/imap_processing/codice/codice_l1a_lo_counters_aggregated.py
@@ -8,7 +8,6 @@
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.codice import constants
-from imap_processing.codice.constants import UINT32_FILLVAL
 from imap_processing.codice.decompress import decompress
 from imap_processing.codice.utils import (
     ViewTabInfo,
@@ -137,7 +136,7 @@ def l1a_lo_counters_aggregated(
     nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]
     counters_mask = nso_mask[:, :, np.newaxis, np.newaxis]
     counters_mask = np.broadcast_to(counters_mask, counters_data.shape)
-    counters_data[counters_mask] = UINT32_FILLVAL
+    counters_data = counters_data.astype(np.float64)
+    counters_data[counters_mask] = np.nan
     # Set half_spin_per_esa_step to 63 which is the fill value
     # half_spin_per_esa_step[nso_mask] = 63
     # # Set acquisition time per esa step to nan where nso_mask is True
diff --git a/imap_processing/codice/codice_l1a_lo_counters_singles.py b/imap_processing/codice/codice_l1a_lo_counters_singles.py
index b19ee6e0c9..2c8f83fda6 100644
--- a/imap_processing/codice/codice_l1a_lo_counters_singles.py
+++ b/imap_processing/codice/codice_l1a_lo_counters_singles.py
@@ -8,7 +8,6 @@
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.codice import constants
-from imap_processing.codice.constants import UINT32_FILLVAL
 from imap_processing.codice.decompress import decompress
 from imap_processing.codice.utils import (
     ViewTabInfo,
@@ -134,7 +133,7 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.
     nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]
     counters_mask = nso_mask[:, :, np.newaxis, np.newaxis]
     counters_mask = np.broadcast_to(counters_mask, counters_data.shape)
-    counters_data[counters_mask] = UINT32_FILLVAL
+    counters_data = counters_data.astype(np.float64)
+    counters_data[counters_mask] = np.nan
     # Set half_spin_per_esa_step to 63 which is the fill value
     # half_spin_per_esa_step[nso_mask] = 63
     # # Set acquisition time per esa step to nan where nso_mask is True
diff --git a/imap_processing/codice/codice_l1a_lo_priority.py b/imap_processing/codice/codice_l1a_lo_priority.py
index f85e033028..8f1d8eba3a 100644
--- a/imap_processing/codice/codice_l1a_lo_priority.py
+++ b/imap_processing/codice/codice_l1a_lo_priority.py
@@ -8,7 +8,6 @@
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.codice import constants
-from imap_processing.codice.constants import UINT32_FILLVAL
 from imap_processing.codice.decompress import decompress
 from imap_processing.codice.utils import (
     CODICEAPID,
@@ -156,7 +155,7 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
     nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]
     species_mask = nso_mask[:, np.newaxis, :, np.newaxis]
     species_mask = np.broadcast_to(species_mask, species_data.shape)
-    species_data[species_mask] = UINT32_FILLVAL
+    species_data = species_data.astype(np.float64)
+    species_data[species_mask] = np.nan
     # Set half_spin_per_esa_step to 63 which is the fill value
     # half_spin_per_esa_step[nso_mask] = 63
     # # Set acquisition time per esa step to nan where nso_mask is True
diff --git a/imap_processing/codice/codice_l1a_lo_species.py b/imap_processing/codice/codice_l1a_lo_species.py
index 7091bbb757..e4158b5131 100644
--- a/imap_processing/codice/codice_l1a_lo_species.py
+++ b/imap_processing/codice/codice_l1a_lo_species.py
@@ -8,7 +8,6 @@
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.codice import constants
-from imap_processing.codice.constants import UINT32_FILLVAL
 from imap_processing.codice.decompress import decompress
 from imap_processing.codice.utils import (
     CODICEAPID,
@@ -147,7 +146,7 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
     nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]
     species_mask = nso_mask[:, np.newaxis, :, np.newaxis]
     species_mask = np.repeat(species_mask, num_species, 1)
-    species_data[species_mask] = UINT32_FILLVAL
+    species_data = species_data.astype(np.float64)
+    species_data[species_mask] = np.nan
     # # Set half_spin_per_esa_step to 63 which is the fill value
     # half_spin_per_esa_step[nso_mask] = 63
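
The codice_l1b.py hunk below fixes an indexing bug in convert_to_rates: with n_sector now carrying both 'epoch' and 'esa_step' dimensions, n_sector[-1] = 11.0 would overwrite every ESA step of the last epoch rather than the last ESA step of every epoch. A minimal sketch with made-up sizes (3 epochs, 128 ESA steps):

    import numpy as np
    import xarray as xr

    acq_time = xr.DataArray(np.ones((3, 128)), dims=("epoch", "esa_step"))
    n_sector = xr.full_like(acq_time, 12.0, dtype=np.float64)

    # [:, -1] touches only the esa_step axis; [-1] would select along epoch.
    n_sector[:, -1] = 11.0

    assert (n_sector.values[:, :-1] == 12.0).all()
    assert (n_sector.values[:, -1] == 11.0).all()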
diff --git a/imap_processing/codice/codice_l1b.py b/imap_processing/codice/codice_l1b.py
index d32c470122..2e109e4203 100644
--- a/imap_processing/codice/codice_l1b.py
+++ b/imap_processing/codice/codice_l1b.py
@@ -96,14 +96,15 @@ def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray:
         "lo-sw-species",
         "lo-ialirt",
     ]:
-        # Create n_sector with 'esa_step' dimension. This is done by xr.full_like
-        # with input dataset.acquisition_time_per_esa_step. This ensures that the
-        # resulting n_sector has the same dimensions as acquisition_time_per_esa_step.
-        # Per CoDICE, fill first 127 with default value of 12. Then fill last with 11.
+        # Create n_sector with 'epoch' and 'esa_step' dimensions. This is done by
+        # xr.full_like with input dataset.acquisition_time_per_esa_step. This ensures
+        # that the resulting n_sector has the same dimensions as
+        # acquisition_time_per_esa_step. Per CoDICE, fill the first 127 ESA steps
+        # with a default value of 12, and the last step with 11.
         n_sector = xr.full_like(
             dataset.acquisition_time_per_esa_step, 12.0, dtype=np.float64
         )
-        n_sector[-1] = 11.0
+        n_sector[:, -1] = 11.0
 
         # Denominator to convert counts to rates
         denominator = dataset.acquisition_time_per_esa_step * n_sector
diff --git a/imap_processing/codice/constants.py b/imap_processing/codice/constants.py
index 647120f70f..b89feb3671 100644
--- a/imap_processing/codice/constants.py
+++ b/imap_processing/codice/constants.py
@@ -1168,5 +1168,3 @@
         318.39,
     ]
 )
-
-UINT32_FILLVAL = 4294967294
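
Taken together, the series leaves NaN as the in-memory fill for masked NSO steps, and the L1B rate conversion then preserves it for free. A toy illustration of the division in convert_to_rates (values made up):

    import numpy as np

    counts = np.array([120.0, 24.0, np.nan])  # NaN marks a masked NSO step
    acq_time = np.array([0.5, 0.5, 0.5])      # acquisition time per ESA step, seconds
    n_sector = np.array([12.0, 12.0, 11.0])

    rates = counts / (acq_time * n_sector)
    print(rates)  # [20.  4. nan]; masked steps stay masked in L1B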