diff --git a/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml b/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml index 5df4b9f553..3e2ea8a6db 100644 --- a/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +++ b/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml @@ -1,7 +1,7 @@ # <=== Useful Variables ===> int_fillval: &int_fillval -9223372036854775808 uint32_fillval: &uint32_fillval 4294967295 -real_fillval: &real_fillval -1.0e+31 +real_fillval: &real_fillval -1.0E+31 min_int: &min_int -9223372036854775808 min_epoch: &min_epoch -315575942816000000 @@ -98,7 +98,7 @@ spin_sector_pairs: priority: CATDESC: Priority Level FIELDNAM: Priority Level - FILLVAL: -1 + FILLVAL: *uint32_fillval FORMAT: I2 LABLAXIS: " " SCALETYP: linear @@ -172,8 +172,9 @@ energy_species_label: VAR_TYPE: metadata # <=== Dataset Variable Attributes ===> -acquisition_time_per_step: +acquisition_time_per_esa_step: CATDESC: Acquisition time for each step of energy + DEPEND_0: epoch DEPEND_1: esa_step FIELDNAM: Acquisition Time FILLVAL: *real_fillval @@ -187,6 +188,7 @@ acquisition_time_per_step: half_spin_per_esa_step: CATDESC: Half spin number for each step of energy + DEPEND_0: epoch DEPEND_1: esa_step FIELDNAM: Half Spin Number FILLVAL: 255 @@ -315,8 +317,8 @@ sw_bias_gain_mode: counters_base: &counters_base DEPEND_0: epoch DISPLAY_TYPE: time_series - FILLVAL: *uint32_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 SCALETYP: linear UNITS: counts VALIDMIN: 0 @@ -410,8 +412,8 @@ hi-species-attrs: DEPEND_1: energy_{species} DISPLAY_TYPE: time_series FIELDNAM: Species {species} - FILLVAL: *uint32_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: energy_{species}_label SCALETYP: linear UNITS: counts @@ -425,13 +427,13 @@ hi-species-unc-attrs: DEPEND_1: energy_{species} DISPLAY_TYPE: time_series FIELDNAM: Species {species} - FILLVAL: *uint32_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: energy_{species}_label SCALETYP: linear UNITS: counts VALIDMAX: *max_uint32 - VALIDMIN: 0 + VALIDMIN: 0.0 VAR_TYPE: data hi-energy-attrs: @@ -468,11 +470,11 @@ hi-energy-delta-attrs: hi_priorities_attrs: &hi_priorities_default DEPEND_0: epoch DISPLAY_TYPE: time_series - FILLVAL: *uint32_fillval - FORMAT: I5 + FILLVAL: *real_fillval + FORMAT: F32.9 LABLAXIS: "events" UNITS: events - VALIDMAX: *max_uint32 + VALIDMAX: *real_fillval VALIDMIN: 0 VAR_TYPE: data @@ -653,8 +655,8 @@ lo_counters_singles: DEPEND_3: inst_az DISPLAY_TYPE: time_series FIELDNAM: Rates - Single (APD) - FILLVAL: *uint32_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_pairs_label LABL_PTR_3: inst_az_label @@ -673,8 +675,8 @@ lo-angular-attrs: DEPEND_3: inst_az DISPLAY_TYPE: time_series FIELDNAM: "SW - {species}" - FILLVAL: *int_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_label LABL_PTR_3: inst_az_label @@ -691,8 +693,8 @@ lo-angular-unc-attrs: DEPEND_3: inst_az DISPLAY_TYPE: time_series FIELDNAM: "NSW - {species}" - FILLVAL: *int_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F19 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_label LABL_PTR_3: inst_az_label @@ -707,8 +709,8 @@ lo_priority_base: &lo_priority_base DEPEND_1: esa_step DEPEND_2: spin_sector DISPLAY_TYPE: time_series - FILLVAL: *uint32_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_label 
SCALETYP: linear @@ -762,8 +764,8 @@ lo-species-attrs: DEPEND_2: spin_sector DISPLAY_TYPE: time_series FIELDNAM: "{direction} - {species}" - FILLVAL: *int_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_label UNITS: counts @@ -778,8 +780,8 @@ lo-pui-species-attrs: DEPEND_2: spin_sector DISPLAY_TYPE: time_series FIELDNAM: "{direction} - {species}" - FILLVAL: *int_fillval - FORMAT: I7 + FILLVAL: *real_fillval + FORMAT: F32.9 LABL_PTR_1: esa_step_label LABL_PTR_2: spin_sector_label UNITS: counts diff --git a/imap_processing/codice/codice_l1a.py b/imap_processing/codice/codice_l1a.py index 5bc0a3a9a3..732efc3875 100644 --- a/imap_processing/codice/codice_l1a.py +++ b/imap_processing/codice/codice_l1a.py @@ -7,7 +7,6 @@ from imap_processing import imap_module_directory from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes -from imap_processing.codice.codice_l1a_de import l1a_direct_event from imap_processing.codice.codice_l1a_hi_counters_aggregated import ( l1a_hi_counters_aggregated, ) @@ -87,11 +86,15 @@ def process_l1a( # noqa: PLR0912 logger.info("Processing Hi Sectored Species Counts") datasets.append(l1a_hi_sectored(datasets_by_apid[apid], lut_file)) elif apid == CODICEAPID.COD_HI_PHA: - logger.info("Processing Direct Events for Hi") - datasets.append(l1a_direct_event(datasets_by_apid[apid], apid=apid)) + logger.info("Skip processing Direct Events for Hi") + continue + # TODO: undo this in coming DE segmented work + # datasets.append(l1a_direct_event(datasets_by_apid[apid], apid=apid)) elif apid == CODICEAPID.COD_LO_PHA: - logger.info("Processing Direct Events for Lo") - datasets.append(l1a_direct_event(datasets_by_apid[apid], apid=apid)) + logger.info("Skip processing Direct Events for Lo") + continue + # TODO: undo this in coming DE segmented work + # datasets.append(l1a_direct_event(datasets_by_apid[apid], apid=apid)) elif apid in [ CODICEAPID.COD_LO_SW_PRIORITY_COUNTS, CODICEAPID.COD_LO_NSW_PRIORITY_COUNTS, diff --git a/imap_processing/codice/codice_l1a_hi_counters_aggregated.py b/imap_processing/codice/codice_l1a_hi_counters_aggregated.py index ffe3c33267..cfe50c6195 100644 --- a/imap_processing/codice/codice_l1a_hi_counters_aggregated.py +++ b/imap_processing/codice/codice_l1a_hi_counters_aggregated.py @@ -101,6 +101,8 @@ def l1a_hi_counters_aggregated( counters_data = np.array(decompressed_data, dtype=np.uint32).reshape( -1, num_variables ) + # Convert counters data to float + counters_data = counters_data.astype(np.float64) # ========= Get Epoch Time Data =========== # Epoch center time and delta diff --git a/imap_processing/codice/codice_l1a_hi_counters_singles.py b/imap_processing/codice/codice_l1a_hi_counters_singles.py index 16f3d16d63..93e450373d 100644 --- a/imap_processing/codice/codice_l1a_hi_counters_singles.py +++ b/imap_processing/codice/codice_l1a_hi_counters_singles.py @@ -96,7 +96,8 @@ def l1a_hi_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. 
counters_data = np.array(decompressed_data, dtype=np.uint32).reshape( -1, len(variable_names), inst_az ) - + # Convert counters data to float + counters_data = counters_data.astype(np.float64) # ========= Get Epoch Time Data =========== # Epoch center time and delta epoch_center, deltas = get_codice_epoch_time( diff --git a/imap_processing/codice/codice_l1a_hi_omni.py b/imap_processing/codice/codice_l1a_hi_omni.py index 9698e9a580..656380c6a0 100644 --- a/imap_processing/codice/codice_l1a_hi_omni.py +++ b/imap_processing/codice/codice_l1a_hi_omni.py @@ -237,6 +237,8 @@ def l1a_hi_omni(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: species_attrs = apply_replacements_to_attrs( species_attrs, {"species": species_name} ) + # Convert to float + species_data = species_data.astype(np.float64) l1a_dataset[species_name] = xr.DataArray( species_data, dims=("epoch", f"energy_{species_name}"), diff --git a/imap_processing/codice/codice_l1a_hi_priority.py b/imap_processing/codice/codice_l1a_hi_priority.py index e481a040c4..7de581978d 100644 --- a/imap_processing/codice/codice_l1a_hi_priority.py +++ b/imap_processing/codice/codice_l1a_hi_priority.py @@ -109,7 +109,7 @@ def l1a_hi_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: species_data = np.array(decompressed_data, dtype=np.uint32).reshape( num_packets, collapse_shape[1] ) - + species_data = species_data.astype(np.float64) # ========== Create CDF Dataset with Metadata =========== cdf_attrs = ImapCdfAttributes() cdf_attrs.add_instrument_global_attrs("codice") diff --git a/imap_processing/codice/codice_l1a_hi_sectored.py b/imap_processing/codice/codice_l1a_hi_sectored.py index 6615eae720..1a147e2b40 100644 --- a/imap_processing/codice/codice_l1a_hi_sectored.py +++ b/imap_processing/codice/codice_l1a_hi_sectored.py @@ -237,6 +237,7 @@ def l1a_hi_sectored(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: species_attrs = apply_replacements_to_attrs( species_attrs, {"species": species_name} ) + species_data = species_data.astype(np.float64) # Add DEPEND_2, DEPEND_3 species_attrs["DEPEND_2"] = "spin_sector" species_attrs["LABL_PTR_2"] = "spin_sector_label" diff --git a/imap_processing/codice/codice_l1a_lo_angular.py b/imap_processing/codice/codice_l1a_lo_angular.py index f9e0e6cd4e..776fe80563 100644 --- a/imap_processing/codice/codice_l1a_lo_angular.py +++ b/imap_processing/codice/codice_l1a_lo_angular.py @@ -67,28 +67,28 @@ def _despin_species_data( # index_to_position gets the position from collapse table. Eg. 
# [1, 2, 3, 23, 24] for SW angular angular_position = index_to_position(sci_lut_data, 0, view_tab_obj.collapse_table) - orientation_a = pixel_orientation == "A" - orientation_b = pixel_orientation == "B" + orientation_a_indices = np.where(pixel_orientation == "A")[0] + orientation_b_indices = np.where(pixel_orientation == "B")[0] # Despin data based on orientation and angular position for pos_idx, position in enumerate(angular_position): if position <= 12: # Case 1: position 0-12, orientation A, append to first half - despun_data[:, :, orientation_a, :12, pos_idx] = species_data[ - :, :, orientation_a, :, pos_idx + despun_data[:, :, orientation_a_indices, :12, pos_idx] = species_data[ + :, :, orientation_a_indices, :, pos_idx ] # Case 2: position 13-24, orientation B, append to second half - despun_data[:, :, orientation_b, 12:, pos_idx] = species_data[ - :, :, orientation_b, :, pos_idx + despun_data[:, :, orientation_b_indices, 12:, pos_idx] = species_data[ + :, :, orientation_b_indices, :, pos_idx ] else: # Case 3: position 13-24, orientation A, append to second half - despun_data[:, :, orientation_a, 12:, pos_idx] = species_data[ - :, :, orientation_a, :, pos_idx + despun_data[:, :, orientation_a_indices, 12:, pos_idx] = species_data[ + :, :, orientation_a_indices, :, pos_idx ] # Case 4: position 0-12, orientation B, append to first half - despun_data[:, :, orientation_b, :12, pos_idx] = species_data[ - :, :, orientation_b, :, pos_idx + despun_data[:, :, orientation_b_indices, :12, pos_idx] = species_data[ + :, :, orientation_b_indices, :, pos_idx ] return despun_data @@ -198,6 +198,35 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: ] voltage_data = sci_lut_data["esa_sweep_tab"][f"{esa_table_number}"] + half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") + # TODO: Handle epoch dependent acquisition time and half spin per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + half_spin_per_esa_step = np.tile( + np.asarray(half_spin_per_esa_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # Get acquisition time per esa step + acquisition_time_per_step = calculate_acq_time_per_step( + sci_lut_data["lo_stepping_tab"] + ) + acquisition_time_per_step = np.tile( + np.asarray(acquisition_time_per_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # For every energy after nso_half_spin, set data to fill values + nso_half_spin = unpacked_dataset["nso_half_spin"].values + nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis] + species_mask = nso_mask[:, np.newaxis, :, np.newaxis, np.newaxis] + species_mask = np.broadcast_to(species_mask, species_data.shape) + species_data = species_data.astype(np.float64) + species_data[species_mask] = np.nan + # Set half_spin_per_esa_step to 63 which is the fill value + # half_spin_per_esa_step[nso_mask] = 63 + # # Set acquisition time per esa step to nan where nso_mask is True + # acquisition_time_per_step[nso_mask] = np.nan + # ========= Get Epoch Time Data =========== # Epoch center time and delta epoch_center, deltas = get_codice_epoch_time( @@ -239,8 +268,11 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: attrs=cdf_attrs.get_variable_attributes("esa_step", check_schema=False), ), "half_spin_per_esa_step": xr.DataArray( - sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), - dims=("esa_step",), + half_spin_per_esa_step, + dims=( + "epoch", + "esa_step", + ), attrs=cdf_attrs.get_variable_attributes( "half_spin_per_esa_step", check_schema=False ), @@ -309,11 +341,11 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - l1a_dataset["acquisition_time_per_step"] = xr.DataArray( - calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"]), - dims=("esa_step",), + l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( + acquisition_time_per_step, + dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( - "acquisition_time_per_step", check_schema=False + "acquisition_time_per_esa_step", check_schema=False ), ) diff --git a/imap_processing/codice/codice_l1a_lo_counters_aggregated.py b/imap_processing/codice/codice_l1a_lo_counters_aggregated.py index 0e84b2e184..a6cbab8ee4 100644 --- a/imap_processing/codice/codice_l1a_lo_counters_aggregated.py +++ b/imap_processing/codice/codice_l1a_lo_counters_aggregated.py @@ -114,6 +114,35 @@ def l1a_lo_counters_aggregated( -1, esa_step, num_variables, spin_sector_pairs ) + half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") + # TODO: Handle epoch dependent acquisition time and half spin per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + half_spin_per_esa_step = np.tile( + np.asarray(half_spin_per_esa_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # Get acquisition time per esa step + acquisition_time_per_step = calculate_acq_time_per_step( + sci_lut_data["lo_stepping_tab"] + ) + acquisition_time_per_step = np.tile( + np.asarray(acquisition_time_per_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # For every energy after nso_half_spin, set data to fill values + nso_half_spin = unpacked_dataset["nso_half_spin"].values + nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis] + counters_mask = nso_mask[:, :, np.newaxis, np.newaxis] + counters_mask = np.broadcast_to(counters_mask, counters_data.shape) + counters_data = counters_data.astype(np.float64) + counters_data[counters_mask] = np.nan + # Set half_spin_per_esa_step to 63 which is the fill value + # half_spin_per_esa_step[nso_mask] = 63 + # # Set acquisition time per esa step to nan where nso_mask is True + # acquisition_time_per_step[nso_mask] = np.nan + # ========= Get Epoch Time Data =========== # Epoch center time and delta epoch_center, deltas = get_codice_epoch_time( @@ -155,8 +184,11 @@ def l1a_lo_counters_aggregated( attrs=cdf_attrs.get_variable_attributes("esa_step", check_schema=False), ), "half_spin_per_esa_step": xr.DataArray( - sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), - dims=("esa_step",), + half_spin_per_esa_step, + dims=( + "epoch", + "esa_step", + ), attrs=cdf_attrs.get_variable_attributes( "half_spin_per_esa_step", check_schema=False ), @@ -207,11 +239,11 @@ def l1a_lo_counters_aggregated( dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - l1a_dataset["acquisition_time_per_step"] = xr.DataArray( - calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"]), - dims=("esa_step",), + l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( + acquisition_time_per_step, + dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( - "acquisition_time_per_step", check_schema=False + "acquisition_time_per_esa_step", check_schema=False ), ) diff --git a/imap_processing/codice/codice_l1a_lo_counters_singles.py b/imap_processing/codice/codice_l1a_lo_counters_singles.py index 624465ef01..2c8f83fda6 100644 --- a/imap_processing/codice/codice_l1a_lo_counters_singles.py +++ b/imap_processing/codice/codice_l1a_lo_counters_singles.py @@ -111,6 +111,35 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. .transpose(0, 1, 3, 2) ) + half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") + # TODO: Handle epoch dependent acquisition time and half spin per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + half_spin_per_esa_step = np.tile( + np.asarray(half_spin_per_esa_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # Get acquisition time per esa step + acquisition_time_per_step = calculate_acq_time_per_step( + sci_lut_data["lo_stepping_tab"] + ) + acquisition_time_per_step = np.tile( + np.asarray(acquisition_time_per_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # For every energy after nso_half_spin, set data to fill values + nso_half_spin = unpacked_dataset["nso_half_spin"].values + nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis] + counters_mask = nso_mask[:, :, np.newaxis, np.newaxis] + counters_mask = np.broadcast_to(counters_mask, counters_data.shape) + counters_data = counters_data.astype(np.float64) + counters_data[counters_mask] = np.nan + # Set half_spin_per_esa_step to 63 which is the fill value + # half_spin_per_esa_step[nso_mask] = 63 + # # Set acquisition time per esa step to nan where nso_mask is True + # acquisition_time_per_step[nso_mask] = np.nan + # ========= Get Epoch Time Data =========== # Epoch center time and delta epoch_center, deltas = get_codice_epoch_time( @@ -152,8 +181,11 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. attrs=cdf_attrs.get_variable_attributes("esa_step", check_schema=False), ), "half_spin_per_esa_step": xr.DataArray( - sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), - dims=("esa_step",), + half_spin_per_esa_step, + dims=( + "epoch", + "esa_step", + ), attrs=cdf_attrs.get_variable_attributes( "half_spin_per_esa_step", check_schema=False ), @@ -216,11 +248,11 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - l1a_dataset["acquisition_time_per_step"] = xr.DataArray( - calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"]), - dims=("esa_step",), + l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( + acquisition_time_per_step, + dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( - "acquisition_time_per_step", check_schema=False + "acquisition_time_per_esa_step", check_schema=False ), ) diff --git a/imap_processing/codice/codice_l1a_lo_priority.py b/imap_processing/codice/codice_l1a_lo_priority.py index 9b28d86a96..8f1d8eba3a 100644 --- a/imap_processing/codice/codice_l1a_lo_priority.py +++ b/imap_processing/codice/codice_l1a_lo_priority.py @@ -133,6 +133,35 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: num_packets, num_species, esa_steps, collapse_shape[0] ) + half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") + # TODO: Handle epoch dependent acquisition time and half spin per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + half_spin_per_esa_step = np.tile( + np.asarray(half_spin_per_esa_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # Get acquisition time per esa step + acquisition_time_per_step = calculate_acq_time_per_step( + sci_lut_data["lo_stepping_tab"] + ) + acquisition_time_per_step = np.tile( + np.asarray(acquisition_time_per_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + # For every energy after nso_half_spin, set data to fill values + nso_half_spin = unpacked_dataset["nso_half_spin"].values + nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis] + species_mask = nso_mask[:, np.newaxis, :, np.newaxis] + species_mask = np.broadcast_to(species_mask, species_data.shape) + species_data = species_data.astype(np.float64) + species_data[species_mask] = np.nan + # Set half_spin_per_esa_step to 63 which is the fill value + # half_spin_per_esa_step[nso_mask] = 63 + # # Set acquisition time per esa step to nan where nso_mask is True + # acquisition_time_per_step[nso_mask] = np.nan + # ========== Create CDF Dataset with Metadata =========== cdf_attrs = ImapCdfAttributes() cdf_attrs.add_instrument_global_attrs("codice") @@ -165,8 +194,11 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: attrs=cdf_attrs.get_variable_attributes("esa_step", check_schema=False), ), "half_spin_per_esa_step": xr.DataArray( - sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), - dims=("esa_step",), + half_spin_per_esa_step, + dims=( + "epoch", + "esa_step", + ), attrs=cdf_attrs.get_variable_attributes( "half_spin_per_esa_step", check_schema=False ), @@ -223,11 +255,11 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - l1a_dataset["acquisition_time_per_step"] = xr.DataArray( - calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"]), - dims=("esa_step",), + l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( + acquisition_time_per_step, + dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( - "acquisition_time_per_step", check_schema=False + "acquisition_time_per_esa_step", check_schema=False ), ) diff --git a/imap_processing/codice/codice_l1a_lo_species.py b/imap_processing/codice/codice_l1a_lo_species.py index 72333c5030..e4158b5131 100644 --- a/imap_processing/codice/codice_l1a_lo_species.py +++ b/imap_processing/codice/codice_l1a_lo_species.py @@ -121,6 +121,39 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: num_packets, num_species, esa_steps, *collapsed_shape ) + half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") + acquisition_time_per_step = calculate_acq_time_per_step( + sci_lut_data["lo_stepping_tab"] + ) + # Get acquisition time per esa step + # TODO: Handle epoch dependent acquisition time and half spin per esa step + # For now, just tile the same array for all epochs. + # Eventually we may have data from a day where the LUT changed. 
If this is the + # case, we need to split the data by epoch and assign different acquisition times + half_spin_per_esa_step = np.tile( + np.asarray( + half_spin_per_esa_step, + ), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + acquisition_time_per_step = np.tile( + np.asarray(acquisition_time_per_step), + (len(unpacked_dataset["acq_start_seconds"]), 1), + ) + + # For every energy after nso_half_spin, set data to fill values + nso_half_spin = unpacked_dataset["nso_half_spin"].values + nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis] + species_mask = nso_mask[:, np.newaxis, :, np.newaxis] + species_mask = np.repeat(species_mask, num_species, 1) + species_data = species_data.astype(np.float64) + species_data[species_mask] = np.nan + + # # Set half_spin_per_esa_step to 63 which is the fill value + # half_spin_per_esa_step[nso_mask] = 63 + # # Set acquisition time per esa step to nan where nso_mask is True + # acquisition_time_per_step[nso_mask] = np.nan + # ========== Get Voltage Data from LUT =========== # Use plan id and plan step to get voltage data's table_number in ESA sweep table. # Voltage data is (128,) @@ -170,8 +203,11 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: attrs=cdf_attrs.get_variable_attributes("esa_step", check_schema=False), ), "half_spin_per_esa_step": xr.DataArray( - sci_lut_data["lo_stepping_tab"]["row_number"].get("data"), - dims=("esa_step",), + half_spin_per_esa_step, + dims=( + "epoch", + "esa_step", + ), attrs=cdf_attrs.get_variable_attributes( "half_spin_per_esa_step", check_schema=False ), @@ -228,11 +264,11 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes("data_quality"), ) - l1a_dataset["acquisition_time_per_step"] = xr.DataArray( - calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"]), - dims=("esa_step",), + l1a_dataset["acquisition_time_per_esa_step"] = xr.DataArray( + acquisition_time_per_step, + dims=("epoch", "esa_step"), attrs=cdf_attrs.get_variable_attributes( - "acquisition_time_per_step", check_schema=False + "acquisition_time_per_esa_step", check_schema=False ), ) diff --git a/imap_processing/codice/codice_l1b.py b/imap_processing/codice/codice_l1b.py index 6cdc44a1a4..2e109e4203 100644 --- a/imap_processing/codice/codice_l1b.py +++ b/imap_processing/codice/codice_l1b.py @@ -75,7 +75,7 @@ def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray: ]: # Denominator to convert counts to rates denominator = ( - dataset.acquisition_time_per_step + dataset.acquisition_time_per_esa_step * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"] ) @@ -88,7 +88,7 @@ def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray: "spin_period", "voltage_table", # TODO: undo this when I get new validation file from Joey - # "acquisition_time_per_step", + # "acquisition_time_per_esa_step", ] dataset = dataset.drop_vars(drop_variables) elif descriptor in [ @@ -96,17 +96,18 @@ def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray: "lo-sw-species", "lo-ialirt", ]: - # Create n_sector with 'esa_step' dimension. This is done by xr.full_like - # with input dataset.acquisition_time_per_step. This ensures that the resulting - # n_sector has the same dimensions as acquisition_time_per_step. - # Per CoDICE, fill first 127 with default value of 12. Then fill last with 11. + # Create n_sector with 'epoch' and 'esa_step' dimension. 
This is done by + # xr.full_like with input dataset.acquisition_time_per_esa_step. This ensures + # that the resulting n_sector has the same dimensions as + # acquisition_time_per_esa_step. Per CoDICE, fill first 127 with default value + # of 12. Then fill last with 11. In your SDC processing n_sector = xr.full_like( - dataset.acquisition_time_per_step, 12.0, dtype=np.float64 + dataset.acquisition_time_per_esa_step, 12.0, dtype=np.float64 ) - n_sector[-1] = 11.0 + n_sector[:, -1] = 11.0 # Denominator to convert counts to rates - denominator = dataset.acquisition_time_per_step * n_sector + denominator = dataset.acquisition_time_per_esa_step * n_sector # Do not carry these variable attributes from L1a to L1b for above products drop_variables = [ "k_factor", @@ -116,7 +117,7 @@ def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray: "spin_period", "voltage_table", # TODO: undo this when I get new validation file from Joey - # "acquisition_time_per_step", + # "acquisition_time_per_esa_step", ] dataset = dataset.drop_vars(drop_variables) diff --git a/imap_processing/codice/packet_definitions/codice_packet_definition.xml b/imap_processing/codice/packet_definitions/codice_packet_definition.xml index a50eb3d4b6..47bf7415a5 100644 --- a/imap_processing/codice/packet_definitions/codice_packet_definition.xml +++ b/imap_processing/codice/packet_definitions/codice_packet_definition.xml @@ -1,6 +1,6 @@ - + @@ -45,7 +45,7 @@ - + @@ -297,8 +297,17 @@ + + + + + + + + + - + @@ -358,7 +367,7 @@ - + @@ -439,7 +448,8 @@ - + + @@ -567,46 +577,28 @@ - - - - - - - + - - - - - - - + - - - - - - - + - - - - - - - + - + + + + + + + @@ -615,7 +607,8 @@ - + + @@ -624,7 +617,8 @@ - + + @@ -633,7 +627,8 @@ - + + @@ -688,7 +683,7 @@ - + @@ -697,8 +692,7 @@ - - + @@ -707,8 +701,7 @@ - - + @@ -717,7 +710,7 @@ - + @@ -726,7 +719,7 @@ - + @@ -796,19 +789,19 @@ - + - + - + - + - + @@ -835,15 +828,55 @@ - + - + + + + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -1115,7 +1148,7 @@ - + @@ -1145,7 +1178,13 @@ - + + + + + + + @@ -1154,7 +1193,13 @@ - + + + + + + + @@ -1163,7 +1208,13 @@ - + + + + + + + @@ -1172,7 +1223,13 @@ - + + + + + + + @@ -1181,7 +1238,13 @@ - + + + + + + + @@ -1474,74 +1537,12 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + @@ -1737,7 +1738,6 @@ - @@ -2422,74 +2422,12 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + @@ -3104,6 +3042,12 @@ + + INDICATES THE CURRENT OPERATIONAL STATE OF THE LO ESA SWEEP: +- NORMAL - BOTH ESAS ARE TRACKING TOGETHER +- RGFO - REDUCED GAIN FACTOR OPERATION; ESA-A IS REDUCED IN ORDER TO REDUCE THE GAIN FACTOR AND ALLOW FEWER IONS INTO THE DETECTOR +- NSO - NO SCAN OPERATION; BOTH ESAS ARE RETURNED TO A HIGH-ENERGY SETTING AND NO SCANNING IS DONE FOR THE REMAINDER OF THE ESA SWEEP + @@ -3127,17 +3071,27 @@ - + - - - - - - - - - + + ALARM PERSISTENCE = 3 IN OASIS + + + ALARM PERSISTENCE = 3 IN OASIS + + + ALARM PERSISTENCE = 3 IN OASIS + + + ALARM PERSISTENCE = 3 IN OASIS + + + ALARM PERSISTENCE = 3 IN OASIS + + + + + @@ -3159,11 +3113,13 @@ - - - - - + + + + + + EACH BIT INDICATES WHETHER THE CORRESPONDING MACRO IS CURRENTLY RUNNING (E.G. BIT 1 WILL BE SET IF MACRO 1 IS RUNNING) + INDICATES WHETHER ANY CATEGORY 1 LIMITS HAVE TRIGGERED. 
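Reviewer note: with `acquisition_time_per_esa_step` now shaped `(epoch, esa_step)`, the L1B `n_sector` helper and the counts-to-rates denominator pick up an epoch axis, which is why the assignment above changes from `n_sector[-1]` to `n_sector[:, -1]` (and why counts are converted to float64 upstream, so NaN fills survive into the F32.9-formatted CDF variables). A minimal sketch of that shape change; the sizes and the 0.008 s acquisition time are invented, while the variable name and the 12/11 sector counts come from the diff:

```python
import numpy as np
import xarray as xr

n_epoch, n_esa = 3, 128
acq_time = xr.DataArray(
    np.full((n_epoch, n_esa), 0.008),  # illustrative seconds per ESA step
    dims=("epoch", "esa_step"),
    name="acquisition_time_per_esa_step",
)
# Per CoDICE, each ESA step integrates over 12 spin sectors except the
# last step, which only covers 11.
n_sector = xr.full_like(acq_time, 12.0, dtype=np.float64)
n_sector[:, -1] = 11.0  # per-epoch now, hence [:, -1] instead of [-1]

counts = xr.DataArray(
    np.full((n_epoch, n_esa), 42.0),  # float counts; NaN fills propagate
    dims=("epoch", "esa_step"),
)
rates = counts / (acq_time * n_sector)  # counts per second
print(rates.shape)  # (3, 128)
```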
@@ -3194,7 +3150,7 @@ INDICATES WHETHER THE MOST RECENT TRIGGER WAS A MINIMUM OR MAXIMUM LIMIT - INDICATES THE ID OF THE MOST RECENT FDC TRIGGER + INDICATES THE TABLE INDEX OF THE MOST RECENT FDC TRIGGER INDICATES THE ACTION THAT WAS TAKEN FOR THE MOST RECENT FDC TRIGGER @@ -3208,7 +3164,7 @@ INDICATES WHETHER FSW CONTROL OF THE OPERATIONAL HEATER IS ENABLED - + INDICATES THE CURRENT STATE OF THE PHYSICAL HEATER OUTPUT @@ -3313,60 +3269,28 @@ SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK - - PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGESCOD_LO_PHA. - - - SPIN PERIOD REPORTED BY THE SPACECRAFT IN THE TIME AND STATUS MESSAGE. REPORTED PERIOD IS THE PERIOD THAT WAS ACTIVE WHEN THE 16-SPIN ACQUISITION CYCLE STARTED. - - - FULL-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED - - - SUB-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED - - - SPARE FOR ALIGNMENT - - - BIAS GAIN MODE FOR THE SUPRATHERMAL SECTOR - - - BIAS GAIN MODE FOR THE SOLARWIND SECTOR - - - - INDICATES THAT THERE WAS SOME ERROR DETECTED DURING ACQUISITION OR PROCESSING OF THE DATA. ERRORS COULD INCLUDE CORRUPTED ACQUISITION MEMORY (I.E. EDAC ERRORS), TIMING VIOLATIONS, OR OTHER EVENTS THAT INTERRUPTED OR OTHERWISE AFFECTED DATA COLLECTION. - - - WHETHER THE EVENT DATA IS COMPRESSED. IF 1/YES, EVENT_DATA ARRAY IS COMPRESSED USING THE LZMA COMPRESSION ALGORITHM. - - - NUMBER OF EVENTS SELECTED FOR DOWNLINK (I.E. NUMBER OF EVENTS IN THE EVENT_DATA ARRAY) - - - NUMBER OF BYTES IN THE EVENT_DATA ARRAY. IF COMPRESSED, THIS VALUE REPRESENTS THE LENGTH OF THE COMPRESSED DATA. - - OPTIONALLY COMPRESSED ARRAY OF EVENT DATA + + OPTIONALLY COMPRESSED ARRAY OF EVENT DATA -FORMAT IS TBD; SOME CONSIDERATIONS/OPTIONS: -- FULL EVENTS HAVE A LOT OF REDUNDANT DATA (E.G. WILL HAVE MANY EVENTS WITH THE SAME PRIORITY/E-STEP/SPIN PHASE INFORMATION). HOW WELL DOES COMPRESSION TO DEAL WITH THE REDUNDANCY? -- COULD INCLUDE MINI-HEADERS FOR EACH (PRIORITY,E-STEP, SPIN-PHASE) GROUP AND STRIP THE REDUNDANT DATA FROM THE EVENTS -- SHOULD EVENTS BE TIGHTLY PACKED, OR CAN WE PAD OUT TO 64-BIT WORD BOUNDARIES? HOW WELL DOES COMPRESSION COMPENSATE FOR THE EXTRA BITS? + FORMAT IS TBD; SOME CONSIDERATIONS/OPTIONS: + - FULL EVENTS HAVE A LOT OF REDUNDANT DATA (E.G. WILL HAVE MANY EVENTS WITH THE SAME PRIORITY/E-STEP/SPIN PHASE INFORMATION). HOW WELL DOES COMPRESSION TO DEAL WITH THE REDUNDANCY? + - COULD INCLUDE MINI-HEADERS FOR EACH (PRIORITY,E-STEP, SPIN-PHASE) GROUP AND STRIP THE REDUNDANT DATA FROM THE EVENTS + - SHOULD EVENTS BE TIGHTLY PACKED, OR CAN WE PAD OUT TO 64-BIT WORD BOUNDARIES? HOW WELL DOES COMPRESSION COMPENSATE FOR THE EXTRA BITS? -EACH EVENT CONSISTS OF: -- 7-BIT E-STEP -- 10-BIT TOF -- 9-BIT APD ENERGY -- 7-BIT SPIN ANGLE -- 5-BIT POSITION -- 5-BIT APD-ID -- 1-BIT APD-GAIN -- 2-BIT PHA TYPE -- 3-BIT PRIORITY RANGE + EACH EVENT CONSISTS OF: + - 7-BIT E-STEP + - 10-BIT TOF + - 9-BIT APD ENERGY + - 7-BIT SPIN ANGLE + - 5-BIT POSITION + - 5-BIT APD-ID + - 1-BIT APD-GAIN + - 2-BIT PHA TYPE + - 3-BIT PRIORITY RANGE -TBD: EVENTS MAY BE TIGHTLY PACKED, OR MAY HAVE SPARES ADDED TO KEEP EACH EVENT BYTE-ALIGNED. IN EITHER CASE, THERE MAY BE UP TO 1 BYTE OF PADDING TO KEEP THE TOTAL SIZE OF THE PACKET EVEN. + TBD: EVENTS MAY BE TIGHTLY PACKED, OR MAY HAVE SPARES ADDED TO KEEP EACH EVENT BYTE-ALIGNED. IN EITHER CASE, THERE MAY BE UP TO 1 BYTE OF PADDING TO KEEP THE TOTAL SIZE OF THE PACKET EVEN. 
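Reviewer note: the Lo PHA description above enumerates 49 bits per event (7+10+9+7+5+5+1+2+3). The packet definition itself marks the packing as TBD, so the following is purely an illustrative sketch that assumes tightly packed, MSB-first fields; the field names follow the description text and nothing here is confirmed flight format:

```python
# Bit widths taken from the Lo PHA event description; packing order and
# alignment are assumptions (the definition says they are TBD).
LO_EVENT_FIELDS = [
    ("e_step", 7),
    ("tof", 10),
    ("apd_energy", 9),
    ("spin_angle", 7),
    ("position", 5),
    ("apd_id", 5),
    ("apd_gain", 1),
    ("pha_type", 2),
    ("priority_range", 3),
]

def unpack_lo_event(bits: int) -> dict[str, int]:
    """Split a 49-bit integer into Lo PHA event fields, MSB first."""
    event, shift = {}, sum(width for _, width in LO_EVENT_FIELDS)
    for name, width in LO_EVENT_FIELDS:
        shift -= width
        event[name] = (bits >> shift) & ((1 << width) - 1)
    return event

print(unpack_lo_event(0b1010101 << 42))  # e_step = 85, all other fields 0
```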
+ PACKET CHECKSUM @@ -3999,58 +3923,26 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK - - PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGESCOD_LO_PHA. - - - SPIN PERIOD REPORTED BY THE SPACECRAFT IN THE TIME AND STATUS MESSAGE. REPORTED PERIOD IS THE PERIOD THAT WAS ACTIVE WHEN THE 16-SPIN ACQUISITION CYCLE STARTED. - - - FULL-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED - - - SUB-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED - - - SPARE FOR ALIGNMENT - - - BIAS GAIN MODE FOR THE SUPRATHERMAL SECTOR - - - BIAS GAIN MODE FOR THE SOLARWIND SECTOR - - - - INDICATES THAT THERE WAS SOME ERROR DETECTED DURING ACQUISITION OR PROCESSING OF THE DATA. ERRORS COULD INCLUDE CORRUPTED ACQUISITION MEMORY (I.E. EDAC ERRORS), TIMING VIOLATIONS, OR OTHER EVENTS THAT INTERRUPTED OR OTHERWISE AFFECTED DATA COLLECTION. - - - WHETHER THE EVENT DATA IS COMPRESSED. IF 1/YES, EVENT_DATA ARRAY IS COMPRESSED USING THE RICE COMPRESSION ALGORITHM. - - - NUMBER OF EVENTS SELECTED FOR DOWNLINK (I.E. NUMBER OF EVENTS IN THE EVENT_DATA ARRAY) - - - NUMBER OF BYTES IN THE EVENT_DATA ARRAY. IF COMPRESSED, THIS VALUE REPRESENTS THE LENGTH OF THE COMPRESSED DATA. - - OPTIONALLY COMPRESSED ARRAY OF EVENT DATA + + OPTIONALLY COMPRESSED ARRAY OF EVENT DATA -FORMAT IS TBD; SOME CONSIDERATIONS/OPTIONS: -- FULL EVENTS HAVE A LOT OF REDUNDANT DATA (E.G. WILL HAVE MANY EVENTS WITH THE SAME PRIORITY/SPIN/SPIN PHASE INFORMATION). HOW WELL DOES COMPRESSION TO DEAL WITH THE REDUNDANCY? -- COULD INCLUDE MINI-HEADERS FOR EACH (PRIORITY,SPIN, SPIN-PHASE) GROUP AND STRIP THE REDUNDANT DATA FROM THE EVENTS -- SHOULD EVENTS BE TIGHTLY PACKED, OR CAN WE PAD OUT TO 64-BIT WORD BOUNDARIES? HOW WELL DOES COMPRESSION COMPENSATE FOR THE EXTRA BITS? + FORMAT IS TBD; SOME CONSIDERATIONS/OPTIONS: + - FULL EVENTS HAVE A LOT OF REDUNDANT DATA (E.G. WILL HAVE MANY EVENTS WITH THE SAME PRIORITY/SPIN/SPIN PHASE INFORMATION). HOW WELL DOES COMPRESSION TO DEAL WITH THE REDUNDANCY? + - COULD INCLUDE MINI-HEADERS FOR EACH (PRIORITY,SPIN, SPIN-PHASE) GROUP AND STRIP THE REDUNDANT DATA FROM THE EVENTS + - SHOULD EVENTS BE TIGHTLY PACKED, OR CAN WE PAD OUT TO 64-BIT WORD BOUNDARIES? HOW WELL DOES COMPRESSION COMPENSATE FOR THE EXTRA BITS? -EACH EVENT CONSISTS OF: -- 10-BIT TOF -- 9-BIT SSD ENERGY -- 2-BIT ENERGY RANGE -- 7-BIT SPIN ANGLE -- 4-BIT SSD POSITION -- 4-BIT SPIN NUMBER -- 2-BIT PHA TYPE + EACH EVENT CONSISTS OF: + - 10-BIT TOF + - 9-BIT SSD ENERGY + - 2-BIT ENERGY RANGE + - 7-BIT SPIN ANGLE + - 4-BIT SSD POSITION + - 4-BIT SPIN NUMBER + - 2-BIT PHA TYPE -TBD: EVENTS MAY BE TIGHTLY PACKED, OR MAY HAVE SPARES ADDED TO KEEP EACH EVENT BYTE-ALIGNED. IN EITHER CASE, THERE MAY BE UP TO 1 BYTE OF PADDING TO KEEP THE TOTAL SIZE OF THE PACKET EVEN. + TBD: EVENTS MAY BE TIGHTLY PACKED, OR MAY HAVE SPARES ADDED TO KEEP EACH EVENT BYTE-ALIGNED. IN EITHER CASE, THERE MAY BE UP TO 1 BYTE OF PADDING TO KEEP THE TOTAL SIZE OF THE PACKET EVEN. 
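Reviewer note: the Hi PHA events above are narrower (10+9+2+7+4+4+2 = 38 bits) and, per the description, optionally Rice-compressed rather than LZMA (Lo's LZMA blob could be inflated with the stdlib `lzma` module; Rice has no stdlib codec). Under the same tightly-packed, MSB-first assumption as the Lo sketch, only the field table changes:

```python
# Hi PHA field widths from the description above; reuse the same
# MSB-first unpack loop as the Lo sketch. Packing is TBD, so this is
# illustrative only.
HI_EVENT_FIELDS = [
    ("tof", 10),
    ("ssd_energy", 9),
    ("energy_range", 2),
    ("spin_angle", 7),
    ("ssd_position", 4),
    ("spin_number", 4),
    ("pha_type", 2),
]
assert sum(width for _, width in HI_EVENT_FIELDS) == 38
```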
+ PACKET CHECKSUM @@ -4491,6 +4383,7 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE + @@ -4546,11 +4439,11 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE - - - - - + + + + + @@ -4565,7 +4458,7 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE - + @@ -4632,18 +4525,6 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE - - - - - - - - - - - - @@ -4908,18 +4789,6 @@ WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE - - - - - - - - - - - - diff --git a/imap_processing/codice/utils.py b/imap_processing/codice/utils.py index 581044f1e8..1727268529 100644 --- a/imap_processing/codice/utils.py +++ b/imap_processing/codice/utils.py @@ -360,7 +360,9 @@ def get_codice_epoch_time( return center_times_seconds, delta_times -def calculate_acq_time_per_step(low_stepping_tab: dict) -> np.ndarray: +def calculate_acq_time_per_step( + low_stepping_tab: dict, esa_step_dim: int = 128 +) -> np.ndarray: """ Calculate acquisition time per step from low stepping table. @@ -368,12 +370,24 @@ def calculate_acq_time_per_step(low_stepping_tab: dict) -> np.ndarray: ---------- low_stepping_tab : dict The low stepping table from the SCI-LUT JSON. + esa_step_dim : int + The ESA step dimension size. Returns ------- np.ndarray Array of acquisition times per step of shape (num_esa_steps,). """ + # TODO: Handle time-varying num_steps_data length + # The num_steps_data length can change over time (e.g., 6 → 3 steps) and is not + # constant. E.g. at a day where the LUT changes we need to handle that. Update the + # computation to: + # Use the actual length of num_steps_data at each point in time instead of + # assuming a constant value + # - Make the calculation time-varying with epoch dependency + # - Ensure values are divided by their corresponding epoch in L1B processing + # - These tunable values are used to calculate acquisition time per step + # These tunable values are used to calculate acquisition time per step tunable_values = low_stepping_tab["tunable_values"] @@ -397,10 +411,11 @@ def calculate_acq_time_per_step(low_stepping_tab: dict) -> np.ndarray: hv_settle_per_step = np.minimum( np.maximum(non_adjusted_hv_settle_per_step, min_hv_settle_ms), max_hv_settle_ms ) - + # initialize array of nans for acquisition time per step + acq_time_per_step = np.full(esa_step_dim, np.nan, dtype=np.float64) # acquisition time per step in milliseconds # sector_time - sector_margin_ms / num_steps - hv_settle_per_step - acq_time_per_step = ( + acq_time_per_step[: len(num_steps_data)] = ( (sector_time - sector_margin_ms) / num_steps_data ) - hv_settle_per_step # Convert to seconds diff --git a/imap_processing/tests/codice/conftest.py b/imap_processing/tests/codice/conftest.py index e4ca7d5abe..f49c29857f 100644 --- a/imap_processing/tests/codice/conftest.py +++ b/imap_processing/tests/codice/conftest.py @@ -9,7 +9,7 @@ TEST_L0_FILE = TEST_DATA_L0_PATH / "imap_codice_l0_raw_20241110_v001.pkts" VALIDATION_FILE_DATE = "20250814" -VALIDATION_FILE_VERSION = "v013" +VALIDATION_FILE_VERSION = "v015" @pytest.fixture(scope="session") diff --git a/imap_processing/tests/codice/test_codice_l1a.py b/imap_processing/tests/codice/test_codice_l1a.py index 03bc6fc436..e96096d912 100644 --- a/imap_processing/tests/codice/test_codice_l1a.py +++ b/imap_processing/tests/codice/test_codice_l1a.py @@ -654,6 +654,7 @@ def test_hi_priority(mock_get_file_paths, codice_lut_path): ) 
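Reviewer note: the `calculate_acq_time_per_step` change above pre-fills a 128-entry array with NaN so a stepping table that defines fewer steps leaves the trailing ESA steps as fill. A simplified sketch of that behavior; the tunable values are invented, and the real function also clamps the HV settle time between per-table min/max values:

```python
import numpy as np

def acq_time_sketch(num_steps_data, sector_time=250.0, sector_margin_ms=2.0,
                    hv_settle_ms=1.0, esa_step_dim=128):
    """NaN-pad acquisition times out to the full ESA step dimension."""
    num_steps_data = np.asarray(num_steps_data, dtype=np.float64)
    acq = np.full(esa_step_dim, np.nan, dtype=np.float64)
    # (sector_time - margin) / num_steps - settle, converted ms -> s
    acq[: len(num_steps_data)] = (
        (sector_time - sector_margin_ms) / num_steps_data - hv_settle_ms
    ) / 1000.0
    return acq

out = acq_time_sketch([1, 1, 2, 2, 4, 4])  # LUT shorter than 128 steps
print(out[:6], np.isnan(out[6:]).all())    # six values, remainder NaN
```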
+@pytest.mark.xfail(reason="Known issue with L1a Lo Direct Event processing")
 @patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths")
 def test_lo_direct_events(mock_get_file_paths, codice_lut_path):
     """Tests lo-direct-events."""
@@ -704,6 +705,7 @@
     )


+@pytest.mark.xfail(reason="Known issue with L1a Hi Direct Event processing")
 @patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths")
 def test_hi_direct_events(mock_get_file_paths, codice_lut_path):
     """Tests hi-direct-events."""
diff --git a/imap_processing/tests/codice/test_codice_l2.py b/imap_processing/tests/codice/test_codice_l2.py
index 01ae6227ec..becc34cb39 100644
--- a/imap_processing/tests/codice/test_codice_l2.py
+++ b/imap_processing/tests/codice/test_codice_l2.py
@@ -524,6 +524,7 @@ def test_codice_l2_sw_angular_intensity(mock_get_file_paths, codice_lut_path):
     write_cdf(processed_2_ds)


+@pytest.mark.xfail(reason="Known issue with L1a Lo Direct Event processing")
 @patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths")
 def test_codice_l2_lo_de(mock_get_file_paths, codice_lut_path):
     mock_get_file_paths.side_effect = [
@@ -586,6 +587,7 @@ def test_codice_l2_lo_de(mock_get_file_paths, codice_lut_path):
         load_cdf(file)


+@pytest.mark.xfail(reason="Known issue with L1a Hi Direct Event processing")
 @patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths")
 def test_codice_l2_hi_de(mock_get_file_paths, codice_lut_path):
     mock_get_file_paths.side_effect = [
diff --git a/imap_processing/tests/ialirt/unit/test_process_codice.py b/imap_processing/tests/ialirt/unit/test_process_codice.py
index 476376475b..2217614312 100644
--- a/imap_processing/tests/ialirt/unit/test_process_codice.py
+++ b/imap_processing/tests/ialirt/unit/test_process_codice.py
@@ -195,9 +195,9 @@ def make_codice_lo_ialirt_dataset(cod_lo_l1a_test_data, descriptor):
         "k_factor": ("dim0", cod_lo_l1a_test_data["k_factor"].data),
         "voltage_table": ("esa_step", cod_lo_l1a_test_data["voltage_table"].data),
         "data_quality": ("epoch", cod_lo_l1a_test_data["data_quality"].data),
-        "acquisition_time_per_step": (
+        "acquisition_time_per_esa_step": (
             "esa_step",
-            cod_lo_l1a_test_data["acquisition_time_per_step"].data,
+            cod_lo_l1a_test_data["acquisition_time_per_esa_step"].data,
         ),
         "epoch_delta_minus": ("epoch", cod_lo_l1a_test_data["epoch_delta_minus"].data),
         "epoch_delta_plus": ("epoch", cod_lo_l1a_test_data["epoch_delta_plus"].data),
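Reviewer note: the pattern repeated across the five Lo products in this diff (lo-angular, lo-counters-aggregated, lo-counters-singles, lo-priority, lo-species) is: tile the per-LUT `half_spin_per_esa_step` and acquisition-time arrays across epochs, then NaN out every ESA step whose half-spin number exceeds that epoch's `nso_half_spin`. A consolidated sketch, mirroring the four-dimensional lo-priority/lo-species case; the shapes and LUT contents are invented, while the variable names and broadcasting match the diff:

```python
import numpy as np

n_epoch, n_esa, n_spin_sector = 2, 128, 24
# Illustrative LUT: two ESA steps per half spin -> [0, 0, 1, 1, ..., 63, 63]
half_spin_per_esa_step = np.repeat(np.arange(64), 2)
# Tile the same LUT row for every epoch (epoch-dependent LUTs are a TODO)
half_spin_per_esa_step = np.tile(half_spin_per_esa_step, (n_epoch, 1))

nso_half_spin = np.array([63, 10])  # per-packet NSO cutoff from telemetry
nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]

# (epoch, species, esa_step, spin_sector) counts, converted to float so
# NaN can serve as the fill value
species_data = np.ones((n_epoch, 3, n_esa, n_spin_sector), dtype=np.float64)
species_mask = np.broadcast_to(
    nso_mask[:, np.newaxis, :, np.newaxis], species_data.shape
)
species_data[species_mask] = np.nan  # blank out steps past the NSO cutoff

# Epoch 0 scanned all 64 half spins -> nothing masked; epoch 1 stopped at
# half spin 10 -> ESA steps 22..127 (106 steps) are NaN.
print(np.isnan(species_data[0]).any(), np.isnan(species_data[1, 0, :, 0]).sum())
```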