@@ -1820,6 +1820,7 @@ def test_read_parquet_gcs(
     df_out = df_out.assign(
         datetime_col=df_out["datetime_col"].astype("timestamp[us][pyarrow]"),
         timestamp_col=df_out["timestamp_col"].astype("timestamp[us, tz=UTC][pyarrow]"),
+        duration_col=df_out["duration_col"].astype("duration[us][pyarrow]"),
     )
 
     # Make sure we actually have at least some values before comparing.
@@ -1868,7 +1869,8 @@ def test_read_parquet_gcs_compressed(
     # DATETIME gets loaded as TIMESTAMP in parquet. See:
     # https://cloud.google.com/bigquery/docs/exporting-data#parquet_export_details
     df_out = df_out.assign(
-        datetime_col=df_out["datetime_col"].astype("timestamp[us][pyarrow]")
+        datetime_col=df_out["datetime_col"].astype("timestamp[us][pyarrow]"),
+        duration_col=df_out["duration_col"].astype("duration[us][pyarrow]"),
     )
 
     # Make sure we actually have at least some values before comparing.
@@ -1926,9 +1928,23 @@ def test_read_json_gcs_bq_engine(session, scalars_dfs, gcs_folder):
 
     # The auto detects of BigQuery load job have restrictions to detect the bytes,
     # datetime, numeric and geometry types, so they're skipped here.
-    df = df.drop(columns=["bytes_col", "datetime_col", "numeric_col", "geography_col"])
+    df = df.drop(
+        columns=[
+            "bytes_col",
+            "datetime_col",
+            "numeric_col",
+            "geography_col",
+            "duration_col",
+        ]
+    )
     scalars_df = scalars_df.drop(
-        columns=["bytes_col", "datetime_col", "numeric_col", "geography_col"]
+        columns=[
+            "bytes_col",
+            "datetime_col",
+            "numeric_col",
+            "geography_col",
+            "duration_col",
+        ]
     )
     assert df.shape[0] == scalars_df.shape[0]
     pd.testing.assert_series_equal(
@@ -1962,11 +1978,15 @@ def test_read_json_gcs_default_engine(session, scalars_dfs, gcs_folder):
     # The auto detects of BigQuery load job have restrictions to detect the bytes,
     # numeric and geometry types, so they're skipped here.
     df = df.drop(columns=["bytes_col", "numeric_col", "geography_col"])
-    scalars_df = scalars_df.drop(columns=["bytes_col", "numeric_col", "geography_col"])
+    scalars_df = scalars_df.drop(
+        columns=["bytes_col", "numeric_col", "geography_col", "duration_col"]
+    )
 
     # pandas read_json does not respect the dtype overrides for these columns
     df = df.drop(columns=["date_col", "datetime_col", "time_col"])
-    scalars_df = scalars_df.drop(columns=["date_col", "datetime_col", "time_col"])
+    scalars_df = scalars_df.drop(
+        columns=["date_col", "datetime_col", "time_col", "duration_col"]
+    )
 
     assert df.shape[0] == scalars_df.shape[0]
     pd.testing.assert_series_equal(df.dtypes, scalars_df.dtypes)
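
The changes above hinge on pandas recognizing the Arrow-backed `duration[us][pyarrow]` dtype string used in the new `astype` calls. A minimal standalone sketch of that cast, assuming pandas >= 2.0 with pyarrow installed (the DataFrame and column names here are hypothetical, not taken from the PR):

```python
import pandas as pd

# Hypothetical example data; any timedelta-like column works.
df = pd.DataFrame({"duration_col": pd.to_timedelta([1, 2, 3], unit="s")})

# Cast to the Arrow-backed duration dtype that the tests above compare against.
df = df.assign(duration_col=df["duration_col"].astype("duration[us][pyarrow]"))

print(df.dtypes)  # duration_col    duration[us][pyarrow]
```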