@@ -4125,6 +4125,72 @@ def test_df_to_latex(scalars_df_index, scalars_pandas_df_index):
41254125 assert bf_result == pd_result
41264126
41274127
def test_df_to_json_local_str(scalars_df_index, scalars_pandas_df_index):
    """BigFrames ``to_json()`` string output matches pandas' output."""
    # pandas needs default_handler=str for arrow-backed dtypes that have
    # no built-in JSON conversion; BigFrames handles those itself.
    pd_result = scalars_pandas_df_index.to_json(default_handler=str)
    bf_result = scalars_df_index.to_json()

    assert bf_result == pd_result
4135+
@skip_legacy_pandas
def test_df_to_json_local_file(scalars_df_index, scalars_pandas_df_index):
    """Writing JSON to a file object matches pandas' output byte-for-byte."""
    with tempfile.TemporaryFile() as bf_result_file, tempfile.TemporaryFile() as pd_result_file:
        scalars_df_index.to_json(bf_result_file, orient="table")
        # default_handler=str is needed for arrow types that have no
        # default JSON conversion in pandas.
        scalars_pandas_df_index.to_json(
            pd_result_file, orient="table", default_handler=str
        )

        # Rewind before reading: the writes leave the file position at EOF,
        # so without seek(0) both reads return b"" and the assert below
        # passes vacuously.
        bf_result_file.seek(0)
        pd_result_file.seek(0)
        bf_result = bf_result_file.read()
        pd_result = pd_result_file.read()

        assert bf_result == pd_result
4150+
def test_df_to_csv_local_str(scalars_df_index, scalars_pandas_df_index):
    """BigFrames ``to_csv()`` string output matches pandas' output.

    Note: the original carried a copy-pasted comment about
    ``default_handler`` (a ``to_json``-only parameter) that did not apply
    here; ``to_csv`` is called with defaults on both sides.
    """
    bf_result = scalars_df_index.to_csv()
    pd_result = scalars_pandas_df_index.to_csv()

    assert bf_result == pd_result
4157+
4158+
def test_df_to_csv_local_file(scalars_df_index, scalars_pandas_df_index):
    """Writing CSV to a file object matches pandas' output byte-for-byte."""
    with tempfile.TemporaryFile() as bf_result_file, tempfile.TemporaryFile() as pd_result_file:
        scalars_df_index.to_csv(bf_result_file)
        scalars_pandas_df_index.to_csv(pd_result_file)

        # Rewind before reading: the writes leave the file position at EOF,
        # so without seek(0) both reads return b"" and the assert below
        # passes vacuously.
        bf_result_file.seek(0)
        pd_result_file.seek(0)
        bf_result = bf_result_file.read()
        pd_result = pd_result_file.read()

        assert bf_result == pd_result
4168+
4169+
def test_df_to_parquet_local_bytes(scalars_df_index, scalars_pandas_df_index):
    """BigFrames ``to_parquet()`` bytes output matches pandas' output.

    Note: the original carried a copy-pasted comment about
    ``default_handler`` (a ``to_json``-only parameter) that did not apply
    here; ``to_parquet`` is called with defaults on both sides.
    """
    # GEOGRAPHY not supported in parquet export.
    unsupported = ["geography_col"]

    bf_result = scalars_df_index.drop(columns=unsupported).to_parquet()
    pd_result = scalars_pandas_df_index.drop(columns=unsupported).to_parquet()

    assert bf_result == pd_result
4179+
4180+
def test_df_to_parquet_local_file(scalars_df_index, scalars_pandas_df_index):
    """Writing parquet to a file object matches pandas' output byte-for-byte."""
    # GEOGRAPHY not supported in parquet export.
    unsupported = ["geography_col"]
    with tempfile.TemporaryFile() as bf_result_file, tempfile.TemporaryFile() as pd_result_file:
        scalars_df_index.drop(columns=unsupported).to_parquet(bf_result_file)
        scalars_pandas_df_index.drop(columns=unsupported).to_parquet(pd_result_file)

        # Rewind before reading: the writes leave the file position at EOF,
        # so without seek(0) both reads return b"" and the assert below
        # passes vacuously.
        bf_result_file.seek(0)
        pd_result_file.seek(0)
        bf_result = bf_result_file.read()
        pd_result = pd_result_file.read()

        assert bf_result == pd_result
4193+
41284194def test_df_to_records (scalars_df_index , scalars_pandas_df_index ):
41294195 unsupported = ["numeric_col" ]
41304196 bf_result = scalars_df_index .drop (columns = unsupported ).to_records ()
@@ -4166,7 +4232,7 @@ def test_df_to_pickle(scalars_df_index, scalars_pandas_df_index):
41664232 scalars_df_index .to_pickle (bf_result_file )
41674233 scalars_pandas_df_index .to_pickle (pd_result_file )
41684234 bf_result = bf_result_file .read ()
4169- pd_result = bf_result_file .read ()
4235+ pd_result = pd_result_file .read ()
41704236
41714237 assert bf_result == pd_result
41724238
0 commit comments