#include "testing.h"
#include "visp/ml.h"

#include <array>
#include <cstdint>
#include <numeric>
5+
6+ namespace visp {
7+
8+ VISP_TEST (model_transfer_type_conversion) {
9+ model_weights src = model_init (2 );
10+
11+ tensor i = ggml_new_tensor_1d (src, GGML_TYPE_I32, 2 );
12+ ggml_set_name (i, " i32_tensor" );
13+ auto i32_data = std::array{4 , -1 };
14+ i->data = i32_data.data ();
15+
16+ tensor f = ggml_new_tensor_1d (src, GGML_TYPE_F16, 2 );
17+ ggml_set_name (f, " f16_tensor" );
18+ auto f16_data = std::array{ggml_fp32_to_fp16 (2 .5f ), ggml_fp32_to_fp16 (-0 .5f )};
19+ f->data = f16_data.data ();
20+
21+ backend_device dev = backend_init (backend_type::cpu);
22+ model_weights dst = model_init (2 );
23+ model_transfer (src, dst, dev, GGML_TYPE_F32); // f16 -> f32 conversion
24+
25+ int32_t const * i32_result = (int32_t const *)ggml_get_tensor (dst, " i32_tensor" )->data ;
26+ CHECK_EQUAL (i32_result[0 ], 4 );
27+ CHECK_EQUAL (i32_result[1 ], -1 );
28+
29+ tensor f_result = ggml_get_tensor (dst, " f16_tensor" );
30+ CHECK (f_result->type == GGML_TYPE_F32);
31+ float const * f32_result = (float const *)f_result->data ;
32+ CHECK_EQUAL (f32_result[0 ], 2 .5f );
33+ CHECK_EQUAL (f32_result[1 ], -0 .5f );
34+ }
35+
36+ VISP_TEST (model_transfer_layout_conversion) {
37+ model_weights src = model_init (3 );
38+
39+ tensor conv_dw = ggml_new_tensor_4d (src, GGML_TYPE_F32, 2 , 2 , 1 , 3 ); // wh1c
40+ ggml_set_name (conv_dw, " conv_dw" );
41+ auto conv_dw_data = std::array<float , 2 * 2 * 1 * 3 >{};
42+ std::iota (conv_dw_data.begin (), conv_dw_data.end (), 1 .0f );
43+ conv_dw->data = conv_dw_data.data ();
44+
45+ tensor conv = ggml_new_tensor_4d (src, GGML_TYPE_F32, 2 , 2 , 4 , 3 ); // whco
46+ ggml_set_name (conv, " conv" );
47+ auto conv_data = std::array<float , 2 * 2 * 3 * 4 >{};
48+ std::iota (conv_data.begin (), conv_data.end (), 1 .0f );
49+ conv->data = conv_data.data ();
50+
51+ tensor no_conv = ggml_new_tensor_1d (src, GGML_TYPE_F32, 2 );
52+ ggml_set_name (no_conv, " no_conv" );
53+ auto no_conv_data = std::array<float , 2 >{1 .0f , 2 .0f };
54+ no_conv->data = no_conv_data.data ();
55+
56+ auto conv_weights = std::array{0 , 1 };
57+ auto src_layout = tensor_data_layout::whcn;
58+ auto dst_layout = tensor_data_layout::cwhn;
59+
60+ backend_device dev = backend_init (backend_type::cpu);
61+ model_weights dst = model_init (3 );
62+ model_transfer (src, dst, dev, GGML_TYPE_COUNT, src_layout, dst_layout, conv_weights);
63+
64+ auto conv_dw_expected = std::array{
65+ 1 .0f , 5 .0f , 9 .0f , //
66+ 2 .0f , 6 .0f , 10 .0f , //
67+ 3 .0f , 7 .0f , 11 .0f , //
68+ 4 .0f , 8 .0f , 12 .0f //
69+ };
70+ float const * conv_dw_result = (float const *)ggml_get_tensor (dst, " conv_dw" )->data ;
71+ for (int i = 0 ; i < int (conv_dw_expected.size ()); ++i) {
72+ CHECK_EQUAL (conv_dw_result[i], conv_dw_expected[i]);
73+ }
74+
75+ auto conv_expected = std::array{
76+ 1 .0f , 5 .0f , 9 .0f , 13 .0f , 2 .0f , 6 .0f , 10 .0f , 14 .0f , //
77+ 3 .0f , 7 .0f , 11 .0f , 15 .0f , 4 .0f , 8 .0f , 12 .0f , 16 .0f , //
78+
79+ 17 .0f , 21 .0f , 25 .0f , 29 .0f , 18 .0f , 22 .0f , 26 .0f , 30 .0f , //
80+ 19 .0f , 23 .0f , 27 .0f , 31 .0f , 20 .0f , 24 .0f , 28 .0f , 32 .0f , //
81+
82+ 33 .0f , 37 .0f , 41 .0f , 45 .0f , 34 .0f , 38 .0f , 42 .0f , 46 .0f , //
83+ 35 .0f , 39 .0f , 43 .0f , 47 .0f , 36 .0f , 40 .0f , 44 .0f , 48 .0f //
84+ };
85+ float const * conv_result = (float const *)ggml_get_tensor (dst, " conv" )->data ;
86+ for (int i = 0 ; i < int (conv_expected.size ()); ++i) {
87+ CHECK_EQUAL (conv_result[i], conv_expected[i]);
88+ }
89+
90+ float const * no_conv_result = (float const *)ggml_get_tensor (dst, " no_conv" )->data ;
91+ CHECK_EQUAL (no_conv_result[0 ], 1 .0f );
92+ CHECK_EQUAL (no_conv_result[1 ], 2 .0f );
93+ }
94+
95+ } // namespace visp