
Commit 14da17a

fix: initialize some pointers to NULL
1 parent 78ad76f commit 14da17a

1 file changed (+14 −14 lines)

stable-diffusion.cpp

Lines changed: 14 additions & 14 deletions
@@ -1360,9 +1360,9 @@ struct FrozenCLIPEmbedderWithCustomWords {
     CLIPTextModel text_model2;
 
     // context and memory buffers
-    struct ggml_context* ctx;
-    ggml_backend_buffer_t params_buffer;
-    ggml_backend_buffer_t compute_buffer; // for compute
+    struct ggml_context* ctx = NULL;
+    ggml_backend_buffer_t params_buffer = NULL;
+    ggml_backend_buffer_t compute_buffer = NULL; // for compute
     struct ggml_allocr* compute_alloc = NULL;
     size_t compute_memory_buffer_size = -1;
 
@@ -3512,9 +3512,9 @@ struct AutoEncoderKL {
     Encoder encoder;
     Decoder decoder;
 
-    struct ggml_context* ctx;
-    ggml_backend_buffer_t params_buffer;
-    ggml_backend_buffer_t compute_buffer; // for compute
+    struct ggml_context* ctx = NULL;
+    ggml_backend_buffer_t params_buffer = NULL;
+    ggml_backend_buffer_t compute_buffer = NULL; // for compute
     struct ggml_allocr* compute_alloc = NULL;
 
     int memory_buffer_size = 0;
@@ -4182,10 +4182,10 @@ struct TinyAutoEncoder {
     TinyEncoder encoder;
     TinyDecoder decoder;
 
-    ggml_context* ctx;
+    ggml_context* ctx = NULL;
     bool decode_only = false;
-    ggml_backend_buffer_t params_buffer;
-    ggml_backend_buffer_t compute_buffer; // for compute
+    ggml_backend_buffer_t params_buffer = NULL;
+    ggml_backend_buffer_t compute_buffer = NULL; // for compute
     struct ggml_allocr* compute_alloc = NULL;
 
     int memory_buffer_size = 0;
@@ -4621,10 +4621,10 @@ struct ESRGAN {
     ggml_tensor* conv_last_w; // [out_channels, num_features, 3, 3]
     ggml_tensor* conv_last_b; // [out_channels]
 
-    ggml_context* ctx;
+    ggml_context* ctx = NULL;
     bool decode_only = false;
-    ggml_backend_buffer_t params_buffer;
-    ggml_backend_buffer_t compute_buffer; // for compute
+    ggml_backend_buffer_t params_buffer = NULL;
+    ggml_backend_buffer_t compute_buffer = NULL; // for compute
     struct ggml_allocr* compute_alloc = NULL;
 
     int memory_buffer_size = 0;
@@ -4965,8 +4965,8 @@ struct LoraModel {
     float multiplier = 1.0f;
     std::map<std::string, struct ggml_tensor*> lora_tensors;
 
-    struct ggml_context* ctx;
-    ggml_backend_buffer_t params_buffer_lora;
+    struct ggml_context* ctx = NULL;
+    ggml_backend_buffer_t params_buffer_lora = NULL;
     ggml_backend_t backend = NULL;
 
     bool load(ggml_backend_t backend_, std::string file_path) {
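
Context on why the change matters: with default member initializers, these pointers are guaranteed to start as NULL even when a struct instance is constructed but never loaded, so teardown code can safely test them before freeing. Below is a minimal, self-contained sketch of that pattern; the ExampleModel struct, the placeholder type declarations, and the free_buffers() helper are illustrative only and are not code from this repository (the real ggml_free / ggml_backend_buffer_free calls are left as comments).

#include <cstddef>

// Opaque placeholder declarations standing in for the real ggml types.
struct ggml_context;
struct ggml_backend_buffer;
typedef struct ggml_backend_buffer* ggml_backend_buffer_t;

struct ExampleModel {
    // Default member initializers: the pointers are NULL even if the
    // struct is default-constructed and load() is never called.
    struct ggml_context* ctx = NULL;
    ggml_backend_buffer_t params_buffer = NULL;

    void free_buffers() {
        // Safe to call unconditionally, because the members can no
        // longer hold indeterminate (garbage) values.
        if (params_buffer != NULL) {
            // ggml_backend_buffer_free(params_buffer);  // real call in ggml
            params_buffer = NULL;
        }
        if (ctx != NULL) {
            // ggml_free(ctx);  // real call in ggml
            ctx = NULL;
        }
    }
};

Without the initializers, a default-constructed instance would leave ctx and params_buffer indeterminate, and any NULL check in a cleanup path would be unreliable.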
