Skip to content

Commit f7b3c48

Browse files
committed
init
1 parent 629420e commit f7b3c48

File tree

3 files changed: 18 additions (+18), 9 deletions (-9)

CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1348,3 +1348,4 @@ if (LLAMA_BUILD_EXAMPLES)
     add_subdirectory(examples)
     add_subdirectory(pocs)
 endif()
+add_subdirectory(../ext_server ext_server) # ollama

examples/minicpmv/clip.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
 // I'll gradually clean and extend it
 // Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to pytorch
 #include "clip.h"
+#include "common.h"
 #include "log.h"
 #include "ggml.h"
 #include "ggml-alloc.h"

llama.cpp

Lines changed: 16 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -15380,15 +15380,22 @@ struct llama_model * llama_load_model_from_file(
         model->rpc_servers.push_back(servers);
     }
     int status = llama_model_load(path_model, *model, params);
-    GGML_ASSERT(status <= 0);
-    if (status < 0) {
-        if (status == -1) {
-            LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
-        } else if (status == -2) {
-            LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
-        }
-        delete model;
-        return nullptr;
+    try {
+        int status = llama_model_load(path_model, *model, params);
+        GGML_ASSERT(status <= 0);
+        if (status < 0) {
+            if (status == -1) {
+                LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
+            } else if (status == -2) {
+                LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+            }
+            delete model;
+            return nullptr;
+        }
+    } catch (...) {
+        LLAMA_LOG_ERROR("%s: exception loading model\n", __func__);
+        delete model;
+        throw;
     }

     return model;

0 commit comments

Comments
 (0)