
Commit 576e821

chore(deps): bump llama.cpp to 'df36bce667bf14f8e538645547754386f9516326' (#6062)

Signed-off-by: Ettore Di Giacinto <[email protected]>

1 parent 7293f26 commit 576e821

File tree

2 files changed: +4 / -4 lines

backend/cpp/llama-cpp/Makefile

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 
-LLAMA_VERSION?=29c8fbe4e05fd23c44950d0958299e25fbeabc5c
+LLAMA_VERSION?=df36bce667bf14f8e538645547754386f9516326
 LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
 
 CMAKE_ARGS?=
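
Because LLAMA_VERSION and LLAMA_REPO are assigned with ?=, the pinned commit is only a default: a value already set in the environment or passed on the make command line takes precedence. A minimal usage sketch, not part of this commit; the invocation below is illustrative, reusing the hash and repository URL pinned above:

    # Build the backend against an explicitly chosen llama.cpp commit without
    # editing the Makefile. ?= in GNU Make keeps an existing value, and
    # command-line variable assignments always override it.
    make LLAMA_VERSION=df36bce667bf14f8e538645547754386f9516326 \
         LLAMA_REPO=https://github.com/ggerganov/llama.cpp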

backend/cpp/llama-cpp/grpc-server.cpp

Lines changed: 3 additions & 3 deletions
@@ -53,9 +53,9 @@ static void start_llama_server(server_context& ctx_server) {
     LOG_INF("%s: model loaded\n", __func__);
 
     // print sample chat example to make it clear which template is used
-    LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
-        common_chat_templates_source(ctx_server.chat_templates.get()),
-        common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str());
+    // LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
+    //     common_chat_templates_source(ctx_server.chat_templates.get()),
+    //     common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str(), ctx_server.params_base.default_template_kwargs);
 
     // Reset the chat templates
     // TODO: We should make this configurable by respecting the option that is already present in LocalAI for vLLM
