miqu-control-vectors/patches/llama/llama-repeng.diff
Parent: a56d09a4407f29c21e149b44fd5308f83aa1cb09
Author: Anon <anon>
Date: Tue Mar 19 02:46:47 2024 +0000
repeng: implement batching
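
The repeng tool builds with `make repeng` and takes the usual common
options (-m for the model, -f for the prompt file, plus the -c/-b/-ub
sizes referenced in the batch check below). It tokenizes the prompt
file, splits it at the hard-coded delimiter token id 13 (the LLaMA
newline token), evaluates the prompts in batches, and writes the
last-token hidden state of every layer for every prompt to
control_vector_data.gguf, one "l_out-<layer>" tensor of shape
n_embd x num_prompts per layer. The following is a minimal,
hypothetical sketch (not part of this patch) of reading that file back
with the stock gguf/ggml API; the output filename and tensor names come
from the code below, everything else is illustrative.

    // read_repeng_output.cpp (hypothetical helper, not in this patch)
    #include "ggml.h"
    #include <cstdio>

    int main() {
        struct ggml_context * data_ctx = NULL;
        struct gguf_init_params gparams = { /* no_alloc = */ false, /* ctx = */ &data_ctx };
        struct gguf_context * gctx = gguf_init_from_file("control_vector_data.gguf", gparams);
        if (!gctx) { fprintf(stderr, "failed to open control_vector_data.gguf\n"); return 1; }

        // One tensor per layer, named "l_out-<layer>", shaped [n_embd, num_prompts].
        for (int i = 0; i < gguf_get_n_tensors(gctx); ++i) {
            const char * name = gguf_get_tensor_name(gctx, i);
            struct ggml_tensor * t = ggml_get_tensor(data_ctx, name);
            printf("%s: n_embd=%lld num_prompts=%lld\n", name,
                   (long long) t->ne[0], (long long) t->ne[1]);
            // (float *) t->data holds num_prompts rows of n_embd floats each,
            // ready for mean-difference or PCA to derive a control vector.
        }

        gguf_free(gctx);
        ggml_free(data_ctx);
        return 0;
    }
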
diff --git a/Makefile b/Makefile
index c0f12503..d471c387 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
# Define the default target now so that it is always the first target
BUILD_TARGETS = \
- main quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
+ main repeng quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
simple batched batched-bench save-load-state server gguf llama-bench libllava.a llava-cli baby-llama beam-search \
speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey gritlm tests/test-c.o
@@ -744,6 +744,13 @@ server: examples/server/server.cpp examples/server/utils.hpp examples/server/htt
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h %.hpp $<,$^) -Iexamples/server $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) $(LWINSOCK2)
+repeng: examples/repeng/repeng.cpp ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS)
+ $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+ $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+ @echo
+ @echo '==== Run ./repeng -h for help. ===='
+ @echo
+
gguf: examples/gguf/gguf.cpp ggml.o $(OBJS)
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index e762cf8b..d46d9d17 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -46,4 +46,5 @@ else()
add_subdirectory(server)
endif()
add_subdirectory(export-lora)
+ add_subdirectory(repeng)
endif()
diff --git a/examples/repeng/CMakeLists.txt b/examples/repeng/CMakeLists.txt
new file mode 100644
index 00000000..9e20f806
--- /dev/null
+++ b/examples/repeng/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(TARGET repeng)
+add_executable(${TARGET} repeng.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
diff --git a/examples/repeng/repeng.cpp b/examples/repeng/repeng.cpp
new file mode 100644
index 00000000..5863c8be
--- /dev/null
+++ b/examples/repeng/repeng.cpp
@@ -0,0 +1,797 @@
+#include "common.h"
+
+#include "console.h"
+#include "llama.h"
+
+#include <cassert>
+#include <cinttypes>
+#include <cmath>
+#include <cstdio>
+#include <cstring>
+#include <ctime>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
+#include <signal.h>
+#include <unistd.h>
+#elif defined (_WIN32)
+#define WIN32_LEAN_AND_MEAN
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#include <windows.h>
+#include <signal.h>
+#endif
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
+static llama_context ** g_ctx;
+static llama_model ** g_model;
+static gpt_params * g_params;
+static std::vector<llama_token> * g_input_tokens;
+static std::ostringstream * g_output_ss;
+static std::vector<llama_token> * g_output_tokens;
+static bool is_interacting = false;
+
+static bool file_exists(const std::string &path) {
+ std::ifstream f(path.c_str());
+ return f.good();
+}
+
+static bool file_is_empty(const std::string &path) {
+ std::ifstream f;
+ f.exceptions(std::ifstream::failbit | std::ifstream::badbit);
+ f.open(path.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
+ return f.tellg() == 0;
+}
+
+#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
+static void sigint_handler(int signo) {
+ if (signo == SIGINT) {
+ if (!is_interacting && g_params->interactive) {
+ is_interacting = true;
+ } else {
+ console::cleanup();
+ printf("\n");
+ llama_print_timings(*g_ctx);
+ //write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);
+ _exit(130);
+ }
+ }
+}
+#endif
+
+static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
+ (void) level;
+ (void) user_data;
+ LOG_TEE("%s", text);
+}
+
+static std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params_with_cb_eval(
+ gpt_params & params,
+ ggml_backend_sched_eval_callback cb_eval,
+ void * cb_eval_user_data) {
+ auto mparams = llama_model_params_from_gpt_params(params);
+
+ llama_model * model = llama_load_model_from_file(params.model.c_str(), mparams);
+ if (model == NULL) {
+ fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
+ return std::make_tuple(nullptr, nullptr);
+ }
+
+ auto cparams = llama_context_params_from_gpt_params(params);
+
+ cparams.cb_eval = cb_eval;
+ cparams.cb_eval_user_data = cb_eval_user_data;
+
+ llama_context * lctx = llama_new_context_with_model(model, cparams);
+ if (lctx == NULL) {
+ fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, params.model.c_str());
+ llama_free_model(model);
+ return std::make_tuple(nullptr, nullptr);
+ }
+
+ if (!params.control_vectors.empty()) {
+ if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1;
+ if (params.control_vector_layer_end <= 0) params.control_vector_layer_end = llama_n_layer(model);
+
+ const auto cvec = llama_control_vector_load(params.control_vectors);
+ if (cvec.n_embd == -1) {
+ llama_free(lctx);
+ llama_free_model(model);
+ return std::make_tuple(nullptr, nullptr);
+ }
+
+ int err = llama_control_vector_apply(lctx,
+ cvec.data.data(),
+ cvec.data.size(),
+ cvec.n_embd,
+ params.control_vector_layer_start,
+ params.control_vector_layer_end);
+ if (err) {
+ llama_free(lctx);
+ llama_free_model(model);
+ return std::make_tuple(nullptr, nullptr);
+ }
+ }
+
+ for (unsigned int i = 0; i < params.lora_adapter.size(); ++i) {
+ const std::string& lora_adapter = std::get<0>(params.lora_adapter[i]);
+ float lora_scale = std::get<1>(params.lora_adapter[i]);
+ int err = llama_model_apply_lora_from_file(model,
+ lora_adapter.c_str(),
+ lora_scale,
+ ((i > 0) || params.lora_base.empty())
+ ? NULL
+ : params.lora_base.c_str(),
+ params.n_threads);
+ if (err != 0) {
+ fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
+ llama_free(lctx);
+ llama_free_model(model);
+ return std::make_tuple(nullptr, nullptr);
+ }
+ }
+
+ if (params.ignore_eos) {
+ params.sparams.logit_bias[llama_token_eos(model)] = -INFINITY;
+ }
+
+ {
+ LOG("warming up the model with an empty run\n");
+
+ std::vector<llama_token> tmp = { llama_token_bos(model), llama_token_eos(model), };
+ llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
+ llama_kv_cache_clear(lctx);
+ llama_synchronize(lctx);
+ llama_reset_timings(lctx);
+ }
+
+ return std::make_tuple(model, lctx);
+}
+
+struct eval_callback_state {
+ std::vector<ggml_tensor *> tensors;
+ int first_prompt_idx;
+ std::vector<int> extract_tokens;
+};
+
+static bool eval_callback(struct ggml_tensor * t, bool ask, void * user_data) {
+ struct eval_callback_state * eval_state = (eval_callback_state *)user_data;
+ if (ask) {
+ // Report whether we want to observe this tensor.
+ if (strncmp(t->name, "l_out-", 6) == 0) {
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ // Actually observe the tensor data.
+
+ if (eval_state->first_prompt_idx >= 0) {
+ // Find the tensor collecting hidden states for the current layer.
+ ggml_tensor * output_tensor = nullptr;
+ for (auto t2 : eval_state->tensors) {
+ if (strcmp(t2->name, t->name) == 0) {
+ output_tensor = t2;
+ break;
+ }
+ }
+
+ if (output_tensor != nullptr) {
+ int output_idx = eval_state->first_prompt_idx;
+ for (int input_idx : eval_state->extract_tokens) {
+ // Copy the hidden states for the last token into
+ size_t input_offset = t->nb[1] * input_idx;
+ size_t output_offset = output_tensor->nb[1] * output_idx;
+ assert(t->nb[0] == output_tensor->nb[0]);
+ assert(t->ne[0] == output_tensor->ne[0]);
+ ggml_backend_tensor_get(t,
+ (char *)output_tensor->data + output_offset,
+ input_offset,
+ t->nb[0] * t->ne[0]);
+ //memcpy((char *)output_tensor->data + output_offset,
+ // (char *)t->data + input_offset,
+ // t->nb[0] * t->ne[0]);
+ //std::cerr << "saved " << (t->nb[0] * t->ne[0]) << " bytes of tensor data "
+ // << " for " << t->name << " in slot " << output_idx << "\n";
+
+ //float * buf = (float *)((char *)t->data + input_offset);
+ //float * buf = (float *)((char *)output_tensor->data + output_offset);
+ //std::cerr << "prompt " << output_idx
+ // << " tensor contents for " << t->name << ": "
+ // << buf[0] << ", "
+ // << buf[1] << ", "
+ // << buf[2] << " ... "
+ // << buf[4093] << ", "
+ // << buf[4094] << ", "
+ // << buf[4095] << "\n";
+
+ ++output_idx;
+ }
+ }
+ }
+
+ // Continue running
+ return true;
+ }
+}
+
+int main(int argc, char ** argv) {
+ gpt_params params;
+ g_params = &params;
+
+ if (!gpt_params_parse(argc, argv, params)) {
+ return 1;
+ }
+ llama_sampling_params & sparams = params.sparams;
+
+#ifndef LOG_DISABLE_LOGS
+ log_set_target(log_filename_generator("main", "log"));
+ LOG_TEE("Log start\n");
+ log_dump_cmdline(argc, argv);
+ llama_log_set(llama_log_callback_logTee, nullptr);
+#endif // LOG_DISABLE_LOGS
+
+ // TODO: Dump params ?
+ //LOG("Params perplexity: %s\n", LOG_TOSTR(params.perplexity));
+
+ // save choice to use color for later
+ // (note for later: this is a slightly awkward choice)
+ console::init(params.simple_io, params.use_color);
+ atexit([]() { console::cleanup(); });
+
+ if (params.logits_all) {
+ printf("\n************\n");
+ printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
+ printf("************\n\n");
+
+ return 0;
+ }
+
+ if (params.embedding) {
+ printf("\n************\n");
+ printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
+ printf("************\n\n");
+
+ return 0;
+ }
+
+ if (params.n_ctx != 0 && params.n_ctx < 8) {
+ LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
+ params.n_ctx = 8;
+ }
+
+ if (params.rope_freq_base != 0.0) {
+ LOG_TEE("%s: warning: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
+ }
+
+ if (params.rope_freq_scale != 0.0) {
+ LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
+ }
+
+ LOG_TEE("%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
+ LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET);
+
+ if (params.seed == LLAMA_DEFAULT_SEED) {
+ params.seed = time(NULL);
+ }
+
+ LOG_TEE("%s: seed = %u\n", __func__, params.seed);
+
+ std::mt19937 rng(params.seed);
+ if (params.random_prompt) {
+ params.prompt = gpt_random_prompt(rng);
+ }
+
+ LOG("%s: llama backend init\n", __func__);
+ llama_backend_init();
+ llama_numa_init(params.numa);
+
+ llama_model * model;
+ llama_context * ctx;
+ llama_context * ctx_guidance = NULL;
+ g_model = &model;
+ g_ctx = &ctx;
+
+ ggml_context * eval_ctx = nullptr;
+ struct eval_callback_state eval_state;
+
+ // load the model and apply lora adapter, if any
+ LOG("%s: load the model and apply lora adapter, if any\n", __func__);
+ std::tie(model, ctx) = llama_init_from_gpt_params_with_cb_eval(
+ params,
+ eval_callback,
+ (void *)&eval_state);
+ /*
+ if (sparams.cfg_scale > 1.f) {
+ struct llama_context_params lparams = llama_context_params_from_gpt_params(params);
+ ctx_guidance = llama_new_context_with_model(model, lparams);
+ }
+ */
+
+ if (model == NULL) {
+ LOG_TEE("%s: error: unable to load model\n", __func__);
+ return 1;
+ }
+
+ const int n_ctx_train = llama_n_ctx_train(model);
+ const int n_ctx = llama_n_ctx(ctx);
+ LOG("n_ctx: %d\n", n_ctx);
+
+ if (n_ctx > n_ctx_train) {
+ LOG_TEE("%s: warning: model was trained on only %d context tokens (%d specified)\n",
+ __func__, n_ctx_train, n_ctx);
+ }
+
+ // print system information
+ {
+ LOG_TEE("\n");
+ LOG_TEE("%s\n", get_system_info(params).c_str());
+ }
+
+ std::string path_session = params.path_prompt_cache;
+ std::vector<llama_token> session_tokens;
+
+ if (!path_session.empty()) {
+ LOG_TEE("%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());
+ if (!file_exists(path_session)) {
+ LOG_TEE("%s: session file does not exist, will create.\n", __func__);
+ } else if (file_is_empty(path_session)) {
+ LOG_TEE("%s: The session file is empty. A new session will be initialized.\n", __func__);
+ } else {
+ // The file exists and is not empty
+ session_tokens.resize(n_ctx);
+ size_t n_token_count_out = 0;
+ if (!llama_load_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
+ LOG_TEE("%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
+ return 1;
+ }
+ session_tokens.resize(n_token_count_out);
+ llama_set_rng_seed(ctx, params.seed);
+ LOG_TEE("%s: loaded a session with prompt size of %d tokens\n", __func__, (int)session_tokens.size());
+ }
+ }
+
+ const bool add_bos = llama_should_add_bos_token(model);
+ LOG("add_bos: %d\n", add_bos);
+
+ std::vector<llama_token> embd_inp;
+
+ if (params.interactive_first || params.instruct || params.chatml || !params.prompt.empty() || session_tokens.empty()) {
+ LOG("tokenize the prompt\n");
+ if (params.chatml) {
+ params.prompt = "<|im_start|>system\n" + params.prompt + "<|im_end|>";
+ }
+ embd_inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
+ } else {
+ LOG("use session tokens\n");
+ embd_inp = session_tokens;
+ }
+
+ LOG("prompt: \"%s\"\n", log_tostr(params.prompt));
+ LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
+
+ // Should not run without any tokens
+ if (embd_inp.empty()) {
+ embd_inp.push_back(llama_token_bos(model));
+ LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
+ }
+
+ // Tokenize negative prompt
+ std::vector<llama_token> guidance_inp;
+ int guidance_offset = 0;
+ int original_prompt_len = 0;
+ /*
+ if (ctx_guidance) {
+ LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt));
+
+ guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, add_bos, true);
+ LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str());
+
+ std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
+ LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str());
+
+ original_prompt_len = original_inp.size();
+ guidance_offset = (int)guidance_inp.size() - original_prompt_len;
+ LOG("original_prompt_len: %s", log_tostr(original_prompt_len));
+ LOG("guidance_offset: %s", log_tostr(guidance_offset));
+ }
+ */
+
+ /*
+ if ((int) embd_inp.size() > n_ctx - 4) {
+ LOG_TEE("%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
+ return 1;
+ }
+ */
+
+ // debug message about similarity of saved session, if applicable
+ size_t n_matching_session_tokens = 0;
+ if (!session_tokens.empty()) {
+ for (llama_token id : session_tokens) {
+ if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
+ break;
+ }
+ n_matching_session_tokens++;
+ }
+ if (params.prompt.empty() && n_matching_session_tokens == embd_inp.size()) {
+ LOG_TEE("%s: using full prompt from session file\n", __func__);
+ } else if (n_matching_session_tokens >= embd_inp.size()) {
+ LOG_TEE("%s: session file has exact match for prompt!\n", __func__);
+ } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
+ LOG_TEE("%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
+ __func__, n_matching_session_tokens, embd_inp.size());
+ } else {
+ LOG_TEE("%s: session file matches %zu / %zu tokens of prompt\n",
+ __func__, n_matching_session_tokens, embd_inp.size());
+ }
+
+ // remove any "future" tokens that we might have inherited from the previous session
+ llama_kv_cache_seq_rm(ctx, -1, n_matching_session_tokens, -1);
+ }
+
+ LOGLN(
+ "recalculate the cached logits (check): embd_inp.empty() %s, n_matching_session_tokens %zu, embd_inp.size() %zu, session_tokens.size() %zu, embd_inp.size() %zu",
+ log_tostr(embd_inp.empty()), n_matching_session_tokens, embd_inp.size(), session_tokens.size(), embd_inp.size());
+
+ // if we will use the cache for the full prompt without reaching the end of the cache, force
+ // reevaluation of the last token to recalculate the cached logits
+ if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() && session_tokens.size() > embd_inp.size()) {
+ LOGLN("recalculate the cached logits (do): session_tokens.resize( %zu )", embd_inp.size() - 1);
+
+ session_tokens.resize(embd_inp.size() - 1);
+ }
+
+ // number of tokens to keep when resetting context
+ if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size() || params.instruct || params.chatml) {
+ params.n_keep = (int)embd_inp.size();
+ } else {
+ params.n_keep += add_bos; // always keep the BOS token
+ }
+
+ // prefix & suffix for instruct mode
+ const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", add_bos, true);
+ const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false, true);
+
+ LOG("inp_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_pfx).c_str());
+ LOG("inp_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_sfx).c_str());
+
+ // chatml prefix & suffix
+ const auto cml_pfx = ::llama_tokenize(ctx, "\n<|im_start|>user\n", add_bos, true);
+ const auto cml_sfx = ::llama_tokenize(ctx, "<|im_end|>\n<|im_start|>assistant\n", false, true);
+
+ LOG("cml_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, cml_pfx).c_str());
+ LOG("cml_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, cml_sfx).c_str());
+
+ // in instruct mode, we inject a prefix and a suffix to each input by the user
+ if (params.instruct) {
+ params.interactive_first = true;
+ params.antiprompt.emplace_back("### Instruction:\n\n");
+ }
+ // similar for chatml mode
+ else if (params.chatml) {
+ params.interactive_first = true;
+ params.antiprompt.emplace_back("<|im_start|>user\n");
+ }
+
+ // enable interactive mode if interactive start is specified
+ if (params.interactive_first) {
+ params.interactive = true;
+ }
+
+ if (params.verbose_prompt) {
+ LOG_TEE("\n");
+ LOG_TEE("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
+ LOG_TEE("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
+ for (int i = 0; i < (int) embd_inp.size(); i++) {
+ LOG_TEE("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
+ }
+
+ /*
+ if (ctx_guidance) {
+ LOG_TEE("\n");
+ LOG_TEE("%s: negative prompt: '%s'\n", __func__, sparams.cfg_negative_prompt.c_str());
+ LOG_TEE("%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
+ for (int i = 0; i < (int) guidance_inp.size(); i++) {
+ LOG_TEE("%6d -> '%s'\n", guidance_inp[i], llama_token_to_piece(ctx, guidance_inp[i]).c_str());
+ }
+ }
+ */
+
+ if (params.n_keep > add_bos) {
+ LOG_TEE("%s: static prompt based on n_keep: '", __func__);
+ for (int i = 0; i < params.n_keep; i++) {
+ LOG_TEE("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
+ }
+ LOG_TEE("'\n");
+ }
+ LOG_TEE("\n");
+ }
+
+ // ctrl+C handling
+ {
+#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
+ struct sigaction sigint_action;
+ sigint_action.sa_handler = sigint_handler;
+ sigemptyset (&sigint_action.sa_mask);
+ sigint_action.sa_flags = 0;
+ sigaction(SIGINT, &sigint_action, NULL);
+#elif defined (_WIN32)
+ auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
+ return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
+ };
+ SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
+#endif
+ }
+
+ if (params.interactive) {
+ LOG_TEE("%s: interactive mode on.\n", __func__);
+
+ if (!params.antiprompt.empty()) {
+ for (const auto & antiprompt : params.antiprompt) {
+ LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
+ if (params.verbose_prompt) {
+ auto tmp = ::llama_tokenize(ctx, antiprompt, false, true);
+ for (int i = 0; i < (int) tmp.size(); i++) {
+ LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+ }
+ }
+ }
+ }
+
+ if (params.input_prefix_bos) {
+ LOG_TEE("Input prefix with BOS\n");
+ }
+
+ if (!params.input_prefix.empty()) {
+ LOG_TEE("Input prefix: '%s'\n", params.input_prefix.c_str());
+ if (params.verbose_prompt) {
+ auto tmp = ::llama_tokenize(ctx, params.input_prefix, true, true);
+ for (int i = 0; i < (int) tmp.size(); i++) {
+ LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+ }
+ }
+ }
+
+ if (!params.input_suffix.empty()) {
+ LOG_TEE("Input suffix: '%s'\n", params.input_suffix.c_str());
+ if (params.verbose_prompt) {
+ auto tmp = ::llama_tokenize(ctx, params.input_suffix, false, true);
+ for (int i = 0; i < (int) tmp.size(); i++) {
+ LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+ }
+ }
+ }
+ }
+ LOG_TEE("sampling: \n%s\n", llama_sampling_print(sparams).c_str());
+ LOG_TEE("sampling order: \n%s\n", llama_sampling_order_print(sparams).c_str());
+ LOG_TEE("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
+
+ // group-attention state
+ // number of grouped KV tokens so far (used only if params.grp_attn_n > 1)
+ int ga_i = 0;
+
+ const int ga_n = params.grp_attn_n;
+ const int ga_w = params.grp_attn_w;
+
+ if (ga_n != 1) {
+ GGML_ASSERT(ga_n > 0 && "grp_attn_n must be positive"); // NOLINT
+ GGML_ASSERT(ga_w % ga_n == 0 && "grp_attn_w must be a multiple of grp_attn_n"); // NOLINT
+ //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of grp_attn_w"); // NOLINT
+ //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * grp_attn_n"); // NOLINT
+ LOG_TEE("self-extend: n_ctx_train = %d, grp_attn_n = %d, grp_attn_w = %d\n", n_ctx_train, ga_n, ga_w);
+ }
+ LOG_TEE("\n\n");
+
+ if (params.interactive) {
+ const char *control_message;
+ if (params.multiline_input) {
+ control_message = " - To return control to LLaMa, end your input with '\\'.\n"
+ " - To return control without starting a new line, end your input with '/'.\n";
+ } else {
+ control_message = " - Press Return to return control to LLaMa.\n"
+ " - To return control without starting a new line, end your input with '/'.\n"
+ " - If you want to submit another line, end your input with '\\'.\n";
+ }
+ LOG_TEE("== Running in interactive mode. ==\n");
+#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
+ LOG_TEE( " - Press Ctrl+C to interject at any time.\n");
+#endif
+ LOG_TEE( "%s\n", control_message);
+
+ is_interacting = params.interactive_first;
+ }
+
+ bool is_antiprompt = false;
+ bool input_echo = true;
+ bool display = true;
+ bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < embd_inp.size();
+
+ int n_past = 0;
+ int n_remain = params.n_predict;
+ unsigned n_consumed = 0;
+ int n_session_consumed = 0;
+ int n_past_guidance = 0;
+
+ std::vector<int> input_tokens; g_input_tokens = &input_tokens;
+ std::vector<int> output_tokens; g_output_tokens = &output_tokens;
+ std::ostringstream output_ss; g_output_ss = &output_ss;
+
+ // the first thing we will do is to output the prompt, so set color accordingly
+ console::set_display(console::prompt);
+ display = params.display_prompt;
+
+ std::vector<llama_token> embd;
+ std::vector<llama_token> embd_guidance;
+
+ // tokenized antiprompts
+ std::vector<std::vector<llama_token>> antiprompt_ids;
+
+ antiprompt_ids.reserve(params.antiprompt.size());
+ for (const std::string & antiprompt : params.antiprompt) {
+ antiprompt_ids.emplace_back(::llama_tokenize(ctx, antiprompt, false, true));
+ }
+
+ struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
+
+
+
+ // Tokenized prompt is in embd_inp
+
+
+ // Record prompt boundaries
+ const int PROMPT_DELIMITER_TOKEN = 13;
+
+ // Index of each delimiter token in `embd_inp`. These mark the end of each
+ // prompt.
+ std::vector<size_t> delim_idxs;
+
+ for (size_t i = 0; i < embd_inp.size(); ++i) {
+ if (embd_inp[i] == PROMPT_DELIMITER_TOKEN) {
+ delim_idxs.push_back(i);
+ }
+ }
+
+ // If the last prompt is missing an ending delimiter, add it.
+ if (embd_inp.size() > 0 && embd_inp.back() != PROMPT_DELIMITER_TOKEN) {
+ delim_idxs.push_back(embd_inp.size());
+ embd_inp.push_back(PROMPT_DELIMITER_TOKEN);
+ }
+
+ size_t num_prompts = delim_idxs.size();
+
+
+ // Set up eval_state
+ gguf_context * eval_gguf = gguf_init_empty();
+ {
+ int n_embd = llama_n_embd(model);
+ int n_layer = llama_n_layer(model);
+ std::cerr << "build eval state: " << num_prompts << " prompts, "
+ << n_embd << " embd, " << n_layer << " layers\n";
+
+ struct ggml_init_params params = {};
+ params.mem_size = ((size_t)n_embd * num_prompts * sizeof(float) + 1024) * n_layer;
+ eval_ctx = ggml_init(params);
+
+ for (int i = 0; i < n_layer; ++i) {
+ ggml_tensor * t = ggml_new_tensor_2d(eval_ctx, GGML_TYPE_F32, n_embd, num_prompts);
+ snprintf(t->name, sizeof(t->name), "l_out-%d", i);
+ eval_state.tensors.push_back(t);
+ gguf_add_tensor(eval_gguf, t);
+ }
+ eval_state.first_prompt_idx = -1;
+ }
+
+
+ size_t batch_size = 32;
+
+ // Max tokens to include in a single batch.
+ //
+ // TODO: Not sure if this calculation for the limit makes sense, but
+ // decoding seems to crash if batch_max_tokens exceeds any of these
+ // three parameters.
+ int batch_max_tokens = std::min(params.n_ctx, std::min(params.n_batch, params.n_ubatch));
+
+ // FIXME: Something is not quite right with the batching setup. The
+ // embedding / hidden state values vary slightly depending on how the batch
+ // size is set. Uncomment the "tensor contents of xxx" debug print in
+ // `eval_callback` to see the actual numbers. A batch size of 1 produces
+ // the same results as the original, unbatched version of this code, but
+ // higher batch sizes produce different values.
+
+ for (size_t batch_start = 0; batch_start < num_prompts; batch_start += batch_size) {
+ std::cerr << "start batch " << batch_start << "\n";
+ eval_state.first_prompt_idx = batch_start;
+ eval_state.extract_tokens.clear();
+
+ size_t max_i = batch_start + std::min(batch_size, num_prompts - batch_start);
+
+ struct llama_batch batch = llama_batch_init(batch_max_tokens, 0, max_i - batch_start);
+ llama_sampling_reset(ctx_sampling);
+
+ // Clear the KV cache of previous prompts
+ llama_kv_cache_seq_rm(ctx, -1, -1, -1);
+
+ for (size_t i = batch_start; i < max_i; ++i) {
+ //if (i % 100 == 0) {
+ // std::cerr << "start prompt " << i << " / " << num_prompts << "\n";
+ //}
+ size_t start = i == 0 ? 0 : delim_idxs[i - 1] + 1;
+ size_t end = delim_idxs[i];
+
+ for (size_t j = start; j < end; ++j) {
+ int id = embd_inp[j];
+
+ // push the prompt tokens into the sampling context so that repetition
+ // penalties can be applied to them later; we don't apply grammar
+ // rules to the prompt
+ //llama_sampling_accept(ctx_sampling, ctx, id, false);
+
+ if (batch.n_tokens >= batch_max_tokens) {
+ LOG_TEE("error: too many tokens (%d) in prompt batch; the max is %d\n",
+ batch.n_tokens, batch_max_tokens);
+ LOG_TEE("turn up -c, -b, and -ub sizes, or reduce `batch_size`\n");
+ exit(1);
+ }
+
+ // Add the token to the current batch. Its position within the
+ // context is relative to the start of the current prompt.
+ //
+ llama_batch_add(batch, id, j - start, {(int)(i - batch_start)}, false);
+
+ //const std::string token_str = llama_token_to_piece(ctx, id);
+ //std::cerr << "pos " << (j - start) << ": token "
+ // << id << " \"" << token_str << "\"\n";
+ }
+
+ eval_state.extract_tokens.push_back(batch.n_tokens - 1);
+ }
+
+ if (batch.n_tokens == 0) {
+ continue;
+ }
+
+ //std::cerr << "prompt " << eval_state.prompt_idx << ": " << batch.n_tokens << " tokens\n";
+
+ //batch.logits[batch.n_tokens - 1] = true;
+
+ if (llama_decode(ctx, batch)) {
+ LOG_TEE("%s : failed to eval\n", __func__);
+ return 1;
+ }
+
+ //const llama_token id = llama_sampling_sample(ctx_sampling, ctx, nullptr, batch.n_tokens - 1);
+ //const std::string token_str = llama_token_to_piece(ctx, id);
+ //LOG_TEE("sample token %d: \"%s\"\n", id, token_str.c_str());
+ }
+
+ gguf_write_to_file(eval_gguf, "control_vector_data.gguf", false);
+
+ if (!path_session.empty() && params.prompt_cache_all && !params.prompt_cache_ro) {
+ LOG_TEE("\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
+ llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
+ }
+
+ llama_print_timings(ctx);
+ //write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);
+
+ //if (ctx_guidance) { llama_free(ctx_guidance); }
+ llama_free(ctx);
+ llama_free_model(model);
+
+ llama_sampling_free(ctx_sampling);
+ llama_backend_free();
+
+#ifndef LOG_DISABLE_LOGS
+ LOG_TEE("Log end\n");
+#endif // LOG_DISABLE_LOGS
+
+ return 0;
+}