#pragma once

#include "common.h"
#include "log.h"
#include "llama.h"
#include "common/base64.hpp"

// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
// disable Nagle's algorithm
#define CPPHTTPLIB_TCP_NODELAY true
#include "httplib.h"

// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"
#include "chat.h"

#include <random>
#include <sstream>
#include <string>
#include <vector>
#include <memory>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo"

using json = nlohmann::ordered_json;

#define SLT_INF(slot, fmt, ...) LOG_INF("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_WRN(slot, fmt, ...) LOG_WRN("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_ERR(slot, fmt, ...) LOG_ERR("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_DBG(slot, fmt, ...) LOG_DBG("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)

#define SRV_INF(fmt, ...) LOG_INF("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_WRN(fmt, ...) LOG_WRN("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_ERR(fmt, ...) LOG_ERR("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_DBG(fmt, ...) LOG_DBG("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)

#define QUE_INF(fmt, ...) LOG_INF("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_WRN(fmt, ...) LOG_WRN("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_ERR(fmt, ...) LOG_ERR("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_DBG(fmt, ...) LOG_DBG("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)

template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    // Fallback null check
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key);
        } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) {
            LOG_WRN("Wrong type supplied for parameter '%s'. Expected '%s', using default value\n", key.c_str(), json(default_value).type_name());
            return default_value;
        }
    } else {
        return default_value;
    }
}

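// Illustrative usage of json_value() (values below are hypothetical):
//
//   json body = json::parse(R"({"model": "llama", "temperature": 0.7})");
//
//   auto model = json_value(body, "model", std::string("default")); // -> "llama"
//   auto n_ctx = json_value(body, "n_ctx", 2048);                   // missing key -> 2048
//   auto bad   = json_value(body, "model", 42);                     // type mismatch -> logs a warning, returns 42
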
const static std::string build_info("b" + std::to_string(LLAMA_BUILD_NUMBER) + "-" + LLAMA_COMMIT);

//
// tokenizer and input processing utils
//

static bool json_is_array_of_numbers(const json & data) {
    if (data.is_array()) {
        for (const auto & e : data) {
            if (!e.is_number_integer()) {
                return false;
            }
        }
        return true;
    }
    return false;
}

// does the array contain BOTH numbers and strings?
static bool json_is_array_of_mixed_numbers_strings(const json & data) {
    bool seen_string = false;
    bool seen_number = false;
    if (data.is_array()) {
        for (const auto & e : data) {
            seen_string |= e.is_string();
            seen_number |= e.is_number_integer();
            if (seen_number && seen_string) {
                return true;
            }
        }
    }
    return false;
}

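// Examples (illustrative): [1, 2, "three"] -> true; [1, 2, 3] -> false (no strings);
// ["a", "b"] -> false (no numbers); [] -> false.
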
// get nested values from a JSON object by slash-separated paths (e.g. "key1/key2"); invalid paths are omitted
static json json_get_nested_values(const std::vector<std::string> & paths, const json & js) {
    json result = json::object();

    for (const std::string & path : paths) {
        json current = js;
        const auto keys = string_split<std::string>(path, '/');
        bool valid_path = true;
        for (const std::string & k : keys) {
            if (valid_path && current.is_object() && current.contains(k)) {
                current = current[k];
            } else {
                valid_path = false;
            }
        }
        if (valid_path) {
            result[path] = current;
        }
    }
    return result;
}

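// Illustrative example (hypothetical input): given js = {"a": {"b": 1}, "c": 2} and
// paths = {"a/b", "c", "x/y"}, the result is {"a/b": 1, "c": 2}; the invalid path
// "x/y" is silently dropped.
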
/**
 * this handles 2 cases:
 * - only string, example: "string"
 * - mixed string and tokens, example: [12, 34, "string", 56, 78]
 */
static llama_tokens tokenize_mixed(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special) {
    // if add_special is true, special tokens are added only for a plain string prompt
    // or for the first string element of the array
    llama_tokens prompt_tokens;

    if (json_prompt.is_array()) {
        bool first = true;
        for (const auto & p : json_prompt) {
            if (p.is_string()) {
                auto s = p.template get<std::string>();

                llama_tokens tokens;
                if (first) {
                    tokens = common_tokenize(vocab, s, add_special, parse_special);
                    first = false;
                } else {
                    tokens = common_tokenize(vocab, s, false, parse_special);
                }

                prompt_tokens.insert(prompt_tokens.end(), tokens.begin(), tokens.end());
            } else {
                if (first) {
                    first = false;
                }

                prompt_tokens.push_back(p.template get<llama_token>());
            }
        }
    } else {
        auto s = json_prompt.template get<std::string>();
        prompt_tokens = common_tokenize(vocab, s, add_special, parse_special);
    }

    return prompt_tokens;
}

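// Illustrative call (token ids are hypothetical):
//
//   tokenize_mixed(vocab, json::parse(R"([12, 34, "hello", 56])"), true, true);
//
// tokenizes "hello" without special tokens (it is not the first element) and splices
// the resulting ids between the literal tokens 12, 34 and 56.
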
/**
 * break the input "prompt" object into multiple prompts if needed, then tokenize them
 * this supports these cases:
 * - "prompt": "string"
 * - "prompt": [12, 34, 56]
 * - "prompt": [12, 34, "string", 56, 78]
 * and multiple prompts (multi-tasks):
 * - "prompt": ["string1", "string2"]
 * - "prompt": ["string1", [12, 34, 56]]
 * - "prompt": [[12, 34, 56], [78, 90, 12]]
 * - "prompt": [[12, 34, "string", 56, 78], [12, 34, 56]]
 */
static std::vector<llama_tokens> tokenize_input_prompts(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special) {
    std::vector<llama_tokens> result;
    if (json_prompt.is_string() || json_is_array_of_mixed_numbers_strings(json_prompt)) {
        // string or mixed
        result.push_back(tokenize_mixed(vocab, json_prompt, add_special, parse_special));
    } else if (json_is_array_of_numbers(json_prompt)) {
        // array of tokens
        result.push_back(json_prompt.get<llama_tokens>());
    } else if (json_prompt.is_array()) {
        // array of prompts
        result.reserve(json_prompt.size());
        for (const auto & p : json_prompt) {
            if (p.is_string() || json_is_array_of_mixed_numbers_strings(p)) {
                result.push_back(tokenize_mixed(vocab, p, add_special, parse_special));
            } else if (json_is_array_of_numbers(p)) {
                // array of tokens
                result.push_back(p.get<llama_tokens>());
            } else {
                throw std::runtime_error("element of \"prompt\" must be a string, a list of tokens, or a list of mixed strings & tokens");
            }
        }
    } else {
        throw std::runtime_error("\"prompt\" must be a string, a list of tokens, a list of mixed strings & tokens, or a list of prompts");
    }
    if (result.empty()) {
        throw std::runtime_error("\"prompt\" must not be empty");
    }
    return result;
}

// return the last index of character that can form a valid string
// if the last character is potentially cut in half, return the index before the cut
// if validate_utf8(text) == text.size(), then the whole text is valid utf8
static size_t validate_utf8(const std::string & text) {
    size_t len = text.size();
    if (len == 0) return 0;

    // Check the last few bytes to see if a multi-byte sequence was cut off
    for (size_t i = 1; i <= 4 && i <= len; ++i) {
        unsigned char c = text[len - i];
        // Check for the start of a multi-byte sequence from the end
        if ((c & 0xE0) == 0xC0) {
            // 2-byte character start: 110xxxxx
            // Needs at least 2 bytes
            if (i < 2) return len - i;
        } else if ((c & 0xF0) == 0xE0) {
            // 3-byte character start: 1110xxxx
            // Needs at least 3 bytes
            if (i < 3) return len - i;
        } else if ((c & 0xF8) == 0xF0) {
            // 4-byte character start: 11110xxx
            // Needs at least 4 bytes
            if (i < 4) return len - i;
        }
    }

    // If no cut-off multi-byte character is found, return the full length
    return len;
}

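// Illustrative example: for "a" followed by the first byte of a 2-byte UTF-8
// character (e.g. "a\xC3"), validate_utf8() returns 1, i.e. only "a" is safe to
// emit; once the second byte "\xA9" arrives, the full string "a\xC3\xA9" ("aé")
// validates to its full length of 3.
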
//
// template utils
//

// format rerank task: [BOS]query[EOS][SEP]doc[EOS]
static llama_tokens format_rerank(const struct llama_vocab * vocab, const llama_tokens & query, const llama_tokens & doc) {
    llama_tokens result;

    result.reserve(doc.size() + query.size() + 4);
    result.push_back(llama_vocab_bos(vocab));
    result.insert(result.end(), query.begin(), query.end());
    result.push_back(llama_vocab_eos(vocab));
    result.push_back(llama_vocab_sep(vocab));
    result.insert(result.end(), doc.begin(), doc.end());
    result.push_back(llama_vocab_eos(vocab));

    return result;
}

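// For example (hypothetical ids), query = {10, 11} and doc = {20, 21, 22} yield:
//   {BOS, 10, 11, EOS, SEP, 20, 21, 22, EOS}
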
// format infill task
static llama_tokens format_infill(
        const llama_vocab * vocab,
        const json & input_prefix,
        const json & input_suffix,
        const json & input_extra,
        const int n_batch,
        const int n_predict,
        const int n_ctx,
        const bool spm_infill,
        const llama_tokens & tokens_prompt
    ) {
    // TODO: optimize this block by reducing memory allocations and movement

    // use FIM repo-level pattern:
    // ref: https://arxiv.org/pdf/2409.12186
    //
    // [FIM_REP]myproject
    // [FIM_SEP]filename0
    // extra chunk 0
    // [FIM_SEP]filename1
    // extra chunk 1
    // ...
    // [FIM_SEP]filename
    // [FIM_PRE]prefix[FIM_SUF]suffix[FIM_MID]prompt
    //
    llama_tokens extra_tokens;
    extra_tokens.reserve(n_ctx);

    auto tokens_prefix = tokenize_mixed(vocab, input_prefix, false, false);
    auto tokens_suffix = tokenize_mixed(vocab, input_suffix, false, false);

    if (llama_vocab_fim_rep(vocab) != LLAMA_TOKEN_NULL) {
        // TODO: make the project name an input
        static const auto k_fim_repo = common_tokenize(vocab, "myproject\n", false, false);

        extra_tokens.push_back(llama_vocab_fim_rep(vocab));
        extra_tokens.insert(extra_tokens.end(), k_fim_repo.begin(), k_fim_repo.end());
    }
    for (const auto & chunk : input_extra) {
        // { "text": string, "filename": string }
        const std::string text     = json_value(chunk, "text",     std::string());
        const std::string filename = json_value(chunk, "filename", std::string("tmp"));

        if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) {
            const auto k_fim_file = common_tokenize(vocab, filename + "\n", false, false);

            extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab));
            extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
        } else {
            // chunk separator in binary form to avoid confusing the AI
            static const char k_chunk_prefix_str[] = {0x0a, 0x0a, 0x2d, 0x2d, 0x2d, 0x20, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x20, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x00};
            static const auto k_chunk_prefix_tokens = common_tokenize(vocab, k_chunk_prefix_str, false, false);

            extra_tokens.insert(extra_tokens.end(), k_chunk_prefix_tokens.begin(), k_chunk_prefix_tokens.end());
        }

        const auto chunk_tokens = common_tokenize(vocab, text, false, false);
        extra_tokens.insert(extra_tokens.end(), chunk_tokens.begin(), chunk_tokens.end());
    }

    if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) {
        // TODO: current filename
        static const auto k_fim_file = common_tokenize(vocab, "filename\n", false, false);

        extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab));
        extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
    }

    // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4));
    const int n_suffix_take = std::min<int>(tokens_suffix.size(), std::max<int>(0, (n_batch/4) - (2 + tokens_prompt.size())));

    SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take));

    // fill the rest of the context with extra chunks
    const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());

    tokens_prefix.erase(tokens_prefix.begin(), tokens_prefix.begin() + tokens_prefix.size() - n_prefix_take);
    tokens_suffix.resize(n_suffix_take);

    tokens_prefix.insert(tokens_prefix.begin(), llama_vocab_fim_pre(vocab));
    tokens_prefix.insert(tokens_prefix.end(),   tokens_prompt.begin(), tokens_prompt.end());
    tokens_suffix.insert(tokens_suffix.begin(), llama_vocab_fim_suf(vocab));

    auto embd_inp = spm_infill ? tokens_suffix : tokens_prefix;
    auto embd_end = spm_infill ? tokens_prefix : tokens_suffix;

    if (llama_vocab_get_add_bos(vocab)) {
        embd_inp.insert(embd_inp.begin(), llama_vocab_bos(vocab));
    }

    SRV_DBG("extra: n_ctx = %d, n_extra_take = %d, n_extra = %d\n", n_ctx, n_extra_take, (int) extra_tokens.size());

    // put the extra context before the FIM prefix
    embd_inp.insert(embd_inp.begin(), extra_tokens.end() - n_extra_take, extra_tokens.end());

    embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
    embd_inp.push_back(llama_vocab_fim_mid(vocab));

    return embd_inp;
}

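// Illustrative budget arithmetic (hypothetical sizes: n_batch = 2048, n_predict = 128,
// n_ctx = 8192, empty tokens_prompt): the prefix keeps at most 3*(2048/4) = 1536
// tokens, the suffix at most (2048/4) - 2 = 510 tokens, and the repo-level extra
// context may fill up to 8192 - 2048 - 2*128 = 5888 tokens, taken from the end of
// extra_tokens so the chunks nearest the cursor survive truncation.
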
//
// base64 utils (TODO: move to common in the future)
//

static const std::string base64_chars =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    std::vector<uint8_t> ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}

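// Illustrative round trip: base64_decode("aGk=") yields the bytes {'h', 'i'};
// the padding '=' terminates the main loop and the trailing partial quantum is
// flushed by the `if (i)` block above.
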
//
// random string / id
//

static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

static std::string gen_chatcmplid() {
    return "chatcmpl-" + random_string();
}

static std::string gen_tool_call_id() {
    return random_string();
}

//
// other common utils
//

static bool ends_with(const std::string & str, const std::string & suffix) {
    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

static size_t find_partial_stop_string(const std::string & stop, const std::string & text) {
    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }

    return std::string::npos;
}

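// Illustrative example: with stop = "</s>" and streamed text "Hello </",
// find_partial_stop_string() returns 6 (the offset where "</" begins), so the
// caller can withhold the possible stop-string prefix; it returns
// std::string::npos when the text cannot end in any prefix of the stop string.
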
// convert a range of tokens to their concatenated text pieces
template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += common_token_to_piece(ctx, *begin);
    }

    return ret;
}

// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == LLAMA_TOKEN_NULL ? "" : common_token_to_piece(ctx, token);

    // if the size is 1 and the first bit is set, it's a partial character
    //   (size > 1 means it's already a known token)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}

static bool server_sent_event(httplib::DataSink & sink, const char * event, const json & data) {
    const std::string str =
        std::string(event) + ": " +
        data.dump(-1, ' ', false, json::error_handler_t::replace) +
        "\n\n";

    LOG_DBG("data stream, to_send: %s", str.c_str());

    return sink.write(str.c_str(), str.size());
}

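// Illustrative wire format: server_sent_event(sink, "data", json{{"ok", true}})
// writes the frame
//
//   data: {"ok":true}
//
// followed by a blank line, which is what terminates an SSE event.
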
//
// OAI utils
//

static json oaicompat_completion_params_parse(const json & body) {
    json llama_params;

    if (!body.contains("prompt")) {
        throw std::runtime_error("\"prompt\" is required");
    }

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "echo" field
    if (json_value(body, "echo", false)) {
        throw std::runtime_error("Only no echo is supported");
    }

    // Params supported by OAI but unsupported by llama.cpp
    static const std::vector<std::string> unsupported_params { "best_of", "suffix" };
    for (const auto & param : unsupported_params) {
        if (body.contains(param)) {
            throw std::runtime_error("Unsupported param: " + param);
        }
    }

    // Copy remaining properties to llama_params
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}

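// Illustrative request mapping (hypothetical body): {"prompt": "Hi", "stop": "\n",
// "max_tokens": 16} yields llama_params with "stop" normalized to ["\n"] and
// "prompt" and "max_tokens" copied through unchanged for the server to interpret.
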
static json oaicompat_completion_params_parse(
    const json & body,
    bool use_jinja,
    common_reasoning_format reasoning_format,
    const struct common_chat_templates * tmpls)
{
    json llama_params;

    auto tools = json_value(body, "tools", json());
    auto stream = json_value(body, "stream", false);

    if (tools.is_array() && !tools.empty()) {
        if (stream) {
            throw std::runtime_error("Cannot use tools with stream");
        }
        if (!use_jinja) {
            throw std::runtime_error("tools param requires --jinja flag");
        }
    }
    if (!use_jinja) {
        if (body.contains("tool_choice") && !body.at("tool_choice").is_null()) {
            throw std::runtime_error("Unsupported param: tool_choice");
        }
    }

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    auto json_schema = json_value(body, "json_schema", json());
    auto grammar = json_value(body, "grammar", std::string());
    if (!json_schema.is_null() && !grammar.empty()) {
        throw std::runtime_error("Cannot use both json_schema and grammar");
    }

    // Handle "response_format" field
    if (body.contains("response_format")) {
        json response_format = json_value(body, "response_format", json::object());
        std::string response_type = json_value(response_format, "type", std::string());
        if (response_type == "json_object") {
            json_schema = json_value(response_format, "schema", json::object());
        } else if (response_type == "json_schema") {
            auto schema_wrapper = json_value(response_format, "json_schema", json::object());
            json_schema = json_value(schema_wrapper, "schema", json::object());
        } else if (!response_type.empty() && response_type != "text") {
            throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
        }
    }

    common_chat_templates_inputs inputs;
    inputs.messages = common_chat_msgs_parse_oaicompat(body.at("messages"));
    inputs.tools = common_chat_tools_parse_oaicompat(tools);
    inputs.tool_choice = common_chat_tool_choice_parse_oaicompat(json_value(body, "tool_choice", std::string("auto")));
    inputs.json_schema = json_schema.is_null() ? "" : json_schema.dump();
    inputs.grammar = grammar;
    inputs.add_generation_prompt = json_value(body, "add_generation_prompt", true);
    inputs.use_jinja = use_jinja;
    inputs.parallel_tool_calls = json_value(body, "parallel_tool_calls", false);
    inputs.extract_reasoning = reasoning_format != COMMON_REASONING_FORMAT_NONE;
    if (!inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE && body.contains("grammar")) {
        throw std::runtime_error("Cannot use custom grammar constraints with tools.");
    }

    // Apply chat template to the list of messages
    auto chat_params = common_chat_templates_apply(tmpls, inputs);

    llama_params["chat_format"] = static_cast<int>(chat_params.format);
    llama_params["prompt"] = chat_params.prompt;
    llama_params["grammar"] = chat_params.grammar;
    llama_params["grammar_lazy"] = chat_params.grammar_lazy;
    auto grammar_triggers = json::array();
    for (const auto & trigger : chat_params.grammar_triggers) {
        grammar_triggers.push_back(trigger.to_json<json>());
    }
    llama_params["grammar_triggers"] = grammar_triggers;
    llama_params["preserved_tokens"] = chat_params.preserved_tokens;
    for (const auto & stop : chat_params.additional_stops) {
        llama_params["stop"].push_back(stop);
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "logprobs" field
    // TODO: The response format of this option is not yet OAI-compatible, but it seems little used; we may need to fix it in the future
    if (json_value(body, "logprobs", false)) {
        llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
    } else if (body.contains("top_logprobs") && !body.at("top_logprobs").is_null()) {
        throw std::runtime_error("top_logprobs requires logprobs to be set to true");
    }

    // Copy remaining properties to llama_params
    // This allows the user to pass llama.cpp-specific params like "mirostat" via the OAI endpoint.
    // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}

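// Note on the flow above: the chat handler resolves the prompt here by applying the
// chat template, so a minimal body such as (hypothetical)
//   {"messages": [{"role": "user", "content": "Hi"}]}
// leaves this function with "prompt", "chat_format", "grammar" and related keys
// already populated for the downstream completion path.
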
static json format_embeddings_response_oaicompat(const json & request, const json & embeddings, bool use_base64 = false) {
    json data = json::array();
    int32_t n_tokens = 0;
    int i = 0;
    for (const auto & elem : embeddings) {
        json embedding_obj;

        if (use_base64) {
            const auto & vec = json_value(elem, "embedding", json::array()).get<std::vector<float>>();
            const char * data_ptr = reinterpret_cast<const char *>(vec.data());
            size_t data_size = vec.size() * sizeof(float);
            embedding_obj = {
                {"embedding", base64::encode(data_ptr, data_size)},
                {"index", i++},
                {"object", "embedding"},
                {"encoding_format", "base64"}
            };
        } else {
            embedding_obj = {
                {"embedding", json_value(elem, "embedding", json::array())},
                {"index", i++},
                {"object", "embedding"}
            };
        }
        data.push_back(embedding_obj);

        n_tokens += json_value(elem, "tokens_evaluated", 0);
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json {
            {"prompt_tokens", n_tokens},
            {"total_tokens", n_tokens}
        }},
        {"data", data}
    };

    return res;
}

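// Illustrative response shape (values hypothetical):
//   {"model": "gpt-3.5-turbo", "object": "list",
//    "usage": {"prompt_tokens": 5, "total_tokens": 5},
//    "data": [{"embedding": [0.1, 0.2], "index": 0, "object": "embedding"}]}
// with "embedding" replaced by a base64 string of the raw float32 buffer when
// use_base64 is set.
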
static json format_response_rerank(
        const json & request,
        const json & ranks,
        bool is_tei_format,
        std::vector<std::string> & texts) {
    json res;
    if (is_tei_format) {
        // TEI response format
        res = json::array();
        bool return_text = json_value(request, "return_text", false);
        for (const auto & rank : ranks) {
            int index = json_value(rank, "index", 0);
            json elem = json{
                {"index", index},
                {"score", json_value(rank, "score", 0.0)},
            };
            if (return_text) {
                elem["text"] = std::move(texts[index]);
            }
            res.push_back(elem);
        }
    } else {
        // Jina response format
        json results = json::array();
        int32_t n_tokens = 0;
        for (const auto & rank : ranks) {
            results.push_back(json{
                {"index", json_value(rank, "index", 0)},
                {"relevance_score", json_value(rank, "score", 0.0)},
            });

            n_tokens += json_value(rank, "tokens_evaluated", 0);
        }

        res = json{
            {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
            {"object", "list"},
            {"usage", json{
                {"prompt_tokens", n_tokens},
                {"total_tokens", n_tokens}
            }},
            {"results", results}
        };
    }

    return res;
}

static bool is_valid_utf8(const std::string & str) {
    const unsigned char * bytes = reinterpret_cast<const unsigned char *>(str.data());
    const unsigned char * end = bytes + str.length();

    while (bytes < end) {
        if (*bytes <= 0x7F) {
            // 1-byte sequence (0xxxxxxx)
            bytes++;
        } else if ((*bytes & 0xE0) == 0xC0) {
            // 2-byte sequence (110xxxxx 10xxxxxx)
            if (end - bytes < 2 || (bytes[1] & 0xC0) != 0x80)
                return false;
            bytes += 2;
        } else if ((*bytes & 0xF0) == 0xE0) {
            // 3-byte sequence (1110xxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 3 || (bytes[1] & 0xC0) != 0x80 || (bytes[2] & 0xC0) != 0x80)
                return false;
            bytes += 3;
        } else if ((*bytes & 0xF8) == 0xF0) {
            // 4-byte sequence (11110xxx 10xxxxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 4 || (bytes[1] & 0xC0) != 0x80 ||
                (bytes[2] & 0xC0) != 0x80 || (bytes[3] & 0xC0) != 0x80)
                return false;
            bytes += 4;
        } else {
            // Invalid UTF-8 lead byte
            return false;
        }
    }

    return true;
}

static json format_tokenizer_response(const json & tokens) {
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(const std::string & content) {
    return json {
        {"content", content}
    };
}

static json format_logit_bias(const std::vector<llama_logit_bias> & logit_bias) {
    json data = json::array();
    for (const auto & lb : logit_bias) {
        data.push_back(json{
            {"bias", lb.bias},
            {"token", lb.token},
        });
    }
    return data;
}

static std::string safe_json_to_str(const json & data) {
    return data.dump(-1, ' ', false, json::error_handler_t::replace);
}

static std::vector<llama_token_data> get_token_probabilities(llama_context * ctx, int idx) {
    std::vector<llama_token_data> cur;
    const auto * logits = llama_get_logits_ith(ctx, idx);

    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);

    const int n_vocab = llama_vocab_n_tokens(vocab);

    cur.resize(n_vocab);
    for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
        cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
    }

    // sort tokens by logits
    std::sort(cur.begin(), cur.end(), [](const llama_token_data & a, const llama_token_data & b) {
        return a.logit > b.logit;
    });

    // apply softmax
    float max_l = cur[0].logit;
    float cum_sum = 0.0f;
    for (size_t i = 0; i < cur.size(); ++i) {
        float p = expf(cur[i].logit - max_l);
        cur[i].p = p;
        cum_sum += p;
    }
    for (size_t i = 0; i < cur.size(); ++i) {
        cur[i].p /= cum_sum;
    }

    return cur;
}

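// The normalization above is a numerically stable softmax over the raw logits:
//   p_i = exp(l_i - max_l) / sum_j exp(l_j - max_l)
// subtracting max_l before exponentiating avoids overflow for large logits.
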
static bool are_lora_equal(
        const std::vector<common_adapter_lora_info> & l1,
        const std::vector<common_adapter_lora_info> & l2) {
    if (l1.size() != l2.size()) {
        return false;
    }
    for (size_t i = 0; i < l1.size(); ++i) {
        // we don't check lora.path to reduce the time complexity
        if (l1[i].scale != l2[i].scale || l1[i].ptr != l2[i].ptr) {
            return false;
        }
    }
    return true;
}

// parse lora config from JSON request; returns a copy of lora_base with updated scales
static std::vector<common_adapter_lora_info> parse_lora_request(
        const std::vector<common_adapter_lora_info> & lora_base,
        const json & data) {
    std::vector<common_adapter_lora_info> lora(lora_base);
    int max_idx = lora.size();

    // clear existing values
    for (auto & entry : lora) {
        entry.scale = 0.0f;
    }

    // set values
    for (const auto & entry : data) {
        int id = json_value(entry, "id", -1);
        float scale = json_value(entry, "scale", 0.0f);
        if (0 <= id && id < max_idx) {
            lora[id].scale = scale;
        } else {
            throw std::runtime_error("invalid adapter id");
        }
    }

    return lora;
}

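// Illustrative request body (hypothetical): with two loaded adapters,
//   [{"id": 0, "scale": 0.5}]
// returns a copy of lora_base where adapter 0 has scale 0.5 and adapter 1 has
// scale 0.0 (scales are reset before being reassigned); an out-of-range id
// raises "invalid adapter id".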