Dataset Viewer
Auto-converted to Parquet
Columns and value types reported by the viewer:

Column                           Type    String length range
crossfile_context_retrievalwref  dict    -
prompt                           string  252 - 32.6k
right_context                    string  0 - 81.2k
metadata                         dict    -
crossfile_context_retrieval      dict    -
groundtruth                      string  5 - 208
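Because the preview truncates every cell, the full records are easier to inspect locally. A minimal sketch, assuming the Hugging Face datasets library, a "train" split, and a placeholder repository id (the actual id and split names are not shown on this page):

from datasets import load_dataset

# Placeholder id; substitute the real <namespace>/<dataset-name> from the hub page.
# The split name "train" is an assumption.
ds = load_dataset("namespace/dataset-name", split="train")

row = ds[0]
print(row["metadata"]["task_id"])   # e.g. "project_cc_python/62"
print(len(row["prompt"]))           # prompt strings range from 252 to 32.6k characters
print(row["groundtruth"])           # short target completion, 5 to 208 characters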
{ "list": [ { "filename": "alt_generator.py", "retrieved_chunk": " if self.remaining_tokens == 0:\n self.sequence_str += self.held_text\n return self.held_text, True\n self.remaining_tokens -= 1\n # Decode the current tail end of the sequence\n old_tai...
import asyncio import websockets import json from sentencepiece import SentencePieceProcessor from model import ExLlama, ExLlamaCache, ExLlamaConfig from lora import ExLlamaLora from tokenizer import ExLlamaTokenizer from generator import ExLlamaGenerator import argparse import torch import sys import os import glob i...
next_token = generator.gen_single_token() # End on stop token if next_token in stop_tokens: return held_text, True, full_prompt + built_response, utilized_prompt + built_response, built_response # Get new text new_tail = tokenizer.decode(generator.sequence_actual[:, -(max_stop_string + ...
{ "context_start_lineno": 0, "file": "example_ws.py", "groundtruth_start_lineno": 103, "repository": "turboderp-exllama-a544085", "right_context_start_lineno": 104, "task_id": "project_cc_python/62" }
{ "list": [ { "filename": "alt_generator.py", "retrieved_chunk": " self.sequence_str += self.held_text\n return self.held_text, True\n # Decode the tail end of the sequence with the added token to get (actual) characters added\n new_tail = self.tokenizer.decode(self...
sequence_actual[:, -max_stop_string:])[0]
{ "list": [ { "filename": "generator.py", "retrieved_chunk": " logits = self.model.forward(self.sequence[:, -1:], self.cache, lora = self.lora, input_mask = mask)\n self.apply_rep_penalty(logits)\n logits[:, :, self.tokenizer.bos_token_id] = -10000.0\n if co...
from model import ExLlama, ExLlamaCache, ExLlamaConfig from tokenizer import ExLlamaTokenizer from generator import ExLlamaGenerator import torch import torch.nn.functional as F import os, glob import cuda_ext # Directory containing model, tokenizer, generator model_directory = "/mnt/str/models/_test_models/TheBloke...
output = tokenizer.decode(generator.sequence[0]) return output for i in range(10): alpha = i / 5.0 - 0.4 print() print(f"--------------------------------------") print(f"alpha = {alpha:.1f}") print(f"--------------------------------------") output = generate_cfg(prompts, alpha, 200) ...
{ "context_start_lineno": 0, "file": "example_cfg.py", "groundtruth_start_lineno": 78, "repository": "turboderp-exllama-a544085", "right_context_start_lineno": 79, "task_id": "project_cc_python/74" }
{ "list": [ { "filename": "generator.py", "retrieved_chunk": " self.settings.min_p + 0.01 if constraints is not None else 0.0,\n self.settings.typical)\n else:\n # bos = torch.Tensor([[self.tokenizer....
gen_accept_token(batch_token)
{ "list": [ { "filename": "webui/app.py", "retrieved_chunk": "def api_delete_session():\n global session\n data = request.get_json()\n session.api_delete_session(data)\n return json.dumps({\"result\": \"ok\"}) + \"\\n\"\n# Set fixed prompt settings\n@app.route(\"/api/set_fixed_prompt\", me...
from model import ExLlama, ExLlamaCache, ExLlamaConfig from flask import Flask, request from tokenizer import ExLlamaTokenizer from generator import ExLlamaGenerator import os, glob # Directory containing config.json, tokenizer.model and safetensors file for the model model_directory = "/mnt/str/models/llama-7b-4bit/"...
generator.settings.token_repetition_penalty_sustain = config.max_seq_len generator.settings.temperature = 0.7 generator.settings.top_p = 0.1 generator.settings.top_k = 40 generator.settings.typical = 0.0 # Disabled outputs = generator.generate_simple(prompt, max_new_tokens = 200) return...
{ "context_start_lineno": 0, "file": "example_flask.py", "groundtruth_start_lineno": 36, "repository": "turboderp-exllama-a544085", "right_context_start_lineno": 37, "task_id": "project_cc_python/76" }
{ "list": [ { "filename": "webui/app.py", "retrieved_chunk": "def home():\n return render_template(\"index.html\")\n# Get existing sessions\n@app.route(\"/api/populate\")\ndef api_populate():\n global session\n return session.api_populate()\n# Edit block\n@app.route(\"/api/edit_block\", metho...
settings.token_repetition_penalty_max = 1.176
{ "list": [ { "filename": "alt_generator.py", "retrieved_chunk": " # stop_conditions: List of strings or integer token IDs that will end the sequence\n # settings: ExLlamaAltGeneratorSettings\n # encode_special_characters: Set to true to tokenize \"</s>\" etc.\n def begin_stream(self, prom...
import asyncio import websockets import json from sentencepiece import SentencePieceProcessor from model import ExLlama, ExLlamaCache, ExLlamaConfig from lora import ExLlamaLora from tokenizer import ExLlamaTokenizer from generator import ExLlamaGenerator import argparse import torch import sys import os import glob i...
built_response = "" remaining_tokens = max_new_tokens # Settings stop_strings = [] stop_tokens = [] for t in stop_conditions: if isinstance(t, int): stop_tokens += [t] if isinstance(t, str): stop_strings += [t] held_text = "" max_stop_string = 2 for ss in stop_s...
{ "context_start_lineno": 0, "file": "example_ws.py", "groundtruth_start_lineno": 65, "repository": "turboderp-exllama-a544085", "right_context_start_lineno": 66, "task_id": "project_cc_python/60" }
{ "list": [ { "filename": "alt_generator.py", "retrieved_chunk": " self.sequence_str = self.tokenizer.decode(applied_input_ids)[0] if applied_input_ids.shape[0] < input_ids.shape[0] else prompt\n # Settings\n self.stop_strings = []\n self.stop_tokens = []\n for t in ...
decode(prompt_ids)[0]
{ "list": [ { "filename": "alt_generator.py", "retrieved_chunk": " self.sequence_str = self.tokenizer.decode(applied_input_ids)[0] if applied_input_ids.shape[0] < input_ids.shape[0] else prompt\n # Settings\n self.stop_strings = []\n self.stop_tokens = []\n for t in ...
import asyncio import websockets import json from sentencepiece import SentencePieceProcessor from model import ExLlama, ExLlamaCache, ExLlamaConfig from lora import ExLlamaLora from tokenizer import ExLlamaTokenizer from generator import ExLlamaGenerator import argparse import torch import sys import os import glob i...
def stream(): global model, cache, config, generator, tokenizer global stop_strings, stop_tokens, prompt_ids, held_text, max_stop_string, remaining_tokens global full_prompt, utilized_prompt, built_response # Check total response length if remaining_tokens == 0: return held_text, True, f...
{ "context_start_lineno": 0, "file": "example_ws.py", "groundtruth_start_lineno": 88, "repository": "turboderp-exllama-a544085", "right_context_start_lineno": 89, "task_id": "project_cc_python/61" }
{ "list": [ { "filename": "alt_generator.py", "retrieved_chunk": " for ss in self.stop_strings:\n self.max_stop_tokens = max(self.max_stop_tokens, self.get_num_tokens(ss) + 2)\n self.settings = gen_settings\n # Start generation\n self.gen_begin_reuse(applied_inpu...
gen_begin_reuse(input_ids)
{ "list": [ { "filename": "generator.py", "retrieved_chunk": " self.sequence = self.sequence[:, num_tokens:]\n self.gen_begin(self.sequence, mask = mask)\n def gen_num_tokens(self):\n return self.sequence_actual.shape[-1]\n # Simple generator function\n def genera...
from model import ExLlama, ExLlamaCache, ExLlamaConfig from tokenizer import ExLlamaTokenizer from generator import ExLlamaGenerator import torch import torch.nn.functional as F import os, glob import cuda_ext # Directory containing model, tokenizer, generator model_directory = "/mnt/str/models/_test_models/TheBloke...
generator.gen_begin(ids, mask = mask) # Sampling loop for _ in range(max_new_tokens): logits = model.forward(generator.sequence[:, -1:], cache, input_mask = mask) generator.apply_rep_penalty(logits) logits = F.log_softmax(logits, dim = -1) logits_mixed = (1 - alpha) * lo...
{ "context_start_lineno": 0, "file": "example_cfg.py", "groundtruth_start_lineno": 61, "repository": "turboderp-exllama-a544085", "right_context_start_lineno": 62, "task_id": "project_cc_python/67" }
{ "list": [ { "filename": "generator.py", "retrieved_chunk": " eos = torch.zeros((ids.shape[0],), dtype = torch.bool)\n for i in range(max_new_tokens):\n token = self.gen_single_token(mask = mask)\n for j in range(token.shape[0]):\n if token[j, 0].ite...
encode(prompts, return_mask = True)
{ "list": [ { "filename": "generator.py", "retrieved_chunk": " logits = self.model.forward(self.sequence[:, -1:], self.cache, lora = self.lora, input_mask = mask)\n self.apply_rep_penalty(logits)\n logits[:, :, self.tokenizer.bos_token_id] = -10000.0\n if co...
from model import ExLlama, ExLlamaCache, ExLlamaConfig from tokenizer import ExLlamaTokenizer from generator import ExLlamaGenerator import torch import torch.nn.functional as F import os, glob import cuda_ext # Directory containing model, tokenizer, generator model_directory = "/mnt/str/models/_test_models/TheBloke...
return output for i in range(10): alpha = i / 5.0 - 0.4 print() print(f"--------------------------------------") print(f"alpha = {alpha:.1f}") print(f"--------------------------------------") output = generate_cfg(prompts, alpha, 200) print(output[len(prompts[0]):].strip())
{ "context_start_lineno": 0, "file": "example_cfg.py", "groundtruth_start_lineno": 80, "repository": "turboderp-exllama-a544085", "right_context_start_lineno": 81, "task_id": "project_cc_python/75" }
{ "list": [ { "filename": "generator.py", "retrieved_chunk": " self.settings.min_p + 0.01 if constraints is not None else 0.0,\n self.settings.typical)\n else:\n # bos = torch.Tensor([[self.tokenizer....
decode(generator.sequence[0])
{ "list": [ { "filename": "example_alt_generator.py", "retrieved_chunk": " args.lora = os.path.join(args.lora_dir, \"adapter_model.bin\")\n # Model globals\n model_init.set_globals(args)\n # Instantiate model and generator\n config = model_init.make_config(args)\n model = ExLlama...
from model import ExLlama, ExLlamaCache, ExLlamaConfig from tokenizer import ExLlamaTokenizer import argparse, sys, os, glob from torch import version as torch_version from globals import set_affinity_str def add_args(parser): parser.add_argument("-t", "--tokenizer", type = str, help = "Tokenizer model path") ...
if args.flash_attn: config.use_flash_attn_2 = True try: config.max_input_len = int(args.flash_attn) except ValueError: pass config.matmul_recons_thd = args.matmul_recons_thd config.fused_mlp_thd = args.fused_mlp_thd config.sdp_thd = args.sdp_thd con...
{ "context_start_lineno": 0, "file": "model_init.py", "groundtruth_start_lineno": 122, "repository": "turboderp-exllama-a544085", "right_context_start_lineno": 123, "task_id": "project_cc_python/80" }
{ "list": [ { "filename": "example_alt_generator.py", "retrieved_chunk": " lora = None\n if args.lora:\n print(f\" -- LoRA config: {args.lora_config}\")\n print(f\" -- Loading LoRA: {args.lora}\")\n if args.lora_config is None:\n print(f\" ## Error: please specify...
calculate_rotary_embedding_base()
{ "list": [ { "filename": "example_basic.py", "retrieved_chunk": "generator.settings.token_repetition_penalty_max = 1.2\ngenerator.settings.temperature = 0.95\ngenerator.settings.top_p = 0.65\ngenerator.settings.top_k = 100\ngenerator.settings.typical = 0.5\n# Produce a simple generation\nprompt = \"O...
from model import ExLlama, ExLlamaCache, ExLlamaConfig from tokenizer import ExLlamaTokenizer from generator import ExLlamaGenerator import os, glob # Directory containing model, tokenizer, generator model_directory = "/mnt/str/models/llama-13b-4bit-128g/" # Locate files we need within that directory tokenizer_pat...
for line in output: print("---") print(line)
{ "context_start_lineno": 0, "file": "example_batch.py", "groundtruth_start_lineno": 51, "repository": "turboderp-exllama-a544085", "right_context_start_lineno": 52, "task_id": "project_cc_python/56" }
{ "list": [ { "filename": "example_basic.py", "retrieved_chunk": "generator.settings.token_repetition_penalty_max = 1.2\ngenerator.settings.temperature = 0.95\ngenerator.settings.top_p = 0.65\ngenerator.settings.top_k = 100\ngenerator.settings.typical = 0.5\n# Produce a simple generation\nprompt = \"O...
generate_simple(prompts, max_new_tokens = 200)
{ "list": [ { "filename": "example_chatbot.py", "retrieved_chunk": "print(f\" -- Sequence length: {args.length}\")\nprint(f\" -- Temperature: {args.temperature:.2f}\")\nprint(f\" -- Top-K: {args.top_k}\")\nprint(f\" -- Top-P: {args.top_p:.2f}\")\nprint(f\" -- Min-P: {args.min_p:.2f}\")\nprint(f\" -- R...
from model import ExLlama, ExLlamaCache, ExLlamaConfig from tokenizer import ExLlamaTokenizer import argparse, sys, os, glob from torch import version as torch_version from globals import set_affinity_str def add_args(parser): parser.add_argument("-t", "--tokenizer", type = str, help = "Tokenizer model path") ...
config.gpu_peer_fix = args.gpu_peer_fix config.alpha_value = args.alpha config.calculate_rotary_embedding_base() if args.flash_attn: config.use_flash_attn_2 = True try: config.max_input_len = int(args.flash_attn) except ValueError: pass config.matmu...
{ "context_start_lineno": 0, "file": "model_init.py", "groundtruth_start_lineno": 119, "repository": "turboderp-exllama-a544085", "right_context_start_lineno": 120, "task_id": "project_cc_python/79" }
{ "list": [ { "filename": "example_chatbot.py", "retrieved_chunk": "model_init.print_options(args, print_opts)\n# Globals\nmodel_init.set_globals(args)\n# Load prompt file\nusername = args.username\nbot_name = args.botname\nif args.prompt is not None:\n with open(args.prompt, \"r\") as f:\n ...
set_auto_map(args.gpu_split)
{ "list": [ { "filename": "generator.py", "retrieved_chunk": " self.sequence = self.sequence[:, num_tokens:]\n self.gen_begin(self.sequence, mask = mask)\n def gen_num_tokens(self):\n return self.sequence_actual.shape[-1]\n # Simple generator function\n def genera...
from model import ExLlama, ExLlamaCache, ExLlamaConfig from tokenizer import ExLlamaTokenizer from generator import ExLlamaGenerator import torch import torch.nn.functional as F import os, glob import cuda_ext # Directory containing model, tokenizer, generator model_directory = "/mnt/str/models/_test_models/TheBloke...
generator.apply_rep_penalty(logits) logits = F.log_softmax(logits, dim = -1) logits_mixed = (1 - alpha) * logits[0] + alpha * logits[1] sampled_token, _ = generator.sample_current(logits_mixed) if sampled_token.item() == tokenizer.eos_token_id: break batch_token = sam...
{ "context_start_lineno": 0, "file": "example_cfg.py", "groundtruth_start_lineno": 68, "repository": "turboderp-exllama-a544085", "right_context_start_lineno": 69, "task_id": "project_cc_python/69" }
{ "list": [ { "filename": "generator.py", "retrieved_chunk": " eos = torch.zeros((ids.shape[0],), dtype = torch.bool)\n for i in range(max_new_tokens):\n token = self.gen_single_token(mask = mask)\n for j in range(token.shape[0]):\n if token[j, 0].ite...
forward(generator.sequence[:, -1:], cache, input_mask = mask)
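The field names suggest the intended task: complete the code at the cursor (groundtruth) from the in-file prompt, with retrieved cross-file chunks as extra context. A hedged sketch of one plausible way to assemble a record into a model input and score it; the helper names, chunk limit, and formatting are illustrative assumptions, not part of the dataset:

def build_model_input(row, max_chunks = 4):
    # Prepend a few retrieved cross-file chunks, each tagged with its source file,
    # then the in-file prompt, which ends where the completion should begin.
    chunks = row["crossfile_context_retrieval"]["list"][:max_chunks]
    context = "\n".join(f"# {c['filename']}\n{c['retrieved_chunk']}" for c in chunks)
    return context + "\n" + row["prompt"]

def exact_match(completion, row):
    # Compare a model's completion against the reference, ignoring surrounding whitespace.
    return completion.strip() == row["groundtruth"].strip()

The right_context column would then serve as the code following the completion point, e.g. for fill-in-the-middle prompting or for checking that a completion stops where the reference does.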
README.md exists but content is empty.
Downloads last month: 178