Erik committed on
Update app.py
app.py
CHANGED
@@ -2,17 +2,18 @@ import gradio as gr
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
+# 🔥 Only open models that load in HF Spaces without gated access
 MODEL_OPTIONS = {
-    "Llama-3.2-3B": "meta-llama/Llama-3.2-3B-Instruct",
-    "Llama-3.2-1B": "meta-llama/Llama-3.2-1B-Instruct",
     "Mistral-7B-Instruct": "mistralai/Mistral-7B-Instruct-v0.1",
     "Qwen2.5-3B-Instruct": "Qwen/Qwen2.5-3B-Instruct",
     "Qwen2.5-1.5B-Instruct": "Qwen/Qwen2.5-1.5B-Instruct",
     "StableLM2-1.6B": "stabilityai/stablelm-2-zephyr-1_6b",
+    "SmolLM3-3B": "HuggingFaceTB/SmolLM3-3B",
+    "BTLM-3B-8k-base": "cerebras/btlm-3b-8k-base"
 }
 
 loaded = {}
-SYSTEM_PROMPT = "You are HugginGPT —
+SYSTEM_PROMPT = "You are HugginGPT — helpful, friendly, and clear with memory."
 
 def load_model(model_key):
     model_id = MODEL_OPTIONS[model_key]
@@ -23,14 +24,16 @@ def load_model(model_key):
     model = AutoModelForCausalLM.from_pretrained(
         model_id,
         device_map="auto",
-        torch_dtype=torch.float16
+        torch_dtype=torch.float16
     )
+
     loaded[model_key] = (tokenizer, model)
     return tokenizer, model
 
 def generate_response(message, history, model_choice):
     tokenizer, model = load_model(model_choice)
 
+    # build context with system + memory
     context = f"system: {SYSTEM_PROMPT}\n"
     if history:
         for u, a in history:
@@ -54,7 +57,7 @@ with gr.Blocks() as demo:
 
     model_selector = gr.Dropdown(
         choices=list(MODEL_OPTIONS.keys()),
-        value="
+        value="Mistral-7B-Instruct",
         label="Select model"
     )
 
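For context, a minimal sketch of how the changed pieces fit together. The cache check in load_model and the tail of the history loop fall outside the hunks above, and build_context is a hypothetical helper name, so treat those parts as assumptions rather than the Space's actual code.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_OPTIONS = {"Qwen2.5-1.5B-Instruct": "Qwen/Qwen2.5-1.5B-Instruct"}
loaded = {}
SYSTEM_PROMPT = "You are HugginGPT — helpful, friendly, and clear with memory."

def load_model(model_key):
    # Assumed cache check; the diff hides the lines where it likely lives.
    if model_key in loaded:
        return loaded[model_key]
    model_id = MODEL_OPTIONS[model_key]
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",          # place layers on available devices
        torch_dtype=torch.float16,  # roughly halves memory vs. float32
    )
    loaded[model_key] = (tokenizer, model)
    return tokenizer, model

def build_context(message, history):
    # Plain-text transcript in the "role: content" format the diff shows.
    context = f"system: {SYSTEM_PROMPT}\n"
    for u, a in history or []:
        context += f"user: {u}\nassistant: {a}\n"
    context += f"user: {message}\nassistant:"
    return context

Caching per model key keeps dropdown switches cheap after the first download, and float16 matters on the constrained memory of Spaces hardware.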