# Gradio chatbot Space — streams GPT-3.5 responses via a proxy OpenAI endpoint.
import gradio as gr
import openai
# Initialize the OpenAI client with your proxy API.
# SECURITY NOTE(review): this API key is hard-coded in source and therefore
# exposed to anyone with read access to the file — it should be rotated and
# loaded from an environment variable / secret store instead.
# NOTE(review): the UI also collects `openai_api_key` from the user, but this
# module-level client (with its own key) is what `predict` actually uses.
client = openai.OpenAI(
    api_key="sk-hm35RR1E0dfB26C8873BT3BlBKFJE681B3d87a6c4B3e8C44",
    # NOTE(review): openai v1 base URLs conventionally end with `/v1` —
    # confirm this proxy accepts the bare host.
    base_url="https://aigptx.top/"
)
# Function to handle predictions (streams a chat completion back to the UI).
def predict(inputs, top_p, temperature, openai_api_key, system_prompt, chat_counter, chatbot=None, history=None):
    """Stream a GPT-3.5 chat completion for *inputs* and yield updated UI state.

    Args:
        inputs: The user's current message.
        top_p: Nucleus-sampling value forwarded to the API.
        temperature: Sampling temperature forwarded to the API.
        openai_api_key: Collected from the UI but currently unused — the
            module-level ``client`` is configured with its own key.
        system_prompt: Optional system message prepended to the conversation.
        chat_counter: Number of completed exchanges so far.
        chatbot: List of (user, assistant) pairs currently shown in the UI.
        history: Flat list of alternating user/assistant message strings.

    Yields:
        ``(chat, history, chat_counter)`` tuples as tokens stream in, where
        ``chat`` is the paired-up conversation for the Chatbot component.
    """
    # Avoid the mutable-default-argument pitfall: fresh lists per call.
    chatbot = [] if chatbot is None else chatbot
    history = [] if history is None else history

    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    # Replay prior turns so the model sees the whole conversation.
    if chat_counter != 0:
        for user_msg, assistant_msg in chatbot:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})
    # Add the current user input last.
    messages.append({"role": "user", "content": inputs})

    chat_counter += 1
    history.append(inputs)

    # BUG FIX: the openai>=1.0 SDK (this file constructs `openai.OpenAI(...)`)
    # exposes chat completions at `client.chat.completions.create`;
    # `client.Completions.create` does not exist and raised AttributeError.
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=temperature,
        top_p=top_p,
        n=1,
        stream=True,
        presence_penalty=0,
        frequency_penalty=0,
    )

    token_counter = 0
    partial_words = ""
    for chunk in response:
        # BUG FIX: v1 SDK streams typed objects, not dicts — attribute access,
        # not `chunk['choices']` (which raised TypeError on the first chunk).
        if not chunk.choices:
            continue
        delta_content = chunk.choices[0].delta.content
        if delta_content is None:
            # e.g. the final chunk carries only a finish_reason.
            continue
        partial_words += delta_content
        if token_counter == 0:
            # First token of the reply starts a new history slot
            # (leading space preserved from the original implementation).
            history.append(" " + partial_words)
        else:
            history[-1] = partial_words
        # Pair up flat history into (user, assistant) tuples for the Chatbot.
        chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
        token_counter += 1
        yield chat, history, chat_counter
# Function to reset the textbox
def reset_textbox():
    """Return a Gradio update that clears the input textbox."""
    cleared = gr.update(value='')
    return cleared
# UI Components
# Page header rendered via gr.HTML below.
title = """<h1 align="center">Customizable Chatbot with OpenAI API</h1>"""
# NOTE(review): `description` is defined but never rendered anywhere below.
description = """
Explore the outputs of a GPT-3.5 model, with the ability to customize system prompts, enter your OpenAI API key, and interact with a history of conversation logs.
"""
# Build the Gradio UI: a centered column with the chat widgets, plus a
# collapsible accordion for sampling parameters.
with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
#chatbot {height: 520px; overflow: auto;}""") as demo:
    gr.HTML(title)
    with gr.Column(elem_id="col_container"):
        # NOTE(review): this key is collected but `predict` uses the
        # module-level client's key, not this value.
        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
        system_prompt = gr.Textbox(placeholder="Enter system prompt (optional)", label="System Prompt", lines=2)
        chatbot = gr.Chatbot(elem_id='chatbot')
        inputs = gr.Textbox(placeholder="Type your message here!", label="Input", lines=1)
        send_btn = gr.Button("Send")
        # Hidden state: flat message history and running exchange counter.
        state = gr.State([])
        chat_counter = gr.Number(value=0, visible=False, precision=0)
        reset_btn = gr.Button("Reset Chat")
        # Input parameters for OpenAI API.
        with gr.Accordion("Model Parameters", open=False):
            top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (Nucleus Sampling)")
            temperature = gr.Slider(minimum=0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature")
    # Wire the Send button to the streaming predict generator.
    send_btn.click(predict, [inputs, top_p, temperature, openai_api_key, system_prompt, chat_counter, chatbot, state],
                   [chatbot, state, chat_counter])
    # Both the Reset button and submitting the textbox clear the input field.
    # NOTE(review): "Reset Chat" only clears the textbox — it does not clear
    # the chatbot/history state.
    reset_btn.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])
# Launch with queueing enabled so the streaming generator works.
demo.queue().launch(debug=True)