# Importing required packages
import streamlit as st
import openai
import uuid
import time
import os
from PIL import Image
import io
import matplotlib.pyplot as plt

st.set_page_config(page_title="OpenAI - Assistent API")

# Initialize OpenAI client (the client itself reads OPENAI_API_KEY from the
# environment; api_key / assistant_id are kept for the explicit retrieve call).
client = openai.Client()
api_key = os.getenv("OPENAI_API_KEY")
assistant_id = os.getenv("assistant_id")


# Function to handle and display images
def handle_and_display_image(file_id):
    """Download an assistant-generated file by id and return it as a PIL Image.

    Returns None (after printing a diagnostic to stdout) when the HTTP
    download does not succeed.
    """
    api_response = client.files.with_raw_response.retrieve_content(file_id)
    if api_response.status_code == 200:
        # FIX: decode the bytes in memory instead of writing a shared
        # 'image.png' to disk — the temp file raced between concurrent
        # sessions and left stale artifacts behind.
        return Image.open(io.BytesIO(api_response.content))
    print('Failed to download file.')
    return None


# Initialize global session state (one browser session == one session_id)
if "session_id" not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())
if "run" not in st.session_state:
    # Placeholder dict; replaced by a real Run object once a question is asked.
    st.session_state.run = {"status": None}
if "messages" not in st.session_state:
    st.session_state.messages = []
if "retry_error" not in st.session_state:
    st.session_state.retry_error = 0


def main():
    """Render the sidebar and dispatch to the selected page."""
    # Sidebar for page selection
    with st.sidebar:
        st.title('BinDoc GmbH')
        st.markdown("Experience revolutionary interaction with BinDocs Chat App, leveraging state-of-the-art AI technology.")
        # Add more sidebar content as needed

    page = st.sidebar.selectbox("Choose a page", ["Analysis Bot", "Test Page"])

    # Main area content based on page selection
    if page == "Analysis Bot":
        page1()
    elif page == "Test Page":
        page2()


def page2():
    """Simple demo page: title on the left, company logo top-right."""
    try:
        # Placeholder CSS override block (content intentionally empty here).
        hide_streamlit_style = """ """
        st.markdown(hide_streamlit_style, unsafe_allow_html=True)

        # Create columns for layout (3:1 — content vs. logo)
        col1, col2 = st.columns([3, 1])
        with col1:
            st.title("Test Page with Logo!")
        with col2:
            # FIX: the logo filename contained a stray line break in the
            # original source; use the same filename page1 loads.
            image = Image.open('BinDoc Logo (Quadratisch).png')
            st.image(image, use_column_width='always')
    except Exception as e:
        st.error(f"Upsi, an unexpected error occurred: {e}")


def page1():
    """Chat page backed by the OpenAI Assistants API (threads + runs)."""
    st.sidebar.title("OpenAI - Assistant API🤖")
    st.sidebar.markdown("**Model:** gpt-4-1106-preview API")
    st.sidebar.markdown("**Version:** 1.1")
    st.sidebar.markdown("**Session-ID:**")
    st.sidebar.markdown(st.session_state.session_id)
    st.sidebar.divider()

    # Create columns for layout (3:1 — welcome text vs. logo)
    col1, col2 = st.columns([3, 1])
    with col1:
        st.title('Welcome to MediMetrics!')
        st.subheader('powered by :orange[BinDoc GmbH] ')
    with col2:
        image = Image.open('BinDoc Logo (Quadratisch).png')
        st.image(image, use_column_width='always')

    # Initialize chat history in session state if not exists
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []

    # FIX: the assistant/thread used to be created AFTER the query buttons,
    # so the very first button click crashed with AttributeError because
    # st.session_state.thread did not exist yet. Initialize them up front.
    if "assistant" not in st.session_state:
        openai.api_key = api_key
        # Load the previously created assistant
        st.session_state.assistant = openai.beta.assistants.retrieve(assistant_id)
        # Create a new thread for this session
        st.session_state.thread = client.beta.threads.create(
            metadata={'session_id': st.session_state.session_id}
        )

    def submit_question(question):
        """Record the user's question and kick off processing."""
        st.session_state.chat_history.append(("User", question))
        process_question(question)

    def process_question(question):
        """Send *question* to the thread, run the assistant, and block
        (polling once per second) until the run completes or fails."""
        # Clear the previous run status; chat history restarts with just
        # the current user question (deliberate: one exchange at a time).
        st.session_state.run = None
        st.session_state.chat_history = [("User", question)]

        # Add message to the thread
        client.beta.threads.messages.create(
            thread_id=st.session_state.thread.id,
            role="user",
            content=question
        )

        # Create a new run to process the messages in the thread
        st.session_state.run = client.beta.threads.runs.create(
            thread_id=st.session_state.thread.id,
            assistant_id=st.session_state.assistant.id,
        )

        # Wait for the run to reach a terminal state
        while not hasattr(st.session_state.run, 'status') or st.session_state.run.status not in ["completed", "failed"]:
            time.sleep(1)
            st.session_state.run = client.beta.threads.runs.retrieve(
                thread_id=st.session_state.thread.id,
                run_id=st.session_state.run.id,
            )

        if st.session_state.run.status == "completed":
            # Retrieve the list of messages (API returns newest first)
            st.session_state.messages = client.beta.threads.messages.list(
                thread_id=st.session_state.thread.id
            )
            # FIX: messages.list is newest-first by default, so reversing
            # before next() picked the OLDEST assistant reply. Scanning in
            # API order yields the latest assistant message.
            last_assistant_message = next(
                (message for message in st.session_state.messages.data if message.role == "assistant"),
                None
            )
            if last_assistant_message:
                # Update chat history with the assistant's response
                st.session_state.chat_history.append(("Assistant", last_assistant_message.content[0].text.value))

    # Example query buttons
    col3, col4 = st.columns(2)
    with col3:
        if st.button("Plotte mir die Anzahl der Kliniken von 2010 bis 2020."):
            submit_question("Plotte mir die Anzahl der Kliniken von 2010 bis 2020.")
        if st.button("Erstelle mir ein Bar Chart über die Bettenanzahl zwischen 1994 und 2002."):
            submit_question("Erstelle mir ein Bar Chart über die Bettenanzahl zwischen 1994 und 2002.")
    with col4:
        if st.button("Hat die Bettenanzahl zwischen 2018 und 2021 zu- oder abgenommen?"):
            submit_question("Hat die Bettenanzahl zwischen 2018 und 2021 zu- oder abgenommen?")
        if st.button("Wie hat sich die Fremdkapitalquote des Malteser Krankenhaus St. Franziskus-Hospital verändert?"):
            submit_question("Wie hat sich die Fremdkapitalquote des Malteser Krankenhaus St. Franziskus-Hospital verändert?")

    prompt = st.text_input("Wie kann ich dir helfen?")
    if st.button("Submit"):
        submit_question(prompt)

    # Display chat history in Streamlit chat format
    for role, message in st.session_state.chat_history:
        if role == "User":
            with st.chat_message("User"):
                st.markdown(message)
        elif role == "Assistant":
            with st.chat_message("Assistant"):
                st.markdown(message)

    # Check if 'run' object has 'status' attribute (dict placeholder doesn't)
    if hasattr(st.session_state.run, 'status'):
        # FIX: the Assistants API reports in-flight runs as "queued" /
        # "in_progress" — the original check for "running" never matched,
        # so the "Thinking" spinner branch was dead code.
        if st.session_state.run.status in ("queued", "in_progress"):
            with st.chat_message('assistant'):
                st.write("Thinking ......")
            if st.session_state.retry_error < 3:
                time.sleep(1)  # Short delay to prevent immediate rerun
                st.rerun()

        # Handle the 'failed' status
        elif st.session_state.run.status == "failed":
            st.session_state.retry_error += 1
            with st.chat_message('assistant'):
                if st.session_state.retry_error < 3:
                    st.write("Run failed, retrying ......")
                    time.sleep(3)  # Longer delay before retrying
                    st.rerun()
                else:
                    st.error("FAILED: The OpenAI API is currently processing too many requests. Please try again later ......")

        # Handle any status that is not 'completed'
        elif st.session_state.run.status != "completed":
            # Attempt to retrieve the run again
            st.session_state.run = client.beta.threads.runs.retrieve(
                thread_id=st.session_state.thread.id,
                run_id=st.session_state.run.id,
            )
            if st.session_state.retry_error < 3:
                time.sleep(3)
                st.rerun()


if __name__ == "__main__":
    main()