"""Gradio app: chat with a PDF via retrieval-augmented generation (RAG).

Pipeline: load the PDF, split it into chunks, embed the chunks into a
Chroma vector store, then answer questions with a
ConversationalRetrievalChain backed by a local T5 model.
"""
import re
from pathlib import Path

import chromadb
import gradio as gr
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import Chroma
from langchain_huggingface import HuggingFaceEmbeddings, HuggingFacePipeline
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from unidecode import unidecode

# Model and retrieval settings.
LLM_MODEL = "t5-large"      # seq2seq model used for answer generation
LLM_MAX_TOKEN = 1024        # cap on generated tokens
DB_CHUNK_SIZE = 512         # characters per document chunk
CHUNK_OVERLAP = 24          # character overlap between consecutive chunks
TEMPERATURE = 0.1           # sampling temperature for generation
TOP_K = 20                  # top-k sampling cutoff for generation
pdf_url = "https://huggingface.co/spaces/CCCDev/PDFChat/resolve/main/Privacy-Policy%20(1).pdf"

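# Note: vanilla "t5-large" is not instruction-tuned; an instruction-tuned
# variant such as "google/flan-t5-large" (a suggested swap, not used here)
# typically follows question-answering prompts more reliably.
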
def load_doc(pdf_url, chunk_size, chunk_overlap):
    """Load a PDF and split its pages into overlapping text chunks."""
    loader = PyPDFLoader(pdf_url)
    pages = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits

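# RecursiveCharacterTextSplitter measures chunk_size in characters by default,
# so DB_CHUNK_SIZE=512 means ~512 characters, not tokens. A token-based
# splitter (a sketch, not what this app does) could be built with:
#   RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
#       AutoTokenizer.from_pretrained(LLM_MODEL),
#       chunk_size=chunk_size, chunk_overlap=chunk_overlap)
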
def create_db(splits, collection_name):
    """Embed the chunks and index them in an in-memory Chroma collection."""
    embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    new_client = chromadb.EphemeralClient()
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        client=new_client,
        collection_name=collection_name,
    )
    return vectordb

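# EphemeralClient keeps the index in memory only, so it is rebuilt on every
# app start. For an index that survives restarts (a sketch; the "./chroma_db"
# path is an assumption), one could swap in:
#   new_client = chromadb.PersistentClient(path="./chroma_db")
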
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    progress(0.5, desc="Loading LLM...")
    tokenizer = AutoTokenizer.from_pretrained(llm_model)
    model = AutoModelForSeq2SeqLM.from_pretrained(llm_model)
    # Forward the generation settings into the pipeline; otherwise the
    # temperature/max_tokens/top_k arguments would be silently ignored.
    summarization_pipeline = pipeline(
        "summarization",
        model=model,
        tokenizer=tokenizer,
        max_length=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_k=top_k,
    )
    pipe = HuggingFacePipeline(pipeline=summarization_pipeline)

    progress(0.75, desc="Defining buffer memory...")
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",  # needed because the chain also returns source documents
        return_messages=True,
    )
    retriever = vector_db.as_retriever()
    progress(0.8, desc="Defining retrieval chain...")
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm=pipe,
        retriever=retriever,
        chain_type="stuff",  # concatenate all retrieved chunks into one prompt
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )
    progress(0.9, desc="Done!")
    return qa_chain

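# Caveat: with chain_type="stuff", every retrieved chunk is packed into a
# single prompt. T5 was trained with a 512-token context, so long stuffed
# prompts may be truncated; smaller/fewer chunks or the "map_reduce" chain
# type are common workarounds.
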
def create_collection_name(filepath):
    """Derive a Chroma-safe collection name from the file name.

    Chroma requires 3-63 characters that start and end alphanumerically.
    """
    collection_name = Path(filepath).stem
    collection_name = collection_name.replace(" ", "-")
    collection_name = unidecode(collection_name)
    collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
    collection_name = collection_name[:50]
    if len(collection_name) < 3:
        collection_name = collection_name + 'xyz'
    if not collection_name[0].isalnum():
        collection_name = 'A' + collection_name[1:]
    if not collection_name[-1].isalnum():
        collection_name = collection_name[:-1] + 'Z'
    return collection_name

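# Worked example for the PDF above: the stem "Privacy-Policy%20(1)" is
# sanitized to "Privacy-Policy-20-1-"; the trailing "-" is not alphanumeric,
# so the final name becomes "Privacy-Policy-20-1Z".
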
def initialize_database(pdf_url, chunk_size, chunk_overlap, progress=gr.Progress()):
    """Load the PDF, split it, and build the vector database."""
    collection_name = create_collection_name(pdf_url)
    progress(0.25, desc="Loading document...")
    doc_splits = load_doc(pdf_url, chunk_size, chunk_overlap)
    progress(0.5, desc="Generating vector database...")
    vector_db = create_db(doc_splits, collection_name)
    progress(0.9, desc="Done!")
    return vector_db, collection_name, "Complete!"

def initialize_LLM(llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    """Build the conversational QA chain on top of the vector database."""
    qa_chain = initialize_llmchain(LLM_MODEL, llm_temperature, max_tokens, top_k, vector_db, progress)
    return qa_chain, "Complete!"

def format_chat_history(chat_history):
    """Flatten (user, bot) tuples into the string format the chain expects."""
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history

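# Example: format_chat_history([("Hi", "Hello!")])
#   -> ["User: Hi", "Assistant: Hello!"]
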
def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(history)
    response = qa_chain.invoke({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
    # Strip the prompt echo some models emit when they repeat LangChain's
    # default QA template.
    if "Helpful Answer:" in response_answer:
        response_answer = response_answer.split("Helpful Answer:")[-1]
    response_sources = response["source_documents"]
    response_source1 = response_sources[0].page_content.strip()
    response_source2 = response_sources[1].page_content.strip()
    response_source3 = response_sources[2].page_content.strip()
    # PyPDFLoader pages are 0-indexed; show 1-indexed pages to the user.
    response_source1_page = response_sources[0].metadata["page"] + 1
    response_source2_page = response_sources[1].metadata["page"] + 1
    response_source3_page = response_sources[2].metadata["page"] + 1
    new_history = history + [(message, response_answer)]
    return (qa_chain, gr.update(value=""), new_history,
            response_source1, response_source1_page,
            response_source2, response_source2_page,
            response_source3, response_source3_page)

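# Note: indexing the first three source documents assumes the retriever
# returns at least three chunks. LangChain's vector-store retriever defaults
# to k=4, so this holds unless the document yields fewer than three chunks.
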
def demo():
    with gr.Blocks(theme="base") as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()

        gr.Markdown(
            """<center><h2>PDF-based chatbot</h2></center>
            <h3>Ask any question about your PDF document</h3>""")
        gr.Markdown(
            """<b>Note:</b> This AI assistant, built with LangChain and open-source LLMs, performs retrieval-augmented generation (RAG) over your PDF document.
            The vector database and QA chain are initialized automatically when the app loads.
            The chatbot takes past questions into account when generating answers (conversational memory) and cites document references for clarity.<br>
            <br><b>Warning:</b> This Space runs on Hugging Face's free CPU Basic hardware, so initialization and each LLM reply can take a while.
            """)

        with gr.Tab("Chatbot"):
            chatbot = gr.Chatbot(height=300)
            with gr.Accordion("Advanced - Document references", open=False):
                with gr.Row():
                    doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
                    source1_page = gr.Number(label="Page", scale=1)
                with gr.Row():
                    doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
                    source2_page = gr.Number(label="Page", scale=1)
                with gr.Row():
                    doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
                    source3_page = gr.Number(label="Page", scale=1)
            with gr.Row():
                msg = gr.Textbox(placeholder="Type message (e.g. 'What is this document about?')", container=True)
            with gr.Row():
                submit_btn = gr.Button("Submit message")
                clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")

        # Status boxes for the two initialization steps. The buttons are kept
        # hidden because initialization runs automatically on page load.
        db_progress = gr.Textbox(label="Vector database initialization", value="Initializing...")
        db_btn = gr.Button("Generate vector database", visible=False)
        qachain_btn = gr.Button("Initialize Question Answering chain", visible=False)
        llm_progress = gr.Textbox(value="None", label="QA chain initialization")

        def auto_initialize():
            vector_db, collection_name, db_status = initialize_database(pdf_url, DB_CHUNK_SIZE, CHUNK_OVERLAP)
            qa_chain, llm_status = initialize_LLM(TEMPERATURE, LLM_MAX_TOKEN, TOP_K, vector_db)
            # One return value per output component in the demo.load() call below.
            return vector_db, collection_name, db_status, qa_chain, llm_status

        # Build the database and the QA chain as soon as the page loads.
        demo.load(auto_initialize, [], [vector_db, collection_name, db_progress, qa_chain, llm_progress])

        msg.submit(conversation,
                   inputs=[qa_chain, msg, chatbot],
                   outputs=[qa_chain, msg, chatbot, doc_source1, source1_page,
                            doc_source2, source2_page, doc_source3, source3_page],
                   queue=False)
        submit_btn.click(conversation,
                         inputs=[qa_chain, msg, chatbot],
                         outputs=[qa_chain, msg, chatbot, doc_source1, source1_page,
                                  doc_source2, source2_page, doc_source3, source3_page],
                         queue=False)
    return demo.queue().launch(debug=True)

if __name__ == "__main__":
    demo()
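# Run with `python app.py` (the filename is an assumption); Gradio serves the
# app at http://127.0.0.1:7860 by default.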