{ "cells": [ { "cell_type": "markdown", "id": "8ffa66cb", "metadata": {}, "source": [ "## Import libraries" ] }, { "cell_type": "code", "execution_count": null, "id": "431c0fdb", "metadata": {}, "outputs": [], "source": [ "import os\n", "import copy\n", "import numpy as np\n", "import pickle\n", "import pandas as pd\n", "import faiss\n", "import traceback, time\n", "\n", "import json\n", "import requests\n", "from typing import List\n", "from langchain_core.embeddings import Embeddings\n", "from tqdm.notebook import tqdm\n", "\n", "from sklearn.metrics.pairwise import cosine_similarity\n", "from langchain.prompts import PromptTemplate\n", "from typing import Literal\n", "import multiprocessing\n", "\n", "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.pydantic_v1 import BaseModel, Field\n", "from langchain_openai import ChatOpenAI\n", "\n", "from rank_bm25 import BM25Okapi\n", "from langchain_core.output_parsers import StrOutputParser,JsonOutputParser\n", "from multiprocessing import Pool, Manager\n" ] }, { "cell_type": "markdown", "id": "cb97885e", "metadata": {}, "source": [ "## CALL API ENPOINTS (LLM, EMBEDDING)" ] }, { "cell_type": "code", "execution_count": 1, "id": "89a5966f-cda1-4e3f-9f89-ecbe9e4127b8", "metadata": {}, "outputs": [], "source": [ "os.environ['CUDA_VISIBLE_DEVICES'] = \"5\"\n", "os.environ[\"OPENAI_API_KEY\"] = \"YOUR_OPENAI_API_KEY\"\n", "os.environ[\"http_proxy\"] = \"\"\n", "os.environ[\"https_proxy\"] = \"\"" ] }, { "cell_type": "markdown", "id": "bec9c145", "metadata": {}, "source": [ "### CALL LLM" ] }, { "cell_type": "code", "execution_count": 4, "id": "d3423138-d290-42bb-b838-17abcbfde695", "metadata": {}, "outputs": [], "source": [ "from transformers import AutoTokenizer\n", "from langchain_community.llms import VLLMOpenAI\n", "from langchain_openai import ChatOpenAI\n", "\n", "\n", "inference_server_url = \"your_inference_server_url\"\n", "tokenizer = AutoTokenizer.from_pretrained(\"your_tokenizer\")\n", "\n", "### For Chat OpenAI template\n", "llm = ChatOpenAI(\n", " model=\"your_model\",\n", " openai_api_key=\"test\",\n", " openai_api_base=inference_server_url,\n", " temperature=0,\n", " max_tokens=256,\n", " streaming= False\n", ")" ] }, { "cell_type": "markdown", "id": "205f37b4", "metadata": {}, "source": [ "### Embedding\n" ] }, { "cell_type": "code", "execution_count": 6, "id": "a0ae425c-e4f9-4b7d-a0d0-6614fc00fb1f", "metadata": {}, "outputs": [], "source": [ "class CustomAPIEmbeddings(Embeddings):\n", " def __init__(self, api_url: str, show_progress:bool): \n", " self.api_url = api_url\n", " self.show_progress = show_progress\n", "\n", " def embed_documents(self, texts: List[str]) -> List[List[float]]:\n", " lst_embedding = []\n", " if self.show_progress: # for tqdm embedding\n", " for query in tqdm(texts):\n", " payload = json.dumps({\n", " \"query\": query\n", " })\n", " headers = {\n", " 'Content-Type': 'application/json'\n", " }\n", " try:\n", " response = json.loads(requests.request(\"POST\", self.api_url, headers=headers, data=payload).text)['embedding']\n", " except:\n", " print(requests.request(\"POST\", self.api_url, headers=headers, data=payload).text)\n", " lst_embedding.append(response)\n", " else:\n", " for query in texts:\n", " payload = json.dumps({\n", " \"query\": query\n", " })\n", " headers = {\n", " 'Content-Type': 'application/json'\n", " }\n", " try:\n", " response = json.loads(requests.request(\"POST\", self.api_url, headers=headers, data=payload).text)['embedding']\n", " except:\n", " print(requests.request(\"POST\", self.api_url, headers=headers, data=payload).text)\n", " lst_embedding.append(response)\n", " \n", " return lst_embedding # Adjust this based on the response format of your API\n", "\n", " def embed_query(self, text: str) -> List[float]:\n", " return self.embed_documents([text])[0]\n", "embeddings = CustomAPIEmbeddings(api_url='your_api_url', show_progress=False)\n", "\n" ] }, { "cell_type": "markdown", "id": "82420213-5a13-44ae-90bd-6844c572bea1", "metadata": {}, "source": [ "### 1. Load Graph Data" ] }, { "cell_type": "markdown", "id": "b16aa1b1-4bb8-4336-949a-26a6a015c274", "metadata": {}, "source": [ "#### Load Data (Triplets, Triplets Relation Embeddings)" ] }, { "cell_type": "code", "execution_count": 8, "id": "63915c9b-798e-4ab5-a6ed-c5256d676836", "metadata": { "scrolled": true }, "outputs": [], "source": [ "with open(\"your-triplets\",'rb') as f:\n", " dct_mapping_triplet = pickle.load(f)\n", "\n", "with open(\"your-triplet-embeddings\",'rb') as f:\n", " lst_embedding = pickle.load(f)\n", "\n", "lst_embedding = np.array(lst_embedding)" ] }, { "cell_type": "code", "execution_count": 12, "id": "c3382c9a-ac7a-4eb1-80cd-ce6e3931f36c", "metadata": {}, "outputs": [], "source": [ "df_test = pd.read_csv(\"final_data.csv\")\n", "test_data = df_test['question'].tolist()\n", "df_test['documents'] = df_test['documents'].map(lambda x : eval(x))" ] }, { "cell_type": "code", "execution_count": 14, "id": "4694d341-f160-4528-baf6-5f19871e47eb", "metadata": {}, "outputs": [], "source": [ "faiss_embeddings = lst_embedding.astype('float32')\n", "d = faiss_embeddings.shape[1]\n", "index = faiss.IndexFlatIP(d)\n", "index.add(faiss_embeddings)" ] }, { "cell_type": "markdown", "id": "b9d21a29-19d1-4db9-9043-7311c364f0b3", "metadata": {}, "source": [ "### 2. Contextxual Question Retrieval (CQR)" ] }, { "cell_type": "code", "execution_count": null, "id": "b0a33bc8-4b12-4191-96b8-d311f5c1cdcc", "metadata": {}, "outputs": [], "source": [ "def faiss_cosine(query_vector, k=10):\n", " query_vector = query_vector.astype('float32')\n", " distances, indices = index.search(query_vector, k)\n", " return indices.flatten()\n", "\n", "def compute_cosine_similarity_chunk(inp):\n", " return cosine_similarity(inp['chunk'], inp['vector'])\n", "\n", "def parallel_cosine_similarity(matrix, vector, n_jobs=128):\n", " num_rows = matrix.shape[0]\n", " chunk_size = num_rows // n_jobs\n", " chunks = [{\"vector\": vector, \"chunk\":matrix[i * chunk_size:(i + 1) * chunk_size]} for i in range(n_jobs - 1)]\n", " chunks.append({\"vector\": vector, \"chunk\":matrix[(n_jobs - 1) * chunk_size:]})\n", " with multiprocessing.Pool(n_jobs) as pool:\n", " results = list(pool.imap(compute_cosine_similarity_chunk, chunks))\n", " cosine_similarities = np.vstack(results)\n", " return cosine_similarities\n", "\n", "def query_triplet_topk(query, k=10):\n", " query_emb = np.array(embeddings.embed_query(query)).reshape(1,-1)\n", " topk_indices_sorted = faiss_cosine(query_emb).tolist()\n", " return [dct_mapping_triplet[x] for x in topk_indices_sorted]\n", "\n", "def query_triplet_threshold(query, threshold=0.8):\n", " query_emb = np.array(embeddings.embed_query(query)).reshape(1,-1)\n", " similarities = cosine_similarity(query_emb, lst_embedding).flatten()\n", " topk_indices = np.where(similarities > threshold)[0]\n", " topk_indices_sorted = topk_indices[np.argsort(-similarities[topk_indices])].tolist()\n", " return [dct_mapping_triplet[x] for x in topk_indices_sorted]\n", "\n", "\n", "class GradeRelation(BaseModel):\n", " \"\"\"Binary score for relevance check on retrieved text.\"\"\"\n", " binary_score: str = Field(\n", " description=\"The Text is relevant to the question, 'yes' or 'no'\"\n", " )\n", "\n", "class GradeRelationList(BaseModel):\n", " \"\"\"List passage index check on retrieved text.\"\"\"\n", " passage_idx: str = Field(\n", " description=\"The passage index of relevant chunks, seperated by a comma\"\n", " )\n", "\n", "def check_grade(question, text):\n", " prompt_text_grader = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a grader assessing relevance \n", " of a retrieved text to a user question. The goal is to filter out erroneous retrievals. \\n\n", " Give a binary score 'yes' or 'no' score to indicate whether the text is relevant to the question. \\n\n", " Provide the binary score as a JSON with a single key 'score' and no premable or explaination.\n", " <|eot_id|><|start_header_id|>user<|end_header_id|>\n", " Here is the retrieved text: \\n\\n {text} \\n\\n\n", " Here is the user question: {question} \\n <|eot_id|><|start_header_id|>assistant<|end_header_id|>\n", " \"\"\",\n", " input_variables=[\"question\", \"text\"]\n", " )\n", " structured_llm_grader = llm.with_structured_output(GradeRelation)\n", " relation_grader = prompt_text_grader | structured_llm_grader \n", " result = relation_grader.invoke({\"question\": question, \"text\": text})\n", " return result\n", "\n", "def check_grade_lst(question, text):\n", " prompt_text_grader = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a grader assessing relevance \n", " of a list of retrieved passages to a user question. The goal is to filter out erroneous retrievals. \\n\n", " Return only the passage index whether the passage is relevant to the question. \\n\n", " Provide the output as a JSON with passage index seperated by a comma and no premable or explaination.\n", " <|eot_id|><|start_header_id|>user<|end_header_id|>\n", " Here is the list of retrieved text: \\n\\n {text} \\n\\n\n", " Here is the user question: {question} \\n <|eot_id|><|start_header_id|>assistant<|end_header_id|>\n", " \"\"\",\n", " input_variables=[\"question\", \"text\"]\n", " )\n", " structured_llm_grader = llm.with_structured_output(GradeRelationList)\n", " relation_grader = prompt_text_grader | structured_llm_grader \n", " result = relation_grader.invoke({\"question\": question, \"text\": text})\n", " return result\n", "\n", "\n", "def check_relations(question, relations):\n", " result = []\n", " for rel in relations:\n", " check = check_grade(question, rel['r.summary'])\n", " if check.binary_score == \"yes\":\n", " result.append(rel)\n", " return result\n", "\n", "def format_relations(relations):\n", " result = []\n", " for rel in relations:\n", " formatted_relation = f\"{rel['n']['id']} - {rel['r'][1]} -> {rel['m']['id']}\"\n", " result.append(formatted_relation)\n", " return result" ] }, { "cell_type": "code", "execution_count": 16, "id": "411cb9c3-501f-4e7e-8775-d2910183ad6c", "metadata": {}, "outputs": [], "source": [ "def format_claim(relations):\n", " return \"\\n\\n\".join(f\"{idx+1}. {rel['r.summary']}\" for idx, rel in enumerate(relations))\n", "\n", "def format_triplet(relations):\n", " return \"\\n\\n\".join(f\"{idx+1}. ({rel['r'][0]['id']}, {rel['r'][1]}, {rel['r'][2]['id']})\" for idx, rel in enumerate(relations))\n", "\n", "\n", "class contextual_output(BaseModel):\n", " \"\"\"contextual summarization for the input question.\"\"\"\n", " summary: str = Field(\n", " description=\"Concise summary ocontextual information of the input question\"\n", " )\n", "\n", "class contextual_triplets(BaseModel):\n", " \"\"\"contextual generation of knowledge subgraph.\"\"\"\n", " context: str = Field(\n", " description=\"generate concise contextual information based on list of triplets\"\n", " )\n", " \n", "\n", "def contextual_question_retrieval(claims):\n", " prompt_summary_contextual = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a helpful assistant\n", " assistant responsible for generating a comprehensive summary of the data provided below.\\n\n", " Given the list of claims that may relation with each other. Please write a Concise summary of claims that aim to provide a contextual information.\\n\n", " The output just generate a concise summary without any explaination.\\n\n", " Please note that if the provided claims are contradictory, please resolve the contradictions and provide a single, coherent summary (no need Here is part)\n", " <|eot_id|><|start_header_id|>user<|end_header_id|>\n", " Here is the list of claims: \\n\\n {claims} \\n\\n\n", " \"\"\",\n", " input_variables=[\"claims\"]\n", " )\n", " \n", " structured_summary_contextual = llm.with_structured_output(contextual_output)\n", " contextual_chain = prompt_summary_contextual | structured_summary_contextual \n", " results = contextual_chain.invoke({\"claims\": claims})\n", " return results\n", "\n", "def quick_contextual_question_retrieval(question, claims):\n", " prompt_summary_contextual = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a helpful assistant\n", " assistant responsible for generating a comprehensive summary of the data provided below.\\n\n", " Given the question and list of claims that may relation with each other. You have to decide which claims relevant to the question.\\n\n", " Please write a Concise summary of relevant claims that aim to provide a contextual information. (IT MUST CONTAINS ONLY RELEVANT CLAIMS).\\n\n", " The output just generate a concise summary without any explaination and without combination with the question.\\n\n", " Please note that if the provided claims are contradictory, please resolve the contradictions and provide a single, coherent summary (no need Here is part)\n", " <|eot_id|><|start_header_id|>user<|end_header_id|>\n", " Here is the question: \\n\\n {question} \\n\\n\n", " Here is the list of claims: \\n\\n {claims} \\n\\n\n", " \"\"\",\n", " input_variables=[\"question\", \"claims\"]\n", " )\n", " structured_summary_contextual = llm.with_structured_output(contextual_output)\n", " contextual_chain = prompt_summary_contextual | structured_summary_contextual \n", " results = contextual_chain.invoke({\"question\":question, \"claims\": claims})\n", " return results\n", "\n", "def contextual_question_retrieval_triplet(triplet):\n", " prompt_summary_contextual = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a helpful assistant\n", " assistant responsible for generating a contexual information based on the list of triplets of a given knowledge graph.\\n\n", " Given the knowledge graph contain a list of triplets (entity 1, relation, entity 2), please generate a contextual information, the objective is to represent the triplets information of the knowledge graph into plain text information.\n", " <|eot_id|><|start_header_id|>user<|end_header_id|>\n", " Here is the list of triplets: \\n\\n {triplet} \\n\\n\n", " \"\"\",\n", " input_variables=[\"triplet\"]\n", " )\n", " structured_summary_contextual = llm.with_structured_output(contextual_triplets)\n", " contextual_chain = prompt_summary_contextual | structured_summary_contextual \n", " results = contextual_chain.invoke({\"triplet\": triplet})\n", " return results\n", "\n", "def contextual_question_retrieval_triplet_descriptions_mixed(triplet):\n", " prompt_summary_contextual = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a helpful assistant\n", " assistant responsible for generating a contexual information based on the list of triplets of a given knowledge graph.\\n\n", " Given the knowledge graph contain a list of and their descriptions with the following format: {{(entity 1, relation, entity 2): text description}}\\n\n", " Please generate a contextual information, the objective is to represent the triplets information of the knowledge graph into plain text information.\\n\n", " Note that the output MUST only contains contextual information without any explanation and opening sentence.\n", " <|eot_id|><|start_header_id|>user<|end_header_id|>\n", " Here are the list of triplets and descriptions: \\n\\n {triplet} \\n\\n\n", " \"\"\",\n", " input_variables=[\"triplet\"]\n", " )\n", " \n", " structured_summary_contextual = llm.with_structured_output(contextual_triplets)\n", " contextual_chain = prompt_summary_contextual | structured_summary_contextual \n", " results = contextual_chain.invoke({\"triplet\": triplet})\n", " return results\n", "\n", "\n", "def add_context_to_question(question, check_relate=False):\n", " global cnt_err \n", " relations = query_triplet_topk(question)\n", " if check_relate:\n", " check_rels = check_relations(question, relations)\n", " if check_rels:\n", " contextual_summary = contextual_question_retrieval(format_claim(check_rels)).summary\n", " else:\n", " contextual_summary = \"\"\n", " else:\n", " try:\n", " context = check_grade_lst(question, format_claim(relations)).passage_idx\n", " context = [int(x) for x in context.split(\",\")]\n", " check_rels = [relations[x-1] for x in context]\n", " contextual_summary = contextual_question_retrieval(format_claim(check_rels)).summary\n", " except:\n", " cnt_err += 1\n", " contextual_summary = \"\"\n", " question = question + \" with some extra data: \" + contextual_summary\n", " return question\n", "\n", "\n", "def format_triplet_mixed(relations):\n", " return \"\\n\".join(f\"({rel['n']['id']}, {rel['r'][1]}, {rel['m']['id']}): {rel['r.summary']}\" for idx, rel in enumerate(relations))\n", "\n", "def add_triplet_context_to_question(question, check_relate=False):\n", " global cnt_err\n", " global map_triplet\n", " relations = query_triplet_topk(question)\n", " if check_relate: \n", " check_rels = check_relations(question, relations)\n", " print(len(check_rels))\n", " if check_rels:\n", " contextual_summary = contextual_question_retrieval_triplet(format_triplet(check_rels)).context\n", " else:\n", " contextual_summary = \"\"\n", " else: \n", " try:\n", " a = time.time()\n", " context = check_grade_lst(question, format_claim(relations)).passage_idx\n", " b = time.time()\n", " if context != None:\n", " context = [int(x) for x in context.split(\",\")]\n", " check_rels = [relations[x-1] for x in context]\n", " else:\n", " check_rels = []\n", " if check_rels == []:\n", " contextual_summary = \"\"\n", " else:\n", " contextual_summary = contextual_question_retrieval_triplet(format_triplet_mixed(check_rels)).context\n", " c = time.time()\n", " except Exception as e:\n", " print(e)\n", " cnt_err += 1\n", " contextual_summary = \"\"\n", " if contextual_summary != \"\":\n", " question = question + \" with some extra data: \" + contextual_summary\n", " return question" ] }, { "cell_type": "code", "execution_count": 18, "id": "9813e40f-26ba-49ca-a9b3-96a00b7ac1d9", "metadata": {}, "outputs": [], "source": [ "lst_triplet_top_k_cos = []\n", "for i in tqdm(test_data):\n", " lst_triplet_top_k_cos.append(query_triplet_topk(i))\n", "map_triplet = {}\n", "for i,j in zip(lst_triplet_top_k_cos, test_data):\n", " map_triplet[j] = i\n", "\n" ] }, { "cell_type": "markdown", "id": "8bdb4e5b-a82b-4f35-b6c1-b468a6783b5b", "metadata": {}, "source": [ "### 3. CQR for Multi-Step Questions" ] }, { "cell_type": "markdown", "id": "c8e6ab8b-8030-49ab-928a-743a6cc4e7a2", "metadata": {}, "source": [ "#### 3.1 Loading Data" ] }, { "cell_type": "code", "execution_count": null, "id": "888d725d-51c7-43a7-b546-56a17d131274", "metadata": {}, "outputs": [], "source": [ "# BM25\n", "with open(\"passages.txt\",\"r\") as f:\n", " lst_chunks = f.read().split(\"\")[:-1]\n", "print(len(list(set(lst_chunks))))\n", "mapping_chunks = {j:i for i,j in enumerate(list(set(lst_chunks)))}\n", "lst_chunks = list(set(lst_chunks))" ] }, { "cell_type": "markdown", "id": "c9351926-e8ec-4da7-bb19-581ee19256eb", "metadata": {}, "source": [ "#### 3.2 Excuting Baseline - IRCOT\n", "ref: https://github.com/stonybrooknlp/ircot" ] }, { "cell_type": "markdown", "id": "d1c635b8-65cd-408e-83d0-33b5e7c30b85", "metadata": {}, "source": [ "##### 3.2.1 Retrieve Modulus" ] }, { "cell_type": "code", "execution_count": 28, "id": "1593312f-a726-4be3-b019-dd627794995b", "metadata": {}, "outputs": [], "source": [ "tokenized_corpus = [doc.split(\" \") for doc in lst_chunks]\n", "bm25 = BM25Okapi(tokenized_corpus)" ] }, { "cell_type": "code", "execution_count": 30, "id": "70fbc603-05e5-4127-8912-8407c64c4b7c", "metadata": {}, "outputs": [], "source": [ "def retrieval_bm25(question, k):\n", " tokenized_query = question.split(\" \")\n", " lst_retrieval = bm25.get_top_n(tokenized_query, lst_chunks, n=k)\n", " return lst_retrieval" ] }, { "cell_type": "markdown", "id": "58e7b420-b61b-45c2-b3f7-101d852a6ee3", "metadata": {}, "source": [ "##### 3.2.12 Interleaving Retrieval with Chain-of-Thought Reasoning" ] }, { "cell_type": "code", "execution_count": null, "id": "bf623c44-1d2f-47ca-8f3c-78178a73014f", "metadata": { "scrolled": true }, "outputs": [], "source": [ "def format_docs(docs):\n", " return \"\\n\\n\".join(f\"{doc}\" for doc in docs)\n", "\n", "class GradeRespose(BaseModel):\n", " \"\"\"Binary score to determine if the passages provide sufficient information to answer the question directly.\"\"\"\n", " binary_score: bool = Field(\n", " description=\"The relevant passages provide sufficient information to answer the question directly, 'yes' or 'no'\"\n", " )\n", "\n", "class gen_query(BaseModel):\n", " \"\"\"Generate chain-of-thought query for futher research and exploration.\"\"\"\n", " new_query: str = Field(\n", " description=\"Generate new chain-of-thought query for futher research and exploration\"\n", " )\n", "\n", "def check_response(question, context):\n", " prompt_check_response = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are an advanced AI assistant skilled in analyzing textual data.\\n\n", " Below is a question and relevant passages that may contain information to answer it.\\n\n", " Your task is to determine if the provided passages contain enough relevant information to answer the question, even if not directly stated.\\n\n", " Consider both direct answers and implied or partially inferred information.\\n\n", " Return a binary score: 'True' if the context provides sufficient information to answer the question; 'False' if it does not.\\n\n", " Provide only the binary score in JSON format with a single key 'score'. Do not include explanations.\\n\n", " <|eot_id|><|start_header_id|>user<|end_header_id|>\n", " Here is the question: \\n\\n {question} \\n\\n\n", " Here is the relevance passages: \\n\\n {context} \\n\\n\n", " \n", " \"\"\",\n", " input_variables=[\"question\", \"context\"]\n", " )\n", " structured_check_content= llm.with_structured_output(GradeRespose)\n", " check_response_chain = prompt_check_response | structured_check_content \n", " results = check_response_chain.invoke({\"question\": question ,\"context\": context})\n", " return results\n", "\n", "def gen_question(question, context, previous_though):\n", " prompt_gen_answer = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are an advanced AI skilled in generating a concise insightful chain-of-thought query to guide further research and exploration.\\n\n", " Below is a question and relevant context information and previous failed queries.\\n\n", " Your task is to:\\n\n", " 1. Analyze the input question to understand its intent and identify gaps in the provided context that prevent a complete answer.\\n\n", " 2. Generate a new chain-of-thought query that is based on the input question, incorporating logical steps or deeper aspects of the topic.\\n\n", " This new query should be designed to guide further search or inquiry, aiming to bridge the identified gaps and refine the search for an answer.\\n\n", " 3. Avoid repeating or rephrasing any of the previous failed queries. Instead, aim to expand the scope or explore different facets of the topic that have not been addressed yet.\\n\n", " All JSON MUST in correct format. DO NOT get information from 'Relevant context information' to create new input variables.\n", " <|eot_id|><|start_header_id|>user<|end_header_id|>\n", " Here is the question: \\n\\n {question} \\n\\n\n", " Here is the relevance context information: \\n\\n {context} \\n\\n\n", " Here is the previous failed queries: \\n\\n {previous_though} \\n\\n\n", " \n", " \"\"\",\n", " input_variables=[\"question\", \"context\", \"previous_though\"]\n", " )\n", " structured_check_content = llm.with_structured_output(gen_query)\n", " chain_gen_answer = prompt_gen_answer | structured_check_content\n", " answer = chain_gen_answer.invoke({\"question\": question, \"context\": context, \"previous_though\": previous_though})\n", "\n", " return answer\n", "\n", "\n", "def final_answer(question, context):\n", " prompt_gen_answer = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are an expert AI designed to analyze information from retrieval-augumented generation system.\\n\n", " Your task is to answer questions based on the input context. Below is a question along with the input context.\\n\n", " Make sure your repsonse is consice clear, and directly answer the question in 2-3 sentences WITHOUT any explaination.\\n\n", " DO NOT use any external knowledge.\\n\n", " If the answer is not directly found in the given context, try to infer the best possible answer based on the given context in 2-3 sentences.\n", " <|eot_id|><|start_header_id|>user<|end_header_id|>\n", " Here is the question: \\n\\n {question} \\n\\n\n", " Here is the input context: \\n\\n {context} \\n\\n\n", " \n", " \"\"\",\n", " input_variables=[\"question\", \"context\"]\n", " )\n", " chain_gen_answer = prompt_gen_answer | llm | StrOutputParser()\n", " answer = chain_gen_answer.invoke({\"question\":question, \"context\": context}).strip()\n", " return answer\n", "\n", "def max_length_context(context,threshold=512):\n", " res = []\n", " for i in context:\n", " if len(i.split(\" \")) > threshold:\n", " tmp = \" \".join(x for x in i.split(\" \")[:threshold])\n", " res.append(tmp)\n", " else:\n", " res.append(i)\n", " return res\n", "\n", " " ] }, { "cell_type": "markdown", "id": "bb0a38e3-4ca8-4b96-a9b4-8cd45534f2da", "metadata": {}, "source": [ "# IRCoT Baseline" ] }, { "cell_type": "code", "execution_count": null, "id": "76b13928-9551-49a0-a763-cba30eab7815", "metadata": { "scrolled": true }, "outputs": [], "source": [ "def process_question(tasks):\n", " \"\"\"Process a single question.\"\"\"\n", " question, label, k, n_loop = tasks[0], tasks[1], tasks[2], tasks[3]\n", " try:\n", " i = 0\n", " thought_q = \"\"\n", " pt = []\n", " context = max_length_context(retrieval_bm25(question, k))\n", " while i < n_loop:\n", " check = check_response(question, format_docs(context)).binary_score\n", " if check or (not check and i == n_loop - 1):\n", " gen_answer = final_answer(question, format_docs(context))\n", " break\n", " else: \n", " new_CoT_query = gen_question(question, format_docs(context), \"\\n\".join(pt)).new_query\n", " pt.append(new_CoT_query)\n", " thought_q += \"\\n\" + str(i) + \"-\" + new_CoT_query\n", " new_context = max_length_context(retrieval_bm25(new_CoT_query, k))\n", " context = context + new_context\n", " context = list(set(context)) \n", " i += 1\n", " return {\n", " \"Question\": question,\n", " \"Answer\": gen_answer,\n", " \"Label\": label,\n", " \"Context\": context,\n", " \"CoT\": thought_q,\n", " \"n_CoT\": int(i+1),\n", " }\n", " except Exception as e:\n", " print(f\"Error occurred during processing question '{question}': {e}\")\n", " return None\n" ] }, { "cell_type": "markdown", "id": "b48bb3c5-c57a-452d-a9e3-9341ad87c7ae", "metadata": {}, "source": [ "# IRCoT + KG" ] }, { "cell_type": "code", "execution_count": null, "id": "351838d5-2e6d-42eb-a7c4-fd638c917fd2", "metadata": { "scrolled": true }, "outputs": [], "source": [ "def process_question_KG(tasks):\n", " question, label, k, n_loop= tasks[0], tasks[1], tasks[2], tasks[3] # Unpack the arguments\n", " \n", " try:\n", " i = 0\n", " thought_q = \"\"\n", " pt = []\n", " context = max_length_context(retrieval_bm25(add_triplet_context_to_question(question), k))\n", " while i < n_loop:\n", " check = check_response(question, format_docs(context)).binary_score\n", " if check or (not check and i == n_loop - 1):\n", " gen_answer = final_answer(question, format_docs(context))\n", " break\n", " else:\n", " new_CoT_query = gen_question(question, format_docs(context), \"\\n\".join(pt)).new_query\n", " pt.append(new_CoT_query)\n", " thought_q += \"\\n\" + str(i) + \"-\" + new_CoT_query\n", " new_context = max_length_context(retrieval_bm25(add_triplet_context_to_question(new_CoT_query), k))\n", " context = context + new_context\n", " context = list(set(context))\n", " i += 1\n", " return {\n", " \"Question\": question,\n", " \"Answer\": gen_answer,\n", " \"Label\": label,\n", " \"Context\": context,\n", " \"CoT\": thought_q,\n", " \"n_CoT\": int(i+1),\n", " }\n", " except Exception as e:\n", " print(f\"Error occurred during processing question '{question}': {e}\")\n", " return None\n", "\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" } }, "nbformat": 4, "nbformat_minor": 5 }