from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_pinecone import PineconeVectorStore
|
def make_chain_llm(retriever, llm):
    """Build a RAG (retrieval-augmented generation) chain.

    The chain takes a question string, retrieves matching documents,
    joins their text into a single context block, fills the prompt
    template, queries the LLM, and returns the answer as a plain string.

    Parameters
    ----------
    retriever :
        A LangChain retriever (supports the runnable ``|`` protocol);
        maps a question string to a list of documents exposing
        ``page_content``.
    llm :
        A LangChain chat model (e.g. ``ChatOllama``) that answers the
        formatted prompt.

    Returns
    -------
    A runnable chain: ``str question -> str answer``.
    """

    def format_docs(docs):
        # Flatten the retrieved documents into one context string,
        # separating documents with a blank line.
        return "\n\n".join(doc.page_content for doc in docs)

    # NOTE(review): the Korean instruction text below appears mojibake
    # (mis-decoded bytes). It is a runtime string that reaches the model,
    # so it is preserved byte-for-byte here -- confirm the intended text
    # with the author rather than guessing a re-decoding.
    template = (
        "\"```\" Below is an instruction that describes a task. Write a response that appropriately completes the request."
        "์ ์ํ๋ context์์๋ง ๋๋ตํ๊ณ context์ ์๋ ๋ด์ฉ์ ์์ฑํ์ง๋ง"
        "make answer in korean. ํ๊ตญ์ด๋ก ๋๋ตํ์ธ์"
        "\n\nContext:\n{context}\n;"
        "Question: {question}"
        "\n\nAnswer:"
    )

    prompt = ChatPromptTemplate.from_template(template)

    # The question passes through unchanged; the retriever output is
    # piped through format_docs to become the {context} variable.
    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )

    return rag_chain
|
| |
|