from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_community.chat_models import ChatOllama
from langchain_core.prompts import ChatPromptTemplate
from langchain_pinecone import PineconeVectorStore
from langchain_community.embeddings import SentenceTransformerEmbeddings

def make_chain_llm(retriever, llm):
    """Build a RAG chain (LCEL) that answers questions from retrieved context."""

    def format_docs(docs):
        # Join the retrieved documents into a single block of text.
        return "\n\n".join(doc.page_content for doc in docs)

    # Any chat model supported by LangChain can be used here; this project uses Ollama.
    # llm = ChatOllama(model="zephyr:latest")

    template = "\"```\" Below is an instruction that describes a task. Write a response that appropriately completes the request."\
    "์ œ์‹œํ•˜๋Š” context์—์„œ๋งŒ ๋Œ€๋‹ตํ•˜๊ณ  context์— ์—†๋Š” ๋‚ด์šฉ์€ ์ƒ์„ฑํ•˜์ง€๋งˆ"\
    "make answer in korean. ํ•œ๊ตญ์–ด๋กœ ๋Œ€๋‹ตํ•˜์„ธ์š”"\
    "\n\nContext:\n{context}\n;"\
    "Question: {question}"\
    "\n\nAnswer:"

    prompt = ChatPromptTemplate.from_template(template)

    # LCEL pipeline: retrieve and format the context, fill the prompt,
    # call the LLM, and parse the output into a plain string.
    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )

    return rag_chain
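

# --- Usage sketch (illustrative; not part of the original file) ---
# A minimal, hedged example of wiring the chain together. The embedding model,
# Pinecone index name, and question text below are assumptions, not values
# taken from this repository; PINECONE_API_KEY must be set in the environment.
if __name__ == "__main__":
    embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")  # assumed model
    vectorstore = PineconeVectorStore(index_name="my-index", embedding=embeddings)  # hypothetical index
    retriever = vectorstore.as_retriever()
    llm = ChatOllama(model="zephyr:latest")  # same model as the commented-out example above

    chain = make_chain_llm(retriever, llm)
    # The chain takes the question as a plain string; RunnablePassthrough
    # forwards it to the prompt while the retriever fills in the context.
    print(chain.invoke("문서의 핵심 내용을 요약해 주세요"))  # "Please summarize the key points of the document"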