-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathchain.py
More file actions
79 lines (66 loc) · 2.56 KB
/
chain.py
File metadata and controls
79 lines (66 loc) · 2.56 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from llm import llm
from Retrieval import Retriver
class chain(llm, Retriver):
    """Composable LCEL pipeline: optional retriever context + optional history
    feeding a prompt template, the configured model, and a string parser.

    The assembled runnable is exposed as ``self.chainRunner``; every setter
    rebuilds it so the chain always reflects the current configuration.
    """

    def __init__(self, template, model, context=None, history=None):
        """Build the initial chain.

        template -- prompt template string for ChatPromptTemplate.from_template
        model    -- settings object; assumed to expose modelType, temperature,
                    format (TODO confirm against the llm base class)
        context  -- optional retrieval source exposing a ``.retriver``
                    attribute (spelling follows the Retrieval module)
        history  -- optional runnable/value supplying the "history" input
        """
        llm.__init__(self, modelType=model.modelType, temperature=model.temperature,
                     format=model.format)
        self.prompt = ChatPromptTemplate.from_template(template)
        # Unwrap the retriever exactly once here. buildChain must NOT mutate
        # self.data: the original unwrapped inside buildChain, so any rebuild
        # (setTemplate/setLLM) or a setData+rebuild double-unwrapped and
        # raised AttributeError.
        self.data = context.retriver if context is not None else None
        self.history = history
        self.buildChain()

    def setLLM(self, model):
        """Swap the underlying model settings and rebuild the chain."""
        # NOTE(review): original read model.modelName here but model.modelType
        # in __init__; unified on modelType for consistency — confirm the
        # model object's attribute name.
        llm.__init__(self, modelType=model.modelType, temperature=model.temperature,
                     format=model.format)
        self.buildChain()

    def setTemplate(self, template):
        """Replace the prompt template and rebuild the chain."""
        self.prompt = ChatPromptTemplate.from_template(template)
        self.buildChain()

    def setData(self, dataRetriver):
        """Attach a retrieval source (exposing ``.retriver``) and rebuild."""
        self.data = dataRetriver.retriver
        self.buildChain()

    def format_docs(self, docs):
        """Join retrieved documents' page_content into one prompt-ready string."""
        return "\n\n".join(doc.page_content for doc in docs)

    def buildChain(self):
        """(Re)assemble self.chainRunner from the current prompt, model,
        retriever, and history. Idempotent: reads configuration, mutates
        nothing but chainRunner."""
        # Build the input map dynamically instead of four near-identical
        # branches; "question" is always forwarded unchanged.
        inputs = {"question": RunnablePassthrough()}
        if self.data:
            inputs["context"] = self.data | self.format_docs
        if self.history:
            inputs["history"] = self.history
        self.chainRunner = (
            inputs
            | self.prompt
            | self.modelSettings
            | StrOutputParser()
        )