-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathget_agent_response.py
58 lines (46 loc) · 1.72 KB
/
get_agent_response.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import streamlit as st
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_upstage import ChatUpstage
from agent_selection_page import AGENT_MANNER
def get_agent_response(agent, user_input, chat_data):
    """Ask the Upstage LLM for a reply in the voice of the selected agent.

    Builds a prompt from the raw chat transcript, this agent's prior Q/A
    history, and the new question, invokes the model, and records the new
    Q/A pair back into ``st.session_state.conversation_history``.

    Args:
        agent: Key into ``AGENT_MANNER`` identifying the agent persona.
        user_input: The question to answer now.
        chat_data: The chat transcript between the two people, passed as
            context for the model.

    Returns:
        The model's response as a plain string.

    Side effects:
        Appends ``"Q: ...\\nA: ...\\n\\n"`` for this turn to
        ``st.session_state.conversation_history[agent]`` so later calls
        see the full running conversation.
    """
    # API key is expected to have been stored in session state by an
    # earlier page of the app.
    llm = ChatUpstage(api_key=st.session_state.solar_api_key)

    # BUGFIX: the instruction text previously said regions are separated by
    # "-----------" (11 dashes) while the actual separators below are 10
    # dashes; the counts now match so the model can locate each region.
    prompt_template = PromptTemplate.from_template(
        """
Please provide the most relevant response based on the given context.
First, a text of chat between two possible lovers, is given as "<Chat>".
Second, the previous questions and the corresponding responses given by you, are given as "<Conversation>".
Third, the question on which you should provide a response now, is given as "<Question>".
Each region is separated by "----------".
Keep in mind that you are "{agent_type}" agent.
{agent_manner}
----------
<Chat>
{chat}
----------
<Conversation>
{conversation}
----------
<Question>
{question}
"""
    )

    # Compose: prompt -> LLM -> plain-string output.
    chain = prompt_template | llm | StrOutputParser()

    response = chain.invoke(
        {
            "agent_type": agent,
            "agent_manner": AGENT_MANNER[agent],
            "chat": chat_data,
            # Empty string on the agent's first turn (no history yet).
            "conversation": st.session_state.conversation_history.get(agent, ""),
            "question": user_input,
        }
    )

    # Persist this turn so subsequent calls include it as context.
    st.session_state.conversation_history[agent] = (
        st.session_state.conversation_history.get(agent, "")
        + f"Q: {user_input}\n"
        + f"A: {response}\n\n"
    )
    return response