-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathapp.py
51 lines (44 loc) · 1.45 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import vertexai
from vertexai.preview.generative_models import GenerativeModel, Part
import streamlit as st

# NOTE(review): vertexai.init(project=..., location=...) is never called here —
# presumably credentials/project come from the environment; confirm before deploy.
# NOTE(review): "gemini-pro-vision" is the multimodal model, but this app only
# sends text — "gemini-pro" is likely the intended model; verify.
model = GenerativeModel("gemini-pro-vision")

st.set_page_config(
    page_title="Gemini Pro Streamlit App",
    page_icon="✪",
    layout="wide",
)
st.title("Gemini AI Chat")
st.markdown('By [Saurabh](https://linkedin.com/in/dev-saurabh)')

# Initialize chat history once per browser session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored conversation on every Streamlit rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input from the chat box.
if user_input := st.chat_input("You:"):
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)
    # Request a streamed response from the model.
    responses = model.generate_content(
        user_input,
        generation_config={
            "max_output_tokens": 2048,
            "temperature": 0.4,
            "top_p": 1,
            "top_k": 32,
        },
        stream=True,
    )
    # BUG FIX: the original did `full_res = "" + chunk` INSIDE the loop, which
    # overwrote the accumulator on every chunk and rendered/stored each partial
    # chunk as its own assistant message. Accumulate all chunks first, then
    # display and persist the complete reply exactly once.
    full_res = ""
    for response in responses:
        full_res += response.candidates[0].content.parts[0].text
    if full_res:
        with st.chat_message("assistant"):
            st.markdown(full_res)
        st.session_state.messages.append({"role": "assistant", "content": full_res})
    else:
        st.text("An error occurred while fetching the response.")