-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathapp.py
150 lines (112 loc) · 4.88 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
# Standard library
import os
import subprocess  # list-form ffmpeg invocation (no shell string interpolation)

# Third-party
from dotenv import load_dotenv
# request is the object to handle incoming requests
from flask import Flask, render_template, jsonify, request, send_from_directory, session
from openai import OpenAI
# from transcript import transcribe
# ^^ replaced by whisper:
import whisper

# Local
import chatbot
from chatbot import chat_with_bot
load_dotenv()

# OpenAI client authenticated from the environment (.env loaded above).
api_key = os.getenv('OPENAI_API_KEY')
client = OpenAI(api_key=api_key)

app = Flask(__name__)
# BUG FIX: the secret key was the literal string 'FLASK_KEY', not the value of
# the FLASK_KEY environment variable — clearly unintended given the env-var
# pattern used for OPENAI_API_KEY above. Read it from the environment, keeping
# the old literal as a fallback so behavior is unchanged when it is unset.
app.secret_key = os.getenv('FLASK_KEY', 'FLASK_KEY')
# configure maximum file size: 100 MB
app.config['MAX_CONTENT_LENGTH'] = 100 * 1024 * 1024

# init chatbot — presumably one-time model/prompt setup; confirm in chatbot.py
chatbot.main()
@app.route('/set_language', methods=['POST'])
def set_lang():
    """Store the user's preferred translation language in the session.

    Expects a JSON body of the form {"language": "<code>"}. Returns 400 on a
    missing/empty field instead of crashing with a KeyError (500) as before.
    """
    data = request.get_json(silent=True) or {}  # silent=True: no 500 on bad JSON
    selected_language = data.get('language')
    if not selected_language:
        return {'status': 'Missing "language" field'}, 400
    session['language'] = selected_language  # Store language in session
    print(session['language'])
    return {'status': 'Language changed'}, 200
@app.route('/explain', methods=['POST'])
def explain_text():
    """Ask the chatbot to explain a user-selected span of the transcript.

    JSON body: textToExplain (the selection), contextText (surrounding
    transcript text), chatHistory (list of prior chat messages).
    """
    data = request.get_json()
    text_to_expl = data['textToExplain']
    text_context = data['contextText']
    chat_history = data['chatHistory']  # is a list
    # BUG FIX: the original concatenation had no separator between the selected
    # text and the next sentence, so the prompt read "...<selection>Here is the
    # context...". A leading space on the second sentence fixes it.
    prompt = ('Explain this selected text from the transcript concisely: '
              + text_to_expl
              + ' Here is the context in which this selected portion of the transcript appears: '
              + text_context)
    explained_text = chat_with_bot(prompt, chat_history)
    return jsonify({'explanation': explained_text})
@app.route('/translate', methods=['POST'])
def translate_text():
    """Translate a selected transcript span into the session's chosen language."""
    payload = request.get_json()
    selection = payload['textToTranslate']
    history = payload['chatHistory']  # is a list
    surrounding = payload['contextText']
    # Language was set via /set_language; fall back to Traditional Chinese.
    target_lang = session.get('language', 'zh-TW')
    print(target_lang)
    print(history)
    prompt = f"Please translate the following selected text into {target_lang}: {selection} To help you with your translation, here is the context in which the selected text appears: {surrounding}. Only return the translated selected portion"
    result = chat_with_bot(prompt, history)
    return jsonify({'translation': result})
@app.route('/summarize', methods=['POST'])
def summarize_text():
    """Ask the chatbot for a concise summary of the selected transcript text."""
    body = request.get_json()
    selection = body['textToSummarize']
    history = body['chatHistory']  # is a list
    print(history)
    prompt = 'Concisely summarize this selected portion of the transcript: ' + selection
    summ_text = chat_with_bot(prompt, history)
    return jsonify({'summary': summ_text})
@app.route('/chat-with-bot', methods=['POST'])
def message():
    """Relay a free-form user message (plus chat history) to the chatbot."""
    payload = request.get_json()
    reply = chat_with_bot(payload['message'], payload['chatHistory'])
    print(reply)
    # Returned as-is; Flask wraps the chatbot's reply in a response object.
    return reply
# Root URL handler: serves the single-page app shell.
@app.route('/')
def index():
    """Render index.html from the templates folder."""
    return render_template('index.html')
@app.route('/audio-files/<filename>')
def data(filename):
    """Serve a saved media file from the audio-files directory."""
    # send_from_directory validates <filename> against path traversal.
    return send_from_directory('audio-files', filename)
@app.route('/upload-video', methods=['POST'])
def upload_video():
    """Accept an uploaded video, extract its audio with ffmpeg, and return a
    Whisper transcript.

    Returns 400 for a missing, empty, or disallowed file; on success the JSON
    body is {"transcript": <whisper result dict>}.
    """
    print("here")
    print(request.files)
    if 'video' not in request.files:
        return jsonify(error='No video part'), 400
    file = request.files['video']
    if file.filename == '':
        return jsonify(error='No selected video'), 400
    print("flag")
    if not (file and allowed_file(file.filename)):
        # BUG FIX: the original fell off the end here (returned None), which
        # Flask turns into a 500. Reject disallowed extensions explicitly.
        return jsonify(error='File type not allowed'), 400
    print("flag2")
    print(file.filename)
    # Strip directory components so a crafted filename like "../../x.mp4"
    # cannot escape the audio-files folder.
    safe_name = os.path.basename(file.filename)
    # temporarily save the video file to the audio-files folder
    local_filename = os.path.join('audio-files', safe_name)
    file.save(local_filename)
    # BUG FIX: the original built a shell command string with os.system, which
    # broke on filenames containing spaces (acknowledged in its own comment)
    # and allowed shell injection via the filename. subprocess.run with an
    # argument list avoids the shell entirely; -y overwrites an existing file.
    local_audio_filename = os.path.join('audio-files', 'VideoAudio.wav')
    subprocess.run(
        ['ffmpeg', '-i', local_filename, local_audio_filename, '-y'],
        check=True,  # surface ffmpeg failure instead of transcribing stale audio
    )
    # NOTE(review): the model is reloaded on every request; hoisting it to
    # module scope would be faster but changes startup cost — confirm first.
    model = whisper.load_model("base")
    transcript = model.transcribe(local_audio_filename)
    print("Transcribing done! Here's the transcript: ")
    print(transcript["text"])
    return jsonify(transcript=transcript)
def allowed_file(filename):
    """Return True when *filename* carries a permitted video extension."""
    # do later - which extensions should we allow?
    permitted = {'mp4', 'avi', 'mov', 'flv', 'wmv'}
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in permitted
# Dev-server entry point; debug=True enables the reloader and the interactive
# debugger — not suitable for production deployment.
if __name__ == '__main__':
    app.run(debug=True, port=3000)