cli.py
import os
import sys
import time
import threading
import signal
import subprocess
from datetime import datetime

from openai import OpenAI
import tiktoken
import torch
import whisper
from dotenv import load_dotenv

# Whisper model size to load ("tiny", "base", "small", "medium", "large")
whisper_model = "small"

# Per-chunk instruction sent to the model (Portuguese): "Create clear, concise
# topics, without labels, summarizing the key information"
command_prompt = "Crie tópicos claros e concisos sem rótulos resumindo as informações principais"

# Flag the SIGINT handler sets to stop the elapsed-time ticker thread
stop_ticker = False
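
# The OpenAI key is read from the environment (see __main__). A minimal .env
# file next to this script would look like this (the key value is a placeholder):
#
#   OPEN_API_KEY=sk-your-key-here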

def display_ticker():
    start_time = time.time()
    while not stop_ticker:
        elapsed_time = time.time() - start_time
        minutes, seconds = divmod(int(elapsed_time), 60)
        sys.stdout.write(f'\rRecording: {minutes:02d}:{seconds:02d}')
        sys.stdout.flush()
        time.sleep(1)

def record_meeting(output_filename):
    def signal_handler(sig, frame):
        global stop_ticker
        stop_ticker = True
        print("\nRecording stopped.")
        process.terminate()
        sys.exit(0)

    # Handle Ctrl+C to stop recording
    signal.signal(signal.SIGINT, signal_handler)

    # Command to capture audio from both devices (microphone and Virtual Cable)
    command = [
        'ffmpeg',
        '-f', 'dshow',                                        # DirectShow capture for Windows
        '-i', 'audio=CABLE Output (VB-Audio Virtual Cable)',  # System audio from Virtual Cable
        '-f', 'dshow',                                        # Second DirectShow input
        '-i', 'audio=Microphone (FIFINE K670 Microphone)',    # Microphone input
        '-filter_complex', '[0:a][1:a]amix=inputs=2:duration=longest[aout]',  # Mix both audio inputs
        '-map', '[aout]',      # Map the mixed audio to the output
        '-c:a', 'libmp3lame',  # Use the MP3 codec
        '-q:a', '2',           # Audio quality (0-9, lower is better)
        '-y',                  # Overwrite the output file if it exists
        output_filename
    ]
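
    # The device names above are machine-specific. To list the DirectShow
    # capture devices available on your own system, ffmpeg provides:
    #   ffmpeg -list_devices true -f dshow -i dummy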

    # Start the ffmpeg subprocess
    process = subprocess.Popen(command, stderr=subprocess.PIPE)

    # Start the ticker thread to display recording time
    ticker_thread = threading.Thread(target=display_ticker)
    ticker_thread.start()

    # Print ffmpeg output to the console (blocks until ffmpeg exits)
    print_ffmpeg_errors(process)

    # Wait for the ticker thread to finish
    ticker_thread.join()

def print_ffmpeg_errors(process):
    """Read and print FFmpeg's stderr stream (FFmpeg logs everything there, not just errors)."""
    for line in iter(process.stderr.readline, b''):
        print(line.decode(), end='')
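
# Because FFmpeg writes its entire log to stderr by default, appending
# '-loglevel', 'error' to the command in record_meeting would limit the
# printed output to genuine errors.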

def transcribe_audio(filename):
    # Load the model onto the GPU if one is available, otherwise the CPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = whisper.load_model(whisper_model, device=device)

    # Load the audio; model.transcribe() splits long recordings into
    # 30-second windows internally
    audio = whisper.load_audio(filename)
    print("Beginning transcription...")
    result = model.transcribe(audio)
    return result['text']
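
# Whisper auto-detects the spoken language; for Portuguese meetings it could
# be pinned explicitly, e.g. model.transcribe(audio, language="pt").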

def summarize_transcript(transcript):
    # Relies on the module-level OpenAI `client` created in __main__
    def generate_summary(prompt):
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                # System prompt (Portuguese): "You are an assistant specialized
                # in summarizing meeting transcripts... highlight the main
                # points discussed, decisions made, and action items, organized
                # into clear, concise topics."
                {"role": "system", "content": "Você é um assistente especializado em resumir transcrições de reuniões. Seu objetivo é criar um resumo conciso e bem estruturado, destacando os principais pontos discutidos, decisões tomadas e ações a serem realizadas. Organize o resumo em tópicos claros e concisos, mantendo a essência da reunião em um formato fácil de ler e compreender."},
                {"role": "user", "content": f"{command_prompt}: {prompt}"}
            ],
            temperature=0.2,
        )
        return response.choices[0].message.content.strip()

    # Split the transcript into 15000-token chunks so each request stays well
    # within the model's context window. cl100k_base is not gpt-4o-mini's exact
    # encoding, but it is close enough for chunk sizing.
    chunks = []
    prompt = "Por favor, resuma o seguinte texto:\n\n"  # "Please summarize the following text"
    text = prompt + transcript
    tokenizer = tiktoken.get_encoding("cl100k_base")
    tokens = tokenizer.encode(text)
    while tokens:
        chunk_tokens = tokens[:15000]
        chunk_text = tokenizer.decode(chunk_tokens)
        chunks.append(chunk_text)
        tokens = tokens[15000:]

    # Summarize each chunk and join the partial summaries
    summary = "\n".join(generate_summary(chunk) for chunk in chunks)
    return summary
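
# Rough way to check whether a transcript will need more than one chunk
# (the file path here is illustrative):
#   enc = tiktoken.get_encoding("cl100k_base")
#   with open("output/transcript_20250101_120000.txt", encoding="utf-8") as f:
#       print(len(enc.encode(f.read())))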

if __name__ == "__main__":
    if len(sys.argv) != 3:
        print(f"Usage: python {sys.argv[0]} [record|summarize] output.mp3")
        sys.exit(1)

    # Load the OpenAI key from a local .env file
    load_dotenv()
    api_key = os.getenv('OPEN_API_KEY')
    if api_key is None:
        print("Environment variable OPEN_API_KEY not found. Exiting...")
        sys.exit(1)
    client = OpenAI(api_key=api_key)

    action = sys.argv[1]
    output_filename = sys.argv[2]

    if action == "record":
        record_meeting(output_filename)
    elif action == "summarize":
        transcript = transcribe_audio(output_filename)
        summary = summarize_transcript(transcript)

        # Create the 'output' folder if it doesn't exist
        os.makedirs('output', exist_ok=True)

        # Generate a timestamp so repeated runs don't overwrite each other
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # Save the transcript
        transcript_filename = f"output/transcript_{timestamp}.txt"
        with open(transcript_filename, 'w', encoding='utf-8') as f:
            f.write(transcript)

        # Save the summary
        summary_filename = f"output/summary_{timestamp}.txt"
        with open(summary_filename, 'w', encoding='utf-8') as f:
            f.write(summary)

        print(f"Transcript saved to: {transcript_filename}")
        print(f"Summary saved to: {summary_filename}")
        print(f"TRANSCRIPT:{transcript}\n")
        print(f"SUMMARY_START:\n{summary}\nSUMMARY_END\n")
    else:
        print(f"Invalid action. Usage: python {sys.argv[0]} [record|summarize] output.mp3")
        sys.exit(1)
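
# Example invocations (the capture device names in record_meeting are
# machine-specific and will likely need adjusting):
#   python cli.py record meeting.mp3      # record mic + system audio until Ctrl+C
#   python cli.py summarize meeting.mp3   # transcribe with Whisper, then summarize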