Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

model 생성 후 db에 저장 #2

Open
wants to merge 8 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file modified djangoServer/.DS_Store
Binary file not shown.
Binary file modified djangoServer/combined.wav
Binary file not shown.
6 changes: 6 additions & 0 deletions djangoServer/djangoServer/admin.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
from django.contrib import admin
from .models import PrintLog

@admin.register(PrintLog)
class LogAdmin(admin.ModelAdmin):
    """Admin change-list configuration for PrintLog records."""

    # Columns rendered on the PrintLog change-list page, in display order.
    list_display = (
        'id',
        'wav_file',
        'image',
        'result',
        'create_dt',
        'update_dt',
    )
Binary file added djangoServer/djangoServer/audio/W.m4a
Binary file not shown.
11 changes: 11 additions & 0 deletions djangoServer/djangoServer/models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
from django.db import models

class PrintLog(models.Model):
    """One audio-recognition request: the stored WAV clip, the generated
    spectrogram image, the predicted result string, and timestamps."""

    # The combined audio clip saved under MEDIA_ROOT/audio/.
    wav_file = models.FileField(upload_to='audio/')
    # Spectrogram rendering; optional (blank/null) because it is attached
    # after the row is created.
    image = models.ImageField(
        'IMAGE',
        upload_to='specImage/%Y/%m/',
        blank=True,
        null=True,
    )
    # Predicted label, e.g. a single alphabet letter.
    result = models.CharField(max_length=50)
    # Set once on insert.
    create_dt = models.DateTimeField('CREATE DT', auto_now_add=True)
    # Refreshed on every save().
    update_dt = models.DateTimeField('UPDATE DT', auto_now=True)

    def __str__(self):
        """Human-readable identifier: the stored audio file's name."""
        return self.wav_file.name
Binary file not shown.
11 changes: 3 additions & 8 deletions djangoServer/djangoServer/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/

Expand All @@ -26,8 +25,7 @@
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

ALLOWED_HOSTS = ['223.194.130.187']

# Application definition

Expand All @@ -40,6 +38,7 @@
'django.contrib.staticfiles',

'rest_framework',
'djangoServer',
]

MIDDLEWARE = [
Expand Down Expand Up @@ -72,7 +71,6 @@

WSGI_APPLICATION = 'djangoServer.wsgi.application'


# Database
# https://docs.djangoproject.com/en/4.2/ref/settings/#databases

Expand All @@ -83,7 +81,6 @@
}
}


# Password validation
# https://docs.djangoproject.com/en/4.2/ref/settings/#auth-password-validators

Expand All @@ -102,7 +99,6 @@
},
]


REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
Expand All @@ -124,7 +120,6 @@

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.2/howto/static-files/

Expand All @@ -137,4 +132,4 @@
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR / 'media'
MEDIA_ROOT = BASE_DIR / 'media'
1 change: 1 addition & 0 deletions djangoServer/djangoServer/urls.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@

urlpatterns = [
path('', include(router.urls)),
path('admin/', admin.site.urls),
path('process_audio/', views.process_audio, name='process_audio'),
path('get_spectrogram/', views.get_spectrogram, name='get_spectrogram'),

Expand Down
170 changes: 112 additions & 58 deletions djangoServer/djangoServer/views.py
Original file line number Diff line number Diff line change
@@ -1,59 +1,84 @@
from io import BytesIO
import os

from PIL import Image
from django.http import HttpResponse, HttpResponseServerError
from django.core.files import File
from django.http import HttpResponseServerError
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse

import requests
import traceback
import os
import json

def get_spectrogram(request):
    """Development helper view: reads a local .m4a sample, POSTs its bytes
    to ``process_audio`` as JSON, and relays the predicted alphabet.

    The payload format mirrors what the Android client sends: a JSON object
    ``{"recordData": [...]}`` whose values are *signed* byte ints, because
    the receiving view re-packs each value with ``struct.pack('b', ...)``.

    Returns a JsonResponse with either ``predicted_alphabet`` or ``error``.
    """
    if request.method == 'GET':
        audio_path = 'djangoServer/audio/W.m4a'
        url = 'http://223.194.130.187:8000/process_audio/'
        headers = {"Content-Type": "application/json"}

        try:
            with open(audio_path, 'rb') as f:
                raw_bytes = f.read()

            # BUG FIX: json.dumps() cannot serialize bytes/bytearray — it
            # raises TypeError. Encode the audio as a list of signed ints
            # (-128..127), the range struct.pack('b', ...) accepts server-side.
            signed_bytes = [b - 256 if b >= 128 else b for b in raw_bytes]
            data = {'recordData': signed_bytes}

            response = requests.post(url, data=json.dumps(data), headers=headers)

            if response.status_code == 200:
                response_data = response.json()
                predicted_alphabet = response_data['predicted_alphabet']
                print("(get) response_data : ", response_data)
                print("(get) predicted_alphabet : ", predicted_alphabet)
                return JsonResponse({'predicted_alphabet': predicted_alphabet})
            else:
                print(f"An error occurred: {response.status_code}")
                return JsonResponse({'error': f"An error occurred: {response.status_code}"})

        except FileNotFoundError:
            # The sample file is missing from the working tree.
            print(f"{audio_path} does not exist.")
            return JsonResponse({'error': "An error occurred: FileNotFoundError"})
        except Exception as e:
            # BUG FIX: this branch previously reported "FileNotFoundError"
            # for every failure; report the actual exception instead.
            print(f"An error occurred while processing {audio_path}: {e}")
            return JsonResponse({'error': f"An error occurred: {e}"})

    # BUG FIX: non-GET requests previously returned None (a server error in
    # Django); answer explicitly instead.
    return JsonResponse({'error': 'GET required'}, status=405)


# --- Dependencies and constants for the POST (process_audio) handler ---
from pydub import AudioSegment
import torch
import torchvision.transforms as transforms
import numpy as np
import librosa, librosa.display
import matplotlib
# BUG FIX: select the non-GUI backend once, BEFORE pyplot is imported —
# the merged diff called matplotlib.use('Agg') twice, after importing pyplot.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import subprocess
import struct
from .models import PrintLog
from django.conf import settings

# Spectrogram figure size (inches) passed to plt.figure.
FIG_SIZE = (15, 10)
DATA_NUM = 30

# Class labels: the model's argmax index i maps to the i-th lowercase letter.
alpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
         'w', 'x', 'y', 'z']

# Recording parameters: sampling rate, channel count, and bytes per sample.
SAMPLE_RATE = 44100
CHANNELS = 1
SAMPLE_WIDTH = 2
# m4a -> wav -> spectrogram / -> model -> result
# m4a -> wav -> spectrogram / -> resnetModel -> result
@csrf_exempt
def process_audio(request):
global peekIndex, image_url
Expand All @@ -62,22 +87,32 @@ def process_audio(request):
try:
if request.method == 'POST':
print("POST")
# POST 요청에서 이미지 파일을 가져옵니다.
m4a_file = request.FILES['m4a']
#print("m4a_file : ", m4a_file)

# 소리 + 묵음
# load the audio files
audio1 = AudioSegment.from_file(m4a_file, format="m4a")
audio2 = AudioSegment.from_file("djangoServer/slienceSound.m4a", format="m4a")
# POST 요청에서 biteArray 데이터를 가져옵니다.
requestBody = json.loads(request.body) # 안드로이드 앱에서 보낸 데이터를 가져옵니다.
byte_data = requestBody['recordData']
byte_array = bytes([struct.pack('b', x)[0] for x in byte_data])

with open('my_audio_file.aac', 'wb+') as destination:
for i in range(0, len(byte_array), 32):
chunk = byte_array[i:i + 32]
destination.write(chunk)

# aac -> wav
input_file = "my_audio_file.aac"
output_file = "my_audio_file.wav"

# Run the ffmpeg command to convert the AAC file to WAV
subprocess.run(["ffmpeg", "-y", "-i", input_file, output_file])

# concatenate the audio files
combined_audio = audio1 + audio2
audio1 = AudioSegment.from_file("my_audio_file.wav", format="wav")
silence = AudioSegment.silent(duration=1000) # 1초 묵음
combined_audio = audio1 + silence

# export the concatenated audio as a new file
file_handle = combined_audio.export("combined.wav", format="wav")

# paths.append(file_path)
# 신호 및 샘플링 레이트 가져오기
sig, sr = librosa.load(file_handle, sr=22050)

# 에너지 평균 구하기
Expand All @@ -91,15 +126,15 @@ def process_audio(request):
if (sig[i] ** 2 >= mean):
peekIndex = i
break

#
START_LEN = 1102
END_LEN = 20948
if peekIndex > 1102:
print(peekIndex)
#print(peekIndex)
startPoint = peekIndex - START_LEN
endPoint = peekIndex + 22050
else:
print(peekIndex)
#print(peekIndex)
startPoint = peekIndex
endPoint = peekIndex + END_LEN

Expand All @@ -125,6 +160,7 @@ def process_audio(request):
n_fft_duration = float(n_fft) / sr

# STFT
# stft = librosa.stft(sig[startPoint:endPoint], n_fft=n_fft, hop_length=hop_length)
stft = librosa.stft(sig[startPoint:endPoint], n_fft=n_fft, hop_length=hop_length)

# 복소공간 값 절댓값 취하기
Expand All @@ -139,37 +175,55 @@ def process_audio(request):
plt.figure(figsize=FIG_SIZE)
librosa.display.specshow(log_spectrogram, sr=sr, hop_length=hop_length, cmap='magma')


# matplotlib 라이브러리를 사용하여 생성된 spectrogram 이미지를 jpg 형식으로 저장
#name_end_pos = file_handle.find('.')
# print(name_end_pos)
# image_path = 'images/' + file_handle[:name_end_pos] + '.jpg'
# print(image_path)
# image_url = settings.STATIC_URL + image_path
# print(image_url)
image_path = 'static/images/' + 'test.jpg'

# save spectrogram image
#plt.savefig('static/images/' + file_handle[:name_end_pos] + '.jpg')
# spectrogram 이미지 저장
plt.savefig(image_path)

plt.close()

# 모델 입히기
model = torch.load('djangoServer/resnetModel/resnet32.pth')
# switch resnetModel to evaluation mode
model.eval()
# define the image transforms
image_transforms = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# 이미지 열기
image = Image.open(image_path)
# 저장된 이미지를 열어서 확인
#os.system('open ' + image_path) # Mac OS 기준

# 이미지를 바이트 형태로 변환하여 메모리에 저장
image_bytes = BytesIO()
image.save(image_bytes, format='JPEG')
image_bytes = image_bytes.getvalue()

# 이미지를 HttpResponse 객체에 첨부 파일로 반환
response = HttpResponse(image_bytes, content_type='image/jpeg')
response['Content-Disposition'] = 'inline; filename="spectrogram.jpeg"'
return response

# apply the transforms to the test image
test_image_tensor = image_transforms(image)
# add batch dimension to the image tensor
test_image_tensor = test_image_tensor.unsqueeze(0)

# get the resnetModel's prediction
with torch.no_grad():
prediction = model(test_image_tensor)

# get the predicted class index
predicted_class_index = torch.argmax(prediction).item()

# 모델 객체 생성 및 데이터 저장
print_log = PrintLog()
print_log.wav_file.save("combined.wav", file_handle)
# 이미지 파일을 열고 File 객체 생성
with open(image_path, 'rb') as f:
file_name = os.path.basename(image_path)
django_file = File(f, name=file_name)

# 모델 객체 생성 및 이미지 파일 저장
print_log.image.save(file_name, django_file, save=True)
print_log.result = alpha[predicted_class_index]
print_log.save()

response = {'predicted_alphabet': alpha[predicted_class_index]}
# 예측값 알파벳 출력
print("post: ", response)
return JsonResponse(response)
except Exception as e:
print(traceback.format_exc()) # 예외 발생시 traceback 메시지 출력
return HttpResponseServerError() # 500 Internal Server Error 응답 반환
return HttpResponseServerError() # 500 Internal Server Error 응답 반환
Binary file modified djangoServer/static/images/test.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.