Skip to content

Commit

Permalink
feat: yoona test code
Browse files Browse the repository at this point in the history
  • Loading branch information
sycuuui committed May 3, 2024
1 parent 422c3cc commit cada2e9
Show file tree
Hide file tree
Showing 6 changed files with 93 additions and 1 deletion.
Binary file modified __pycache__/app.cpython-311.pyc
Binary file not shown.
Binary file modified __pycache__/createTarget.cpython-311.pyc
Binary file not shown.
Binary file added __pycache__/yoona_target.cpython-311.pyc
Binary file not shown.
10 changes: 9 additions & 1 deletion app.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import io
import os
from pytorch import mosaic_jiyeon
from createTarget import extract_and_identify_faces_from_video,save_faces
from createTarget import extract_and_identify_faces_from_video
from yoona_target import yoona_test


from flask import (Flask, request, send_file, jsonify)
Expand Down Expand Up @@ -31,6 +32,13 @@ def process_video():
# face_base64_arrays = save_faces(identified_faces) # 이미지를 Base64 인코딩된 문자열로 반환
return jsonify({"images": identified_faces}) # JSON 객체로 변환

@app.route('/target2', methods=['POST'])
def yoona():
    """Detect and group faces in the fixed test clip.

    Returns a JSON object of shape {"images": [[b64, ...], ...]} — one inner
    list of (up to three) Base64-encoded JPEG face crops per identified person,
    as produced by yoona_test.
    """
    # NOTE(review): path is hard-coded for this test endpoint — take it from
    # the request once the endpoint is promoted beyond testing.
    video_path = './cutVideo.mp4'
    identified_faces = yoona_test(video_path)
    return jsonify({"images": identified_faces})


@app.route('/video', methods=['POST'])
def handle_video():
Expand Down
Binary file modified pytorch/__pycache__/mosaic_jiyeon.cpython-311.pyc
Binary file not shown.
84 changes: 84 additions & 0 deletions yoona_target.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
import base64
from io import BytesIO

import cv2
import face_recognition
import numpy as np
from PIL import Image


def yoona_test(video_path):
    """Detect faces across all frames of a video and group them by identity.

    Args:
        video_path: path to a video file readable by OpenCV.

    Returns:
        list[list[str]]: for each identified person, up to three
        Base64-encoded JPEG crops of their face (see save_faces).
    """
    face_encodings = []    # one 128-d face encoding per detection, all frames
    face_images = []       # BGR face crop, parallel to face_encodings
    identified_faces = []  # groups: list of (image, encoding) tuples per person

    video_capture = cv2.VideoCapture(video_path)
    try:
        while video_capture.isOpened():
            success, frame = video_capture.read()
            if not success:
                break

            # Locate faces in the current frame and compute their encodings.
            face_locations = face_recognition.face_locations(frame)
            current_encodings = face_recognition.face_encodings(frame, face_locations)

            for (top, right, bottom, left), encoding in zip(face_locations, current_encodings):
                face_images.append(frame[top:bottom, left:right])
                face_encodings.append(encoding)
    finally:
        # Always release the capture handle, even if detection raises.
        video_capture.release()

    # Classify every detected face exactly once, AFTER the read loop:
    # running this inside the frame loop would re-append already-classified
    # encodings to their groups on every subsequent frame (quadratic duplicates).
    for image, encoding in zip(face_images, face_encodings):
        matched = False
        for face_group in identified_faces:
            # Compare against the group's mean encoding.
            group_encodings = [enc for _, enc in face_group]
            avg_encoding = np.mean(group_encodings, axis=0)
            if np.linalg.norm(avg_encoding - encoding) < 0.6:  # same-person threshold
                face_group.append((image, encoding))
                matched = True
                break
        if not matched:
            # No existing group is close enough — start a new person.
            identified_faces.append([(image, encoding)])

    print('end1')

    # Encode the grouped faces as Base64 JPEGs for the API response.
    return save_faces(identified_faces)


def save_faces(identified_faces):
    """Encode up to three face crops per identified person as Base64 JPEGs.

    Args:
        identified_faces: list of groups; each group is a list of
            (face_image, encoding) tuples where face_image is a BGR
            numpy array (OpenCV convention).

    Returns:
        list[list[str]]: one inner list of Base64-encoded JPEG strings
        per person, in group order.
    """
    face_base64_arrays = []

    for face_group in identified_faces:
        encoded_faces = []
        # Cap each person at three representative images by slicing,
        # instead of a manual counter plus break.
        for face_image, _ in face_group[:3]:
            # OpenCV stores images as BGR; convert to RGB before JPEG encoding.
            rgb_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
            pil_img = Image.fromarray(rgb_image)
            # Encode to JPEG in memory, then to a Base64 string.
            buf = BytesIO()
            pil_img.save(buf, format="JPEG")
            encoded_faces.append(base64.b64encode(buf.getvalue()).decode('utf-8'))
        face_base64_arrays.append(encoded_faces)

    return face_base64_arrays

0 comments on commit cada2e9

Please sign in to comment.