Update app.py
app.py CHANGED
@@ -2,6 +2,7 @@ import cv2
 from transformers import ViTImageProcessor, ViTForImageClassification, AutoModelForImageClassification, AutoImageProcessor
 import torch
 import numpy as np
+import face_recognition
 
 torch.backends.cudnn.benchmark = True
 
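Note: face_recognition is the dlib-based face library; model="cnn" selects dlib's CNN face detector, which is generally more accurate than the default HOG detector but much slower unless dlib is built with CUDA support. The commit keeps the old OpenCV Haar-cascade path as comments rather than deleting it.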
@@ -144,18 +145,21 @@ def postProcessed( rawfaces, maximunSize, minSize = 30 ):
         faces.append( (x, y, w, h) )
     return faces
 def image_inference(image):
+
 
     if sum(image.shape) == 0:
         return { 'ErrorFound': 'ImageNotFound' }
     # Convert into grayscale
-    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     # Detect faces
-    rawfaces = face_cascade.detectMultiScale(gray, 1.05, 5, minSize = (30, 30))
-    image = np.asarray( image )
+    # rawfaces = face_cascade.detectMultiScale(gray, 1.05, 5, minSize = (30, 30))
+    # image = np.asarray( image )
     # Draw rectangle around the faces
-    rawfaces = postProcessed( rawfaces, image.shape[:2] )
-
-    faces = [ image[x:w+x, y:h+y].copy() for (x, y, w, h) in rawfaces ]
+    # rawfaces = postProcessed( rawfaces, image.shape[:2] )
+
+    rawfaces = face_recognition.face_locations( image , model="cnn")
+    faces = [ image[top:bottom, left:right].copy() for (top, left, bottom, right) in rawfaces ]
+    # faces = [ image[x:w+x, y:h+y].copy() for (x, y, w, h) in rawfaces ]
     faces = [ Image.fromarray(x, mode = 'RGB') for x in faces ]
     ages, genders, beards, blurs, ethncities, masks = AnalysisFeatures( faces )
 
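For reference, a minimal standalone sketch of the new image path (the filename and variable names are illustrative, not from app.py). Per the face_recognition documentation, face_locations returns boxes in (top, right, bottom, left) order, and the crop below unpacks them in that order; the committed code unpacks (top, left, bottom, right), which swaps the two horizontal coordinates.

import face_recognition
from PIL import Image

# Sketch only: detect faces with the dlib CNN model and crop each box,
# unpacking in the documented (top, right, bottom, left) order.
image = face_recognition.load_image_file("example.jpg")   # RGB numpy array
boxes = face_recognition.face_locations(image, model="cnn")
faces = [Image.fromarray(image[top:bottom, left:right].copy(), mode="RGB")
         for (top, right, bottom, left) in boxes]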
@@ -174,23 +178,26 @@ def video_inference(video_path):
     while(cap.isOpened()):
         _, img = cap.read()
 
-        try:
+        # try:
         # Convert into grayscale
-            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+            # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+        # except:
+            # break
+        # Detect faces
+        # rawfaces = face_cascade.detectMultiScale(gray, 1.05, 6, minSize = (30, 30))
+        try:
+            image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+            image = np.asarray( image )
         except:
             break
-        # Detect faces
-        rawfaces = face_cascade.detectMultiScale(gray, 1.05, 6, minSize = (30, 30))
-        image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-        image = np.asarray( image )
-
-        rawfaces = postProcessed( rawfaces, image.shape[:2] )
-
+        # rawfaces = postProcessed( rawfaces, image.shape[:2] )
+        rawfaces = face_recognition.face_locations( image , model="cnn")
         # Draw rectangle around the faces
         # https://stackoverflow.com/questions/15589517/how-to-crop-an-image-in-opencv-using-python for fliping axis
         global_facesCo.append( rawfaces )
-        for (x, y, w, h) in rawfaces:
-            face = image[x:w+x, y:h+y].copy()
+        for (top, left, bottom, right) in rawfaces:
+            # face = image[x:w+x, y:h+y].copy()
+            face = image[top:bottom, left:right].copy()
             global_faces.append(Image.fromarray( face , mode = 'RGB') )
 
     ages, genders, beards, blurs, ethncities, masks = AnalysisFeatures( global_faces )
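Similarly, a sketch of the per-frame video path under the same assumptions (the video filename is illustrative). Checking the boolean returned by cap.read() is an alternative to the bare try/except around cvtColor that the diff relies on to detect the end of the stream.

import cv2
import face_recognition

# Sketch only: read frames, convert BGR -> RGB for face_recognition,
# then run the dlib CNN detector on each frame.
cap = cv2.VideoCapture("example.mp4")
per_frame_boxes = []
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    per_frame_boxes.append(face_recognition.face_locations(rgb, model="cnn"))
cap.release()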