Commit 4ff0905 · paresh95 committed · 1 Parent(s): 4eddd18

PS|Modularised face proportions
Files changed:
- app.py +6 -1
- parameters.yml +2 -0
- src/face_proportions.py +121 -0
- src/face_texture.py +1 -1
app.py
CHANGED
@@ -2,13 +2,15 @@ import gradio as gr
 from src.face_texture import GetFaceTexture
 from src.face_symmetry import GetFaceSymmetry
 from src.face_demographics import GetFaceDemographics
+from src.face_proportions import GetFaceProportions
 
 
 def combined_fn(input_image):
     texture_results = GetFaceTexture().main(input_image)
     symmetry_results = GetFaceSymmetry().main(input_image)
     demographics_results = GetFaceDemographics().main(input_image)
-    return (*texture_results, *symmetry_results, demographics_results)
+    proportion_results = GetFaceProportions().main(input_image)
+    return (*texture_results, *symmetry_results, demographics_results, *proportion_results)
 
 
 iface = gr.Interface(
@@ -21,6 +23,9 @@ iface = gr.Interface(
         gr.outputs.Image(type="pil"),  # From GetFaceSymmetry
         "text",  # From GetFaceSymmetry
         "text",  # From GetFaceDemographics
+        "text",  # From GetFaceProportions
+        "text",  # From GetFaceProportions
+        gr.outputs.Image(type="pil"),  # From GetFaceProportions
     ],
 )
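For context, a minimal sketch of how the extended return tuple could be smoke-tested outside the Gradio UI; the direct call to combined_fn and the sample image path are assumptions for illustration, not part of this commit:

# Hypothetical local check (not in the commit): GetFaceProportions().main()
# returns (golden_ratios, equal_ratios, annotated PIL image), so combined_fn
# now yields two extra dicts and one extra image for the outputs list above.
from app import combined_fn

outputs = combined_fn("data/sample_face.jpg")  # assumed test image path
print(len(outputs))       # should match the number of entries in the iface outputs list
print(type(outputs[-1]))  # expected: the PIL image from GetFaceProportions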
parameters.yml
CHANGED
@@ -7,3 +7,5 @@ face_age:
 face_gender:
   config: "models/face_gender/gender_deploy.prototxt"
   model: "models/face_gender/gender_net.caffemodel"
+face_landmarks:
+  model: 'models/face_alignment/shape_predictor_68_face_landmarks.dat'
src/face_proportions.py
ADDED
@@ -0,0 +1,121 @@
+import dlib
+import yaml
+import cv2
+import os
+import numpy as np
+import imutils
+from src.cv_utils import get_image
+from typing import List, Union
+from PIL import Image as PILImage
+
+with open("parameters.yml", "r") as stream:
+    try:
+        parameters = yaml.safe_load(stream)
+    except yaml.YAMLError as exc:
+        print(exc)
+
+
+class GetFaceProportions:
+    def __init__(self):
+        self.golden_ratio = 1.618
+
+    @staticmethod
+    def preprocess_image(image: np.array) -> np.array:
+        image = imutils.resize(image, width=500)
+        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+        return gray_image
+
+    @staticmethod
+    def detect_face_landmarks(gray_image: np.array) -> List[Union[np.array, np.array]]:
+
+        detector = dlib.get_frontal_face_detector()
+        predictor = dlib.shape_predictor(parameters["face_landmarks"]["model"])
+        rects = detector(gray_image, 1)
+        for rect in rects:
+            shape = predictor(gray_image, rect)
+            shape = np.array([(shape.part(i).x, shape.part(i).y) for i in range(shape.num_parts)])
+
+            # Draw facial landmarks
+            for (x, y) in shape:
+                cv2.circle(gray_image, (x, y), 2, (0, 255, 0), -1)
+
+        return shape, gray_image
+
+    def compute_golden_ratios(self, shape: np.array) -> dict:
+        top_mouth, middle_mouth, bottom_mouth = shape[51], shape[62], shape[57]
+        top_nose, bottom_nose = shape[27], shape[33]
+        bottom_chin = shape[8]
+
+        # 1
+        top_nose_to_middle_mouth_dist = np.linalg.norm(top_nose - middle_mouth)  # euclidean distance
+        middle_mouth_to_bottom_chin_dist = np.linalg.norm(middle_mouth - bottom_chin)
+        ratio_top_nose_to_middle_mouth_vs_middle_mouth_to_bottom_chin = top_nose_to_middle_mouth_dist/middle_mouth_to_bottom_chin_dist
+
+        # 2
+        top_mouth_to_middle_mouth_dist = np.linalg.norm(top_mouth - middle_mouth)
+        middle_mouth_to_bottom_mouth_dist = np.linalg.norm(middle_mouth - bottom_mouth)
+        ratio_middle_mouth_to_bottom_mouth_vs_top_mouth_to_middle_mouth = middle_mouth_to_bottom_mouth_dist/top_mouth_to_middle_mouth_dist
+
+        golden_ratios = {
+            "Ideal ratio (golden ratio)": self.golden_ratio,
+            "Top of nose to middle of mouth vs middle mouth to bottom of chin": ratio_top_nose_to_middle_mouth_vs_middle_mouth_to_bottom_chin,
+            "Middle of mouth to bottom of mouth vs top of mouth to middle of mouth": ratio_middle_mouth_to_bottom_mouth_vs_top_mouth_to_middle_mouth
+        }
+        return golden_ratios
+
+    @staticmethod
+    def compute_equal_ratios(shape: np.array) -> dict:
+        left_side_left_eye, right_side_left_eye, left_side_right_eye, right_side_right_eye = shape[36], shape[39], shape[42], shape[45]
+        left_eye_top, left_eye_bottom, right_eye_top, right_eye_bottom = shape[37], shape[41], shape[44], shape[46]
+        left_eyebrow_top, right_eyebrow_top = shape[19], shape[24]
+        left_eye_center = np.mean([shape[37], shape[38], shape[41], shape[40]], axis=0)
+        right_eye_center = np.mean([shape[43], shape[44], shape[47], shape[46]], axis=0)
+        left_mouth, right_mouth = shape[48], shape[54]
+
+        # 1
+        left_eye_dist = np.linalg.norm(left_side_left_eye - right_side_left_eye)
+        right_eye_dist = np.linalg.norm(left_side_right_eye - right_side_right_eye)
+        average_eye_dist = (left_eye_dist + right_eye_dist)/2
+        between_eye_dist = np.linalg.norm(right_side_left_eye - left_side_right_eye)
+        ratio_eyes_width_vs_between_eye = average_eye_dist/between_eye_dist
+
+        # 2
+        left_eye_to_eyebrow_dist = np.linalg.norm(left_eyebrow_top - left_eye_top)
+        right_eye_to_eyebrow_dist = np.linalg.norm(right_eyebrow_top - right_eye_top)
+        eye_to_eyebrow_dist = (left_eye_to_eyebrow_dist + right_eye_to_eyebrow_dist)/2
+        left_eye_height = np.linalg.norm(left_eye_top - left_eye_bottom)
+        right_eye_height = np.linalg.norm(right_eye_top - right_eye_bottom)
+        eye_height = (left_eye_height + right_eye_height)/2
+        ratio_eye_to_eyebrow_vs_eye_height = eye_to_eyebrow_dist/eye_height
+
+        # 3
+        left_to_right_eye_center_dist = np.linalg.norm(left_eye_center - right_eye_center)
+        mouth_width = np.linalg.norm(left_mouth - right_mouth)
+        ratio_left_to_right_eye_center_vs_mouth_width = left_to_right_eye_center_dist/mouth_width
+
+        equal_ratios = {
+            "Ideal ratio": 1,
+            "Eye width vs distance between eyes": ratio_eyes_width_vs_between_eye,
+            "Eye to eyebrows vs eye height": ratio_eye_to_eyebrow_vs_eye_height,
+            "Center of left to right eye vs mouth width": ratio_left_to_right_eye_center_vs_mouth_width
+        }
+        return equal_ratios
+
+
+    def main(self, image_input):
+        image = get_image(image_input)
+        gray_image = self.preprocess_image(image)
+        shape, image = self.detect_face_landmarks(gray_image)
+        golden_ratios = self.compute_golden_ratios(shape)
+        equal_ratios = self.compute_equal_ratios(shape)
+        image = PILImage.fromarray(image)
+        return golden_ratios, equal_ratios, image
+
+
+if __name__ == "__main__":
+    path_to_images = "data/"
+    image_files = os.listdir(path_to_images)
+    for image in image_files:
+        print(image)
+        results = GetFaceProportions().main(path_to_images + image)
+        print(results)
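As a quick sanity check of the ratio arithmetic in compute_golden_ratios, here is a small worked example; the landmark coordinates below are made up purely for illustration and are not taken from the committed code:

import numpy as np

# Hypothetical landmark positions (x, y) in pixels
top_nose = np.array([250, 320])
middle_mouth = np.array([250, 410])
bottom_chin = np.array([250, 465])

ratio = np.linalg.norm(top_nose - middle_mouth) / np.linalg.norm(middle_mouth - bottom_chin)
print(round(ratio, 3))  # 90 / 55 ≈ 1.636, close to the 1.618 golden-ratio target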
src/face_texture.py
CHANGED
@@ -14,7 +14,7 @@ class GetFaceTexture:
 
     @staticmethod
     def preprocess_image(image) -> np.array:
-        image = imutils.resize(image, width=
+        image = imutils.resize(image, width=500)
         gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
         return gray_image
 