-
Notifications
You must be signed in to change notification settings - Fork 0
/
mlface.py
259 lines (226 loc) · 8.23 KB
/
mlface.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
#
#
# websocket server on port 4785, receives a jpg image
# matches the face(s) in the image to some 'known' faces
# returns a json with match values
#
import sys
import json
import argparse
import warnings
from datetime import datetime
import time, threading, sched
import socket
import os
import logging
import logging.handlers
import asyncio
import websockets
#import websocket
#import cv2
import numpy as np
import os.path
import pwd
import grp
import base64
import dlib
import face_recognition
from sys import platform
# ---- Module globals --------------------------------------------------
settings = None          # reserved for a parsed config file (unused here)
applog = None            # reserved; logging actually uses module-global `log`
isPi = False             # Raspberry-Pi flag (detection commented out in main)
use_cuda = True          # set in main() from --nogpu and the dlib CUDA probe
muted = False
five_min_thread = None   # watchdog threading.Timer created by five_min_timer()
debug = False            # (trailing semicolon removed)
'''
if platform == 'darwin':
  KNOWN_FACES_DIR = '/usr/local/lib/mlface/known_faces'
  #UNKNOWN_FACES_DIR = '/usr/local/lib/mlface/unknown_faces'
else:
  KNOWN_FACES_DIR = '/usr/local/lib/mlface/known_faces'
  #UNKNOWN_FACES_DIR = '/usr/local/lib/mlface/unknown_faces'
'''
TOLERANCE = 0.6          # face-distance cutoff passed to compare_faces
#FRAME_THICKNESS = 3
#FONT_THICKNESS = 2
MODEL = 'cnn'  # default: 'hog', other one can be 'cnn' - CUDA accelerated (if available) deep-learning pretrained model
known_faces = []         # 128-d face encodings, parallel with known_names
known_names = []         # label (person subdirectory name) per encoding
known_faces_dir = ''     # filled in from --dir in main()
# returns IP string of running system.
#
def get_ip():
    """Return this host's outward-facing IPv4 address as a dotted string.

    Connecting a UDP socket sends no packets; it merely makes the OS pick
    the outbound interface, whose address we then read back.  Falls back
    to 127.0.0.1 when no route is available.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    addr = '127.0.0.1'
    try:
        # target doesn't even have to be reachable
        probe.connect(('10.255.255.255', 1))
        addr = probe.getsockname()[0]
    except Exception:
        addr = '127.0.0.1'
    finally:
        probe.close()
    return addr
def init_models():
    """Load every known-face image and cache its 128-d encoding.

    known_faces_dir is organized as one subdirectory per person; the
    subdirectory name becomes the label.  Appends to the module-global
    parallel lists known_faces / known_names.
    """
    global log, known_faces, known_names, known_faces_dir
    print("known_faces_dir", known_faces_dir)
    for name in os.listdir(known_faces_dir):
        person_dir = f'{known_faces_dir}/{name}'
        if not os.path.isdir(person_dir):
            continue  # skip stray files (e.g. .DS_Store) in the gallery root
        # Next we load every file of faces of known person
        for filename in os.listdir(person_dir):
            log.info('working on {}/{}'.format(name, filename))
            # Load an image
            image = face_recognition.load_image_file(f'{person_dir}/{filename}')
            # face_encodings always returns a list of found faces; we assume
            # one face per training image and take the first.  An empty list
            # (no face found) raises IndexError, which we log and skip.
            try:
                encoding = face_recognition.face_encodings(image)[0]
            except IndexError:
                log.info(f"can't find a face in {name}/{filename}")
            else:
                # Append encodings and name
                known_faces.append(encoding)
                known_names.append(name)
def update_models(name, image):
    """Persist a new training image for `name` and add its encoding live.

    Writes the raw jpg bytes to known_faces_dir/<name>/<name>.jpg, fixes the
    file ownership/permissions, then encodes the face and appends it to the
    running known_faces / known_names lists.
    """
    global log, known_faces, known_names, known_faces_dir
    td = f'{known_faces_dir}/{name}'
    if not os.path.exists(td):
        os.mkdir(td)
    fp = f'{td}/{name}.jpg'
    with open(fp, 'wb') as f:
        f.write(image)
    # we are root, change the file ownership
    # NOTE(review): user "ccoupe" is hard-coded — deployment-specific.
    uid = pwd.getpwnam("ccoupe").pw_uid
    gid = grp.getgrnam("root").gr_gid
    os.chown(fp, uid, gid)
    os.chmod(fp, 0o664)
    log.info(f'created {fp}')
    img = face_recognition.load_image_file(fp)
    # IndexError means face_encodings found no face in the saved image.
    try:
        encoding = face_recognition.face_encodings(img)[0]
    except IndexError:
        log.info(f'is there a face in {fp}')
    else:
        # Append encodings and name
        known_faces.append(encoding)
        known_names.append(name)
        log.info(f'updated running models')
def long_timer_fired():
    """Watchdog callback: raise SystemExit when the one-shot timer fires.

    Formerly re-armed itself (see commented lines); now it simply exits so
    the process can be restarted fresh by its service manager.  Uses
    sys.exit() rather than the interactive-only exit() builtin, which is
    not guaranteed to exist under `python -S`.
    """
    global five_min_thread
    #five_min_thread = threading.Timer(5 * 60, long_timer_fired)
    #five_min_thread.start()
    sys.exit(0)
def five_min_timer():
    """Arm the one-shot watchdog timer.

    NOTE: despite the name, the delay is 1.5 minutes (90 s), not five.
    Stores the Timer in the module-global five_min_thread.
    """
    global five_min_thread
    print('creating long one shot timer')
    delay_secs = 1.5 * 60
    watchdog = threading.Timer(delay_secs, long_timer_fired)
    watchdog.start()
    five_min_thread = watchdog
# ---- websocket server: one request = one base64 jpg in, one JSON report out
async def wss_on_message(ws, path):
    """Handle a single websocket message: decode the jpg, locate faces,
    match them against the known_faces gallery, and reply with a JSON
    payload of match rectangles plus timing.

    NOTE(review): `path` matches the older websockets handler signature;
    it is unused here.  `hmqtt` in the global statement is not defined
    anywhere visible in this file — confirm it is dead.
    """
    global hmqtt, settings, log, use_cuda
    #log.info(f'wake up {path}')
    message = await ws.recv()
    start_time = datetime.now()
    # get the image sent to us.
    # TODO: write to memory chunk instead of file:
    # create a Numpy object from PIL.image(message)
    #
    imageBytes = base64.b64decode(message)
    o = open("/tmp/face.jpg","wb")
    o.write(imageBytes)
    o.close()
    image = face_recognition.load_image_file("/tmp/face.jpg")
    # numpy image arrays are (rows, cols, channels) = (height, width, ...)
    img_width = image.shape[1]
    img_height = image.shape[0]
    # Find face locations
    locations = face_recognition.face_locations(image, model=MODEL)
    # Now since we know locations, we can pass them to face_encodings as second argument
    # Without that it will search for faces once again slowing down whole process
    encodings = face_recognition.face_encodings(image, locations)
    # We passed our image through face_locations and face_encodings, so we can modify it
    # First we need to convert it from RGB to BGR as we are going to work with cv2
    #image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # But this time we assume that there might be more faces in an image - we can find faces of different people
    mats = []
    nm = None
    for face_encoding, face_location in zip(encodings, locations):
        # We use compare_faces (but might use face_distance as well)
        # Returns array of True/False values in order of passed known_faces
        results = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)
        # Since order is being preserved, we check if any face was found then grab index
        # then label (name) of first matching known face within a tolerance
        # TODO test with multiple known faces. Good luck getting that.
        if True in results:
            if nm is None:
                # keep the first matched label; reused for every later match
                nm = known_names[results.index(True)]
            # NOTE(review): face_recognition reports locations as
            # (top, right, bottom, left), but these keys label them
            # x/y/width/height — the wire format does not match the names.
            # Clients presumably compensate; confirm before changing.
            m = {'x': face_location[0], 'y': face_location[1],
                'width': face_location[2], 'height': face_location[3],
                'tag': nm, 'confidence': 1.00}
            #log.info(f'image has {nm} from {face_location}')
            mats.append(m)
    # locations is an array of (top, right, bottom, left) tuples
    end_time = datetime.now()
    el = end_time - start_time
    et = el.total_seconds()      # wall-clock seconds spent on this request
    #log.info(f'locations {locations}')
    dt = {"details": { 'plug': 'face', 'name': 'face', 'reason': 'face',
        'matrices': mats, 'imgWidth': img_width, 'imgHeight': img_height,
        'time' : et}
    }
    if len(encodings) > 0:
        log.info(f'found {nm} in image, took: {et}')
    else:
        log.info(f'no face for image, took: {et}')
    await ws.send(json.dumps(dt))
def wss_server_init(port):
    """Create (but do not start) the websocket server bound to this host.

    Stores the websockets.serve awaitable in the module-global wss_server;
    main() later hands it to the asyncio event loop.
    """
    global wss_server, log
    bind_addr = get_ip()
    wss_server = websockets.serve(wss_on_message, bind_addr, port)
def main():
    """Parse arguments, load the known-face gallery, and serve forever.

    Command line: -p/--port (default 4785), -s/--syslog, --nogpu,
    -d/--dir (gallery directory, default ./known_faces/).
    """
    # use_cuda added to the global list: previously a local shadowed the
    # module global, leaving module-level use_cuda stale.
    global isPi, settings, log, wss_server, MODEL, known_faces_dir, use_cuda
    # process cmdline arguments
    ap = argparse.ArgumentParser()
    #ap.add_argument("-c", "--conf", required=True, type=str,
    #   help="path and name of the json configuration file")
    ap.add_argument("-p", "--port", action='store', type=int, default=4785,
        nargs='?', help="server port number, 4785 is default")
    ap.add_argument("-s", "--syslog", action='store_true',
        default=False, help="use syslog")
    ap.add_argument("--nogpu", action='store_true', default=False,
        help="don't use gpu, default is will use gpu")
    ap.add_argument("-d", "--dir", type=str, default="./known_faces/",
        help="path to directory of known faces")
    args = vars(ap.parse_args())
    # logging setup
    # Note websockets is very chatty at DEBUG level. Sigh.
    log = logging.getLogger('mlface')
    if args['syslog']:
        log.setLevel(logging.INFO)
        handler = logging.handlers.SysLogHandler(address='/dev/log')
        # formatter for syslog (no date/time or appname; syslog adds those)
        formatter = logging.Formatter('%(name)s-%(levelname)-5s: %(message)s')
        handler.setFormatter(formatter)
        log.addHandler(handler)
    else:
        logging.basicConfig(level=logging.INFO, datefmt="%H:%M:%S",
            format='%(asctime)s %(levelname)-5s %(message)s')
    #isPi = os.uname()[4].startswith("arm")
    known_faces_dir = args['dir']
    # Pick the detection model: 'cnn' needs a CUDA-enabled dlib build.
    have_cuda = dlib.cuda.get_num_devices() > 0
    use_cuda = not args['nogpu'] and have_cuda
    log.info(f'loading models from {known_faces_dir}, have_cuda = {have_cuda}, use cuda = {use_cuda}')
    MODEL = "cnn" if use_cuda else "hog"
    init_models()
    wss_server_init(args['port'])
    five_min_timer()      # watchdog: process exits when this one-shot fires
    asyncio.get_event_loop().run_until_complete(wss_server)
    asyncio.get_event_loop().run_forever()   # blocks; nothing below runs
# Script entry point: exit status is main()'s return value (None -> 0).
if __name__ == '__main__':
    sys.exit(main())