V831——人脸识别通过串口向下位机发送指令
时间:2022-12-22 08:00:00
V831
人脸识别串口发送指令
前言
V831 人脸识别根据人脸框的 XY 坐标和估算出的 Z 距离，判断是否向下位机发送指令；下位机处理完成后再回传数据。
一、V831人脸识别
读取模型文件
class Face_recognize:
    """Face detection + recognition on the V831 using the MaixPy AWNN runtime.

    Loads a face-detector model and a feature-extraction model, then exposes
    helpers to map detector coordinates into display space and to match an
    extracted feature vector against the registered ``users`` list.
    """

    score_threshold = 70            # minimum recognition score to accept a match
    input_size = (224, 224, 3)      # detector input image size (W, H, C)
    input_size_fe = (128, 128, 3)   # feature-extractor input size
    feature_len = 256               # length of one face feature vector
    steps = [8, 16, 32]             # detector output strides, one per scale
    channel_num = 0                 # anchor/channel count; computed in __init__
    users = []                      # registered face features (NOTE: class-level, shared by all instances)
    threshold = 0.5                 # face detection confidence threshold
    nms = 0.3                       # non-maximum-suppression threshold
    max_face_num = 3                # maximum number of faces reported per frame
    # Labels assigned to registered faces, in registration order.
    names = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
             "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
    model = {
        "param": "/home/model/face_recognize/model_int8.param",
        "bin": "/home/model/face_recognize/model_int8.bin"
    }
    model_fe = {
        "param": "/home/model/face_recognize/fe_res18_117.param",
        "bin": "/home/model/face_recognize/fe_res18_117.bin"
    }

    def __init__(self):
        # Board-specific imports are kept local so the class definition can be
        # imported on a host machine that has no MaixPy runtime installed.
        from maix import nn, camera, image, display
        from maix.nn.app.face import FaceRecognize
        # Total anchel/channel count over all detector scales:
        # (W/step) * (H/step) * 2 anchors per cell.  The strides divide the
        # 224x224 input exactly, so integer division is exact here (the
        # original used float division followed by int()).
        channels = 0
        for step in self.steps:
            channels += (self.input_size[1] // step) * (self.input_size[0] // step) * 2
        self.channel_num = channels
        self.options = {  # detector model I/O description
            "model_type": "awnn",
            "inputs": {
                "input0": self.input_size
            },
            "outputs": {
                "output0": (1, 4, self.channel_num),
                "431": (1, 2, self.channel_num),
                "output2": (1, 10, self.channel_num)
            },
            "mean": [127.5, 127.5, 127.5],
            "norm": [0.0078125, 0.0078125, 0.0078125],
        }
        self.options_fe = {  # feature-extractor model I/O description
            "model_type": "awnn",
            "inputs": {
                "inputs_blob": self.input_size_fe
            },
            "outputs": {
                "FC_blob": (1, 1, self.feature_len)
            },
            "mean": [127.5, 127.5, 127.5],
            "norm": [0.0078125, 0.0078125, 0.0078125],
        }
        print("-- load model:", self.model)
        self.m = nn.load(self.model, opt=self.options)
        print("-- load ok")
        print("-- load model:", self.model_fe)
        self.m_fe = nn.load(self.model_fe, opt=self.options_fe)
        print("-- load ok")
        self.face_recognizer = FaceRecognize(self.m, self.m_fe, self.feature_len,
                                             self.input_size, self.threshold,
                                             self.nms, self.max_face_num)

    def map_face(self, box, points):
        """Scale a detector box and landmark points from 224x224 to 240x240.

        Returns the rescaled ``(box, points)`` pair.
        """
        def tran(x):
            return int(x / 224 * 240)
        box = list(map(tran, box))
        points = [list(map(tran, p)) for p in points]
        return box, points

    def recognize(self, feature):
        """Match *feature* against every registered user.

        Returns ``(index, score)`` of the best-scoring registered user.
        Raises ValueError when no users are registered (``max`` of an empty
        sequence) — callers should register at least one face first.
        """
        scores = [self.face_recognizer.compare(user, feature) for user in self.users]
        return max(enumerate(scores), key=lambda x: x[-1])

    def __del__(self):
        # Drop model handles explicitly so the NPU resources are released.
        del self.face_recognizer
        del self.m_fe
        del self.m
# Module-level singleton: loads both models once; the capture loop uses it.
face_recognizer = Face_recognize()
寻找人脸
from maix import camera, image, display

# Capture frames forever: run face detection on each frame, then draw the
# landmark points, the bounding box, and an "Unmarked face" label overlay.
while True:
    frame = camera.capture()
    detect_in = frame.copy().resize(224, 224)  # detector expects 224x224 input
    faces = face_recognizer.face_recognizer.get_faces(detect_in.tobytes(), False)
    if faces:
        for prob, box, landmarks, feature in faces:
            caption = "Unmarked face"
            box_color = (255, 0, 0)
            text_color = (255, 255, 255)
            # Map detector coordinates (224x224) into display space (240x240).
            box, points = face_recognizer.map_face(box, landmarks)
            text_wh = frame.get_string_size(caption)
            # Mark each facial landmark with a small 3x3 square.
            for pt in points:
                frame.draw_rectangle(pt[0] - 1, pt[1] - 1, pt[0] + 1, pt[1] + 1,
                                     color=box_color)
            # Bounding box, filled label background, then the label text.
            frame.draw_rectangle(box[0], box[1], box[0] + box[2], box[1] + box[3],
                                 color=box_color, thickness=2)
            frame.draw_rectangle(box[0], box[1] - text_wh[1],
                                 box[0] + text_wh[0], box[1],
                                 color=box_color, thickness=-1)
            frame.draw_string(box[0], box[1] - text_wh[1], caption,
                              color=text_color)
    display.show(frame)
二、代码实现
串口通信和单目测距的相关知识请参阅之前的博客，这里直接附上完整源码。
from maix import camera, image, display import serial ser = serial.Serial("/dev/ttyS1",115200) # 连接串口 UART-1 TX (PG6)和 UART-1 RX (PG7) K=116 x=0 y=0 class Face_recognize : score_threshold = 70 #识别分数阈值 input_size = (224, 224, 3) #输入图片尺寸 input_size_fe = (128, 128, 3) #输入人脸数据 feature_len = 256 #人脸数据宽度 steps = [8, 16, 32] # channel_num = 0 #通道数量 users = [] #初始化用户列表 threshold = 0.5 #人脸阈值 nms = 0.3 max_face_num = 3 #输出的画面中的人脸的最大个数 model = { "param": "/home/model/face_recognize/model_int8.param", "bin": "/home/model/face_recognize/model_int8.bin" } model_fe = { "param": "/home/model/face_recognize/fe_res18_117.param", "bin": "/home/model/face_recognize/fe_res18_117.bin" } def __init__(self): from maix import nn, camera, image, display from maix.nn.app.face import FaceRecognize for i in range(len(self.steps)): self.channel_num += self.input_size[1] / self.steps[i] * (self.input_size[0] / self.steps[i]) * 2 self.channel_num = int(self.channel_num) #统计通道数量 self.options = { #准备人脸输出参数 "model_type": "awnn", "inputs": { "input0": self.input_size }, "outputs": { "output0": (1, 4, self.channel_num) , "431": (1, 2, self.channel_num) , "output2": (1, 10, self.channel_num) }, "mean": [127.5, 127.5, 127.5], "norm": [0.0078125, 0.0078125, 0.0078125], } self.options_fe = { #准备特征提取参数 "model_type": "awnn", "inputs": { "inputs_blob": self.input_size_fe }, "outputs": { "FC_blob": (1, 1, self.feature_len) }, "mean": [127.5, 127.5, 127.5], "norm": [0.0078125, 0.0078125, 0.0078125], } print("-- load model:", self.model) self.m = nn.load(self.model, opt=self.options) print("-- load ok") print("-- load model:", self.model_fe) self.m_fe = nn.load(self.model_fe, opt=self.options_fe) print("-- load ok") self.face_recognizer = FaceRecognize(self.m, self.m_fe, self.feature_len, self.input_size, self.threshold, self.nms, self.max_face_num) def map_face(self, box,points): #将224*224空间的位置转换到240*240空间内 def tran(x): return int(x/224*240) box = list(map(tran, box)) def tran_p(p): return list(map(tran, p)) 
points = list(map(tran_p, points)) return box,points def recognize(self, feature): #进行人脸匹配 def _compare(user): #定义映射函数 return self.face_recognizer.compare(user, feature) #推测匹配分数 score相关分数 face_score_l = list(map(_compare,self.users)) #映射特征数据在记录中的比对分数 return max(enumerate(face_score_l), key=lambda x: x[-1]) #提取出人脸分数最大值和最大值所在的位置 def __del__(self): del self.face_recognizer del self.m_fe del self.m global face_recognizer face_recognizer = Face_recognize() while True: img = camera.capture() #获取224*224*3的图像数据 AI_img = img.copy().resize(224, 224) faces = face_recognizer.face_recognizer.get_faces(AI_img.tobytes(),False) #提取人脸特征信息 if faces: for prob, box, landmarks, feature in faces: disp_str = "face" bg_color = (0, 255, 0) font_color=(255, 0, 0) box,points = face_recognizer.map_face(box,landmarks) font_wh = img.get_string_size(disp_str) for p in points: img.draw_rectangle(p[0] - 1, p[1] -1, p[0] + 1, p[1] + 1, color=bg_color) img.draw_rectangle(box[0], box[1], box[0] + box[2], box[1] + box[3], color=bg_color, thickness=2) img.draw_rectangle(box[0], box[1] - font_wh[1], box[0] + font_wh[0], box[1], color=bg_color, thickness = -1) img.draw_string(box[0], box[1] - font_wh[1], disp_str, color=font_color) img.draw_string(0,30, "x="+str(((box[0]+box[3])/2-28)), color= font_color) img.draw_string(70,30, "y="+str((box[1]+box[2])/2-20), color= font_color) x=(box[0]+box[3])/2-28 #x坐标 y=(box[1]+box[2])/2-20 #y坐标 Lm = (box[1]+box[3])/2 length = K*13/Lm #距离 img.draw_string(0,60 , "Z="+str(round(length)), color= font_color) if 1<x<33 and 1<y<33 and 100>length>50: ser.write(b"0x5a 0x01 0x02 0x03 0x04\r\n") tmp=ser.read(4) #串口读取数据 ser.write(tmp) if 1<x<33 and 1<y<33 and 150>length>100: ser.write(b"0x5a 0x01 0x02 0x03 0x04\r\n") tmp=ser.read(4) #串口读取数据. ser.write(tmp) if 1<x<33 and 1<y<33 and 200>length>150: ser.write(b"0x5a 0x01 0x02 0x03 0x04\r\n") tmp=ser.read(4) #串口读取数据 ser