Artificial Intelligence and Machine Learning Final Project

Experiment Content
1. Experiment tasks:
   1. Understand the various methods of facial-image feature extraction (at least HoG, Dlib, and convolutional-neural-network features).
   2. Master the process of splitting the smile dataset (GENKI-4K) into positive and negative samples, training a model, and testing it (at least SVM and CNN), and output the training and test accuracy (F1-score and ROC); a HoG + SVM baseline sketch is given right after this list.
   3. Write a program that captures your own face from a camera and classifies the expression (smile vs. non-smile) in real time, outputting the classification as text.
   4. Replace the smile dataset with a mask dataset, choose a suitable feature-extraction method, train a model for mask / no-mask classification, and repeat steps 2-3 above.
2. Complete the experiment report and the technical report. Publish the technical report as a blog post, submit the blog URL on the Xuexitong (学习通) platform, and send the code by e-mail. Write the experiment report using the original .doc report template (fill in "comprehensive, innovative" as the experiment type) and submit it to the same mailbox as well.
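
The rest of this report implements the CNN branch of task 2. For the HoG + SVM branch, the following is only a minimal sketch: it assumes the images have already been copied into the smile/unsmile folder tree built in the next section, and the paths, image size, and SVM settings are placeholder choices of my own, not values fixed by the assignment.

```python
# HoG + SVM baseline (sketch). The folder layout is assumed to match the
# smile/unsmile split built in the next section; adjust the paths as needed.
import os
import cv2
import numpy as np
from skimage.feature import hog
from sklearn.svm import LinearSVC
from sklearn.metrics import f1_score, roc_auc_score

def load_hog_features(folder, label, size=(64, 64)):
    """Compute a HoG descriptor for every image in `folder`."""
    feats, labels = [], []
    for name in os.listdir(folder):
        img = cv2.imread(os.path.join(folder, name), cv2.IMREAD_GRAYSCALE)
        if img is None:
            continue
        img = cv2.resize(img, size)
        # 9 orientations, 8x8-pixel cells, 2x2-cell blocks (common HoG settings)
        feats.append(hog(img, orientations=9, pixels_per_cell=(8, 8),
                         cells_per_block=(2, 2)))
        labels.append(label)
    return feats, labels

def build_set(split_dir):
    xs, ys = load_hog_features(os.path.join(split_dir, 'smile'), 1)
    xu, yu = load_hog_features(os.path.join(split_dir, 'unsmile'), 0)
    return np.array(xs + xu), np.array(ys + yu)

# Placeholder paths: the train/test folders created in the next section.
X_train, y_train = build_set('genki4k/smile_and_unsmile/train')
X_test, y_test = build_set('genki4k/smile_and_unsmile/test')

clf = LinearSVC(C=1.0, max_iter=10000)
clf.fit(X_train, y_train)

scores = clf.decision_function(X_test)      # continuous scores for ROC
preds = (scores > 0).astype(int)
print('Test F1-score:', f1_score(y_test, preds))
print('Test ROC AUC :', roc_auc_score(y_test, scores))
```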
Smile dataset (GENKI-4K): positive/negative sample split, model training, and testing
Training dataset
```python
import keras
keras.__version__

import os, shutil

# The path to the directory where the original dataset was uncompressed
original_dataset_dir = 'C:\\Users\\Desktop\\genki4k'

# The directory where we will store our smaller dataset
base_dir = 'C:\\Users\\Desktop\\genki4k\\smile_and_unsmile'
os.mkdir(base_dir)

# Directories for our training, validation and test splits
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)

# Directory with our training smile pictures
train_smile_dir = os.path.join(train_dir, 'smile')
os.mkdir(train_smile_dir)
# Directory with our training unsmile pictures
train_unsmile_dir = os.path.join(train_dir, 'unsmile')
os.mkdir(train_unsmile_dir)

# Directory with our validation smile pictures
validation_smile_dir = os.path.join(validation_dir, 'smile')
os.mkdir(validation_smile_dir)
# Directory with our validation unsmile pictures
validation_unsmile_dir = os.path.join(validation_dir, 'unsmile')
os.mkdir(validation_unsmile_dir)

# Directory with our test smile pictures
test_smile_dir = os.path.join(test_dir, 'smile')
os.mkdir(test_smile_dir)
# Directory with our test unsmile pictures
test_unsmile_dir = os.path.join(test_dir, 'unsmile')
os.mkdir(test_unsmile_dir)
```
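
The code above only creates the folder tree; the images themselves still have to be copied into it. Below is a minimal sketch of that step, assuming the original GENKI-4K images have already been sorted into smile and unsmile subfolders of original_dataset_dir; the index ranges are placeholders and should be adjusted to the real number of images in each class.

```python
# Continuation of the block above: copy the images into the new folder tree.
# Assumption: the original GENKI-4K images have already been sorted into
# 'smile' and 'unsmile' subfolders of original_dataset_dir. The index ranges
# below are placeholders -- adjust them to the real size of each class.
import os, shutil

def split_class(class_name, train_range, val_range, test_range):
    src_dir = os.path.join(original_dataset_dir, class_name)
    fnames = sorted(os.listdir(src_dir))
    for dst_root, (start, stop) in [(train_dir, train_range),
                                    (validation_dir, val_range),
                                    (test_dir, test_range)]:
        for fname in fnames[start:stop]:
            shutil.copyfile(os.path.join(src_dir, fname),
                            os.path.join(dst_root, class_name, fname))

split_class('smile',   (0, 1200), (1200, 1600), (1600, 2000))
split_class('unsmile', (0, 1200), (1200, 1600), (1600, 2000))

# Quick sanity check on the split sizes
print('training smile images:', len(os.listdir(train_smile_dir)))
print('training unsmile images:', len(os.listdir(train_unsmile_dir)))
```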
**Building a small convolutional neural network**

```python
from keras import layers
from keras import models

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
```
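
Before training, the network still has to be compiled. A typical configuration for this binary task is shown below; the RMSprop learning rate is my own choice, not a value fixed by the assignment.

```python
# Continuation: compile the model before training. The RMSprop learning
# rate here is an assumption, not a value fixed by the assignment.
from keras import optimizers

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])
```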
Data preprocessing
```python
from keras.preprocessing.image import ImageDataGenerator

# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    # This is the target directory
    train_dir,
    # All images will be resized to 150x150
    target_size=(150, 150),
    batch_size=20,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')
```
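
With the generators in place, the model can be trained and saved under the file name that the webcam script below loads. The sketch uses the older fit_generator/predict_generator API to match the rest of the code; the epoch and step counts are placeholders, and the F1-score and ROC AUC required by the assignment are computed on the held-out test split.

```python
# Continuation: train, save, and evaluate the model. Uses the older
# fit_generator / predict_generator API to match the code above; the
# epoch and step counts are placeholders.
from sklearn.metrics import f1_score, roc_auc_score

history = model.fit_generator(train_generator,
                              steps_per_epoch=100,
                              epochs=30,
                              validation_data=validation_generator,
                              validation_steps=50)

# Save under the file name loaded by the webcam script below.
model.save('smile_and_unsmile_2.h5')

# Evaluate on the held-out test split: F1-score and ROC AUC.
test_generator = test_datagen.flow_from_directory(test_dir,
                                                  target_size=(150, 150),
                                                  batch_size=20,
                                                  class_mode='binary',
                                                  shuffle=False)
probs = model.predict_generator(test_generator, steps=len(test_generator)).ravel()
preds = (probs > 0.5).astype(int)
print('Test F1-score:', f1_score(test_generator.classes, preds))
print('Test ROC AUC :', roc_auc_score(test_generator.classes, probs))
```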
Webcam smile recognition
```python
import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
import dlib
from PIL import Image

model = load_model('smile_and_unsmile_2.h5')
detector = dlib.get_frontal_face_detector()
video = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX

def rec(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dets = detector(gray, 1)
    if dets is not None:
        for face in dets:
            left = face.left()
            top = face.top()
            right = face.right()
            bottom = face.bottom()
            cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
            img1 = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
            img1 = np.array(img1) / 255.
            img_tensor = img1.reshape(-1, 150, 150, 3)
            prediction = model.predict(img_tensor)
            print(prediction)
            if prediction[0][0] > 0.5:
                result = 'unsmile'
            else:
                result = 'smile'
            cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.imshow('Video', img)

while video.isOpened():
    res, img_rd = video.read()
    if not res:
        break
    rec(img_rd)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
```
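
The script above uses dlib only as a face detector. Since the assignment also lists Dlib features as a feature-extraction method, here is a minimal sketch of extracting the 68 facial landmarks as a feature vector; it assumes the pre-trained shape_predictor_68_face_landmarks.dat file has been downloaded from the dlib website and placed next to the script.

```python
# Sketch: Dlib 68-landmark features (the assignment's "Dlib features").
# Assumes shape_predictor_68_face_landmarks.dat has been downloaded from
# the dlib model zoo and placed next to this script.
import cv2
import dlib
import numpy as np

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

def landmark_features(img):
    """Return a flat (136,) vector of the 68 landmark coordinates,
    normalised by the face box, or None if no face is found."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 1)
    if len(faces) == 0:
        return None
    face = faces[0]
    shape = predictor(gray, face)
    pts = np.array([[p.x, p.y] for p in shape.parts()], dtype=np.float32)
    # Normalise to the face bounding box so the vector is scale-invariant.
    pts[:, 0] = (pts[:, 0] - face.left()) / max(face.width(), 1)
    pts[:, 1] = (pts[:, 1] - face.top()) / max(face.height(), 1)
    return pts.ravel()
```

These landmark vectors could then replace the HoG descriptors in the SVM baseline sketched earlier.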
Mask-wearing detection
```python
import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
import dlib
from PIL import Image

model = load_model('C:\\Users\\UHS\\Desktop\\test\\smile_and_nosmile_1.h5')
detector = dlib.get_frontal_face_detector()
video = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX

def rec(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dets = detector(gray, 1)
    if dets is not None:
        for face in dets:
            left = face.left()
            top = face.top()
            right = face.right()
            bottom = face.bottom()
            cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
            img1 = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
            img1 = np.array(img1) / 255.
            img_tensor = img1.reshape(-1, 150, 150, 3)
            prediction = model.predict(img_tensor)
            print(prediction)
            if prediction[0][0] > 0.5:
                result = 'mask'
            else:
                result = 'nomask'
            cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.imshow('maskdetector', img)

while video.isOpened():
    res, img_rd = video.read()
    if not res:
        break
    rec(img_rd)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
```
(Figure: mask detection result)
Experiment Reflections
(1) When I did this project I had not configured the virtual-machine environment properly, so I ran the code in a different environment; but wherever the code is run, Keras has to be installed beforehand, otherwise the program will not start.
(2) My computer is very slow, and the smile recognition still would not run after a long time, which affected the smile-recognition part of this report.
(3) When running the code, make sure the file paths are written correctly in advance; my model training took a very long time before I discovered that the training-set path was wrong, so check the paths first.