If you need the source code and dataset, please like, follow, and bookmark, then leave a comment or send a private message~~~

Below we use the TensorFlow platform for a hands-on face recognition project, based on the Olivetti Faces image dataset. A sample of the dataset is shown below.
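As a quick sanity check (this snippet is my own addition, not part of the original program), the following sketch previews the first face of each person, assuming the faces_4/ directory layout used by the training code below: one subdirectory per person, each holding 57x47 grayscale images.

from os import listdir
from PIL import Image
import matplotlib.pyplot as plt

DATA_DIR = 'faces_4/'                # assumed dataset root: one subdirectory per person
persons = sorted(listdir(DATA_DIR))

fig, axes = plt.subplots(1, len(persons), figsize=(2 * len(persons), 3))
for ax, person in zip(axes, persons):
    # Show the first image found for this person
    first_img = sorted(listdir(DATA_DIR + person))[0]
    face = Image.open(DATA_DIR + person + '/' + first_img).convert('L')
    ax.imshow(face, cmap='gray')
    ax.set_title(person)
    ax.axis('off')
plt.show()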

The training process of the program is shown below.

Next, the CNN model is trained; you can watch the training progress and how the loss value changes.
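If you want to reproduce that loss curve yourself, here is a minimal sketch. It assumes you keep the History object returned by Keras model.fit (the train_model method in the listing further down discards its return value, so you would need to return or store it); matplotlib and the plot_history helper are my own additions, not part of the original program.

import matplotlib.pyplot as plt

def plot_history(history):
    # history is the object returned by model.fit(...)
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['accuracy'], label='accuracy')
    plt.xlabel('epoch')
    plt.legend()
    plt.show()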

Next, the face recognition results are shown.

Given a single query picture, the program automatically searches the image collection for similar faces, as shown in the figure above (a sketch of this lookup step is given below).
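The lookup step itself is not part of the code excerpt below, so the following is only a rough sketch of the idea, assuming the CNN from the listing has already been trained and saved to face_recognition.h5, that the query image is a 57x47 grayscale face, and that listdir returns the person subdirectories in the same order as during training; the find_similar_faces helper is hypothetical, my own naming.

import numpy as np
from os import listdir
from PIL import Image
from tensorflow.keras.models import load_model

def find_similar_faces(query_path, data_dir='faces_4/', model_path='face_recognition.h5'):
    # Load the trained CNN and preprocess the query image exactly like the training data
    model = load_model(model_path)
    img = np.asarray(Image.open(query_path).convert('L'), dtype='float32') / 255.0
    x = img.reshape(1, 57, 47, 1)                         # channels_last, as in DataSet below
    class_id = int(np.argmax(model.predict(x), axis=1)[0])
    # The predicted class index corresponds to one person's subdirectory
    # (relies on listdir giving the same ordering as GetDataset did during training)
    person_dir = listdir(data_dir)[class_id]
    return [data_dir + person_dir + '/' + f for f in listdir(data_dir + person_dir)]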

Part of the code is shown below. For the complete source code and dataset, please like, follow, and bookmark, then leave a comment or send a private message~~~

from os import listdir
import numpy as np
from PIL import Image
import cv2
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Flatten
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split

# Read one face image file into a 57x47 grayscale array
def img2vector(fileNamestr):
    image = Image.open(fileNamestr).convert('L')
    img = np.asarray(image).reshape(57, 47)
    return img

# Build the face dataset: one subdirectory per person under imgDataDir
def GetDataset(imgDataDir):
    print('| Step1 |: Get dataset...')
    FileDir = listdir(imgDataDir)
    m = len(FileDir)
    hwLabels = []
    hwdata = []
    # Read the image files one by one
    for i in range(m):
        # Each subdirectory is one class (one person)
        className = i
        subdirName = imgDataDir + str(FileDir[i]) + '/'
        fileNames = listdir(subdirName)
        lenFiles = len(fileNames)
        # Read every image file in this subdirectory
        for j in range(lenFiles):
            fileNamestr = subdirName + fileNames[j]
            hwLabels.append(className)
            imgarray = img2vector(fileNamestr)
            hwdata.append(imgarray)
    hwdata = np.array(hwdata)
    # The number of classes equals the number of subdirectories
    return hwdata, hwLabels, m

# CNN model class
class MyCNN(object):
    FILE_PATH = "face_recognition.h5"   # Path used to save/load the model
    picHeight = 57                      # Face images are 57 pixels high and 47 pixels wide
    picWidth = 47

    def __init__(self):
        self.model = None

    # Attach the training dataset
    def read_trainData(self, dataset):
        self.dataset = dataset

    # Build the Sequential model
    def build_model(self):
        print('| Step2 |: Init CNN model...')
        self.model = Sequential()
        print('self.dataset.X_train.shape[1:]', self.dataset.X_train.shape[1:])
        self.model.add(Convolution2D(filters=32, kernel_size=(5, 5), padding='same',
                                     input_shape=self.dataset.X_train.shape[1:]))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
        self.model.add(Convolution2D(filters=64, kernel_size=(5, 5), padding='same'))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
        self.model.add(Flatten())
        self.model.add(Dense(512))
        self.model.add(Activation('relu'))
        self.model.add(Dense(self.dataset.num_classes))
        self.model.add(Activation('softmax'))
        self.model.summary()

    # Train the model
    def train_model(self):
        print('| Step3 |: Train CNN model...')
        self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        # epochs: number of training passes; batch_size: samples per gradient update
        self.model.fit(self.dataset.X_train, self.dataset.Y_train, epochs=10, batch_size=20)

    def evaluate_model(self):
        loss, accuracy = self.model.evaluate(self.dataset.X_test, self.dataset.Y_test)
        print('| Step4 |: Evaluate performance...')
        print('===================================')
        print('Loss Value is :', loss)
        print('Accuracy Value is :', accuracy)

    def save(self, file_path=FILE_PATH):
        print('| Step5 |: Save model...')
        self.model.save(file_path)
        print('Model ', file_path, ' is successfully saved.')

# A class that stores the training data and reads it in a uniform format
class DataSet(object):
    def __init__(self, path):
        self.num_classes = None
        self.X_train = None
        self.X_test = None
        self.Y_train = None
        self.Y_test = None
        self.picWidth = 47
        self.picHeight = 57
        # Read the training data under path when the object is created
        self.makeDataSet(path)

    def makeDataSet(self, path):
        # Read the images, labels and number of classes from the given path
        imgs, labels, clasNum = GetDataset(path)
        # Shuffle the dataset and split it into training and test sets
        X_train, X_test, y_train, y_test = train_test_split(imgs, labels, test_size=0.2, random_state=1)
        # Reshape to channels_last (height, width, 1) and normalize to [0, 1]
        X_train = X_train.reshape(X_train.shape[0], self.picHeight, self.picWidth, 1) / 255.0
        X_test = X_test.reshape(X_test.shape[0], self.picHeight, self.picWidth, 1) / 255.0
        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        # Convert the labels to binary class matrices (one-hot encoding)
        Y_train = to_categorical(y_train, num_classes=clasNum)
        Y_test = to_categorical(y_test, num_classes=clasNum)
        # Store the formatted data in the object's attributes
        self.X_train = X_train
        self.X_test = X_test
        self.Y_train = Y_train
        self.Y_test = Y_test
        self.num_classes = clasNum

# Directory containing the face images
dataset = DataSet('faces_4/')
model = MyCNN()
model.read_trainData(dataset)
model.build_model()
model.train_model()
model.evaluate_model()
model.save()
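Running the script end to end prints the Step1 to Step5 messages and the final loss and accuracy from evaluate_model, and leaves the trained weights in face_recognition.h5, which the lookup sketch above reloads with load_model. For reference, the shapes flow through the network as follows: the 57x47x1 input becomes 57x47x32 after the first convolution, 29x24x32 after the first pooling layer, 29x24x64 and then 15x12x64 after the second block, and is flattened to 11520 values before the two dense layers.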

Creating this content is not easy; if you found it helpful, please like, follow, and bookmark~~~