Setting up a tensorflow-gpu development environment on Win10 x64

My machine has two Leadtek RTX 4000 GPUs.

Software to prepare:

pycharm-professional-2020.2.2
Anaconda3-5.3.1-Windows-x86_64
CUDA 10.1.243
cuDNN 8.0.3.33 (for CUDA 10.1)
tensorflow-gpu 2.3.0

Steps:

1. Install Anaconda, create an environment, and configure the Windows environment variables.

2. Install PyCharm (Community or Professional edition). Deleting the evaluation file resets the trial period.
File location: C:\Users\jerry\AppData\Roaming\JetBrains\PyCharm2020.2\eval

3. Point PyCharm at the TensorFlow environment:
PyCharm -> File -> Settings -> Project XXX -> Python Interpreter -> Add
Select the interpreter created by Anaconda, e.g. C:\Users\jerry\AppData\Local\conda\conda\envs\py37\python.exe

4. Install CMake (some Python modules are compiled during installation and need it).

5. Install VS 2019 Community; checking only the core C++ components is enough (some Python modules are compiled during installation and need a C++ compiler).

6. Install CUDA 10.1 with the online installer (deselect the bundled VS option if VS is already installed).

7. Download cuDNN 8.0.3.33 and, after CUDA finishes installing, extract it into the CUDA root directory.

8. Install tensorflow-gpu 2.3.0 with pip or conda (a verification sketch follows this list).

9. Install dlib with pip or conda (face recognition library, optional).

10. Install face_recognition with pip or conda (face recognition library, optional).
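
To verify steps 1 and 8, the sketch below creates the py37 environment referenced in step 3, installs TensorFlow, and confirms that both GPUs are visible. This is a minimal sketch of one valid command sequence, not the only one.

# Minimal verification sketch. In an Anaconda Prompt:
#   conda create -n py37 python=3.7
#   conda activate py37
#   pip install tensorflow-gpu==2.3.0
# Then, inside Python:
import tensorflow as tf

print(tf.__version__)                          # expect 2.3.0
print(tf.test.is_built_with_cuda())            # expect True
print(tf.config.list_physical_devices('GPU'))  # expect two GPU entries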

Experiments:

Case 1: Cats vs. Dogs (prepare the data, train first, then load the model and predict)

# -*- coding:utf-8 -*-
# @Time : 2020/10/11 15:41
# @Author: jerry
# @File : image_classification.py
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation, BatchNormalization
from matplotlib import pyplot as plt
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
import os

def train_model_ex(train_input_dir,IMAGE_WIDTH,IMAGE_HEIGHT,IMAGE_CHANNELS,batch_size,saved_model_path,FAST_RUN = False):
    df,train_df, validate_df,total_train,total_validate = prepare_data_ex(train_input_dir,IMAGE_WIDTH, IMAGE_HEIGHT,batch_size)
    train_datagen, train_generator = training_generator_ex(train_df, train_input_dir, IMAGE_WIDTH, IMAGE_HEIGHT,batch_size)
    validation_datagen, validation_generator = validation_generator_ex(validate_df,train_input_dir,IMAGE_WIDTH,IMAGE_HEIGHT,batch_size)

    model = build_model(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)

    earlystop = EarlyStopping(patience=10)
    learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                                patience=2,
                                                verbose=1,
                                                factor=0.5,
                                                min_lr=0.00001)
    callbacks = [earlystop, learning_rate_reduction]

    # See how our generator works
    example_df = train_df.sample(n=1).reset_index(drop=True)
    example_generator = train_datagen.flow_from_dataframe(
        example_df,
        train_input_dir,
        x_col='filename',
        y_col='category',
        target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
        class_mode='categorical'
    )
    plt.figure(figsize=(12, 12))
    for i in range(0, 15):
        plt.subplot(5, 3, i + 1)
        for X_batch, Y_batch in example_generator:
            image = X_batch[0]
            plt.imshow(image)
            break
    plt.tight_layout()
    plt.show()

    fit_ex(model,FAST_RUN,train_generator,total_train,validation_generator,total_validate,batch_size,callbacks,saved_model_path)

def prepare_data_ex(train_input_dir,IMAGE_WIDTH, IMAGE_HEIGHT,batch_size):
    # Prepare training data
    filenames = os.listdir(train_input_dir)
    categories = []
    for filename in filenames:
        category = filename.split('.')[0]
        if category == 'dog':
            categories.append(1)
        else:
            categories.append(0)

    df = pd.DataFrame({
        'filename': filenames,
        'category': categories
    })
    print(df.head())
    print(df.tail())

    # Prepare data
    df["category"] = df["category"].replace({0: 'cat', 1: 'dog'})
    train_df, validate_df = train_test_split(df, test_size=0.20, random_state=42)
    train_df = train_df.reset_index(drop=True)
    validate_df = validate_df.reset_index(drop=True)
    train_df['category'].value_counts().plot.bar()
    validate_df['category'].value_counts().plot.bar()
    total_train = train_df.shape[0]
    total_validate = validate_df.shape[0]

    return df,train_df, validate_df,total_train,total_validate

def training_generator_ex(train_df,train_input_dir,IMAGE_WIDTH,IMAGE_HEIGHT,batch_size):
    # Training generator with data augmentation
    train_datagen = ImageDataGenerator(
        rotation_range=15,
        rescale=1. / 255,
        shear_range=0.1,
        zoom_range=0.2,
        horizontal_flip=True,
        width_shift_range=0.1,
        height_shift_range=0.1
    )

    train_generator = train_datagen.flow_from_dataframe(
        train_df,
        train_input_dir,
        x_col='filename',
        y_col='category',
        target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
        class_mode='categorical',
        batch_size=batch_size
    )

    return train_datagen,train_generator

def validation_generator_ex(validate_df,validate_input_dir,IMAGE_WIDTH,IMAGE_HEIGHT,batch_size):
    # Validation Generator
    validation_datagen = ImageDataGenerator(rescale=1. / 255)
    validation_generator = validation_datagen.flow_from_dataframe(
        validate_df,
        validate_input_dir,
        x_col='filename',
        y_col='category',
        target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
        class_mode='categorical',
        batch_size=batch_size
    )

    return validation_datagen,validation_generator

def build_model(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS):
    # Build Model
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))  # 2 because we have cat and dog classes
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    model.summary()
    return model

def fit_ex(model,FAST_RUN,train_generator,total_train,validation_generator,total_validate,batch_size,callbacks,saved_model_path):
    # Fit the model
    epochs = 1 if FAST_RUN else 100
    history = model.fit(
        train_generator,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=total_validate // batch_size,
        steps_per_epoch=total_train // batch_size,
        callbacks=callbacks
    )
    #model.save_weights(saved_model_path)
    model.save(saved_model_path)
    visualize_training(history,epochs)

def visualize_training(history,epochs):
    # Visualize training history
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))
    ax1.plot(history.history['loss'], color='b', label="Training loss")
    ax1.plot(history.history['val_loss'], color='r', label="validation loss")
    ax1.set_xticks(np.arange(1, epochs, 1))
    ax1.set_yticks(np.arange(0, 1, 0.1))

    ax2.plot(history.history['accuracy'], color='b', label="Training accuracy")
    ax2.plot(history.history['val_accuracy'], color='r', label="Validation accuracy")
    ax2.set_xticks(np.arange(1, epochs, 1))

    ax1.legend(loc='best', shadow=True)
    ax2.legend(loc='best', shadow=True)
    plt.tight_layout()
    plt.show()

def testing_ex(test_input_dir,IMAGE_WIDTH,IMAGE_HEIGHT,batch_size):
    # Prepare Testing Data
    test_filenames = os.listdir(test_input_dir)
    test_df = pd.DataFrame({
        'filename': test_filenames
    })
    nb_samples = test_df.shape[0]

    # Create Testing Generator
    test_gen = ImageDataGenerator(rescale=1. / 255)
    test_generator = test_gen.flow_from_dataframe(
        test_df,
        test_input_dir,
        x_col='filename',
        y_col=None,
        class_mode=None,
        target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
        batch_size=batch_size,
        shuffle=False
    )
    return test_df,nb_samples,test_generator

def predict_ex(model_path, predict_input_dir, output_dir, IMAGE_WIDTH, IMAGE_HEIGHT, batch_size):
    test_df, nb_samples, test_generator = testing_ex(predict_input_dir, IMAGE_WIDTH, IMAGE_HEIGHT, batch_size)

    model = tf.keras.models.load_model(model_path)
    # Inspect the model architecture
    model.summary()
    # Predict
    predict = model.predict(test_generator, steps=int(np.ceil(nb_samples / batch_size)))
    test_df['category'] = np.argmax(predict, axis=-1)

    # Submission
    submission_df = test_df.copy()
    submission_df['id'] = submission_df['filename'].str.split('.').str[0]
    submission_df['label'] = submission_df['category']
    submission_df.drop(['filename', 'category'], axis=1, inplace=True)
    submission_df.to_csv(output_dir + 'submission.csv', index=False)

def test(model_path,predict_image_path, IMAGE_WIDTH, IMAGE_HEIGHT):
    model = tf.keras.models.load_model(model_path)
    # Predict
    img = tf.keras.preprocessing.image.load_img(
        predict_image_path, target_size=(IMAGE_HEIGHT, IMAGE_WIDTH)
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)  # Create a batch

    predictions = model.predict(img_array)
    score = predictions[0]  # the model's final layer is already softmax
    print(
        "This image most likely belongs to {} with a {:.2f} percent confidence."
            .format(np.argmax(score), 100 * np.max(score))
    )

def evaluate_ex(model_path, input_dir, IMAGE_HEIGHT, IMAGE_WIDTH, batch_size):
    model = tf.keras.models.load_model(model_path)
    model.summary()

    df, train_df, validate_df, total_train, total_validate = prepare_data_ex(input_dir, IMAGE_WIDTH, IMAGE_HEIGHT,
                                                                             batch_size)
    train_datagen, train_generator = training_generator_ex(train_df, input_dir, IMAGE_WIDTH, IMAGE_HEIGHT,
                                                           batch_size)
    validation_datagen, validation_generator = validation_generator_ex(validate_df, input_dir, IMAGE_WIDTH,
                                                                       IMAGE_HEIGHT, batch_size)

    test_loss, test_acc = model.evaluate(validation_generator, verbose=2)
    print('\nValidation accuracy:', test_acc)

def main_ex():
    FAST_RUN = False
    train_input_dir = "./dataset/cat_and_dog/input/train/"
    model_path = "./dataset/cat_and_dog/models/saved_model/model.h5"
    test_input_dir = "./dataset/cat_and_dog/input/test1/"
    output_dir = "./dataset/cat_and_dog/output/"
    predict_image_path = "./dataset/cat_and_dog/input/test1/10.jpg"

    IMAGE_WIDTH = 128
    IMAGE_HEIGHT = 128
    IMAGE_CHANNELS = 3
    batch_size = 16

    #train_model_ex(train_input_dir,IMAGE_WIDTH,IMAGE_HEIGHT,IMAGE_CHANNELS,batch_size,model_path,FAST_RUN)

    #evaluate_ex(model_path, train_input_dir, IMAGE_HEIGHT, IMAGE_WIDTH, batch_size)

    predict_ex(model_path, test_input_dir, output_dir, IMAGE_WIDTH, IMAGE_HEIGHT, batch_size)

    #test(model_path,predict_image_path,IMAGE_WIDTH, IMAGE_HEIGHT)

main_ex()
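
The machine described at the top has two RTX 4000 cards, but this script trains on a single GPU by default. Below is a minimal sketch of spreading training across both cards with TensorFlow's stock MirroredStrategy; extending the script this way is an assumption, not part of the original.

import tensorflow as tf

# Build the model inside a MirroredStrategy scope so variables are mirrored
# and gradients are synchronized across both GPUs.
strategy = tf.distribute.MirroredStrategy()
print("Replicas in sync:", strategy.num_replicas_in_sync)  # expect 2 here

with strategy.scope():
    model = build_model(128, 128, 3)  # build_model from the script above
# model.fit(...) then proceeds exactly as in fit_ex().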

Case 2: Flower classification (prepare the data, train first, then load the model and predict)

import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
import pathlib
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau


def train_model_ex(model_path, input_dir, img_height, img_width, batch_size,FAST_RUN):
    train_ds,val_ds,data_augmentation = prepare_data_ex(input_dir, img_height, img_width, batch_size)
    model = build_model(data_augmentation,img_height, img_width)
    history,epochs = fit_ex(model,model_path,train_ds,val_ds,FAST_RUN)
    visualize_training(history, epochs)

def prepare_data_ex(input_dir, img_height, img_width, batch_size):
    input_dir = pathlib.Path(input_dir)
    image_count = len(list(input_dir.glob('*/*.jpg')))
    print(image_count)

    train_ds = tf.keras.preprocessing.image_dataset_from_directory(
        input_dir,
        validation_split=0.2,
        subset="training",
        seed=123,
        image_size=(img_height, img_width),
        batch_size=batch_size)

    val_ds = tf.keras.preprocessing.image_dataset_from_directory(
        input_dir,
        validation_split=0.2,
        subset="validation",
        seed=123,
        image_size=(img_height, img_width),
        batch_size=batch_size)

    class_names = train_ds.class_names
    print(class_names)

    # Visualize the data
    # plt.figure(figsize=(10, 10))
    # for images, labels in train_ds.take(1):
    #     for i in range(9):
    #         ax = plt.subplot(3, 3, i + 1)
    #         plt.imshow(images[i].numpy().astype("uint8"))
    #         plt.title(class_names[labels[i]])
    #         plt.axis("off")
    # plt.show()

    for image_batch, labels_batch in train_ds:
        print(image_batch.shape)
        print(labels_batch.shape)
        break

    # Configure the dataset for performance
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
    val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

    # Normalize the data
    normalization_layer = layers.experimental.preprocessing.Rescaling(1. / 255)
    normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
    image_batch, labels_batch = next(iter(normalized_ds))
    first_image = image_batch[0]
    # Notice the pixels values are now in `[0,1]`.
    print(np.min(first_image), np.max(first_image))

    data_augmentation = keras.Sequential(
        [
            layers.experimental.preprocessing.RandomFlip("horizontal",
                                                         input_shape=(img_height,
                                                                      img_width,
                                                                      3)),
            layers.experimental.preprocessing.RandomRotation(0.1),
            layers.experimental.preprocessing.RandomZoom(0.1),
        ]
    )
    plt.figure(figsize=(10, 10))
    for images, _ in train_ds.take(1):
        for i in range(9):
            augmented_images = data_augmentation(images)
            ax = plt.subplot(3, 3, i + 1)
            plt.imshow(augmented_images[0].numpy().astype("uint8"))
            plt.axis("off")
    plt.show()

    return train_ds,val_ds,data_augmentation

def build_model(data_augmentation,img_height, img_width):
    # Build the model
    num_classes = 5

    model = Sequential([
        data_augmentation,
        layers.experimental.preprocessing.Rescaling(1. / 255),
        layers.Conv2D(16, 3, padding='same', activation='relu'),
        layers.MaxPooling2D(),
        layers.Conv2D(32, 3, padding='same', activation='relu'),
        layers.MaxPooling2D(),
        layers.Conv2D(64, 3, padding='same', activation='relu'),
        layers.MaxPooling2D(),
        layers.Dropout(0.2),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dense(num_classes)
    ])

    # Compile the model
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])

    # Print a model summary
    model.summary()
    return model

def fit_ex(model,model_path,train_ds,val_ds,FAST_RUN):
    earlystop = EarlyStopping(patience=10)
    learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                                patience=2,
                                                verbose=2,
                                                factor=0.5,
                                                min_lr=0.00001)
    callbacks = [earlystop, learning_rate_reduction]

    # Train the model
    epochs = 1 if FAST_RUN else 100
    history = model.fit(
        train_ds,
        validation_data=val_ds,
        epochs=epochs,
        callbacks=callbacks
    )

    model.save(model_path)

    return history,epochs

def visualize_training(history,epochs):
    # Visualize training results
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))
    ax1.plot(history.history['loss'], color='b', label="Training loss")
    ax1.plot(history.history['val_loss'], color='r', label="validation loss")
    ax1.set_xticks(np.arange(1, epochs, 1))
    ax1.set_yticks(np.arange(0, 1, 0.1))

    ax2.plot(history.history['accuracy'], color='b', label="Training accuracy")
    ax2.plot(history.history['val_accuracy'], color='r', label="Validation accuracy")
    ax2.set_xticks(np.arange(1, epochs, 1))

    ax1.legend(loc='best', shadow=True)
    ax2.legend(loc='best', shadow=True)
    plt.tight_layout()
    plt.show()

def predict_ex(model_path, predict_image_path, img_width, img_height):
    model = tf.keras.models.load_model(model_path)
    model.summary()

    img = keras.preprocessing.image.load_img(
        predict_image_path, target_size=(img_height, img_width)
    )
    img_array = keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)  # Create a batch

    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])

    print(
        "This image most likely belongs to {} with a {:.2f} percent confidence."
            .format(np.argmax(score), 100 * np.max(score))
    )

def evaluate_ex(model_path, input_dir, img_height, img_width, batch_size):
    model = tf.keras.models.load_model(model_path)
    model.summary()
    train_ds, val_ds, data_augmentation = prepare_data_ex(input_dir, img_height, img_width, batch_size)
    test_loss, test_acc = model.evaluate(val_ds, verbose=2)
    print('\nValidation accuracy:', test_acc)

def main_ex():
    FAST_RUN = False
    batch_size = 15
    img_height = 180
    img_width = 180

    model_path = "./dataset/flower_photos/models/saved_model/model.h5"
    input_dir = "./dataset/flower_photos/input/"
    predict_image_path = "./dataset/flower_photos/input/roses/319298955_0c72bd36bf.jpg"
    # train_model_ex(model_path, input_dir, img_height, img_width, batch_size, FAST_RUN)

    evaluate_ex(model_path, input_dir, img_height, img_width, batch_size)

    # 0 daisy, 1 dandelion, 2 roses, 3 sunflowers, 4 tulips
    # predict_ex(model_path, predict_image_path, img_width, img_height)

main_ex()

Case 3: dlib face detection and landmarks

#coding=utf-8
# Image face detection - dlib version
import cv2
import dlib

path = "img/tly0.jpg"
img = cv2.imread(path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Frontal face detector
detector = dlib.get_frontal_face_detector()
# 68-point facial landmark predictor
predictor = dlib.shape_predictor(
    "data/shape_predictor_68_face_landmarks.dat"
)

dets = detector(gray, 1)
for face in dets:
    # Mark the face in the image and display it
    # left = face.left()
    # top = face.top()
    # right = face.right()
    # bottom = face.bottom()
    # cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
    # cv2.imshow("image", img)

    shape = predictor(img, face)  # locate the 68 facial landmarks
    # Iterate over the landmarks and circle each one
    for pt in shape.parts():
        pt_pos = (pt.x, pt.y)
        cv2.circle(img, pt_pos, 1, (0, 255, 0), 2)
    cv2.imshow("image", img)

cv2.waitKey(0)
cv2.destroyAllWindows()

Case 4: OpenCV face detection

#coding=utf-8
# Image face detection - OpenCV version
import cv2
import datetime

filepath = "img/duoren.jpg"
# OpenCV Haar cascade face classifier
classifier = cv2.CascadeClassifier(
    "data/haarcascades/haarcascade_frontalface_default.xml"
)

# Start time
startTime = datetime.datetime.now()

img = cv2.imread(filepath)  # read the image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
color = (0, 255, 0)  # drawing color
# Detect faces
faceRects = classifier.detectMultiScale(
    gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
if len(faceRects):  # non-empty means at least one face was detected
    for faceRect in faceRects:  # box each face individually
        x, y, w, h = faceRect
        # Draw the face rectangle
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 1)
        # Left eye (inaccurate)
        # cv2.circle(img, (x + w // 4, y + h // 4 + 30), min(w // 8, h // 8),color)
        # Right eye (inaccurate)
        # cv2.circle(img, (x + 3 * w // 4, y + h // 4 + 30), min(w // 8, h // 8), color)
        # Mouth (inaccurate)
        # cv2.rectangle(img, (x + 3 * w // 8, y + 3 * h // 4),(x + 5 * w // 8, y + 7 * h // 8), color)

# End time
endTime = datetime.datetime.now()
print((endTime - startTime))
cv2.imshow("image", img)  # 显示图像
cv2.waitKey(0)
cv2.destroyAllWindows()

Case 5: OpenCV facial expression recognition

#coding=utf-8
# Expression recognition

import cv2
from keras.models import load_model
import numpy as np
import chineseText  # local helper for drawing Chinese text on images (sketch below)
import datetime

startTime = datetime.datetime.now()
emotion_classifier = load_model(
    'classifier/emotion_models/simple_CNN.530-0.65.hdf5')
endTime = datetime.datetime.now()
print(endTime - startTime)

emotion_labels = {
    0: '生气',  # angry
    1: '厌恶',  # disgust
    2: '恐惧',  # fear
    3: '开心',  # happy
    4: '难过',  # sad
    5: '惊喜',  # surprise
    6: '平静'  # calm
}

img = cv2.imread("img/duoren.jpg")
face_classifier = cv2.CascadeClassifier(
    "data\haarcascades\haarcascade_frontalface_default.xml"
)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(
    gray, scaleFactor=1.2, minNeighbors=3, minSize=(40, 40))
color = (255, 0, 0)

for (x, y, w, h) in faces:
    gray_face = gray[(y):(y + h), (x):(x + w)]
    gray_face = cv2.resize(gray_face, (48, 48))
    gray_face = gray_face / 255.0
    gray_face = np.expand_dims(gray_face, 0)
    gray_face = np.expand_dims(gray_face, -1)
    emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
    emotion = emotion_labels[emotion_label_arg]
    cv2.rectangle(img, (x + 10, y + 10), (x + w - 10, y + h - 10),
                  (255, 255, 255), 2)
    img = chineseText.cv2ImgAddText(img, emotion, x + h * 0.3, y, color, 20)

cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
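
The chineseText module imported above is a local helper that the original post does not include. OpenCV's putText cannot render Chinese glyphs, so such a helper typically round-trips through PIL; the sketch below is a hypothetical implementation (the function name and parameters follow the call above, and simhei.ttf is an assumed Windows font with CJK glyphs):

# chineseText.py - hypothetical implementation of the helper used above
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont

def cv2ImgAddText(img, text, left, top, color=(0, 255, 0), size=20):
    # Convert the OpenCV BGR array to a PIL RGB image
    if isinstance(img, np.ndarray):
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img)
    font = ImageFont.truetype("simhei.ttf", int(size), encoding="utf-8")
    draw.text((left, top), text, fill=color, font=font)
    # Convert back to an OpenCV BGR array
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)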

Case 6: Automatic makeup with face_recognition

#coding=utf-8
# Digital makeup
import face_recognition
from PIL import Image, ImageDraw

# Load the image into a numpy array
image = face_recognition.load_image_file("img/tly0.jpg")

# Locate facial features
face_landmarks_list = face_recognition.face_landmarks(image)

for face_landmarks in face_landmarks_list:
    pil_image = Image.fromarray(image)
    d = ImageDraw.Draw(pil_image, 'RGBA')

    # Draw the eyebrows
    d.polygon(face_landmarks['left_eyebrow'], fill=(68, 54, 39, 128))
    d.polygon(face_landmarks['right_eyebrow'], fill=(68, 54, 39, 128))
    d.line(face_landmarks['left_eyebrow'], fill=(68, 54, 39, 150), width=2)
    d.line(face_landmarks['right_eyebrow'], fill=(68, 54, 39, 150), width=2)

    # Draw the lips
    d.polygon(face_landmarks['top_lip'], fill=(120, 0, 0, 128))
    d.polygon(face_landmarks['bottom_lip'], fill=(120, 0, 0, 128))
    d.line(face_landmarks['top_lip'], fill=(120, 0, 0, 64), width=2)
    d.line(face_landmarks['bottom_lip'], fill=(120, 0, 0, 64), width=2)

    # Draw the eyes
    d.polygon(face_landmarks['left_eye'], fill=(255, 255, 255, 30))
    d.polygon(face_landmarks['right_eye'], fill=(255, 255, 255, 30))

    # Draw the eyeliner
    d.line(
        face_landmarks['left_eye'] + [face_landmarks['left_eye'][0]],
        fill=(0, 0, 0, 110),
        width=6)
    d.line(
        face_landmarks['right_eye'] + [face_landmarks['right_eye'][0]],
        fill=(0, 0, 0, 110),
        width=6)

    pil_image.show()

Case 7: Face recognition with face_recognition (grabs webcam frames via OpenCV; requires a camera)

#coding=utf-8
# Face recognition using the face_recognition module
import cv2
import face_recognition
import os

path = "img/face_recognition"  # 模型数据图片目录
cap = cv2.VideoCapture(0)
total_image_name = []
total_face_encoding = []
for fn in os.listdir(path):  # fn is the image file name
    print(path + "/" + fn)
    total_face_encoding.append(
        face_recognition.face_encodings(
            face_recognition.load_image_file(path + "/" + fn))[0])
    fn = fn[:(len(fn) - 4)]  # strip the extension (name each image after the person it shows)
    total_image_name.append(fn)  # list of known names
while True:
    ret, frame = cap.read()
    # Find all faces and face encodings in the current frame
    face_locations = face_recognition.face_locations(frame)
    face_encodings = face_recognition.face_encodings(frame, face_locations)
    # Loop over each face found in this frame
    for (top, right, bottom, left), face_encoding in zip(
            face_locations, face_encodings):
        # Check whether the face matches any known face
        for i, v in enumerate(total_face_encoding):
            match = face_recognition.compare_faces(
                [v], face_encoding, tolerance=0.5)
            name = "Unknown"
            if match[0]:
                name = total_image_name[i]
                break
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a name label along the bottom of the box
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255),
                      cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                    (255, 255, 255), 1)
    # Show the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

Case 8: Outlining facial features with face_recognition

#coding=utf-8
# Draw facial feature outlines
import face_recognition
from PIL import Image, ImageDraw

# Load the image file into a numpy array
image = face_recognition.load_image_file("img/tly0.jpg")

# Find all facial features for every face in the image
face_landmarks_list = face_recognition.face_landmarks(image)

for face_landmarks in face_landmarks_list:
    facial_features = [
        'chin',
        'left_eyebrow',
        'right_eyebrow',
        'nose_bridge',
        'nose_tip',
        'left_eye',
        'right_eye',
        'top_lip',
        'bottom_lip'
    ]
    pil_image = Image.fromarray(image)
    d = ImageDraw.Draw(pil_image)
    for facial_feature in facial_features:
        d.line(face_landmarks[facial_feature], fill=(0, 255, 0), width=2)
    pil_image.show()

Case 9: OCR text recognition (install Tesseract-OCR, check Chinese language support during setup, and point pytesseract at the Tesseract binary; see the sketch below)
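
The original note suggests editing the environment inside the pytesseract module by hand; the Tesseract path can instead be set at runtime. A one-line sketch (the install path is an assumption for a default Windows install; adjust to yours):

import pytesseract

# Point pytesseract at the Tesseract binary instead of editing the module
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"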

#coding=utf-8
# Text recognition
from PIL import Image
import pytesseract
import cv2

path = "img/text-img.png"

text = pytesseract.image_to_string(Image.open(path),lang='chi_sim')
print(text)

img = cv2.imread(path)
cv2.imshow("Image", img)

cv2.waitKey(0)
cv2.destroyAllWindows()

 
