#
import numpy as np
import tensorflow as tf
# Model hyperparameters.
# NOTE(review): `bath_size` looks like a typo for `batch_size`; it is unused
# here and a correctly-named `batch_size` is defined further down the file.
img_row, img_cols = 28, 28
num_class = 10
bath_size = 128
epoch = 10
# Number of filters for the two convolutional layers
conv1_filters = 32
conv2_filters = 64
kernel_size = (3, 3)
# Pooling window size
pool_size = (2, 2)
dro_ration = 0.25
# Load the MNIST dataset
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
import cv2

img = cv2.imread("./test3.png")


def face_demo():
    """Detect faces and eyes in the global `img` and outline them in red.

    Draws directly onto `img` and displays the result in a window named
    "result". The caller is responsible for the waitKey/destroy loop.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Load the Haar cascades bundled with opencv-python instead of a
    # hard-coded absolute path, so the script runs on any machine.
    face_cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_alt.xml')
    eye_cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_eye.xml')
    faces = face_cascade.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    eyes = eye_cascade.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=6, maxSize=(50, 50))
    for x, y, w, h in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), color=(0, 0, 255), thickness=10)
    for x, y, w, h in eyes:
        cv2.rectangle(img, (x, y), (x + w, y + h), color=(0, 0, 255), thickness=10)
    cv2.imshow("result", img)


face_demo()
while True:
    # Block until the user presses 'q', then close all windows.
    if cv2.waitKey(0) == ord('q'):
        break
cv2.destroyAllWindows()
import cv2
import face_recognition
import numpy as np

# Locate facial landmarks with the 'large' model and trace each facial
# feature as a red polyline on the image.
image = face_recognition.load_image_file('F:/88.jpg')
image = cv2.resize(image, (1000, 800))
face_landmarks_list = face_recognition.face_landmarks(
    image, face_locations=None, model='large')
print("循环遍历")
# Landmark groups returned by face_recognition's 'large' model; hoisted out
# of the loop since the list is constant.
FEATURE_KEYS = [
    'chin',
    'left_eyebrow',
    'right_eyebrow',
    'nose_bridge',
    'nose_tip',
    'left_eye',
    'right_eye',
    'top_lip',
    'bottom_lip',
]
for face_landmarks in face_landmarks_list:
    for feature in FEATURE_KEYS:
        # polylines expects an int32 array shaped (n_points, 1, 2).
        points = np.array(face_landmarks[feature], np.int32)
        points = points.reshape(-1, 1, 2)
        cv2.polylines(image, [points], False, (0, 0, 200), 2)
# face_recognition loads images as RGB; reverse the channel axis to BGR
# so cv2.imshow renders the colors correctly.
image = image[:, :, ::-1]
cv2.imshow('image', image)
cv2.waitKey(0)
import tensorflow as tf
import input_data  # legacy TF1-style MNIST helper (project-local module)

tf.__version__  # NOTE(review): bare expression with no effect; kept from original
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
print('训练数据集', mnist.train.num_examples)
print("验证机", mnist.validation.num_examples)
print("测试数据", mnist.test.num_examples)
print("训练数据集的维度", mnist.train.images.shape)
print("数据集的标签", mnist.train.labels.shape)

import matplotlib.pyplot as plt


def plot_images(image):
    """Render one flattened 28x28 MNIST image with a binary colormap."""
    plt.imshow(image.reshape(28, 28), cmap='binary')
    plt.show()


plot_images(mnist.train.images[10])
print(mnist.train.labels[10])

# Decode the one-hot label of samples 1..10000 and print "<sample>:<digit>".
# The position counter advances on every component of the label vector; the
# print fires only at the hot (==1) position.
for i in range(1, 10001):
    index = 0
    for n in mnist.train.labels[i]:
        if n == 1:
            print(str(i) + ":" + str(index))
        index = index + 1
# Baseline dense classifier on MNIST: flatten -> 128 ReLU -> dropout -> softmax.
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Scale pixel intensities from [0, 255] down to [0.0, 1.0].
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(10, activation='softmax'))

# Integer labels, so sparse categorical cross-entropy is the right loss.
optimizer = tf.keras.optimizers.Adam()
model.compile(
    optimizer=optimizer,
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
import numpy as np
import tensorflow as tf
# Use the public Keras API; tensorflow.python.* is a private path and
# keras.utils.np_utils has been removed from modern Keras releases.
from tensorflow.keras import models, layers

# Hyperparameters.
img_row, img_cols = 28, 28
num_class = 10
batch_size = 128
epochs = 10
conv1_filters = 32
conv2_filters = 64
kernel_size = (3, 3)
pool_size = (2, 2)
dro_ration = 0.25  # dropout rate

# Load MNIST, reshape to NHWC (samples, 28, 28, 1) and scale to [0, 1].
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_row, img_cols, 1)
x_train = x_train.astype('float32')
x_train /= 255.0
x_test = x_test.reshape(x_test.shape[0], img_row, img_cols, 1)
x_test = x_test.astype('float32')
x_test /= 255.0

# One-hot encode the labels for categorical_crossentropy.
y_train = tf.keras.utils.to_categorical(y_train, num_class)
y_test = tf.keras.utils.to_categorical(y_test, num_class)

# Three conv/pool/dropout stages followed by a dense classifier head.
model = models.Sequential()
model.add(layers.Conv2D(conv1_filters, kernel_size, activation='relu',
                        input_shape=(img_row, img_cols, 1)))
model.add(layers.MaxPooling2D(pool_size=pool_size))
model.add(layers.Dropout(dro_ration))
# Bug fix: the deeper conv layers all reused conv1_filters and the declared
# conv2_filters was never used; widen them as intended.
model.add(layers.Conv2D(conv2_filters, kernel_size, activation='relu'))
model.add(layers.MaxPooling2D(pool_size=pool_size))
model.add(layers.Dropout(dro_ration))
model.add(layers.Conv2D(conv2_filters, kernel_size, activation='relu'))
model.add(layers.MaxPooling2D(pool_size=pool_size))
model.add(layers.Dropout(dro_ration))
# Flatten infers its shape from the previous layer; the stray
# input_shape=(28, 28) argument it carried was wrong and is dropped.
model.add(layers.Flatten())
model.add(layers.Dense(batch_size, activation='relu'))  # 128 hidden units
model.add(layers.Dropout(dro_ration))
model.add(layers.Dense(num_class, activation='softmax'))

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train,
          y_train,
          epochs=epochs,
          batch_size=batch_size,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test)
print('Test loss', score[0])
print('Test accuracy', score[1])
# Directory typo fixed ('modle_data' -> 'model_data') to match the save
# path used by the later training script in this file.
model.save_weights('model_data/model')
# NOTE(review): this Sequential model is constructed but never assigned or
# used (dead code). It duplicates the dense classifier defined earlier in
# the file, except the final Dense layer emits raw logits (no softmax).
tf.keras.models.Sequential(
[tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10)])
import numpy as np
import tensorflow as tf
# Use the public Keras API; tensorflow.python.* is a private path and
# keras.utils.np_utils has been removed from modern Keras releases.
from tensorflow.keras import models, layers

# Hyperparameters.
img_row, img_cols = 28, 28
num_class = 10
batch_size = 128
epochs = 10
conv1_filters = 32
conv2_filters = 64
kernel_size = (3, 3)
pool_size = (2, 2)
dro_ration = 0.25  # dropout rate

# Load MNIST, reshape to NHWC (samples, 28, 28, 1) and scale to [0, 1].
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_row, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_row, img_cols, 1)
input_shape = (img_row, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train = x_train / 255.0
x_test = x_test / 255.0

# One-hot encode the labels for categorical_crossentropy.
y_train = tf.keras.utils.to_categorical(y_train, num_class)
y_test = tf.keras.utils.to_categorical(y_test, num_class)

# Two conv/pool/dropout stages followed by a dense classifier head.
model = models.Sequential()
model.add(layers.Conv2D(conv1_filters, kernel_size=kernel_size, activation='relu',
                        input_shape=input_shape))
model.add(layers.MaxPooling2D(pool_size=pool_size))
model.add(layers.Dropout(dro_ration))
# input_shape is only meaningful on the first layer; dropped from here on.
model.add(layers.Conv2D(conv2_filters, kernel_size=kernel_size, activation='relu'))
model.add(layers.MaxPooling2D(pool_size=pool_size))
model.add(layers.Dropout(dro_ration))
model.add(layers.Flatten())
model.add(layers.Dense(batch_size, activation='relu'))  # 128 hidden units
model.add(layers.Dropout(dro_ration))
model.add(layers.Dense(num_class, activation='softmax'))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=2)
model.save_weights('model_data/model')