Google Doodle Recognition Challenge Project (Part 2)


Reading the data with from_generator

import glob
import tensorflow as tf
import numpy as np
import pandas as pd
import os
import cv2
import json
import matplotlib.pyplot as plt
os.environ['CUDA_VISIBLE_DEVICES']='1'


# For each csv.gz shard, the label column y is already numeric; the open question is how to turn the 'drawing' column into model input.
# Each 'drawing' field is a JSON string: a list of strokes, each stroke being [x_coordinates, y_coordinates].

# Single-channel (grayscale) rendering
def draw_cv2(raw_strokes, size=256, lw=6, time_color=True):
    img = np.zeros((BASE_SIZE, BASE_SIZE), np.uint8)  # blank canvas (BASE_SIZE is a module-level constant, 256)
    for t, stroke in enumerate(raw_strokes):
        for i in range(len(stroke[0]) - 1):
            # earlier strokes are drawn brighter, so intensity encodes stroke order
            color = 255 - min(t, 10) * 13 if time_color else 255
            _ = cv2.line(img, (stroke[0][i], stroke[1][i]),
                         (stroke[0][i + 1], stroke[1][i + 1]), color, lw)
    if size != BASE_SIZE:
        return cv2.resize(img, (size, size))
    else:
        return img
    
fileList = glob.glob("./shuffle_data_gzip/*.csv.gz")
BASE_SIZE = 256
a = json.loads(pd.read_csv(fileList[0]).loc[0, 'drawing'])
plt.imshow(draw_cv2(a, time_color=False))
plt.show()

[Figure: grayscale rendering of the first drawing]
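To make the stroke format concrete, here is a minimal self-contained check; the two-stroke drawing below is made up purely for illustration:

# A hypothetical drawing with two strokes; each stroke is [x_coords, y_coords].
toy_strokes = [
    [[10, 120, 250], [10, 200, 30]],   # stroke 0: three points
    [[30, 230], [240, 240]],           # stroke 1: two points
]
toy_img = draw_cv2(toy_strokes, size=64, time_color=True)
print(toy_img.shape, toy_img.dtype)  # (64, 64) uint8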

# RGB rendering: the G channel encodes stroke order, the B channel encodes position within a stroke
def draw_cv2(raw_strokes, size=256, lw=6):
    img = np.zeros((BASE_SIZE, BASE_SIZE, 3), np.uint8)
    for t, stroke in enumerate(raw_strokes):
        points_count = max(len(stroke[0]) - 1, 1)  # guard against single-point strokes
        grad = 255 // points_count
        for i in range(len(stroke[0]) - 1):
            _ = cv2.line(img, (stroke[0][i], stroke[1][i]),
                         (stroke[0][i + 1], stroke[1][i + 1]),
                         (255, 255 - min(t, 10) * 13, max(255 - grad * i, 20)), lw)
    if size != BASE_SIZE:
        img = cv2.resize(img, (size, size))
    return img
plt.imshow(draw_cv2(a))

[Figure: RGB rendering of the same drawing]

class DataLoader(object):
    def __init__(self, resize_height=64, resize_width=64, batch_size=512, fileList=None, size=256, lw=6):
        self.resize_height = resize_height  # image height
        self.resize_width = resize_width    # image width
        self.batch_size = batch_size        # batch size
        self.fileList = fileList            # list of csv.gz shards
        self.size = size                    # rendered image size
        self.lw = lw                        # line width

    def __call__(self):
        def _generator(size, lw):
            while True:  # loop over the shards indefinitely
                for filename in np.random.permutation(self.fileList):
                    df = pd.read_csv(filename)
                    df['drawing'] = df['drawing'].apply(json.loads)
                    x = np.zeros((len(df), size, size))
                    for i, raw_strokes in enumerate(df.drawing.values):
                        x[i] = draw_cv2(raw_strokes, size=size, lw=lw)
                    x = x / 255.
                    x = x.reshape((len(df), size, size, 1)).astype(np.float32)
                    # to_categorical returns float32, so declare float32 below
                    y = tf.keras.utils.to_categorical(df.y, num_classes=n_labels)
                    for x_i, y_i in zip(x, y):
                        yield (x_i, y_i)

        dataset = tf.data.Dataset.from_generator(generator=_generator,
                                                 output_types=(tf.dtypes.float32, tf.dtypes.float32),
                                                 output_shapes=((self.resize_height, self.resize_width, 1), (n_labels,)),
                                                 args=(self.size, self.lw))
        # shuffle -> batch -> prefetch is the idiomatic order
        dataset = dataset.shuffle(buffer_size=10240).batch(self.batch_size)
        dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        return dataset
fileList = glob.glob("./shuffle_data_gzip/*.csv.gz") 
fileList[0]

[Output: the path of the first shuffled csv.gz shard]

DP_DIR = './shuffle_data_gzip/'


BASE_SIZE = 256
n_labels = 340
np.random.seed(seed=1987)
size = 64
batchsize = 1024    
fileList = glob.glob("./shuffle_data_gzip/*.csv.gz")
train_fileList = fileList[:-1]  # all but the last shard for training
val_fileList = fileList[-1:]    # hold out the last shard for validation
train_ds = DataLoader(resize_height=64, resize_width=64, batch_size=batchsize, fileList=train_fileList, size=size, lw=6)()
val_ds = DataLoader(resize_height=64, resize_width=64, batch_size=batchsize, fileList=val_fileList, size=size, lw=6)()
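As an optional sanity check, pull a single batch and confirm the shapes match what the model will expect:

# Take one batch from the training pipeline and inspect its shapes.
for x_batch, y_batch in train_ds.take(1):
    print(x_batch.shape, y_batch.shape)  # expected: (1024, 64, 64, 1) (1024, 340)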
        
    

Model building

inputs = tf.keras.layers.Input(shape=(size, size, 1))
# weights=None is required for a 1-channel input: pretrained ImageNet weights assume 3 channels.
# (classes is ignored when include_top=False; the classification head is added manually below.)
base_model = tf.keras.applications.MobileNet(input_shape=(size, size, 1), include_top=False, weights=None, classes=n_labels)
x = base_model(inputs)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1024, activation='relu')(x)
predictions = tf.keras.layers.Dense(n_labels, activation='softmax')(x)
model = tf.keras.models.Model(inputs=inputs, outputs=predictions)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.002),
              loss='categorical_crossentropy',
              metrics=[tf.keras.metrics.CategoricalCrossentropy(), 
                       tf.keras.metrics.CategoricalAccuracy(), 
                       tf.keras.metrics.TopKCategoricalAccuracy(k=3,name='top_3_categorical_accuracy')])
# base_model = tf.keras.applications.MobileNet(input_shape=(size, size, 1), include_top=False, weights=None, classes=n_labels)
# x = base_model.output
# x = tf.keras.layers.Flatten()(x)
# x = tf.keras.layers.Dense(1024, activation='relu')(x)
# predictions = tf.keras.layers.Dense(n_labels, activation='softmax')(x)
# model = tf.keras.models.Model(inputs=base_model.input, outputs=predictions)
model.summary()

[Output: model.summary()]

callbacks = [
    tf.keras.callbacks.ReduceLROnPlateau(monitor='val_top_3_categorical_accuracy', factor=0.75, patience=3, min_delta=0.001,
                          mode='max', min_lr=1e-5, verbose=1),
    tf.keras.callbacks.ModelCheckpoint('model_all.h5', monitor='val_top_3_categorical_accuracy', mode='max', save_best_only=True,
                    save_weights_only=True),
]

model.fit(
    train_ds, epochs=50, verbose=1, steps_per_epoch=48500,  # steps per epoch; set e.g. 1000 for quick iteration
    validation_data = val_ds,
    validation_steps = 400,
    callbacks = callbacks
)
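A quick scale check: steps_per_epoch=48500 at batchsize=1024 works out to 48,500 × 1,024 ≈ 49.7 million drawings per epoch, roughly one full pass over the shuffled training shards, which is why dropping to ~1,000 steps is the practical setting while iterating on the model.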



[Output: training log]

def apk(actual, predicted, k=3):
    """Average precision at k for a single sample.
    Source: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
    """
    if len(predicted) > k:
        predicted = predicted[:k]
    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(predicted):
        if p in actual and p not in predicted[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    if not actual:
        return 0.0
    return score / min(len(actual), k)

def mapk(actual, predicted, k=3):
    """Mean average precision at k over all samples.
    Source: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
    """
    return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)])
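A tiny worked example (values chosen for illustration): a hit at rank 1 scores 1.0, a hit at rank 3 scores 1/3, so MAP@3 over the two samples is about 0.667.

# Each actual is a list holding the single true label; each predicted is the top-3 guesses.
print(apk([1], [1, 5, 9], k=3))                        # 1.0   (hit at rank 1)
print(apk([2], [7, 8, 2], k=3))                        # 0.333 (hit at rank 3)
print(mapk([[1], [2]], [[1, 5, 9], [7, 8, 2]], k=3))   # 0.667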

With MAP@3 defined, the last step is to render the test drawings, run model.predict over them, and keep the top-3 classes per drawing; a minimal sketch follows.
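A minimal sketch, assuming a Kaggle-style ./test_simplified.csv (the path is an assumption) and the single-channel draw_cv2 from the top of this post; it reloads the best checkpoint written by ModelCheckpoint above:

test = pd.read_csv('./test_simplified.csv')  # assumed path; adjust to your layout
x_test = np.zeros((len(test), size, size, 1), np.float32)
for i, raw_strokes in enumerate(test['drawing'].apply(json.loads).values):
    # single-channel draw_cv2; scale to [0, 1] to match training
    x_test[i, :, :, 0] = draw_cv2(raw_strokes, size=size, lw=6) / 255.
model.load_weights('model_all.h5')  # best weights saved by the checkpoint callback
probs = model.predict(x_test, batch_size=batchsize, verbose=1)
top3 = np.argsort(-probs, axis=1)[:, :3]  # top-3 class indices per drawing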

Reading the data with TextLineDataset

import glob
import tensorflow as tf
import numpy as np
import pandas as pd
import os
import cv2
import json
import matplotlib.pyplot as plt
os.environ['CUDA_VISIBLE_DEVICES'] = '2'

DP_DIR = './shuffle_data_gzip/'


BASE_SIZE = 256
NCSVS = 100  # number of shuffled csv.gz shards
NCATS = 340  # number of classes
np.random.seed(seed=1987)

STEPS = 800
EPOCHS = 16
size = 64
batchsize = 680
fileList = glob.glob("./shuffle_data_gzip/*.csv.gz")
fileList[1]

[Output: the path of the second shuffled csv.gz shard]

# pd.read_csv('./shuffle_data_gzip/train_k52.csv.gz', sep=',').shape
def parse_csv(line):
    column_default = [tf.constant("0", dtype=tf.string),  # drawing (JSON string)
                      tf.constant(0, dtype=tf.int32)]     # label y
    columns = tf.io.decode_csv(line, column_default, select_cols=[1, 5])
    features = columns[0]
    label = columns[1]
    return features, label
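To see what parse_csv produces, feed it one raw CSV line. The line below is made up to mimic the shard layout assumed by select_cols=[1, 5] (column 1 is the drawing, column 5 is the label); the column names are assumptions:

# Hypothetical row: countrycode, drawing, key_id, recognized, word, y, cv
sample_line = tf.constant('US,"[[[0, 255], [0, 255]]]",42,True,cat,5,0')
features, label = parse_csv(sample_line)
print(features.numpy(), label.numpy())  # b'[[[0, 255], [0, 255]]]' 5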


def draw_cv2(raw_strokes, size=64, lw=6):
    # raw_strokes arrives as a byte string from the CSV; eval turns it back into nested lists
    raw_strokes = eval(raw_strokes.numpy())
    img = np.zeros((256, 256), np.uint8)
    for stroke in raw_strokes:
        for i in range(len(stroke[0]) - 1):
            _ = cv2.line(img, (stroke[0][i], stroke[1][i]), (stroke[0][i + 1], stroke[1][i + 1]), 255, lw)
    # cast to float32 to match the declared py_function output type, and scale to [0, 1]
    # so the inputs match the from_generator pipeline above
    return (cv2.resize(img, (size, size)) / 255.).astype(np.float32)

def tf_draw_cv2(image, label):
    [image] = tf.py_function(draw_cv2, [image], [tf.float32])
    image = tf.reshape(image, (64, 64, 1))
    label = tf.one_hot(label, depth=NCATS)
    image.set_shape((64, 64, 1))
    label.set_shape((340,))
    return image, label
train_ds = tf.data.Dataset.from_tensor_slices(fileList[:-1])
# interleave reads several shards in parallel; skip(1) drops each file's header row
train_ds = train_ds.interleave(lambda x:
    tf.data.TextLineDataset(x, compression_type='GZIP').skip(1).map(parse_csv, num_parallel_calls=tf.data.experimental.AUTOTUNE),
    cycle_length=4, block_length=16, num_parallel_calls=tf.data.experimental.AUTOTUNE)

# render the drawing column into image tensors
train_ds = train_ds.map(tf_draw_cv2, num_parallel_calls=tf.data.experimental.AUTOTUNE)

# shuffle -> batch -> prefetch is the idiomatic order
train_ds = train_ds.shuffle(3000).batch(1024).prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
# Single-file variant, for comparison:
# train_ds = tf.data.TextLineDataset(fileList[5], compression_type='GZIP').skip(1).map(parse_csv)
# train_ds = train_ds.map(tf_draw_cv2)
# train_ds = train_ds.shuffle(3000).batch(1024)
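As before, a quick shape check on one batch (optional):

for image_batch, label_batch in train_ds.take(1):
    print(image_batch.shape, label_batch.shape)  # expected: (1024, 64, 64, 1) (1024, 340)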


# Throughput test over every shard (commented out):
# import time
# import tqdm
# for file in fileList:
#     try:
#         train_ds = tf.data.TextLineDataset(file,compression_type='GZIP').skip(1).map(parse_csv)  
#         train_ds = train_ds.map(tf_draw_cv2,num_parallel_calls=tf.data.experimental.AUTOTUNE)
#         train_ds = train_ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE).shuffle(3000).batch(10240)

#         for image,label in tqdm.tqdm(train_ds):
#             time.sleep(0.0000000000001)
#     except:
#         print(file)
# Alternative parse_csv that decodes all seven columns (commented out):
# def parse_csv(line):
#     column_default = [tf.constant("0",dtype=tf.string),
#                       tf.constant("0",dtype=tf.string),
#                       tf.constant("0",dtype=tf.string),
#                       tf.constant("0",dtype=tf.string),
#                       tf.constant("0",dtype=tf.string),
#                       tf.constant(0,dtype=tf.int32),
#                       tf.constant(0,dtype=tf.int32)]
#     columns = tf.io.decode_csv(line, column_default)
#     label = columns[-2] 
#     features = columns[1]
#     return features, label

