A meme-battle ("doutu") bot interface implemented with PyTorch

It has been ages since I last updated this blog. This post mainly archives the code I wrote for a data mining course project.

A few notes

Since this was my first time using PyTorch and I started coding before really getting the hang of it, the code posted here is a modified version of the official PyTorch transfer learning tutorial.
Tutorial: http://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html

Classification code

First, the classification code, which does run. Put the dataset and the code in the same directory, and split the data into a training set and a validation set in advance; to allow hyperparameter tuning later, you are encouraged to add a separate test set as well. Also prepare an extra testset folder containing an unknow sub-folder (sic, as spelled in the code), which temporarily holds the images to classify at run time. To save time I did not make the code particularly robust; interested readers are welcome to read and improve it (though probably no one will, QAQ).

Note: install PyTorch on your machine beforehand; the program assumes CUDA is available. (The code targets Python 2 and an early, pre-0.4 PyTorch release, which is why it uses Variable, transforms.Scale, and loss.data[0].)
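For reference, the code expects a directory layout roughly like this (folder names are taken from the code; the class sub-folders are whatever categories your dataset defines):

biaoqingbao/
    train/<class_name>/*.jpg    # training images, one folder per class
    val/<class_name>/*.jpg      # validation images, same class folders
testset/
    unknow/                     # query images are copied here temporarily
models/                         # saved model snapshots (created if missing)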

from __future__ import print_function, division

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import copy
import os
import shutil
from random import choice
from skimage import io

try:
    import cPickle as pickle
except ImportError:
    import pickle

import ssl
from functools import wraps

def show_image(dir):
    im = io.imread(dir.decode('utf-8'))
    io.imshow(im)

def imshow(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated

# Monkey-patch helper: force ssl connections to use TLSv1, presumably to work
# around handshake failures when downloading the pretrained weights.
def sslwrap(func):
    @wraps(func)
    def bar(*args, **kw):
        kw['ssl_version'] = ssl.PROTOCOL_TLSv1
        return func(*args, **kw)
    return bar

class Doutu(object):
    def __init__(self, model_name='2017-05-31 00:30:24', data_floder='biaoqingbao'):
        self.model = None
        self.classes = None
        model_path = None
        if model_name is not None:
            model_path = os.path.join('models', model_name)
        if model_path is None or not os.path.isfile(model_path) or data_floder != 'biaoqingbao':
            self.training(data_floder)
        else:
            self.load_model(model_name)

    def training(self, data_dir):
        ssl.wrap_socket = sslwrap(ssl.wrap_socket)

        plt.ion()  # interactive mode

        ######################################################################
        # Load Data
        # ---------

        data_transforms = {
            'train': transforms.Compose([
                transforms.RandomSizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
            'val': transforms.Compose([
                transforms.Scale(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
        }

        dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
                 for x in ['train', 'val']}
        dset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=4,
                                                       shuffle=True, num_workers=4)
                        for x in ['train', 'val']}
        dset_sizes = {x: len(dsets[x]) for x in ['train', 'val']}
        dset_classes = dsets['train'].classes
        num_classes = len(dset_classes)
        print(dset_classes)

        use_gpu = torch.cuda.is_available()
        if use_gpu:
            print('training with gpu...')
        else:
            print('training with cpu...')

        ######################################################################
        # Visualize a few images
        # ^^^^^^^^^^^^^^^^^^^^^^
        # Let's visualize a few training images so as to understand the data
        # augmentations.

        # Get a batch of training data
        inputs, classes = next(iter(dset_loaders['train']))

        # Make a grid from batch
        out = torchvision.utils.make_grid(inputs)

        imshow(out, title=[dset_classes[x] for x in classes])

        ######################################################################
        # Training the model
        # ------------------
        #
        # Now, let's write a general function to train a model. Here, we will
        # illustrate:
        #
        # -  Scheduling the learning rate
        # -  Saving (deep copying) the best model
        #
        # In the following, parameter ``lr_scheduler(optimizer, epoch)``
        # is a function  which modifies ``optimizer`` so that the learning
        # rate is changed according to desired schedule.

        def train_model(model, criterion, optimizer, lr_scheduler, num_epochs=25):
            since = time.time()

            best_model = model
            best_acc = 0.0

            for epoch in range(num_epochs):
                print('Epoch {}/{}'.format(epoch, num_epochs - 1))
                print('-' * 10)

                # Each epoch has a training and validation phase
                for phase in ['train', 'val']:
                    if phase == 'train':
                        optimizer = lr_scheduler(optimizer, epoch)
                        model.train(True)  # Set model to training mode
                    else:
                        model.train(False)  # Set model to evaluate mode

                    running_loss = 0.0
                    running_corrects = 0

                    # Iterate over data.
                    for data in dset_loaders[phase]:
                        # get the inputs
                        inputs, labels = data

                        # wrap them in Variable
                        if use_gpu:
                            inputs, labels = Variable(inputs.cuda()), \
                                             Variable(labels.cuda())
                        else:
                            inputs, labels = Variable(inputs), Variable(labels)

                        # zero the parameter gradients
                        optimizer.zero_grad()

                        # forward
                        outputs = model(inputs)
                        _, preds = torch.max(outputs.data, 1)
                        loss = criterion(outputs, labels)
                        # print(preds)

                        # backward + optimize only if in training phase
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()

                        # statistics
                        running_loss += loss.data[0]
                        running_corrects += torch.sum(preds == labels.data)

                    epoch_loss = running_loss / dset_sizes[phase]
                    epoch_acc = running_corrects / dset_sizes[phase]

                    print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                        phase, epoch_loss, epoch_acc))

                    # deep copy the model
                    if phase == 'val' and epoch_acc > best_acc:
                        best_acc = epoch_acc
                        best_model = copy.deepcopy(model)

                print()

            time_elapsed = time.time() - since
            print('Training complete in {:.0f}m {:.0f}s'.format(
                time_elapsed // 60, time_elapsed % 60))
            print('Best val Acc: {:4f}'.format(best_acc))
            return best_model

        ######################################################################
        # Learning rate scheduler
        # ^^^^^^^^^^^^^^^^^^^^^^^
        # Let's create our learning rate scheduler. We will exponentially
        # decrease the learning rate once every few epochs.

        def exp_lr_scheduler(optimizer, epoch, init_lr=0.001, lr_decay_epoch=7):
            """Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs."""
            lr = init_lr * (0.1 ** (epoch // lr_decay_epoch))

            if epoch % lr_decay_epoch == 0:
                print('LR is set to {}'.format(lr))

            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

            return optimizer

        ######################################################################
        # Visualizing the model predictions
        # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        #
        # Generic function to display predictions for a few images
        #

        def visualize_model(model, num_images=6):
            images_so_far = 0
            fig = plt.figure()

            for i, data in enumerate(dset_loaders['val']):
                inputs, labels = data
                if use_gpu:
                    inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
                else:
                    inputs, labels = Variable(inputs), Variable(labels)

                outputs = model(inputs)
                _, preds = torch.max(outputs.data, 1)

                preds = preds.int()

                for j in range(inputs.size()[0]):
                    images_so_far += 1
                    ax = plt.subplot(num_images // 2, 2, images_so_far)
                    ax.axis('off')
                    ax.set_title('predicted: {}'.format(dset_classes[preds[j][0]]))
                    imshow(inputs.cpu().data[j])

                    if images_so_far == num_images:
                        return

        ######################################################################
        # Finetuning the convnet
        # ----------------------
        #
        # Load a pretrained model and reset final fully connected layer.
        #

        model_ft = models.resnet18(pretrained=True)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

        if use_gpu:
            model_ft = model_ft.cuda()

        criterion = nn.CrossEntropyLoss()

        # Observe that all parameters are being optimized
        optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)

        ######################################################################
        # Train and evaluate
        # ^^^^^^^^^^^^^^^^^^
        #
        # It should take around 15-25 min on CPU. On GPU though, it takes less than a
        # minute.
        #

        model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                               num_epochs=25)

        ######################################################################
        #

        visualize_model(model_ft)

        plt.ioff()
        plt.show()

        self.model = model_ft
        self.classes = dset_classes

    def predict(self, imdir):
        # copy the query image into testset/unknow so that ImageFolder can load it
        testdir = os.path.join('testset', 'unknow')
        shutil.copyfile(imdir, os.path.join(testdir, os.path.split(imdir)[1]))

        data_transform = transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        dsets = datasets.ImageFolder('testset', data_transform)
        dsets_loader = torch.utils.data.DataLoader(dsets, batch_size=1, num_workers=1)

        results = []
        for data in dsets_loader:
            inputs, labels = data
            inputs = Variable(inputs.cuda())
            outputs = self.model(inputs)
            _, preds = torch.max(outputs.data, 1)
            preds = preds.int()
            for j in range(inputs.size()[0]):
                results.append(self.classes[preds[j][0]])
                # imshow(inputs.cpu().data[j])

        os.remove(os.path.join(testdir, os.path.split(imdir)[1]))
        return results

    def save_model(self):
        print('saving model...')
        ISOTIMEFORMAT = '%Y-%m-%d %X'
        if not os.path.isdir('models'):
            os.makedirs('models')
        save_file = os.path.join('models', time.strftime(ISOTIMEFORMAT, time.localtime(time.time())))
        pickle.dump((self.model, self.classes), open(save_file, 'wb'))
        print('model saved')

    def load_model(self, save_file):
        print('loading model...')
        save_file = os.path.join('models', save_file)
        self.model, self.classes = pickle.load(open(save_file, 'rb'))
        print('model loaded')

    def get_image(self, imdir):
        # classify the query image, then reply with a random training image
        # of the same class
        imclass = self.predict(imdir.decode('utf-8'))[0]
        imdir = os.path.join('biaoqingbao', 'train', imclass)
        return os.path.join(imdir, choice(os.listdir(imdir))).decode('utf-8')
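For completeness, a minimal usage sketch (my addition, not part of the original project; it assumes Python 2, a CUDA machine, the directory layout above, and a model already saved under models/):

# Hypothetical usage of the classifier above.
bot = Doutu(model_name='2017-05-31 00:30:24')  # load a previously saved model
print(bot.predict('input.jpg'))                # list of predicted class names
print(bot.get_image('input.jpg'))              # path to a random meme of that class

Passing a missing model name (or a data_floder other than 'biaoqingbao') makes the constructor train from scratch instead of loading.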

Feature extractor code

= =, my machine's GPU memory is too small to run this (though it may also be that the code needs some optimization strategy), so the program below may contain a few small bugs (after all, I never managed to run it successfully myself).

A k-d tree could be added to speed up the nearest-neighbour lookup; see the sketch after the code below.

from __future__ import print_function, division

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import copy
import os
import shutil
from skimage import io

try:
    import cPickle as pickle
except ImportError:
    import pickle

import ssl
from functools import wraps

def show_image(dir):
    im = io.imread(dir.decode('utf-8'))
    io.imshow(im)

def imshow(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated

# Monkey-patch helper: force ssl connections to use TLSv1, presumably to work
# around handshake failures when downloading the pretrained weights.
def sslwrap(func):
    @wraps(func)
    def bar(*args, **kw):
        kw['ssl_version'] = ssl.PROTOCOL_TLSv1
        return func(*args, **kw)
    return bar

def get_file(data_dir):
    result = []
    for sub in os.listdir(data_dir):
        now = os.path.join(data_dir, sub)
        if os.path.isfile(now):
            result.append(now)
        elif os.path.isdir(now):
            result += get_file(now)
    return result

class Doutu(object):
    def __init__(self, model_name='2017-05-31 00:30:24', data_floder='biaoqingbao'):
        self.model = None
        self.dir2feat = {}
        model_path = None
        if model_name is not None:
            model_path = os.path.join('models', model_name)
        if model_path is None or not os.path.isfile(model_path) or data_floder != 'biaoqingbao':
            self.training(data_floder)
        else:
            self.load_model(model_name)

    def training(self, data_dir):
        ssl.wrap_socket = sslwrap(ssl.wrap_socket)

        plt.ion()  # interactive mode

        ######################################################################
        # Load Data
        # ---------

        data_transforms = {
            'train': transforms.Compose([
                transforms.RandomSizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
            'val': transforms.Compose([
                transforms.Scale(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
        }

        dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
                 for x in ['train', 'val']}
        dset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=4,
                                                       shuffle=True, num_workers=4)
                        for x in ['train', 'val']}
        dset_sizes = {x: len(dsets[x]) for x in ['train', 'val']}
        dset_classes = dsets['train'].classes
        num_classes = len(dset_classes)
        print(dset_classes)

        use_gpu = torch.cuda.is_available()
        if use_gpu:
            print('training with gpu...')
        else:
            print('training with cpu...')

        ######################################################################
        # Visualize a few images
        # ^^^^^^^^^^^^^^^^^^^^^^
        # Let's visualize a few training images so as to understand the data
        # augmentations.

        # Get a batch of training data
        inputs, classes = next(iter(dset_loaders['train']))

        # Make a grid from batch
        out = torchvision.utils.make_grid(inputs)

        imshow(out, title=[dset_classes[x] for x in classes])

        ######################################################################
        # Training the model
        # ------------------
        #
        # Now, let's write a general function to train a model. Here, we will
        # illustrate:
        #
        # -  Scheduling the learning rate
        # -  Saving (deep copying) the best model
        #
        # In the following, parameter ``lr_scheduler(optimizer, epoch)``
        # is a function  which modifies ``optimizer`` so that the learning
        # rate is changed according to desired schedule.

        def train_model(model, criterion, optimizer, lr_scheduler, num_epochs=25):
            since = time.time()

            best_model = model
            best_acc = 0.0

            for epoch in range(num_epochs):
                print('Epoch {}/{}'.format(epoch, num_epochs - 1))
                print('-' * 10)

                # Each epoch has a training and validation phase
                for phase in ['train', 'val']:
                    if phase == 'train':
                        optimizer = lr_scheduler(optimizer, epoch)
                        model.train(True)  # Set model to training mode
                    else:
                        model.train(False)  # Set model to evaluate mode

                    running_loss = 0.0
                    running_corrects = 0

                    # Iterate over data.
                    for data in dset_loaders[phase]:
                        # get the inputs
                        inputs, labels = data

                        # wrap them in Variable
                        if use_gpu:
                            inputs, labels = Variable(inputs.cuda()), \
                                             Variable(labels.cuda())
                        else:
                            inputs, labels = Variable(inputs), Variable(labels)

                        # zero the parameter gradients
                        optimizer.zero_grad()

                        # forward
                        outputs = model(inputs)
                        _, preds = torch.max(outputs.data, 1)
                        loss = criterion(outputs, labels)
                        # print(preds)

                        # backward + optimize only if in training phase
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()

                        # statistics
                        running_loss += loss.data[0]
                        running_corrects += torch.sum(preds == labels.data)

                    epoch_loss = running_loss / dset_sizes[phase]
                    epoch_acc = running_corrects / dset_sizes[phase]

                    print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                        phase, epoch_loss, epoch_acc))

                    # deep copy the model
                    if phase == 'val' and epoch_acc > best_acc:
                        best_acc = epoch_acc
                        best_model = copy.deepcopy(model)

                print()

            time_elapsed = time.time() - since
            print('Training complete in {:.0f}m {:.0f}s'.format(
                time_elapsed // 60, time_elapsed % 60))
            print('Best val Acc: {:4f}'.format(best_acc))
            return best_model

        ######################################################################
        # Learning rate scheduler
        # ^^^^^^^^^^^^^^^^^^^^^^^
        # Let's create our learning rate scheduler. We will exponentially
        # decrease the learning rate once every few epochs.

        def exp_lr_scheduler(optimizer, epoch, init_lr=0.001, lr_decay_epoch=7):
            """Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs."""
            lr = init_lr * (0.1 ** (epoch // lr_decay_epoch))

            if epoch % lr_decay_epoch == 0:
                print('LR is set to {}'.format(lr))

            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

            return optimizer

        ######################################################################
        # Visualizing the model predictions
        # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        #
        # Generic function to display predictions for a few images
        #

        def visualize_model(model, num_images=6):
            images_so_far = 0
            fig = plt.figure()

            for i, data in enumerate(dset_loaders['val']):
                inputs, labels = data
                if use_gpu:
                    inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
                else:
                    inputs, labels = Variable(inputs), Variable(labels)

                outputs = model(inputs)
                _, preds = torch.max(outputs.data, 1)

                preds = preds.int()

                for j in range(inputs.size()[0]):
                    images_so_far += 1
                    ax = plt.subplot(num_images // 2, 2, images_so_far)
                    ax.axis('off')
                    ax.set_title('predicted: {}'.format(dset_classes[preds[j][0]]))
                    imshow(inputs.cpu().data[j])

                    if images_so_far == num_images:
                        return

        ######################################################################
        # Finetuning the convnet
        # ----------------------
        #
        # Load a pretrained model and reset final fully connected layer.
        #

        model_ft = models.resnet18(pretrained=True)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

        if use_gpu:
            model_ft = model_ft.cuda()

        criterion = nn.CrossEntropyLoss()

        # Observe that all parameters are being optimized
        optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)

        ######################################################################
        # Train and evaluate
        # ^^^^^^^^^^^^^^^^^^
        #
        # It should take around 15-25 min on CPU. On GPU though, it takes less than a
        # minute.
        #

        model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                               num_epochs=10)

        ######################################################################
        #

        visualize_model(model_ft)

        plt.ioff()
        plt.show()

        # drop the final fc layer: the remaining network outputs pooled
        # convolutional features (512-d for resnet18) instead of class scores
        self.model = nn.Sequential(*list(model_ft.children())[:-1])
        # pre-compute and cache the feature vector of every image in the dataset
        filelist = get_file(data_dir)
        filelist = map(lambda x: x.decode('utf-8'), filelist)
        for filedir in filelist:
            self.dir2feat[filedir] = self.predict(filedir)


    def predict(self, imdir):
        # copy the query image into testset/unknow so that ImageFolder can load it
        testdir = os.path.join('testset', 'unknow')
        shutil.copyfile(imdir, os.path.join(testdir, os.path.split(imdir)[1]))

        data_transform = transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        dsets = datasets.ImageFolder('testset', data_transform)
        dsets_loader = torch.utils.data.DataLoader(dsets, batch_size=1, num_workers=1)

        results = []
        for data in dsets_loader:
            inputs, labels = data
            inputs = Variable(inputs.cuda())
            outputs = self.model(inputs)
            for j in range(inputs.size()[0]):
                results.append(outputs[j])
                # imshow(inputs.cpu().data[j])

        os.remove(os.path.join(testdir, os.path.split(imdir)[1]))
        return results

    def save_model(self):
        print('saving model...')
        ISOTIMEFORMAT = '%Y-%m-%d %X'
        if not os.path.isdir('models'):
            os.makedirs('models')
        save_file = os.path.join('models', time.strftime(ISOTIMEFORMAT, time.localtime(time.time())))
        pickle.dump((self.model, self.dir2feat), open(save_file, 'wb'))
        print('model saved')

    def load_model(self, save_file):
        print('loading model...')
        save_file = os.path.join('models', save_file)
        self.model, self.dir2feat = pickle.load(open(save_file, 'rb'))
        print('model loaded')

    def get_image(self, imdir):
        # extract the query image's feature, then return the dataset image whose
        # feature is closest in Euclidean distance (linear scan over dir2feat)
        feat = self.predict(imdir.decode('utf-8'))[0]
        mn = None
        result = None
        for k, v in self.dir2feat.items():
            d = (feat - v).norm().data[0]  # scalar distance (old-PyTorch idiom)
            if result is None or d < mn:
                result = k
                mn = d
        return result
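On the k-d tree suggestion above: a minimal sketch, assuming SciPy is available and the cached feature Variables are flattened into numpy arrays first. The helper names (build_index, nearest_image) are mine and purely illustrative:

from scipy.spatial import cKDTree
import numpy as np

def build_index(dir2feat):
    # keys() and values() iterate in matching order over an unmodified dict
    paths = list(dir2feat.keys())
    feats = np.stack([v.data.cpu().numpy().ravel() for v in dir2feat.values()])
    return paths, cKDTree(feats)

def nearest_image(paths, tree, feat):
    # query the single nearest neighbour in Euclidean distance
    _, idx = tree.query(feat.data.cpu().numpy().ravel(), k=1)
    return paths[idx]

One caveat: with 512-dimensional features a k-d tree degrades towards a linear scan, so this mainly pays off after reducing the dimensionality (for example with PCA).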