docker hub 代理_gpu_pytorch_docker

c1e87c287ffa92007495173bd76d0bca.png
gpu pytorch docker 镜像制作
https://hub.docker.com/r/anibali/pytorch/tags
docker pull anibali/pytorch:1.4.0-cuda10.1

docker run --name=pytorch1.4_cuda10 --rm -it --init \
  --gpus=all \
  --ipc=host \
  --user="$(id -u):$(id -g)" \
  --volume="$PWD:/app" \
  anibali/pytorch:1.4.0-cuda10.1  /bin/bash

  
docker run --name=pytorch1.4_cuda10_v2 -it --init \
  --gpus=all \
  --ipc=host \
  --user="$(id -u):$(id -g)" \
  --volume="$PWD:/app" \
  anibali/pytorch:1.4.0-cuda10.1  /bin/bash

docker run --name=pytorch1.4_cuda10_v3 -it \
  --gpus=all \
  --ipc=host \
  --user="$(id -u):$(id -g)" \
  --volume="/dataa/liwei/pytorch_build_model:/app" \
  anibali/pytorch:1.4.0-cuda10.1  /bin/bash
  
  
nvidia-smi
conda info -e  # 发现是用 通过 miniconda 安装的Pytorch
docker attach pytorch1.4_cuda10_v3 # 这个会卡住
docker exec -it c7b973540447  /bin/bash #用这个
import torch
import torchvision
print("PyTorch Version: ", torch.__version__) # PyTorch Version:  1.4.0
print("Torchvision Version: ", torchvision.__version__) #Torchvision Version:  0.5.0

# 测试pytorc+cuda+cuDNN 安装是否成功
import torch
print(torch.cuda.is_available()) # True  返回结果是True,则PyTorch的GPU安装成功

device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
print(device)
print(torch.cuda.get_device_name(0)) # Tesla P100-PCIE-16GB
print(torch.rand(3,3).cuda()) 


ubuntu apt-get 使用代理更新 https://blog.csdn.net/weixin_34148340/article/details/92060503
cd /etc/apt/下建立一个文件 apt.conf
Acquire::http::proxy "http://f3410453:Idsbg2017@10.191.131.14:3128";
Acquire::ftp::proxy "http://f3410453:Idsbg2017@10.191.131.14:3128";
Acquire::https::proxy "http://f3410453:Idsbg2017@10.191.131.14:3128";

apt-get update -c apt-proxy-conf
apt-get install vim
export http_proxy=http://f3410453:Idsbg2017@10.191.131.14:3128

https://blog.csdn.net/yjk13703623757/article/details/93673927  #ubuntu 依赖处理

#docker 环境 常见python包安装
conda info -e
pip install opencv-python==4.2.0.32 -i https://pypi.tuna.tsinghua.edu.cn/simple --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128
numpy, requests, pandas
pip install pandas -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128
pip install pymysql -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128
pip install django==2.2.4 -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128
pip install pillow -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128
pip install matplotlib -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128
pip install imutils -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128
pip install line_profiler -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128




#SSD pytorch   https://github.com/liwei86521/SSD
pip install yacs -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128
pip install tqdm  -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128
pip install vizer  -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128

VOC 目标检测数据集 格式
VOC2007/
├── Annotations
│   ├── screw1095.xml
│   ├── screw1100.xml
│   ├── screw605.xml
│   └── screw610.xml
├── ImageSets
│   └── Main
│       ├── test.txt
│       ├── train.txt
│       └── val.txt
└── JPEGImages
    ├── screw1095.jpg
    ├── screw600.jpg
    ├── screw605.jpg
    └── screw610.jpg

cat screw1095.xml   # 该xml 放在标准VOC格式目录中,不需要改任何东西
<annotation>
	<folder>yesScrew</folder> 
	<filename>screw2075.jpg</filename>
	<path>D:\data\5T_5B_data\ssd\yesScrew\screw2075.jpg</path> #ps 这个放在server上不要修改
	<source>
		<database>Unknown</database>
	</source>
	<size>
		<width>250</width>
		<height>100</height>
		<depth>3</depth>
	</size>
	<segmented>0</segmented>
	<object>
		<name>yesScrew</name>
		<pose>Unspecified</pose>
		<truncated>0</truncated>
		<difficult>0</difficult>
		<bndbox>
			<xmin>103</xmin>
			<ymin>67</ymin>
			<xmax>122</xmax>
			<ymax>86</ymax>
		</bndbox>
	</object>
</annotation>

step1: 制作数据集 用的是 D:\labelImg      cd D:\labelImg  ---->  python labelImg.py    进行数据集标注即可
step2: cat train_val_test_split.py   #训练之前划分 train val test 数据集
import os
import random


def split_dataset(root_dir='datasets/VOC2007/', trainval_percent=0.9, train_percent=0.8):
    """Split a VOC-style dataset into trainval/train/val/test image-set files.

    Reads every annotation under <root_dir>/Annotations and writes one image
    name per line into <root_dir>/ImageSets/Main/{trainval,train,val,test}.txt.

    Args:
        root_dir: VOC root containing Annotations/ and ImageSets/Main/.
        trainval_percent: fraction of all images that go to train+val
            (the remainder is the test split).
        train_percent: fraction of the trainval pool that goes to train
            (the remainder is the val split).

    With the defaults the effective split is 0.72 train / 0.18 val / 0.10 test
    (the original comment claiming 0.7/0.1/0.2 did not match the numbers).
    """
    xml_dir = os.path.join(root_dir, 'Annotations')
    save_dir = os.path.join(root_dir, 'ImageSets', 'Main')
    total_xml = os.listdir(xml_dir)

    num = len(total_xml)
    tv = int(num * trainval_percent)
    tr = int(tv * train_percent)
    trainval_idx = random.sample(range(num), tv)
    train_idx = set(random.sample(trainval_idx, tr))
    trainval_set = set(trainval_idx)  # set: O(1) membership instead of O(n) list scans

    # Bucket every annotation name (without the '.xml' suffix) into its split.
    # BUG FIX: the pasted original wrote "+ 'n'" — the escaped newline lost its
    # backslash, so every name was suffixed with a literal 'n' and all names
    # ended up concatenated on a single line.
    splits = {'trainval': [], 'train': [], 'val': [], 'test': []}
    for i, xml_name in enumerate(total_xml):
        line = xml_name[:-4] + '\n'
        if i in trainval_set:
            splits['trainval'].append(line)
            if i in train_idx:
                splits['train'].append(line)
            else:
                splits['val'].append(line)
        else:
            splits['test'].append(line)

    # 'with' guarantees each list file is flushed and closed (the original
    # left four file objects open until interpreter exit).
    for split_name, lines in splits.items():
        with open(os.path.join(save_dir, split_name + '.txt'), 'w') as f:
            f.writelines(lines)


if __name__ == '__main__':
    split_dataset()

step3:选择预训练model
cat efficient_net_b3_ssd300_voc0724_hotbar.yaml
MODEL:
  DEVICE: 'cuda:0'
  NUM_CLASSES: 4
  BACKBONE:
    NAME: 'efficient_net-b3'
    OUT_CHANNELS: (48, 136, 384, 256, 256, 256)
INPUT:
  IMAGE_SIZE: 300
DATASETS:
  TRAIN: ("voc_2007_train", "voc_2007_val")
  TEST: ("voc_2007_test", )
DATA_LOADER:
  NUM_WORKERS: 0
SOLVER:
  MAX_ITER: 3000
  LR_STEPS: [800, 1000]
  GAMMA: 0.1
  BATCH_SIZE: 32
  LR: 1e-3

OUTPUT_DIR: 'outputs/efficient_net_b3_ssd300_voc0826_hotbar'

step4::ssd 训练
python train.py --config-file configs/efficient_net_b3_ssd300_voc0724_hotbar.yaml   
# 使用efficient模型训练 效果比mobilenet_v2好


Downloading: "https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth" to /home/user/.torch/models/vgg16_reducedfc.pth
cd /app/SSD/
python train.py --config-file configs/vgg_ssd300_voc0712_self.yaml
No such file or directory: 'datasets/VOC2007/ImageSets/Main/trainval.txt'

mkdir -p /app/SSD/datasets
mv VOC2007  /app/SSD/datasets

python train.py --config-file configs/vgg_ssd300_voc0712_self.yaml
from torch.utils.tensorboard import SummaryWriter
pip install tensorboard  -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128

vim /app/SSD/ssd/data/datasets/voc.py    # 类别修改为 yesscrew, noscrew
python train.py --config-file configs/vgg_ssd300_voc0712_self.yaml
训练成功了

测试一下 
python test.py --config-file configs/vgg_ssd300_voc0712_self.yaml
/app/SSD/datasets/VOC2007/ImageSets/Main/test.txt  # 会加载这个文件
2020-06-03 02:26:31,377 SSD.inference INFO: Loading checkpoint from outputs/vgg_ssd300_voc0712_self/model_final.pth
2020-06-03 02:26:31,531 SSD.inference INFO: Evaluating voc_2007_test dataset(36 images):
100%|##########################################################################################| 4/4 [00:06<00:00,  1.51s/it]
2020-06-03 02:26:37,579 SSD.inference INFO: mAP: 1.0000
yesscrew        : 1.0000
noscrew         : 1.0000


预测图片:
cd /app/SSD/demo
python demo.py --config-file configs/vgg_ssd300_voc0712_self.yaml --images_dir demo --ckpt outputs/vgg_ssd300_voc0712_self/model_final.pth
# Loaded weights from outputs/vgg_ssd300_voc0712_self/model_final.pth
Loaded weights from outputs/vgg_ssd300_voc0712_self/model_final.pth
(0001/0005) screw1100.jpg: objects 01 | load 002ms | inference 239ms | FPS 4
(0002/0005) screw1940.jpg: objects 01 | load 002ms | inference 016ms | FPS 63
(0003/0005) screw2680.jpg: objects 01 | load 001ms | inference 016ms | FPS 64
(0004/0005) screw330.jpg: objects 01 | load 001ms | inference 016ms | FPS 64
(0005/0005) screw4005.jpg: objects 01 | load 002ms | inference 016ms | FPS 64

docker 将容器保存为镜像
ps: 在生成镜像之前,先把挂载数据卷里需要保留的数据拷贝进容器(docker commit 不会打包数据卷中的内容)
docker commit  -a "liwei" -m "my pytorch1.4_cuda10_ssd"  c7b973540447  pytorch1.4_cuda10_ssd:v1 
docker images   # pytorch1.4_cuda10_ssd:v1

docker run --name=pytorch1.4_cuda10_ssd -it \
  --gpus=all \
  --ipc=host \
  --user="$(id -u):$(id -g)" \
  pytorch1.4_cuda10_ssd:v1  /bin/bash

cd /app_lee
python demo.py --config-file configs/vgg_ssd300_voc0712_self.yaml --images_dir demo --ckpt outputs/vgg_ssd300_voc0712_self/model_final.pth
docker cp f818ba0de65d:/app_lee/SSD/demo ./

ctrl p q
docker exec -it f818ba0de65d  /bin/bash #用这个

docker cp /home/bobuser/liwei/VOC2007 f818ba0de65d:/app_lee/SSD/datasets 
vim /app/SSD/ssd/data/datasets/voc.py    # 类别修改为 yesscrew, noscrew
python train.py --config-file configs/vgg_ssd300_voc0712_slabel.yaml
sudo docker cp /home/bobuser/liwei/demo f818ba0de65d:/app_lee/SSD/
python demo.py --config-file configs/vgg_ssd300_voc0712_slabel.yaml --images_dir demo
sudo docker cp f818ba0de65d:/app_lee/SSD/demo  /home/bobuser/liwei/demo

# 把 demo.py  修改了一下 变成接收单张图片判断
python demo_single_pic.py --config-file configs/vgg_ssd300_voc0712_slabel.yaml --image_path TA_gray7005_lee.jpg
#Loaded weights from outputs/vgg_ssd300_voc0712_slabel/model_final.pth
(0001/0001) TA_gray7005_lee.jpg: objects 01 | load 027ms | inference 387ms | FPS 3

sudo docker cp f818ba0de65d:/app_lee/SSD/demo  /home/bobuser/liwei/demo
sudo docker cp /home/bobuser/liwei/TA_gray_2 f818ba0de65d:/app_lee/SSD/
python demo.py --config-file configs/vgg_ssd300_voc0712_slabel.yaml --images_dir demo

sudo docker cp f818ba0de65d:/app_lee/SSD/demo/result/ /home/bobuser/liwei/
sudo docker cp /home/bobuser/liwei/TA_gray_test/ng f818ba0de65d:/app_lee/SSD/
sudo docker cp f818ba0de65d:/app_lee/SSD/ng /home/bobuser/liwei/

python demo.py --config-file configs/vgg_ssd300_voc0712_slabel.yaml --images_dir ng
sudo docker cp f818ba0de65d:/app_lee/SSD/demo/result /home/bobuser/liwei/

# 封装YOLOv4 镜像 (c7b973540447 容器中的数据卷会丢失)
docker commit  -a "liwei" -m "my pytorch1.4_cuda10_yolov4"  c7b973540447  pytorch1.4_cuda10_yolov4:v1  #yolov4 基础 images 3.84GB
docker run --name=pytorch1.4_cuda10_yolov4 -it \
  --gpus=all \
  --ipc=host \
  --user="$(id -u):$(id -g)" \
  --volume="/dataa/liwei/YOLOv4-master:/app" \
  pytorch1.4_cuda10_yolov4:v1  /bin/bash

docker exec -it 3816d18f4b6e  /bin/bash #用这个

numpy==1.18.2
torch==1.4.0
tensorboardX==2.0
scikit_image==0.16.2
matplotlib==2.2.3
tqdm==4.43.0
easydict==1.9
Pillow==7.1.2
skimage
opencv_python
pycocotools

pip install easydict==1.9 -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128
pip install pycocotools -i https://pypi.tuna.tsinghua.edu.cn/simple  --proxy=http://f3410453:Idsbg2017@10.191.131.14:3128


下载yolov4的权重 放在了 models 文件夹下
yolov4.pth(https://pan.baidu.com/s/1ZroDvoGScDgtE1ja_QqJVw Extraction code:xrq9)
yolov4.conv.137.pth(https://pan.baidu.com/s/1ovBie4YyVQQoUrC3AY0joA Extraction code:kcel)

#Inference
python demo.py <cfgFile> <weightFile> <imgFile>
python demo.py -cfgfile  cfg/yolov4.cfg  -weightfile models/yolov4.pth  -imgfile data/giraffe.jpg # 没有结果
python demo.py -cfgfile  cfg/yolov4.cfg  -weightfile models/yolov4.conv.137.pth  -imgfile data/giraffe.jpg  # 错
python demo.py -cfgfile  cfg/yolov4.cfg  -weightfile models/yolov4.weights  -imgfile data/giraffe.jpg  # yes
# ps yolov4.weights 是coco数据集上训练出来的权重(80分类), voc数据集(只有20类)

#用 demo.py 推理多张图片
python demo.py -cfgfile  cfg/yolov4.cfg  -weightfile models/yolov4.weights # CPU处理5张图片24s
python demo.py -cfgfile  cfg/yolov4.cfg  -weightfile models/yolov4.weights # GPU处理5张图片24s


docker commit  -a "liwei" -m "my pytorch1.4_cuda10_yolov4_src"  3816d18f4b6e  pytorch1.4_cuda10_yolov4:v2  #包含 yolov4 src images 3.84GB
docker run --name=pytorch1.4_cuda10_yolov4_src -it \
  --gpus=all \
  --ipc=host \
  --user="$(id -u):$(id -g)" \
  pytorch1.4_cuda10_yolov4:v2  /bin/bash

docker exec -it 0ee470ab4b6a  /bin/bash #用这个
cd /yolov4
python demo_bk.py -cfgfile  cfg/yolov4.cfg  -weightfile models/yolov4.weights  -imgfile data/giraffe.jpg
#pytorch_screws_train.py
# -*- coding:utf-8 -*-
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
#import matplotlib.pyplot as plt
import time
import os
import copy

#print("PyTorch Version: ", torch.__version__)
#print("Torchvision Version: ", torchvision.__version__)

def train_model(model, dataloaders, criterion, optimizer, scheduler, num_epochs=25, is_inception=False):
    """Run the standard train/val fine-tuning loop and keep the best weights.

    Args:
        model: network to train (the caller moves it to the right device).
        dataloaders: dict with 'train' and 'val' DataLoaders.
        criterion: loss function (e.g. CrossEntropyLoss).
        optimizer: optimizer over the trainable parameters.
        scheduler: LR scheduler, stepped once per epoch.
        num_epochs: number of epochs to run.
        is_inception: if True, also use the Inception-v3 auxiliary head
            loss (weighted 0.4) during the training phase.

    Returns:
        (model, val_acc_history): the model reloaded with the best
        validation weights, and the list of per-epoch validation accuracies.

    NOTE(review): relies on a module-level `device` (defined further down in
    this script) for the .to(device) calls.
    """
    since = time.time()

    val_acc_history = []

    # Track the best validation accuracy seen so far and a copy of its weights.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        print("the %d epoch learning rate : %f" % (epoch, optimizer.param_groups[0]['lr']))
        # Each epoch has a training phase and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    if is_inception and phase == 'train':
                        # Inception-v3 returns (main, aux) outputs in train
                        # mode; combine both losses as in the original paper.
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4 * loss2
                    else:
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)

                    _, preds = torch.max(outputs, 1)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                # loss.item() is the batch mean; scale by the batch size so
                # the epoch average below is exact even for a ragged last batch.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(epoch_acc)

        # Step the LR schedule once per epoch, after both phases.
        scheduler.step()
        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)

    return model, val_acc_history


# when model use for feature extraction need set requires_grad=False
def set_parameter_requires_grad(model, feature_extracting):
    """Freeze every parameter of *model* when feature_extracting is truthy.

    Used for feature extraction: the frozen backbone keeps its pretrained
    weights and only layers added afterwards remain trainable.
    """
    if not feature_extracting:
        return
    for p in model.parameters():
        p.requires_grad = False

# step 3 Network initialization and setup
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    """Build a torchvision backbone and replace its classifier head.

    Args:
        model_name: one of 'resnet', 'alexnet', 'vgg', 'squeezenet',
            'densenet', 'inception'.
        num_classes: output size of the new classification layer.
        feature_extract: if True, freeze the backbone weights so only the
            newly created head is trainable.
        use_pretrained: load ImageNet weights for most backbones.
            NOTE(review): the 'resnet' branch ignores this flag and the
            'inception' branch overwrites it — both load weights from local
            .pth files because the training host has no internet access.

    Returns:
        (model_ft, input_size): the configured model and its expected square
        input resolution (224, or 299 for inception).
    """
    model_ft = None
    input_size = 0

    if model_name == "resnet":
        # Resnet18 — weights come from a local checkpoint, not a download.
        model_ft = models.resnet18(pretrained=False)
        # print(model_ft)
        model_ft.load_state_dict(torch.load('./models/resnet18-5c106cde.pth'))
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        # Alexnet — replace the final classifier layer.
        model_ft = models.alexnet(pretrained=use_pretrained)
        # print(model_ft)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "vgg":
        # VGG11_bn — replace the final classifier layer.
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        # print(model_ft)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        # Squeezenet — its head is a 1x1 conv rather than a Linear layer.
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        # Densenet — single Linear classifier attribute.
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "inception":
        """ 
        Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)

        # load local weights (overwrites whatever use_pretrained produced)
        weights = torch.load("/home/bobuser/.cache/torch/checkpoints/inception_v3_google-1a9a5a14.pth")
        model_ft.load_state_dict(weights)

        # print(model_ft)
        set_parameter_requires_grad(model_ft, feature_extract)

        # Handle the auxilary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    else:
        # Unknown backbone name: abort the whole script.
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size


# step 4  load data
# Train/val preprocessing: resize to 224x224 and normalize with the ImageNet
# mean/std expected by the torchvision pretrained backbones.
data_transforms = {
    'train': transforms.Compose([
        # transforms.RandomResizedCrop(input_size),
        # transforms.RandomHorizontalFlip(),
        transforms.Resize((224,224)),# input PIL to size
        #transforms.ColorJitter(brightness=0.3, saturation=0.3),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        # transforms.Resize(input_size),
        # transforms.CenterCrop(input_size),
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

data_dir = "./screws_data"
# can selected nets ----> [resnet, alexnet, vgg, squeezenet, densenet, inception]
model_name = "resnet"
num_classes = 2
batch_size = 64
num_epochs = 80

# Whether to use for feature extraction: False, then finetune the whole model;
# True: only update the parameters of the newly added final layer.
feature_extract = True
print("Initializing Datasets and Dataloaders...")
# Create training and validation datasets
# (expects ImageFolder layout: ./screws_data/train/<class>/, ./screws_data/val/<class>/)
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
                  for x in ['train', 'val']}

# Create training and validation dataloaders
dataloaders_dict = {
    x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4)
    for x in ['train', 'val']}

# CPU/GPU choice
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# step 5 Model initialization and Optimizer settings
model_ft, input_size = initialize_model(model_name,
                                        num_classes,
                                        feature_extract,
                                        use_pretrained=True)  # use_pretrained=True ---> use_pretrained=False
# ps : 239 has no external network Cannot download weights

# BUG FIX: the pasted original printed a literal "n"/"t" — the "\n"/"\t"
# escapes lost their backslashes when the snippet went through the blog engine.
print("model_ft ---> \n ", model_ft)
model_ft = model_ft.to(device)  # train GPU/CPU

# Collect the parameters the optimizer should update: with feature_extract=True
# only layers still requiring grad (the replaced head) are trained.
params_to_update = model_ft.parameters()
print("Params to learn:  ")
if feature_extract:
    params_to_update = []
    for name, param in model_ft.named_parameters():
        if param.requires_grad == True:
            params_to_update.append(param)
            print("\t", name)
else:
    for name, param in model_ft.named_parameters():
        if param.requires_grad == True:
            print("\t", name)

optimizer_ft = optim.SGD(params_to_update, lr=0.01, momentum=0.9)
# https://blog.csdn.net/Strive_For_Future/article/details/83213971
exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer_ft, step_size=50, gamma=0.1)

# step 6 Model training and evaluation
criterion = nn.CrossEntropyLoss()

# Train and evaluate
model_ft, hist = train_model(model_ft,
                             dataloaders_dict,
                             criterion,
                             optimizer_ft,
                             exp_lr_scheduler,
                             num_epochs=num_epochs,
                             is_inception=(model_name == "inception"))

torch.save(model_ft, 'model_screws_resnet18_530.pth') # save the whole model
#test_screws_train_model.py
# -*- coding:utf-8 -*-
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
import matplotlib.pyplot as plt
from torchvision import datasets, models, transforms
import time
import os
import copy

print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)

#torch.save(model_ft, 'model_screws.pth') # save the whole model

# Preprocessing for the held-out 'test1219' split: same resize + ImageNet
# normalization used at training time.
data_transforms = {
    'test1219': transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
}

test_data_dir = './screws_data'
batch_size = 8
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


print("Initializing Datasets and Dataloaders...")
# Expects ImageFolder layout: ./screws_data/test1219/<class>/ sub-folders.
image_datasets = {x: datasets.ImageFolder(os.path.join(test_data_dir, x), data_transforms[x])
                  for x in ['test1219']}

dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], 
                batch_size=batch_size, shuffle=True, num_workers=0) for x in ['test1219']}

# class_names: folder names in index order (presumably 'ng'/'ok' — see the
# paths used elsewhere in these notes); class_to_idx maps name -> label index.
class_names, class_to_idx = image_datasets['test1219'].classes, image_datasets['test1219'].class_to_idx
dataset_sizes = {x: len(image_datasets[x]) for x in ['test1219']}
print("class_names, class_to_idx ---> ", class_names, class_to_idx)

def imshow(inp, title=None):
    """Display a normalized (C, H, W) tensor as an image.

    Undoes the ImageNet normalization before handing the array to matplotlib.
    """
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = inp.numpy().transpose((1, 2, 0))  # CHW -> HWC
    image = np.clip(std * image + mean, 0, 1)  # de-normalize into [0, 1]
    plt.imshow(image)

    if title is not None:
        plt.title(title)

    plt.pause(0.001)  # give the GUI event loop a chance to draw

def test_visualize_model(model, num_images=8): 
    """Plot each test batch in a (num_images//2) x 2 grid with predicted titles.

    Uses the module-level `dataloaders`, `device` and `class_names`.
    NOTE(review): images_so_far is reset after every batch, so every batch is
    drawn into the same subplot positions of the one figure created here.
    """
    model.eval() 
    images_so_far = 0
    fig = plt.figure()

    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['test1219']):
            inputs = inputs.to(device)
            labels = labels.to(device)

            # Ground-truth label names for this batch.
            # NOTE(review): the comprehension variable shadows the outer loop's `i`.
            t_label_name = [class_names[i] for i in labels]

            print("t_label_name  ----> ", t_label_name)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images//2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])

            plt.show()
            images_so_far = 0 # 

def test_model_acc(model):  
    """Print per-batch predictions and the overall accuracy on 'test1219'.

    Uses the module-level `dataloaders`, `device` and `dataset_sizes`.
    """
    model.eval()  

    total_correct = 0
    with torch.no_grad():
        for inputs, labels in dataloaders['test1219']:
            inputs = inputs.to(device)
            labels = labels.to(device)

            _, preds = torch.max(model(inputs), 1)
            print("labels ----> ", labels.data)
            print("preds ------>", preds)
            print()

            total_correct += torch.sum(preds == labels.data)  # calcu all batch precision

        test_acc = total_correct.double() / dataset_sizes["test1219"]
        print("test_acc ----> ", test_acc.item()) # item() --> get single tensor val

def test_model_acc_by_class(model):
    """Print the accuracy of *model* separately for each of the 2 classes.

    Uses the module-level `dataloaders`, `device` and `class_names`.
    """
    model.eval()
    n_correct = [0.0, 0.0]  # per-class hit counts
    n_total = [0.0, 0.0]    # per-class sample counts
    with torch.no_grad():
        for inputs, labels in dataloaders['test1219']:
            inputs = inputs.to(device)
            labels = labels.to(device)

            _, preds = torch.max(model(inputs), 1)
            hits = (preds == labels).squeeze()
            # The last batch may hold fewer samples than batch_size.
            for idx, label in enumerate(labels):
                n_correct[label] += hits[idx].item()
                n_total[label] += 1

    for i in range(2):
        print('total %6d  Accuracy of %5s : %.3f %%' % (n_total[i], class_names[i], 100 * n_correct[i] / n_total[i]))

# Load the full pickled model saved by the training script.
# NOTE(review): torch.load of a whole model unpickles arbitrary objects — only
# load trusted checkpoints, and the defining class must be importable here.
model = torch.load("model_screws_resnet18_530.pth")
#test_visualize_model(model, num_images=8)

test_model_acc(model)
test_model_acc_by_class(model)
#pred_single_screws.py
# -*- coding:utf-8 -*-
from __future__ import print_function
from __future__ import division
import torch
from torchvision import  transforms
from PIL import Image
import cv2
from utils import lp_wrapper, timer

# Preprocessing pipelines. 'single_test' is the one test_single_img uses;
# 'test1219' is an identical duplicate kept for parity with the test script.
data_transforms = {
    'test1219': transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'single_test': transforms.Compose([
            transforms.Resize((224,224)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

}

#@lp_wrapper
def test_single_img(model, cv2_img):
    """Classify one BGR image (OpenCV ndarray) and return the predicted class index (int)."""
    model.eval()
    transform = data_transforms['single_test']

    # OpenCV delivers BGR; the torchvision transforms expect an RGB PIL image.
    pil_image = Image.fromarray(cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB))

    batch = transform(pil_image).unsqueeze(0)  # add the batch dimension
    batch = batch.to("cpu")  # CPU inference
    scores = model(batch)
    _, pred = torch.max(scores, 1)

    return pred.item()


@timer
def load_model():
    """Load the pickled whole-model checkpoint onto the CPU (call is timed by @timer)."""
    # NOTE(review): torch.load unpickles arbitrary objects — only open trusted files.
    checkpoint_path = "./model_screws_resnet18_530.pth"
    return torch.load(checkpoint_path, map_location="cpu")



if __name__ == '__main__':
    model = load_model()
    #img_path = "/dataa/data/screws_data/test1219/ng/test_test_3645_1.jpg"
    img_path = "./screws_data/val/ng/12-09_16-56_5442_s1.bmp"
    # NOTE(review): cv2.imread returns None (no exception) when the path is
    # missing — test_single_img would then fail inside cvtColor.
    cv2_img = cv2.imread(img_path)

    pred = test_single_img(model, cv2_img=cv2_img)
    print(type(pred), pred)
#utils.py
# -*- coding:utf-8 -*-
import cv2
from imutils.paths import list_images, list_files
import os
import numpy as np
import time
from functools import wraps

from line_profiler import LineProfiler
lp = LineProfiler()

def timer(fn):
    """Decorator: print the wall-clock duration of every call to *fn*."""
    @wraps(fn)
    def inner(*args, **kwargs):
        t0 = time.time()
        result = fn(*args, **kwargs)
        print('The function {:*^20s}  run cost {:.4f} s'.format(fn.__name__, time.time()-t0))
        return result
    return inner

def lp_wrapper(fn): 
    """Decorator: run *fn* under the module-level LineProfiler and dump stats after each call."""
    @wraps(fn)
    def inner(*args, **kwargs):
        profiled = lp(fn)  # register fn with the shared LineProfiler instance
        result = profiled(*args, **kwargs)
        lp.print_stats()
        return result

    return inner

def video_2_pics(video_path, pics_save_dir, name_prefix='', frame_step=5):
    """Sample frames from a video into JPEG files.

    Every `frame_step`-th frame is written to
    `pics_save_dir + name_prefix + <frame_index>.jpg`.

    Args:
        video_path: input video readable by cv2.VideoCapture.
        pics_save_dir: output directory prefix; must already exist and should
            end with a path separator (the name is built by concatenation).
        name_prefix: optional prefix for the output file names.
        frame_step: keep one frame out of every `frame_step`. Was a
            hard-coded local (timeF = 5); the default preserves old behavior.
    """
    cap = cv2.VideoCapture(video_path)
    # fps / frame width / height are available via cap.get(cv2.CAP_PROP_*).

    cur_frame = 0
    while True:
        success, frame = cap.read()
        if not success:
            print('end')
            break
        if cur_frame % frame_step == 0:
            cv2.imwrite(pics_save_dir + name_prefix + str(cur_frame) + '.jpg', frame)
        cur_frame += 1
        cv2.waitKey(1)  # tiny pause; harmless when no GUI window is open
    cap.release()

# video_2_pics('../video_2019-12-09_17-19_test.avi',
#              '../video_2019-12-09_17-19_test/')

def cv2_show(name, img):
    """Display *img* in a window titled *name*; close after a key press or 50 s."""
    cv2.imshow(name,img)
    cv2.waitKey(50000)
    cv2.destroyAllWindows()

def draw_point_on_pic(img_path="C:/Users/F7687778/Desktop/3G_CH/4310.jpg", points_list=None):
    """Draw small red circles on an image and display it via cv2_show.

    Args:
        img_path: image to annotate. Was a hard-coded local path; kept as the
            default so existing no-argument calls behave the same.
        points_list: iterable of (x, y) pixel coordinates; defaults to the
            original four fixed points.
    """
    if points_list is None:
        points_list = [(750, 300), (1250, 300), (750, 750), (1250, 750)]
    img = cv2.imread(img_path)
    #print(img.shape) # (1080, 1920, 3)
    point_size = 1
    point_color = (0, 0, 255) # BGR
    thickness = 4

    for point in points_list:
        cv2.circle(img, point, point_size, point_color, thickness)

    cv2_show("jj", img)

#draw_point_on_pic()

def crop_pics(sour_dir, save_dir):
    """Crop a fixed region from every .jpg/.jpeg under sour_dir into save_dir.

    Args:
        sour_dir: directory tree to scan for JPEG images.
        save_dir: output directory prefix (created if missing; should end
            with a path separator — the output name is built by concatenation).
    """
    imagePaths = list_files(sour_dir, validExts=(".jpg", ".jpeg"))
    #print(list(imagePaths))
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    for imagePath in imagePaths:
        # BUG FIX: the pasted original used rpartition("") — an empty
        # separator raises ValueError. The intent (take the file name after
        # the last path separator, e.g. 2100.jpg) is exactly basename().
        img_name = os.path.basename(imagePath)
        img = cv2.imread(imagePath)
        cropped = img[650:800, 900:1150]  # rows 650:800, cols 900:1150
        cv2.imwrite(save_dir + img_name, cropped, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
# crop_pics('../video_2019-12-09_17-19_test', '../2019-12-09_17-19_crop_new/')

def save_target(crop_frame, coordinate, count, save_folder):
    """Save the (xmin, ymin, xmax, ymax) region of crop_frame as <count>.jpg (quality 100)."""
    region = crop_frame[coordinate[1]: coordinate[3], coordinate[0]: coordinate[2]]
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    out_path = save_folder + str(count) + '.jpg'
    cv2.imwrite(out_path, region, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

def cal_width_height(path):
    """Tally the widths and heights of all .jpg/.jpeg images under *path*.

    Prints two (value, count) lists sorted by value, largest first.
    """
    width, height = {}, {}
    for image_path in list_files(path, validExts=(".jpg", ".jpeg")):
        img = cv2.imread(image_path)
        h, w = img.shape[0], img.shape[1]
        width[w] = width.get(w, 0) + 1
        height[h] = height.get(h, 0) + 1
    width_order = sorted(width.items(), key=lambda kv: kv[0], reverse=True)
    height_order = sorted(height.items(), key=lambda kv: kv[0], reverse=True)
    print(width_order)
    print(height_order)

def change_npy_images_path(sour_npy, s_dir, d_dir, des_npy):
    """Rewrite every path stored in a .npy string array, replacing s_dir with d_dir.

    Loads *sour_npy*, applies str.replace(s_dir, d_dir) to each entry and
    saves the result as a unicode array at *des_npy*.
    """
    updated = [p.replace(s_dir, d_dir) for p in np.load(sour_npy)]
    np.save(des_npy, np.array(updated, dtype="unicode"))


# change_npy_images_path(sour_npy="../label_box/images.npy",
#                        s_dir="../2019-12-09_17-19_crop",
#                        d_dir="E:/project/5T_5B/2019-12-09_17-19_crop/",
#                        des_npy="../label_box/images.npy")
# imagePaths = np.load("../label_box/images.npy")
# print(imagePaths)

def combine_npy(annot, imgs, new_annot_path, new_imgs_path):
    """Concatenate several annotation .npy files and several image-path .npy files.

    Args:
        annot: list of paths to annotation arrays (concatenated along axis 0).
        imgs: list of paths to image-path arrays (concatenated along axis 0).
        new_annot_path: where the combined annotation array is saved.
        new_imgs_path: where the combined image-path array is saved.

    Prints each loaded shape and the two final shapes along the way.
    """
    merged_annot = np.load(annot[0])
    merged_imgs = np.load(imgs[0])
    print(merged_annot.shape)
    print(merged_imgs.shape)
    for path in annot[1:]:
        part = np.load(path)
        print(part.shape)
        merged_annot = np.concatenate((merged_annot, part))
    for path in imgs[1:]:
        part = np.load(path)
        print(part.shape)
        merged_imgs = np.concatenate((merged_imgs, part))
    print(merged_annot.shape)
    print(merged_imgs.shape)
    np.save(new_annot_path, merged_annot)
    np.save(new_imgs_path, merged_imgs)



@timer
def batch_rename(local_dir= "C:/Users/F7687778/Desktop/video_2019-12-17_16-07/"):
    """Prefix every entry directly under *local_dir* with 'tt_' (call is timed by @timer)."""
    for entry in os.listdir(local_dir):
        old_path = local_dir + entry
        new_path = local_dir + 'tt_' + entry
        os.rename(old_path, new_path)

# batch_rename()
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值