print('hello world!')  # all-purpose smoke test
hello world!
Test GPU availability and speed
# Verify that TensorFlow can see a GPU; abort the notebook run otherwise.
import tensorflow as tf

device_name = tf.test.gpu_device_name()
if device_name == '/device:GPU:0':
    # GPU runtime is active — report which device was found.
    print(device_name)
else:
    raise SystemError('GPU device not found')
/device:GPU:0
!nvidia-smi
Mon Oct 24 08:46:47 2022
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |
| N/A 46C P0 28W / 70W | 312MiB / 15109MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
+-----------------------------------------------------------------------------+
# Report the installed PyTorch version.
import torch
print(torch.__version__)
Load TensorFlow 2
# Request a TF version (no-op on modern Colab, which only ships TF 2.x).
%tensorflow_version 2.9.1 #Colab only includes TensorFlow 2.x; %tensorflow_version has no effect.
import tensorflow
print(tensorflow.__version__)
Colab only includes TensorFlow 2.x; %tensorflow_version has no effect.
2.9.2
Mount Google Drive into this working environment
# Mount Google Drive so its files appear under /content/drive/.
from google.colab import drive
drive.mount('/content/drive/')
Drive already mounted at /content/drive/; to attempt to forcibly remount, call drive.mount("/content/drive/", force_remount=True).
Change the working directory.
Avoid spaces in directory names; if a name does contain a space, escape the space with a backslash whenever the name is used.
%cd "/content/drive/MyDrive/Learing_COLAB"
%ls  # list the contents of the current directory
/content/drive/MyDrive/Learing_COLAB
GitHub源码.zip
import os
# NOTE: os.path.abspath(__file__) only works inside a real .py file; a
# notebook cell behaves like a command line and has no __file__, so it
# would raise an error:
# Thisnotepath=os.path.abspath(__file__)#只在代码文件中好用,这里是命令行一样的东西,会报错
Thisnotepath=os.getcwd()  # use the current working directory instead
print(Thisnotepath)
/content/drive/MyDrive/Learing_COLAB
# import zipfile
# f = zipfile.ZipFile("/content/drive/MyDrive/Learing_COLAB/GitHub源码.zip",'r') # 压缩文件位置
# for file in f.namelist():
# f.extract(file,"../") # 解压位置
# f.close()
# /content/drive/MyDrive/deep-learning-for-image-processing-master
%cd "/content/drive/MyDrive/deep-learning-for-image-processing-master"
%ls  # list the contents of the current directory
/content/drive/MyDrive/deep-learning-for-image-processing-master
[0m[01;34marticle_link[0m/ [01;34mothers_project[0m/ README.md
[01;34mcourse_ppt[0m/ [01;34mpytorch_classification[0m/ summary_problem.md
[01;34mdata_set[0m/ [01;34mpytorch_keypoint[0m/ [01;34mtensorflow_classification[0m/
[01;34mdeploying_service[0m/ [01;34mpytorch_object_detection[0m/
LICENSE [01;34mpytorch_segmentation[0m/
Run a .py file with a magic command
Download the flower dataset
import os
import zipfile
import matplotlib.pyplot as plt
import numpy as np
import requests
from six.moves import urllib
from tqdm import tqdm
def download_from_url(url, dst):
    """Download *url* to *dst*, resuming a partial download if one exists.

    Sends an HTTP Range request starting at the current size of *dst*
    and appends the remaining bytes, showing a tqdm progress bar.

    @param url: URL of the file to download.
    @param dst: local path where the file is written.
    @return: total size of the remote file in bytes.
    """
    # Ask the server for the total size; close the probe response to
    # avoid leaking the connection.
    with urllib.request.urlopen(url) as resp:
        file_size = int(resp.info().get('Content-Length', -1))
    # Resume from however many bytes are already on disk.
    first_byte = os.path.getsize(dst) if os.path.exists(dst) else 0
    print(file_size)
    if first_byte >= file_size:
        # Nothing left to fetch.
        return file_size
    # BUG FIX: the HTTP Range end index is inclusive (RFC 7233), so the
    # last byte is file_size - 1, not file_size.
    header = {"Range": "bytes=%s-%s" % (first_byte, file_size - 1)}
    pbar = tqdm(total=file_size, initial=first_byte, unit='B',
                unit_scale=True, desc=url.split('/')[-1])
    req = requests.get(url, headers=header, stream=True)
    with open(dst, 'ab') as f:
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                # BUG FIX: advance by the actual chunk length, not a
                # fixed 1024 — the final chunk is usually shorter, so
                # the old code over-counted the progress total.
                pbar.update(len(chunk))
    pbar.close()
    return file_size
def download_m2nist_if_not_exist():
    """Download and extract the TF flower_photos dataset if missing.

    Skips both download and extraction when the archive already exists.
    :return: None
    """
    # Directory that will hold the dataset, at the same level as the repo files.
    data_rootdir = r'/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set'
    # Create the save path if it does not exist (idempotent).
    os.makedirs(data_rootdir, exist_ok=True)
    # Local filename for the downloaded archive.
    m2nist_zip_path = os.path.join(data_rootdir, 'flower_photos.tgz')
    if os.path.exists(m2nist_zip_path):
        # Archive already downloaded — nothing to do.
        return
    # Source URL for the download.
    m2nist_zip_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
    # Perform the (resumable) download.
    download_from_url(m2nist_zip_url, m2nist_zip_path)
    # BUG FIX: the archive is a gzipped tar (.tgz), NOT a zip file —
    # zipfile.ZipFile would raise BadZipFile here. Extract with tarfile.
    import tarfile
    with tarfile.open(m2nist_zip_path, 'r:gz') as archive:
        archive.extractall(data_rootdir)


download_m2nist_if_not_exist()
# import zipfile
# f = zipfile.ZipFile("/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set/flower_photos.zip",'r') # 压缩文件位置
# for file in f.namelist():
# f.extract(file,"./flower_photos") # 解压位置
# f.close()
# tar zxvf filename.tgz
%cd "/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set/flower_data/flower_photos"
%ls #显示当前文件夹下的目录
!tar zxvf r'/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set/flower_photos.tgz'
#切换到当前目录
# %cd "/content/drive/MyDrive/deep-learning-for-image-processing-master"
# %ls #显示当前文件夹下的目录
/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set/flower_data/flower_photos
[0m[01;34mdaisy[0m/ [01;34mdandelion[0m/ LICENSE.txt [01;34mroses[0m/ [01;34msunflowers[0m/ [01;34mtulips[0m/
tar (child): r/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set/flower_photos.tgz: Cannot open: No such file or directory
tar (child): Error is not recoverable: exiting now
tar: Child returned status 2
tar: Error is not recoverable: exiting now
%cd "/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set/flower_data/flower_photos"
%ls #显示当前文件夹下的目录
!tar zxvf r'/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set/flower_photos.tgz'
%cd "/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set"
# import os
from shutil import copy, rmtree
import random
def mk_file(file_path: str):
    """Create *file_path* as a fresh, empty directory.

    Any directory already present at that path is deleted first, so the
    result always starts out empty.
    """
    # EAFP: attempt the removal and ignore the path not existing yet.
    try:
        rmtree(file_path)
    except FileNotFoundError:
        pass
    os.makedirs(file_path)
def main():
    """Split the flower_photos dataset into train/ and val/ folders.

    Copies 10% of each class's images into flower_data/val/<class> and
    the remaining 90% into flower_data/train/<class>. Expects the
    extracted flower_photos folder under <cwd>/flower_data.

    Raises:
        AssertionError: if the flower_photos folder is missing.
    """
    # Fixed seed so the split is reproducible across runs.
    random.seed(0)
    # Fraction of each class routed to the validation set.
    split_rate = 0.1

    # Locate the extracted flower_photos folder.
    cwd = os.getcwd()
    data_root = os.path.join(cwd, "flower_data")
    origin_flower_path = os.path.join(data_root, "flower_photos")
    assert os.path.exists(origin_flower_path), "path '{}' does not exist.".format(origin_flower_path)

    # One class per sub-directory of flower_photos (files are skipped).
    flower_class = [cla for cla in os.listdir(origin_flower_path)
                    if os.path.isdir(os.path.join(origin_flower_path, cla))]

    # Create train/ with one empty sub-folder per class.
    train_root = os.path.join(data_root, "train")
    mk_file(train_root)
    for cla in flower_class:
        mk_file(os.path.join(train_root, cla))

    # Create val/ with one empty sub-folder per class.
    val_root = os.path.join(data_root, "val")
    mk_file(val_root)
    for cla in flower_class:
        mk_file(os.path.join(val_root, cla))

    for cla in flower_class:
        cla_path = os.path.join(origin_flower_path, cla)
        images = os.listdir(cla_path)
        num = len(images)
        # PERF: materialize the sampled validation names as a set so the
        # per-image membership test below is O(1) instead of O(k).
        eval_index = set(random.sample(images, k=int(num * split_rate)))
        for index, image in enumerate(images):
            image_path = os.path.join(cla_path, image)
            # Validation images go to val/<class>; the rest to train/<class>.
            dest_root = val_root if image in eval_index else train_root
            copy(image_path, os.path.join(dest_root, cla))
            print("\r[{}] processing [{}/{}]".format(cla, index+1, num), end="")  # processing bar
        print()

    print("processing done!")


if __name__ == '__main__':
    main()
/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set/flower_data/flower_photos
[0m[01;34mdaisy[0m/ [01;34mdandelion[0m/ LICENSE.txt [01;34mroses[0m/ [01;34msunflowers[0m/ [01;34mtulips[0m/
tar (child): r/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set/flower_photos.tgz: Cannot open: No such file or directory
tar (child): Error is not recoverable: exiting now
tar: Child returned status 2
tar: Error is not recoverable: exiting now
/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set
[daisy] processing [633/633]
[dandelion] processing [898/898]
[sunflowers] processing [806/806]
[tulips] processing [1317/1317]
[roses] processing [641/641]
processing done!
%cd "/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set/flower_data"
%ls  # list the contents of the current directory
/content/drive/MyDrive/deep-learning-for-image-processing-master/data_set/flower_data
[0m[01;34mflower_photos[0m/ [01;34mtrain[0m/ [01;34mval[0m/
# Launch VGGNet training on the prepared flower dataset.
%cd "/content/drive/MyDrive/deep-learning-for-image-processing-master/pytorch_classification/Test3_vggnet"
%ls  # list the contents of the current directory
!python '/content/drive/MyDrive/deep-learning-for-image-processing-master/pytorch_classification/Test3_vggnet/train.py'
using cuda:0 device.
Using 2 dataloader workers every process
using 3868 images for training, 427 images for validation.
train epoch[1/30] loss:1.281: 100% 121/121 [01:03<00:00, 1.91it/s]
100% 14/14 [00:02<00:00, 5.00it/s]
[epoch 1] train_loss: 1.407 val_accuracy: 0.457
train epoch[2/30] loss:1.571: 100% 121/121 [00:59<00:00, 2.05it/s]
100% 14/14 [00:03<00:00, 3.50it/s]
[epoch 2] train_loss: 1.285 val_accuracy: 0.461
train epoch[3/30] loss:1.376: 100% 121/121 [00:59<00:00, 2.02it/s]
100% 14/14 [00:03<00:00, 3.67it/s]
[epoch 3] train_loss: 1.266 val_accuracy: 0.466
train epoch[4/30] loss:1.305: 100% 121/121 [00:59<00:00, 2.03it/s]
100% 14/14 [00:03<00:00, 3.72it/s]
[epoch 4] train_loss: 1.256 val_accuracy: 0.475
train epoch[5/30] loss:1.566: 100% 121/121 [00:59<00:00, 2.03it/s]
100% 14/14 [00:03<00:00, 3.54it/s]
[epoch 5] train_loss: 1.210 val_accuracy: 0.504
train epoch[6/30] loss:0.911: 100% 121/121 [01:00<00:00, 2.00it/s]
100% 14/14 [00:04<00:00, 3.43it/s]
[epoch 6] train_loss: 1.121 val_accuracy: 0.625
train epoch[7/30] loss:0.992: 100% 121/121 [00:59<00:00, 2.02it/s]
100% 14/14 [00:03<00:00, 3.53it/s]
[epoch 7] train_loss: 1.061 val_accuracy: 0.644
train epoch[8/30] loss:1.062: 100% 121/121 [00:59<00:00, 2.02it/s]
100% 14/14 [00:03<00:00, 3.71it/s]
[epoch 8] train_loss: 0.997 val_accuracy: 0.621
train epoch[9/30] loss:1.116: 100% 121/121 [00:59<00:00, 2.03it/s]
100% 14/14 [00:03<00:00, 3.57it/s]
[epoch 9] train_loss: 0.969 val_accuracy: 0.681
train epoch[10/30] loss:0.746: 100% 121/121 [00:59<00:00, 2.02it/s]
100% 14/14 [00:03<00:00, 3.68it/s]
[epoch 10] train_loss: 0.929 val_accuracy: 0.707
train epoch[11/30] loss:0.697: 100% 121/121 [00:59<00:00, 2.02it/s]
100% 14/14 [00:03<00:00, 3.60it/s]
[epoch 11] train_loss: 0.890 val_accuracy: 0.712
train epoch[12/30] loss:0.785: 100% 121/121 [00:59<00:00, 2.04it/s]
100% 14/14 [00:03<00:00, 3.69it/s]
[epoch 12] train_loss: 0.876 val_accuracy: 0.707
train epoch[13/30] loss:0.782: 100% 121/121 [00:59<00:00, 2.05it/s]
100% 14/14 [00:03<00:00, 3.65it/s]
[epoch 13] train_loss: 0.861 val_accuracy: 0.710
train epoch[14/30] loss:0.643: 100% 121/121 [00:59<00:00, 2.02it/s]
100% 14/14 [00:03<00:00, 3.92it/s]
[epoch 14] train_loss: 0.816 val_accuracy: 0.724
train epoch[15/30] loss:0.556: 100% 121/121 [00:59<00:00, 2.03it/s]
100% 14/14 [00:03<00:00, 3.79it/s]
[epoch 15] train_loss: 0.766 val_accuracy: 0.738
train epoch[16/30] loss:0.801: 100% 121/121 [00:59<00:00, 2.04it/s]
100% 14/14 [00:03<00:00, 3.59it/s]
[epoch 16] train_loss: 0.802 val_accuracy: 0.663
train epoch[17/30] loss:0.636: 100% 121/121 [00:59<00:00, 2.04it/s]
100% 14/14 [00:04<00:00, 3.35it/s]
[epoch 17] train_loss: 0.754 val_accuracy: 0.754
train epoch[18/30] loss:0.507: 100% 121/121 [01:00<00:00, 2.01it/s]
100% 14/14 [00:03<00:00, 3.64it/s]
[epoch 18] train_loss: 0.729 val_accuracy: 0.740
train epoch[19/30] loss:0.621: 100% 121/121 [00:59<00:00, 2.04it/s]
100% 14/14 [00:03<00:00, 3.58it/s]
[epoch 19] train_loss: 0.735 val_accuracy: 0.759
train epoch[20/30] loss:0.824: 100% 121/121 [00:59<00:00, 2.03it/s]
100% 14/14 [00:04<00:00, 3.44it/s]
[epoch 20] train_loss: 0.697 val_accuracy: 0.747
train epoch[21/30] loss:0.848: 100% 121/121 [00:59<00:00, 2.03it/s]
100% 14/14 [00:05<00:00, 2.58it/s]
[epoch 21] train_loss: 0.706 val_accuracy: 0.770
train epoch[22/30] loss:0.747: 100% 121/121 [00:59<00:00, 2.03it/s]
100% 14/14 [00:03<00:00, 3.55it/s]
[epoch 22] train_loss: 0.678 val_accuracy: 0.759
train epoch[23/30] loss:0.420: 100% 121/121 [00:59<00:00, 2.04it/s]
100% 14/14 [00:04<00:00, 3.45it/s]
[epoch 23] train_loss: 0.652 val_accuracy: 0.761
train epoch[24/30] loss:0.380: 100% 121/121 [00:59<00:00, 2.04it/s]
100% 14/14 [00:03<00:00, 3.63it/s]
[epoch 24] train_loss: 0.660 val_accuracy: 0.759
train epoch[25/30] loss:1.107: 100% 121/121 [00:59<00:00, 2.03it/s]
100% 14/14 [00:04<00:00, 3.13it/s]
[epoch 25] train_loss: 0.653 val_accuracy: 0.778
train epoch[26/30] loss:0.548: 100% 121/121 [00:59<00:00, 2.03it/s]
100% 14/14 [00:03<00:00, 3.60it/s]
[epoch 26] train_loss: 0.627 val_accuracy: 0.770
train epoch[27/30] loss:0.530: 100% 121/121 [00:59<00:00, 2.04it/s]
100% 14/14 [00:03<00:00, 3.59it/s]
[epoch 27] train_loss: 0.644 val_accuracy: 0.770
train epoch[28/30] loss:0.488: 100% 121/121 [00:59<00:00, 2.04it/s]
100% 14/14 [00:03<00:00, 3.72it/s]
[epoch 28] train_loss: 0.610 val_accuracy: 0.787
train epoch[29/30] loss:0.487: 100% 121/121 [01:00<00:00, 2.01it/s]
100% 14/14 [00:04<00:00, 3.50it/s]
[epoch 29] train_loss: 0.590 val_accuracy: 0.763
train epoch[30/30] loss:0.505: 100% 121/121 [00:59<00:00, 2.04it/s]
100% 14/14 [00:03<00:00, 3.75it/s]
[epoch 30] train_loss: 0.609 val_accuracy: 0.808
Finished Training
# Switch to the GoogLeNet example folder:
# /content/drive/MyDrive/deep-learning-for-image-processing-master/pytorch_classification/Test4_googlenet/train.py
%cd "/content/drive/MyDrive/deep-learning-for-image-processing-master/pytorch_classification/Test4_googlenet"
%ls
/content/drive/MyDrive/deep-learning-for-image-processing-master/pytorch_classification/Test4_googlenet
class_indices.json model.py predict.py train.py
# Launch GoogLeNet training on the prepared flower dataset.
!python '/content/drive/MyDrive/deep-learning-for-image-processing-master/pytorch_classification/Test4_googlenet/train.py'
using cuda:0 device.
Using 2 dataloader workers every process
using 3868 images for training, 427 images for validation.
train epoch[1/64] loss:2.411: 100% 16/16 [00:23<00:00, 1.44s/it]
100% 2/2 [00:02<00:00, 1.31s/it]
[epoch 1] train_loss: 5.018 val_accuracy: 0.389
train epoch[2/64] loss:1.711: 100% 16/16 [00:23<00:00, 1.45s/it]
100% 2/2 [00:02<00:00, 1.34s/it]
[epoch 2] train_loss: 2.123 val_accuracy: 0.539
train epoch[3/64] loss:1.510: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.40s/it]
[epoch 3] train_loss: 1.817 val_accuracy: 0.520
train epoch[4/64] loss:2.249: 100% 16/16 [00:22<00:00, 1.40s/it]
100% 2/2 [00:02<00:00, 1.40s/it]
[epoch 4] train_loss: 1.821 val_accuracy: 0.597
train epoch[5/64] loss:1.562: 100% 16/16 [00:22<00:00, 1.44s/it]
100% 2/2 [00:04<00:00, 2.36s/it]
[epoch 5] train_loss: 1.710 val_accuracy: 0.593
train epoch[6/64] loss:1.582: 100% 16/16 [00:22<00:00, 1.40s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 6] train_loss: 1.578 val_accuracy: 0.651
train epoch[7/64] loss:1.163: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.36s/it]
[epoch 7] train_loss: 1.457 val_accuracy: 0.674
train epoch[8/64] loss:1.430: 100% 16/16 [00:22<00:00, 1.44s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 8] train_loss: 1.466 val_accuracy: 0.642
train epoch[9/64] loss:1.180: 100% 16/16 [00:22<00:00, 1.39s/it]
100% 2/2 [00:02<00:00, 1.35s/it]
[epoch 9] train_loss: 1.440 val_accuracy: 0.660
train epoch[10/64] loss:1.068: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.35s/it]
[epoch 10] train_loss: 1.327 val_accuracy: 0.618
train epoch[11/64] loss:1.482: 100% 16/16 [00:22<00:00, 1.39s/it]
100% 2/2 [00:02<00:00, 1.38s/it]
[epoch 11] train_loss: 1.357 val_accuracy: 0.604
train epoch[12/64] loss:1.660: 100% 16/16 [00:22<00:00, 1.40s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 12] train_loss: 1.465 val_accuracy: 0.717
train epoch[13/64] loss:1.518: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.32s/it]
[epoch 13] train_loss: 1.296 val_accuracy: 0.714
train epoch[14/64] loss:1.100: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.33s/it]
[epoch 14] train_loss: 1.215 val_accuracy: 0.766
train epoch[15/64] loss:1.881: 100% 16/16 [00:25<00:00, 1.60s/it]
100% 2/2 [00:02<00:00, 1.36s/it]
[epoch 15] train_loss: 1.183 val_accuracy: 0.672
train epoch[16/64] loss:1.119: 100% 16/16 [00:22<00:00, 1.44s/it]
100% 2/2 [00:02<00:00, 1.34s/it]
[epoch 16] train_loss: 1.310 val_accuracy: 0.700
train epoch[17/64] loss:1.028: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 17] train_loss: 1.193 val_accuracy: 0.721
train epoch[18/64] loss:1.090: 100% 16/16 [00:22<00:00, 1.40s/it]
100% 2/2 [00:02<00:00, 1.36s/it]
[epoch 18] train_loss: 1.141 val_accuracy: 0.698
train epoch[19/64] loss:0.692: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.34s/it]
[epoch 19] train_loss: 1.177 val_accuracy: 0.742
train epoch[20/64] loss:1.115: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.34s/it]
[epoch 20] train_loss: 1.061 val_accuracy: 0.585
train epoch[21/64] loss:1.457: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 21] train_loss: 1.262 val_accuracy: 0.735
train epoch[22/64] loss:0.905: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.36s/it]
[epoch 22] train_loss: 1.106 val_accuracy: 0.747
train epoch[23/64] loss:0.857: 100% 16/16 [00:22<00:00, 1.43s/it]
100% 2/2 [00:02<00:00, 1.34s/it]
[epoch 23] train_loss: 1.036 val_accuracy: 0.724
train epoch[24/64] loss:0.910: 100% 16/16 [00:25<00:00, 1.57s/it]
100% 2/2 [00:02<00:00, 1.33s/it]
[epoch 24] train_loss: 1.020 val_accuracy: 0.740
train epoch[25/64] loss:1.008: 100% 16/16 [00:22<00:00, 1.39s/it]
100% 2/2 [00:02<00:00, 1.36s/it]
[epoch 25] train_loss: 1.030 val_accuracy: 0.674
train epoch[26/64] loss:1.175: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.36s/it]
[epoch 26] train_loss: 1.054 val_accuracy: 0.686
train epoch[27/64] loss:0.620: 100% 16/16 [00:22<00:00, 1.39s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 27] train_loss: 1.016 val_accuracy: 0.733
train epoch[28/64] loss:0.860: 100% 16/16 [00:22<00:00, 1.43s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 28] train_loss: 0.951 val_accuracy: 0.766
train epoch[29/64] loss:0.746: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.35s/it]
[epoch 29] train_loss: 0.929 val_accuracy: 0.747
train epoch[30/64] loss:1.387: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 30] train_loss: 0.976 val_accuracy: 0.738
train epoch[31/64] loss:0.824: 100% 16/16 [00:25<00:00, 1.57s/it]
100% 2/2 [00:02<00:00, 1.35s/it]
[epoch 31] train_loss: 0.921 val_accuracy: 0.766
train epoch[32/64] loss:0.996: 100% 16/16 [00:23<00:00, 1.46s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 32] train_loss: 0.973 val_accuracy: 0.672
train epoch[33/64] loss:1.363: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 33] train_loss: 1.106 val_accuracy: 0.740
train epoch[34/64] loss:1.219: 100% 16/16 [00:22<00:00, 1.39s/it]
100% 2/2 [00:02<00:00, 1.35s/it]
[epoch 34] train_loss: 0.967 val_accuracy: 0.726
train epoch[35/64] loss:0.863: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.35s/it]
[epoch 35] train_loss: 1.026 val_accuracy: 0.752
train epoch[36/64] loss:0.723: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 36] train_loss: 0.864 val_accuracy: 0.756
train epoch[37/64] loss:0.785: 100% 16/16 [00:22<00:00, 1.40s/it]
100% 2/2 [00:02<00:00, 1.43s/it]
[epoch 37] train_loss: 0.817 val_accuracy: 0.740
train epoch[38/64] loss:0.574: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:04<00:00, 2.15s/it]
[epoch 38] train_loss: 0.946 val_accuracy: 0.787
train epoch[39/64] loss:1.012: 100% 16/16 [00:23<00:00, 1.48s/it]
100% 2/2 [00:02<00:00, 1.36s/it]
[epoch 39] train_loss: 0.880 val_accuracy: 0.761
train epoch[40/64] loss:0.936: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.34s/it]
[epoch 40] train_loss: 0.843 val_accuracy: 0.817
train epoch[41/64] loss:0.640: 100% 16/16 [00:22<00:00, 1.44s/it]
100% 2/2 [00:02<00:00, 1.35s/it]
[epoch 41] train_loss: 0.763 val_accuracy: 0.796
train epoch[42/64] loss:0.833: 100% 16/16 [00:22<00:00, 1.43s/it]
100% 2/2 [00:02<00:00, 1.36s/it]
[epoch 42] train_loss: 0.770 val_accuracy: 0.787
train epoch[43/64] loss:0.655: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.36s/it]
[epoch 43] train_loss: 0.816 val_accuracy: 0.766
train epoch[44/64] loss:0.646: 100% 16/16 [00:22<00:00, 1.39s/it]
100% 2/2 [00:02<00:00, 1.40s/it]
[epoch 44] train_loss: 0.830 val_accuracy: 0.815
train epoch[45/64] loss:0.748: 100% 16/16 [00:22<00:00, 1.43s/it]
100% 2/2 [00:02<00:00, 1.35s/it]
[epoch 45] train_loss: 0.769 val_accuracy: 0.817
train epoch[46/64] loss:0.792: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.35s/it]
[epoch 46] train_loss: 0.742 val_accuracy: 0.794
train epoch[47/64] loss:0.554: 100% 16/16 [00:23<00:00, 1.45s/it]
100% 2/2 [00:04<00:00, 2.36s/it]
[epoch 47] train_loss: 0.739 val_accuracy: 0.789
train epoch[48/64] loss:0.321: 100% 16/16 [00:23<00:00, 1.46s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 48] train_loss: 0.701 val_accuracy: 0.778
train epoch[49/64] loss:0.836: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 49] train_loss: 0.720 val_accuracy: 0.756
train epoch[50/64] loss:0.345: 100% 16/16 [00:24<00:00, 1.52s/it]
100% 2/2 [00:02<00:00, 1.36s/it]
[epoch 50] train_loss: 0.798 val_accuracy: 0.799
train epoch[51/64] loss:0.420: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.39s/it]
[epoch 51] train_loss: 0.767 val_accuracy: 0.815
train epoch[52/64] loss:0.714: 100% 16/16 [00:22<00:00, 1.43s/it]
100% 2/2 [00:02<00:00, 1.33s/it]
[epoch 52] train_loss: 0.733 val_accuracy: 0.789
train epoch[53/64] loss:0.989: 100% 16/16 [00:22<00:00, 1.40s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 53] train_loss: 0.724 val_accuracy: 0.810
train epoch[54/64] loss:0.623: 100% 16/16 [00:23<00:00, 1.44s/it]
100% 2/2 [00:02<00:00, 1.40s/it]
[epoch 54] train_loss: 0.684 val_accuracy: 0.824
train epoch[55/64] loss:0.752: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.34s/it]
[epoch 55] train_loss: 0.665 val_accuracy: 0.778
train epoch[56/64] loss:0.630: 100% 16/16 [00:22<00:00, 1.41s/it]
100% 2/2 [00:02<00:00, 1.34s/it]
[epoch 56] train_loss: 0.692 val_accuracy: 0.806
train epoch[57/64] loss:0.579: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:03<00:00, 1.77s/it]
[epoch 57] train_loss: 0.652 val_accuracy: 0.806
train epoch[58/64] loss:0.640: 100% 16/16 [00:25<00:00, 1.56s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 58] train_loss: 0.680 val_accuracy: 0.782
train epoch[59/64] loss:0.799: 100% 16/16 [00:22<00:00, 1.39s/it]
100% 2/2 [00:02<00:00, 1.35s/it]
[epoch 59] train_loss: 0.669 val_accuracy: 0.787
train epoch[60/64] loss:0.784: 100% 16/16 [00:22<00:00, 1.40s/it]
100% 2/2 [00:02<00:00, 1.38s/it]
[epoch 60] train_loss: 0.718 val_accuracy: 0.796
train epoch[61/64] loss:0.454: 100% 16/16 [00:22<00:00, 1.40s/it]
100% 2/2 [00:02<00:00, 1.33s/it]
[epoch 61] train_loss: 0.666 val_accuracy: 0.801
train epoch[62/64] loss:0.754: 100% 16/16 [00:22<00:00, 1.44s/it]
100% 2/2 [00:02<00:00, 1.34s/it]
[epoch 62] train_loss: 0.667 val_accuracy: 0.838
train epoch[63/64] loss:0.257: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.37s/it]
[epoch 63] train_loss: 0.611 val_accuracy: 0.820
train epoch[64/64] loss:0.315: 100% 16/16 [00:22<00:00, 1.42s/it]
100% 2/2 [00:02<00:00, 1.35s/it]
[epoch 64] train_loss: 0.618 val_accuracy: 0.787
Finished Training