Pytorch——SummaryWriter.add_image()的使用

用法:

import cv2
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms


# Absolute path example: F:\PycharmProjects\Learning1011\dataset\train\ants\0013035.jpg
# Relative path example: dataset/train/ants/0013035.jpg

img_path = "dataset/train/ants/0013035.jpg"  # path to the sample image

# Two ways to load the image:
pil_image = Image.open(img_path)        # way 1: PIL (used below)
# img_cv = cv2.imread(img_path)         # way 2: OpenCV (ndarray, HWC/BGR)

# All event files go into the "logs" directory.
writer = SummaryWriter("logs")

# 1. How to use torchvision.transforms:
#    pick a transform class from the "toolbox" and instantiate it...
to_tensor = transforms.ToTensor()
# ...then call the instance with the input; it returns a CHW float tensor.
image_tensor = to_tensor(pil_image)

# add_image(tag, img_tensor, global_step=...) logs one image under the
# given tag; increasing global_step adds further frames to the same tag.
writer.add_image("Tensor_img", image_tensor)
writer.close()  # flush and close the event file

显示 :

1、在控制台输入:

tensorboard --logdir=logs   # logdir 为日志文件夹名(此处为 logs)

2、 修改端口号方式:

tensorboard --logdir=logs --port=6007   # 自己指定显示的端口号

在这里插入图片描述

结果:

在这里插入图片描述

import jittor as jt
import jrender as jr

jt.flags.use_cuda = 1  # enable GPU acceleration

import os
import tqdm
import numpy as np
import imageio
import argparse

# Directory containing this file, and its bundled data directory.
current_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(current_dir, 'data')


def main():
    """Render a textured mesh with the SoftRas differentiable renderer.

    Produces three artifacts in the output directory:
      * rotation.gif - the mesh viewed over a full 360-degree turn,
      * bluring.gif  - the mesh rendered with a sweep of sigma/gamma blur,
      * saved_spot.obj - the mesh saved back out with its texture.
    """
    # Command-line arguments: input .obj path and output directory.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename-input', type=str,
                        default=os.path.join(data_dir, 'obj/spot/spot_triangulated.obj'))
    parser.add_argument('-o', '--output-dir', type=str,
                        default=os.path.join(data_dir, 'results/output_render'))
    args = parser.parse_args()

    # Camera settings.
    camera_distance = 2.732
    elevation = 30
    azimuth = 0

    # Load the mesh (with its texture) from a Wavefront .obj file.
    mesh = jr.Mesh.from_obj(args.filename_input, load_texture=True, texture_res=5,
                            texture_type='surface', dr_type='softras')

    # Create the SoftRas renderer.
    renderer = jr.Renderer(dr_type='softras')

    os.makedirs(args.output_dir, exist_ok=True)

    # --- Pass 1: draw the object from different viewpoints (4-degree steps).
    loop = tqdm.tqdm(list(range(0, 360, 4)))
    writer = imageio.get_writer(os.path.join(args.output_dir, 'rotation.gif'), mode='I')
    for num, azimuth in enumerate(loop):
        mesh.reset_()  # reset mesh to its initial state each frame
        loop.set_description('Drawing rotation')
        renderer.transform.set_eyes_from_angles(camera_distance, elevation, azimuth)
        rgb = renderer.render_mesh(mesh, mode='rgb')
        image = rgb.numpy()[0].transpose((1, 2, 0))  # CHW -> HWC
        writer.append_data((255 * image).astype(np.uint8))
    writer.close()

    # --- Pass 2: draw the object with varying sigma/gamma (blur sweep).
    loop = tqdm.tqdm(list(np.arange(-4, -2, 0.2)))
    renderer.transform.set_eyes_from_angles(camera_distance, elevation, 45)
    writer = imageio.get_writer(os.path.join(args.output_dir, 'bluring.gif'), mode='I')
    for num, gamma_pow in enumerate(loop):
        mesh.reset_()  # reset mesh to its initial state each frame
        renderer.set_gamma(10 ** gamma_pow)
        renderer.set_sigma(10 ** (gamma_pow - 1))
        loop.set_description('Drawing blurring')
        images = renderer.render_mesh(mesh, mode='rgb')
        image = images.numpy()[0].transpose((1, 2, 0))  # [image_size, image_size, RGB]
        writer.append_data((255 * image).astype(np.uint8))
    writer.close()

    # Save the (reset) textured mesh back to an .obj file.
    mesh.reset_()
    mesh.save_obj(os.path.join(args.output_dir, 'saved_spot.obj'))


if __name__ == '__main__':
    main()
最新发布
06-07
# Import required libraries.
import jittor as jt
import jrender as jr

jt.flags.use_cuda = 1  # enable GPU acceleration

import os
import tqdm
import numpy as np
import imageio
import argparse

# Directory containing this file, and its bundled data directory.
current_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(current_dir, 'data')


def main():
    """Render a textured mesh with SoftRas: a rotation GIF, a blur-sweep
    GIF, and a saved copy of the textured .obj."""
    # Create the command-line argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename-input', type=str,
                        default=os.path.join(data_dir, 'obj/spot/spot_triangulated.obj'))  # input file path
    parser.add_argument('-o', '--output-dir', type=str,
                        default=os.path.join(data_dir, 'results/output_render'))  # output directory
    args = parser.parse_args()

    # Other settings.
    camera_distance = 2.732  # camera distance
    elevation = 30           # elevation angle
    azimuth = 0              # azimuth angle

    # Load the model from a Wavefront .obj file.
    mesh = jr.Mesh.from_obj(args.filename_input, load_texture=True, texture_res=5,
                            texture_type='surface', dr_type='softras')

    # Create the renderer (SoftRas).
    renderer = jr.Renderer(dr_type='softras')

    os.makedirs(args.output_dir, exist_ok=True)

    # Draw the object from different viewpoints.
    loop = tqdm.tqdm(list(range(0, 360, 4)))  # view-angle loop
    writer = imageio.get_writer(os.path.join(args.output_dir, 'rotation.gif'), mode='I')  # create gif file
    for num, azimuth in enumerate(loop):
        mesh.reset_()  # reset the model to its initial state
        loop.set_description('Drawing rotation')
        renderer.transform.set_eyes_from_angles(camera_distance, elevation, azimuth)  # set camera position/angles
        rgb = renderer.render_mesh(mesh, mode='rgb')  # render the model
        image = rgb.numpy()[0].transpose((1, 2, 0))  # transpose image channels (CHW -> HWC)
        writer.append_data((255 * image).astype(np.uint8))  # append frame to the gif
    writer.close()

    # Draw the object with different sigma and gamma values.
    loop = tqdm.tqdm(list(np.arange(-4, -2, 0.2)))  # blur-sweep loop
    renderer.transform.set_eyes_from_angles(camera_distance, elevation, 45)  # set camera position/angles
    writer = imageio.get_writer(os.path.join(args.output_dir, 'bluring.gif'), mode='I')  # create gif file
    for num, gamma_pow in enumerate(loop):
        mesh.reset_()  # reset the model to its initial state
        renderer.set_gamma(10 ** gamma_pow)        # set gamma
        renderer.set_sigma(10 ** (gamma_pow - 1))  # set sigma
        loop.set_description('Drawing blurring')
        images = renderer.render_mesh(mesh, mode='rgb')  # render the model
        image = images.numpy()[0].transpose((1, 2, 0))  # [image_size, image_size, RGB]
        writer.append_data((255 * image).astype(np.uint8))  # append frame to the gif
    writer.close()

    # Save the textured mesh back to an .obj file.
    mesh.reset_()  # reset the model to its initial state
    mesh.save_obj(os.path.join(args.output_dir, 'saved_spot.obj'))  # save the model


if __name__ == '__main__':
    main()
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值