自动生成图像标签代码

  1.   %%  
        %图片保存路径为:  
        %E:\image\car  
        %E:\image\person  
        %car和person是保存车和行人的文件夹  
        %这些文件夹还可以有多个,  
        %放在image文件夹里就行  
        %该代码的作用是将图片名字改成000123.jpg这种形式  
        %%  
        %clc;  
       % clear;  
          
       maindir='/home/cjj/downloads/re/re/';  
       %   maindir='/media/cjj/9EB23D0DB23CEB81/re/re/';
        name_long=5; %图片名字的长度,如000123.jpg为6,最多9位,可修改  
        num_begin=1; %图像命名开始的数字如000123.jpg开始的话就是123  
          
        subdir = dir(maindir);  
        n=1;  
          
        for i = 1:length(subdir)  
          if ~strcmp(subdir(i).name ,'.') && ~strcmp(subdir(i).name,'..')  
             subsubdir = dir(strcat(maindir,subdir(i).name));  
            for j=1:length(subsubdir)  
                 if ~strcmp(subsubdir(j).name ,'.') && ~strcmp(subsubdir(j).name,'..')  
                    img=imread([maindir,subdir(i).name,'/',subsubdir(j).name]);  
                    imshow(img);  
                    str=num2str(num_begin,'%09d');  
                    newname=strcat(str,'.jpg');  
                    newname=newname(end-(name_long+3):end);  
                    system(['rename ' [maindir,subdir(i).name,'/',subsubdir(j).name] ' ' newname]);  
                    num_begin=num_begin+1;  
                    fprintf('当前处理文件夹%s',subdir(i).name);  
                    fprintf('已经处理%d张图片\n',n);  
                    n=n+1;  
                   pause(0.1);%可以将暂停去掉  
                 end  
            end  
          end  
        end 

  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
# ACGAN (Auxiliary Classifier GAN) combines a classifier with a generator so
# that images with specific attributes can be generated. Below is a
# conditional-GAN-style anime-face training script (reconstructed from a
# flattened article dump) plus a pointer to a suitable dataset.
#
# Dataset: https://www.kaggle.com/splcher/animefacedataset
# After downloading and unpacking, put the face images into one folder,
# e.g. "anime_faces". Image sizes in the dataset vary; the code resizes
# everything to 64x64 (adjust as needed).

import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers

# hyper-parameters
latent_dim = 100
num_classes = 10
width = 64
height = 64
channels = 3
batch_size = 64
epochs = 50
img_dir = "anime_faces"


def load_data():
    """Load every image in img_dir, resized to (width, height), scaled to [-1, 1].

    The generator ends in tanh, whose range is [-1, 1]; real images must be
    scaled to the same range (the original scaled to [0, 1], a mismatch).
    """
    images = []
    for filename in os.listdir(img_dir):
        img = cv2.imread(os.path.join(img_dir, filename))
        img = cv2.resize(img, (width, height))
        images.append(img)
    return np.array(images, dtype="float32") / 127.5 - 1.0


def build_generator():
    """Map a (latent_dim + num_classes) vector to a (height, width, channels) image."""
    model = tf.keras.Sequential()
    model.add(layers.Dense(4 * 4 * 256, use_bias=False,
                           input_shape=(latent_dim + num_classes,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((4, 4, 256)))
    assert model.output_shape == (None, 4, 4, 256)  # note: batch size is unconstrained

    # Four stride-2 upsampling stages: 4 -> 8 -> 16 -> 32 -> 64.
    # (The original had only three, so its final 64x64 shape assert could
    # never hold: 4 * 2**3 == 32.)
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(2, 2),
                                     padding='same', use_bias=False))
    assert model.output_shape == (None, 8, 8, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2),
                                     padding='same', use_bias=False))
    assert model.output_shape == (None, 16, 16, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(32, (5, 5), strides=(2, 2),
                                     padding='same', use_bias=False))
    assert model.output_shape == (None, 32, 32, 32)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(channels, (5, 5), strides=(2, 2),
                                     padding='same', use_bias=False,
                                     activation='tanh'))
    assert model.output_shape == (None, height, width, channels)
    return model


def build_discriminator():
    """Classify a label-conditioned image stack as real (1) or fake (0) logits."""
    model = tf.keras.Sequential()
    # The input carries the image channels plus num_classes broadcast
    # label maps (see train_step).
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                            input_shape=[height, width, channels + num_classes]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))  # single real/fake logit
    return model


# build the models
generator = build_generator()
discriminator = build_discriminator()

# optimizers
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

# loss: logits in, so from_logits=True
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)


@tf.function
def train_step(images, labels):
    """Run one adversarial update; return (generator_loss, discriminator_loss).

    images: (batch_size, height, width, channels) tensor in [-1, 1]
    labels: (batch_size, num_classes) one-hot tensor
    """
    noise = tf.random.normal([batch_size, latent_dim])
    # condition the generator by appending the one-hot label to the noise
    noise = tf.concat([noise, labels], axis=1)

    # Broadcast the one-hot labels to per-pixel feature maps so they can be
    # concatenated with (batch, h, w, c) images on the channel axis.
    # (The original concatenated the raw (batch, num_classes) tensor along
    # axis 3 — a shape error.)
    label_maps = tf.reshape(labels, [-1, 1, 1, num_classes])
    label_maps = tf.tile(label_maps, [1, height, width, 1])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)

        real_targets = tf.ones((batch_size, 1))
        fake_targets = tf.zeros((batch_size, 1))

        real_and_labels = tf.concat([images, label_maps], axis=3)
        fake_and_labels = tf.concat([generated_images, label_maps], axis=3)

        real_discrimination = discriminator(real_and_labels, training=True)
        fake_discrimination = discriminator(fake_and_labels, training=True)

        # discriminator: real images -> 1, generated images -> 0
        discriminator_loss = (cross_entropy(real_targets, real_discrimination)
                              + cross_entropy(fake_targets, fake_discrimination))

        # Non-saturating generator loss: make the discriminator call fakes real.
        # (The original applied categorical cross-entropy between the
        # (batch, 10) labels and the (batch, 1) real/fake logit — a shape and
        # semantics error. NOTE(review): a true ACGAN additionally needs an
        # auxiliary class-logit head on the discriminator.)
        generator_loss = cross_entropy(real_targets, fake_discrimination)

    generator_gradients = gen_tape.gradient(
        generator_loss, generator.trainable_variables)
    discriminator_gradients = disc_tape.gradient(
        discriminator_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(
        zip(generator_gradients, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(
        zip(discriminator_gradients, discriminator.trainable_variables))
    return generator_loss, discriminator_loss


def train(dataset, epochs):
    """Train for `epochs` passes over `dataset`, logging losses every 10 steps."""
    for epoch in range(epochs):
        for i in range(dataset.shape[0] // batch_size):
            images = dataset[i * batch_size:(i + 1) * batch_size]
            # The anime-face dataset is unlabeled, so random one-hot labels
            # are drawn (as in the original article).
            labels = tf.one_hot(np.random.randint(0, num_classes, batch_size),
                                depth=num_classes)
            generator_loss, discriminator_loss = train_step(images, labels)
            if i % 10 == 0:
                print("Epoch: %d, Iteration: %d, Generator Loss: %f, "
                      "Discriminator Loss: %f"
                      % (epoch, i, generator_loss, discriminator_loss))


def generate_images(num_images):
    """Sample `num_images` faces with random labels and show them in a 4x4 grid."""
    noise = tf.random.normal([num_images, latent_dim])
    labels = tf.one_hot(np.random.randint(0, num_classes, num_images),
                        depth=num_classes)
    noise = tf.concat([noise, labels], axis=1)
    generated_images = generator(noise, training=False)
    plt.figure(figsize=(4, 4))
    for i in range(num_images):
        plt.subplot(4, 4, i + 1)
        # tanh output is in [-1, 1]; rescale to [0, 1] for display.
        # NOTE(review): cv2 loads BGR — channels may appear swapped vs. RGB.
        plt.imshow((generated_images[i, :, :, :] + 1) / 2)
        plt.axis('off')
    plt.show()


if __name__ == "__main__":
    dataset = load_data()
    train(dataset, epochs)
    generate_images(10)

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值