Python numpy.transpose() Usage Examples

This post shows how to use the numpy.transpose() function in Python, covering its use in image preprocessing, data transformation, and neural networks: resizing images, converting color channels, normalizing data, transposing tensors, and so on.

The following code examples show how to use numpy.transpose(). They are extracted from open source Python projects.
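Before the project examples, here is a minimal sketch of how the `axes` argument works (array contents are arbitrary and chosen only for illustration):

```python
import numpy as np

a = np.arange(24).reshape(2, 3, 4)  # a 3-D array with shape (2, 3, 4)

# transpose with an explicit axes tuple: move the last axis to the front
b = np.transpose(a, (2, 0, 1))
print(b.shape)                      # (4, 2, 3)

# with no axes argument, all axes are reversed
print(np.transpose(a).shape)        # (4, 3, 2)

# np.transpose returns a view, so no data is copied
print(b.base is a)                  # True
```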

Example 1

```python
import cv2
import numpy as np
from mxnet import nd  # 'nd' is MXNet's ndarray module in the source project

def preprocess(image):
    """Takes an image and applies preprocessing."""
    # Resize to the network input size (data_shape is defined elsewhere in the project)
    image = cv2.resize(image, (data_shape, data_shape))
    # Convert BGR to RGB
    image = image[:, :, (2, 1, 0)]
    # Convert to float before subtracting the mean
    image = image.astype(np.float32)
    # Subtract the per-channel mean
    image -= np.array([123, 117, 104])
    # Reorder HWC -> CHW, then add a batch axis: [batch, channel, height, width]
    image = np.transpose(image, (2, 0, 1))
    image = image[np.newaxis, :]
    # Convert to an MXNet ndarray
    image = nd.array(image)
    return image
```
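A quick shape check for the function above (hypothetical values; `data_shape` is a module-level setting in the source project and is assumed to be 512 here):

```python
data_shape = 512
dummy = np.zeros((300, 400, 3), dtype=np.uint8)  # an arbitrary HWC BGR image
out = preprocess(dummy)
print(out.shape)  # (1, 3, 512, 512): batch, channel, height, width
```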

Example 2

```python
# Method of a segmentation dataset class; 'm' is likely scipy.misc, and
# self.mean / self.img_size are set in the class's __init__.
def transform(self, img, lbl):
    # BGR -> RGB: reverse the channel axis
    img = img[:, :, ::-1]
    img = img.astype(np.float64)
    img -= self.mean
    img = m.imresize(img, (self.img_size[0], self.img_size[1]))
    # Resize scales images from 0 to 255, thus we need to divide by 255.0
    img = img.astype(float) / 255.0
    # HWC -> CHW
    img = img.transpose(2, 0, 1)
    # Map the ignore label 255 to class 0
    lbl[lbl == 255] = 0
    lbl = lbl.astype(float)
    lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
    lbl = lbl.astype(int)
    img = torch.from_numpy(img).float()
    lbl = torch.from_numpy(lbl).long()
    return img, lbl
```
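Note that `scipy.misc.imresize` (the `m.imresize` above) was deprecated in SciPy 1.0 and removed in SciPy 1.3, so this snippet only runs on older SciPy versions. A rough modern substitute, sketched here with OpenCV (nearest-neighbor for the label map so class ids are not interpolated):

```python
import cv2
import numpy as np

img = np.random.rand(300, 400, 3).astype(np.float32)         # placeholder image
lbl = np.random.randint(0, 21, (300, 400)).astype(np.uint8)  # placeholder labels

# cv2.resize takes (width, height)
img_r = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
lbl_r = cv2.resize(lbl, (512, 512), interpolation=cv2.INTER_NEAREST)
print(img_r.shape, lbl_r.shape)  # (512, 512, 3) (512, 512)
```

Unlike imresize, cv2.resize does not rescale values to the 0..255 range, so the `/ 255.0` step above would change accordingly.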

Example 3

```python
import numpy as np

def backPropagate(Z1, Z2, y, W2, b2):
    ## YOUR CODE HERE ##
    E2 = 0
    E1 = 0
    Eb1 = 0
    # E2 is the error in the output layer: subtract the actual output from
    # the estimated value. There are 5 errors because the output layer has
    # 5 nodes.
    E2 = Z2 - y
    ## E1 is the error in the hidden layer, found from the output-layer error
    ## and the weights between the output and hidden layers. There are 30
    ## errors because the hidden layer has 30 nodes.
    E1 = np.dot(W2, np.transpose(E2))
    ## Eb1 is the bias error for the hidden layer, found from the output-layer
    ## error and the bias weights. There is 1 error because the hidden layer
    ## has 1 bias node.
    Eb1 = np.dot(b2, np.transpose(E2))
    ####################
    return E2, E1, Eb1

# calculate the gradients for weights between units and the bias weights
```

Example 4

```python
import cv2
import numpy as np

def format_img(img, C):
    # C is the project's config object, carrying im_size, img_channel_mean
    # and img_scaling_factor.
    img_min_side = float(C.im_size)
    (height, width, _) = img.shape
    # Scale so that the shorter side equals img_min_side
    if width <= height:
        f = img_min_side / width
        new_height = int(f * height)
        new_width = int(img_min_side)
    else:
        f = img_min_side / height
        new_width = int(f * width)
        new_height = int(img_min_side)
    fx = width / float(new_width)
    fy = height / float(new_height)
    img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
    # BGR -> RGB
    img = img[:, :, (2, 1, 0)]
    img = img.astype(np.float32)
    # Subtract the per-channel mean, then rescale
    img[:, :, 0] -= C.img_channel_mean[0]
    img[:, :, 1] -= C.img_channel_mean[1]
    img[:, :, 2] -= C.img_channel_mean[2]
    img /= C.img_scaling_factor
    # HWC -> CHW, then add a batch axis
    img = np.transpose(img, (2, 0, 1))
    img = np.expand_dims(img, axis=0)
    return img, fx, fy
```
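A hedged usage sketch; the config object below is a stand-in for the project's real `C`, and the field values are typical VGG-style settings rather than values taken from the source:

```python
import types
import numpy as np

C = types.SimpleNamespace(
    im_size=600,                                  # shorter side after resizing
    img_channel_mean=[103.939, 116.779, 123.68],  # per-channel means (assumed)
    img_scaling_factor=1.0,
)
img = np.zeros((480, 640, 3), dtype=np.uint8)
out, fx, fy = format_img(img, C)
print(out.shape)  # (1, 3, 600, 800): shorter side scaled to 600
print(fx, fy)     # 0.8 0.8: ratios mapping resized coords back to the original
```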

Example 5

```python
# Same dataset-class method pattern as Example 2, with label encoding and a
# sanity check that resizing preserves the set of class ids.
def transform(self, img, lbl):
    # BGR -> RGB: reverse the channel axis
    img = img[:, :, ::-1]
    img = img.astype(np.float64)
    img -= self.mean
    img = m.imresize(img, (self.img_size[0], self.img_size[1]))
    # Resize scales images from 0 to 255, thus we need to divide by 255.0
    img = img.astype(float) / 255.0
    # HWC -> CHW
    img = img.transpose(2, 0, 1)
    lbl = self.encode_segmap(lbl)
    classes = np.unique(lbl)
    lbl = lbl.astype(float)
    lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
    lbl = lbl.astype(int)
    # Nearest-neighbor resizing must not add or drop any class ids
    assert(np.all(classes == np.unique(lbl)))
    img = torch.from_numpy(img).float()
    lbl = torch.from_numpy(lbl).long()
    return img, lbl
```

Example 6

```python
import numpy as np

def calcGrads(X, Z1, Z2, E1, E2, Eb1):
    ## YOUR CODE HERE ##
    d_W1 = 0
    d_b1 = 0
    d_W2 = 0
    d_b2 = 0
    ## Each gradient is the product of an error term and the activations
    ## feeding into the corresponding weights.
    # d_W2 is the gradient for the weights between the hidden and output layers.
    d_W2 = np.dot(np.transpose(E2), Z1)
    # d_W1 is the gradient for the weights between the input and hidden layers.
    d_W1 = np.dot(E1, X)
    # d_b2 is the gradient for the weights between the hidden-layer bias and the output layer.
    d_b2 = np.dot(np.transpose(E2), Eb1)
    # d_b1 is the gradient for the weights between the input bias and the hidden layer.
    d_b1 = np.dot(np.transpose(E1), 1)
    ####################
    return d_W1, d_W2, d_b1, d_b2

# update the weights between units and the bias weights using a learning rate of alpha
```

Example 7

```python
import numpy as np

def updateWeights(W1, b1, W2, b2, alpha, d_W1, d_W2, d_b1, d_b2):
    ## YOUR CODE HERE ##
    # W1 = 0
    # b1 = 0
    # W2 = 0
    # b2 = 0
    ## Update each weight matrix using the gradients from calcGrads.
    ## W1 holds the weights between the input and hidden layers.
    W1 = W1 - alpha * (np.transpose(d_W1))  # 400*30
    ## W2 holds the weights between the hidden and output layers.
    W2 = W2 - alpha * (np.transpose(d_W2))  # 30*5
    ## b1 holds the weights between the input bias and the hidden layer.
    b1 = b1 - alpha * d_b1
    ## b2 holds the weights between the hidden-layer bias and the output layer.
    b2 = b2 - alpha * (np.transpose(d_b2))
    ####################
    return W1, b1, W2, b2
```

Example 8

```python
import numpy as np

def make_heatmaps_from_joints(input_size, heatmap_size, gaussian_variance, batch_joints):
    # Generate ground-truth heatmaps from ground-truth 2D joints;
    # make_gaussian is defined elsewhere in the source project.
    scale_factor = input_size // heatmap_size
    batch_gt_heatmap_np = []
    for i in range(batch_joints.shape[0]):
        gt_heatmap_np = []
        invert_heatmap_np = np.ones(shape=(heatmap_size, heatmap_size))
        for j in range(batch_joints.shape[1]):
            cur_joint_heatmap = make_gaussian(heatmap_size,
                                              gaussian_variance,
                                              center=(batch_joints[i][j] // scale_factor))
            gt_heatmap_np.append(cur_joint_heatmap)
            # The background channel is one minus the sum of all joint heatmaps
            invert_heatmap_np -= cur_joint_heatmap
        gt_heatmap_np.append(invert_heatmap_np)
        batch_gt_heatmap_np.append(gt_heatmap_np)
    batch_gt_heatmap_np = np.asarray(batch_gt_heatmap_np)
    # (batch, joints+1, height, width) -> (batch, height, width, joints+1)
    batch_gt_heatmap_np = np.transpose(batch_gt_heatmap_np, (0, 2, 3, 1))
    return batch_gt_heatmap_np
```
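A hedged shape check; `make_gaussian` lives elsewhere in the source project, so a trivial stand-in is used here:

```python
import numpy as np

def make_gaussian(size, variance, center):
    # Stand-in only: the real make_gaussian renders a 2-D Gaussian blob
    # of the given variance centered at `center`.
    return np.zeros((size, size))

batch_joints = np.zeros((4, 14, 2))  # 4 images, 14 joints, (x, y) per joint
heatmaps = make_heatmaps_from_joints(368, 46, 1.0, batch_joints)
print(heatmaps.shape)  # (4, 46, 46, 15): 14 joint channels + 1 background,
                       # stacked as NCHW and then transposed to NHWC
```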

Example 9

```python
import numpy
import tables  # PyTables

def af_h5_to_np(input_path, outpath):
    files = tables.open_file(input_path, mode='r+')
    speaker_nodes = files.root._f_list_nodes()
    for spk in speaker_nodes:
        file_nodes = spk._f_list_nodes()
        for fls in file_nodes:
            file_name = fls._v_name
            af_nodes = fls._f_list_nodes()
            af_list = []
            for fts in af_nodes:
                features = fts[:]
                # Mean of each feature row; the transpose lets broadcasting
                # divide every frame by the per-feature means.
                mean = numpy.mean(features, 1)
                normalised_feats = list(numpy.transpose(features) / mean)
                af_list += normalised_feats
            numpy.save(outpath + file_name, numpy.array(af_list))
```
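The transpose is what makes the broadcast work: `features` is (n_features, n_frames) and `mean` is (n_features,), so dividing the (n_frames, n_features) transpose by `mean` scales each feature by its own mean. A minimal sketch with made-up shapes:

```python
import numpy as np

features = np.random.rand(5, 100) + 0.5     # 5 features x 100 frames (arbitrary)
mean = np.mean(features, 1)                 # shape (5,): one mean per feature
normalised = np.transpose(features) / mean  # (100, 5) / (5,) broadcasts per column
print(normalised.shape)         # (100, 5)
print(normalised.mean(axis=0))  # each column now has mean 1.0
```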

Example 10

```python
import warnings
import numpy as np
from numpy.linalg import LinAlgError  # scipy.linalg raises this same class
from scipy import linalg
from scipy.stats import chi2

def mahalanobis_distance(difference, num_random_features):
    num_samples, _ = np.shape(difference)
    # np.cov expects variables in rows, so transpose the (samples, features) array
    sigma = np.cov(np.transpose(difference))
    mu = np.mean(difference, 0)
    if num_random_features == 1:
        stat = float(num_samples * mu ** 2) / float(sigma)
    else:
        try:
            linalg.inv(sigma)
        except LinAlgError:
            print('covariance matrix is singular. Pvalue returned is 1.1')
            warnings.warn('covariance matrix is singular. Pvalue returned is 1.1')
            return 0
        stat = num_samples * mu.dot(linalg.solve(sigma, np.transpose(mu)))
    return chi2.sf(stat, num_random_features)
```
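A hedged usage sketch with synthetic data (dimensions are arbitrary):

```python
import numpy as np

rng = np.random.default_rng(0)
difference = rng.normal(size=(200, 10))  # 200 samples of a 10-D difference
p_value = mahalanobis_distance(difference, num_random_features=10)
print(p_value)  # expected to be large, since the true mean difference is 0
```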

Example 11

```python
def sumIntensitiesMeme(
    self,
    t,
    m,
    node_vec,
    etimes,
    filterlatertimes=True,
):
    if filterlatertimes:
        ...  # the rest of this example is truncated in the source
```
