Python numpy.dot() Usage Examples

This article walks through several real-world examples of numpy's dot() function, covering matrix multiplication, PCA dimensionality reduction, backpropagation, and other application scenarios, to help readers build a thorough understanding of numpy.dot().
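Before the project examples, a quick refresher sketch (not taken from any of the projects below) of the three most common np.dot cases:

```python
import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([4.0, 5.0, 6.0])
M = np.arange(6.0).reshape(2, 3)

# 1-D x 1-D: inner product of vectors -> scalar
print(np.dot(a, b))    # 32.0

# 2-D x 1-D: matrix-vector product -> 1-D array
print(np.dot(M, a))    # [ 8. 26.]

# 2-D x 2-D: matrix multiplication -> 2-D array
print(np.dot(M, M.T))  # shape (2, 2)
```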

Example 1

```python
import numpy as np
import pandas as pd

def rhoA(self):
    # rhoA
    rhoA = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
    for i in range(self.lenlatent):
        weights = pd.DataFrame(self.outer_weights[self.latent[i]])
        weights = weights[(weights.T != 0).any()]
        result = pd.DataFrame.dot(weights.T, weights)
        result_ = pd.DataFrame.dot(weights, weights.T)
        S = self.data_[self.Variables['measurement'][
            self.Variables['latent'] == self.latent[i]]]
        S = pd.DataFrame.dot(S.T, S) / S.shape[0]
        numerador = (
            np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
        denominador = (
            np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights))
        rhoA_ = ((result) ** 2) * (numerador / denominador)
        if np.isnan(rhoA_.values):
            rhoA[self.latent[i]] = 1
        else:
            rhoA[self.latent[i]] = rhoA_.values
    return rhoA.T
```
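A side note on the pd.DataFrame.dot(weights.T, weights) calls above: this is just the unbound form of the bound method call weights.T.dot(weights), which, unlike np.dot, aligns on index/column labels. A tiny sketch with made-up weights:

```python
import pandas as pd

w = pd.DataFrame([[0.7], [0.3]], index=['x1', 'x2'], columns=['LV'])

# The unbound-method form used in the example ...
print(pd.DataFrame.dot(w.T, w))  # 1x1 DataFrame containing 0.58

# ... is identical to the usual bound call
print(w.T.dot(w))
```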

Example 2

```python
import numpy as np

def PCA(data, num_components=None):
    # mean center the data (note: modifies `data` in place)
    data -= data.mean(axis=0)
    # calculate the covariance matrix
    R = np.cov(data, rowvar=False)
    # calculate eigenvectors & eigenvalues of the covariance matrix;
    # use 'eigh' rather than 'eig' since R is symmetric --
    # the performance gain is substantial
    V, E = np.linalg.eigh(R)
    # sort eigenvalues in decreasing order
    idx = np.argsort(V)[::-1]
    # sort eigenvectors according to the same index
    E = E[:, idx]
    V = V[idx]
    # select the first n eigenvectors (n is the desired dimension
    # of the rescaled data array)
    E = E[:, :num_components]
    # carry out the transformation on the data using the eigenvectors
    # and return the rescaled data, eigenvalues, and eigenvectors
    return np.dot(E.T, data.T).T, V, E
```
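A minimal usage sketch with made-up data (remember this PCA centers the caller's array in place):

```python
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(100, 5))       # 100 samples, 5 features

reduced, eigvals, eigvecs = PCA(data, num_components=2)
print(reduced.shape)   # (100, 2) -- data projected onto 2 components
print(eigvals.shape)   # (5,)     -- all eigenvalues, sorted descending
print(eigvecs.shape)   # (5, 2)   -- the two leading eigenvectors
```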

Example 3

```python
import numpy as np

def backPropagate(Z1, Z2, y, W2, b2):
    ## YOUR CODE HERE ##
    E2 = 0
    E1 = 0
    Eb1 = 0
    # E2 is the error in the output layer. To find it we subtract the
    # actual output from the estimated value.
    # There are 5 errors because there are 5 nodes in the output layer.
    E2 = Z2 - y
    ## E1 is the error in the hidden layer. To find it we use the error from the
    ## output layer and the weights between the hidden and output layers.
    ## There are 30 errors because there are 30 nodes in the hidden layer.
    E1 = np.dot(W2, np.transpose(E2))
    ## Eb1 is the bias error for the hidden layer. To find it we use the error from
    ## the output layer and the weights between the output and bias layers.
    ## There is 1 error because there is 1 bias node in the hidden layer.
    Eb1 = np.dot(b2, np.transpose(E2))
    ####################
    return E2, E1, Eb1

# calculate the gradients for weights between units and the bias weights
```
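A quick shape check with dummy arrays, using the sizes mentioned in the comments (5 output nodes, 30 hidden nodes), shows how np.dot propagates the error backward:

```python
import numpy as np

Z2 = np.random.rand(1, 5)    # estimated output: 5 output nodes
y  = np.random.rand(1, 5)    # target output
W2 = np.random.rand(30, 5)   # hidden-to-output weights: 30 hidden nodes
b2 = np.random.rand(1, 5)    # output-layer bias weights

E2, E1, Eb1 = backPropagate(None, Z2, y, W2, b2)   # Z1 is unused here
print(E2.shape)    # (1, 5)  -- one error per output node
print(E1.shape)    # (30, 1) -- one error per hidden node
print(Eb1.shape)   # (1, 1)  -- one error for the bias node
```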

Example 4

```python
import numpy as np

def get_nodal_differentiation_matrix(order,
                                     s2c=None, c2s=None,
                                     Dmodal=None):
    """
    Returns the differentiation matrix for the first derivative
    in the nodal basis.

    It goes without saying that this differentiation matrix is for the
    reference cell.
    """
    if Dmodal is None:
        Dmodal = get_modal_differentiation_matrix(order)
    if s2c is None or c2s is None:
        s2c, c2s = get_vandermonde_matrices(order)
    return np.dot(s2c, np.dot(Dmodal, c2s))

# ======================================================================
# Operators Outside Reference Cell
# ======================================================================
```
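The nested np.dot call composes the three matrix products right to left. For longer chains, np.linalg.multi_dot picks the cheapest multiplication order automatically; a quick equivalence check with random stand-in matrices:

```python
import numpy as np

# Random 4x4 matrices standing in for s2c, Dmodal, and c2s
s2c, Dmodal, c2s = (np.random.rand(4, 4) for _ in range(3))

assert np.allclose(np.dot(s2c, np.dot(Dmodal, c2s)),
                   np.linalg.multi_dot([s2c, Dmodal, c2s]))
```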

Example 5

```python
import numpy as np

def differentiate(self, grid_func, orderx, ordery):
    """Given a grid function defined on the collocation points,
    differentiate it up to the appropriate order in each direction.
    """
    assert type(orderx) is int
    assert type(ordery) is int
    assert orderx >= 0
    assert ordery >= 0
    if orderx > 0:
        df = np.dot(self.stencil_x.PD, grid_func)
        return self.differentiate(df, orderx - 1, ordery)
    if ordery > 0:
        df = np.dot(grid_func, self.stencil_y.PD.transpose())
        return self.differentiate(df, orderx, ordery - 1)
    # if orderx == 0 and ordery == 0:
    return grid_func
```
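The two np.dot calls apply a one-dimensional stencil along different axes: multiplying from the left acts down axis 0 (the x direction), while multiplying from the right by the transpose acts along axis 1 (the y direction). A self-contained sketch with a toy difference matrix standing in for self.stencil_x.PD:

```python
import numpy as np

# Toy first-difference matrix (hypothetical stand-in for stencil.PD)
D = np.array([[-1.0,  1.0, 0.0],
              [ 0.0, -1.0, 1.0],
              [ 0.0,  0.0, 0.0]])
F = np.arange(9.0).reshape(3, 3)   # grid function on a 3x3 grid

dF_dx = np.dot(D, F)      # acts on axis 0
dF_dy = np.dot(F, D.T)    # acts on axis 1
print(dF_dx)
print(dF_dy)
```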

Example 6

```python
import numpy as np
from scipy.linalg import svd
from sklearn.utils import check_random_state

def fit(self, graphs, y=None):
    rnd = check_random_state(self.random_state)
    n_samples = len(graphs)
    # get basis vectors
    if self.n_components > n_samples:
        n_components = n_samples
    else:
        n_components = self.n_components
    n_components = min(n_samples, n_components)
    inds = rnd.permutation(n_samples)
    basis_inds = inds[:n_components]
    basis = []
    for ind in basis_inds:
        basis.append(graphs[ind])
    basis_kernel = self.kernel(basis, basis, **self._get_kernel_params())
    # sqrt of kernel matrix on basis vectors
    U, S, V = svd(basis_kernel)
    S = np.maximum(S, 1e-12)
    self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
    self.components_ = basis
    self.component_indices_ = inds
    return self
```
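The expression np.dot(U * 1. / np.sqrt(S), V) assembles the inverse square root of the (symmetric, positive semi-definite) basis kernel from its SVD: it scales the columns of U by 1/sqrt(S) and multiplies by V. A quick numerical check on a random positive-definite matrix:

```python
import numpy as np

A = np.random.rand(5, 5)
K = np.dot(A, A.T) + np.eye(5)     # symmetric positive definite

U, S, Vh = np.linalg.svd(K)
K_inv_sqrt = np.dot(U * 1. / np.sqrt(S), Vh)

# K^(-1/2) @ K @ K^(-1/2) should recover the identity
I = np.dot(np.dot(K_inv_sqrt, K), K_inv_sqrt)
print(np.allclose(I, np.eye(5)))   # True (up to round-off)
```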

Example 7

```python
import numpy as np
import scipy.stats

def _ikf_iteration(self, x, n, ranges, h, H, z, estimate, R):
    """Update tracker based on a multi-range message.

    Args:
        multi_range_msg (uwb.msg.UWBMultiRangeWithOffsets): ROS multi-range message.

    Returns:
        new_estimate (StateEstimate): Updated position estimate.
    """
    new_position = n[0:3]
    self._compute_measurements_and_jacobians(ranges, new_position, h, H, z)
    res = z - h
    S = np.dot(np.dot(H, estimate.covariance), H.T) + R
    K = np.dot(estimate.covariance, self._solve_equation_least_squares(S.T, H).T)
    mahalanobis = np.sqrt(np.dot(self._solve_equation_least_squares(S.T, res).T, res))
    if res.size not in self.outlier_thresholds:
        self.outlier_thresholds[res.size] = scipy.stats.chi2.isf(
            self.outlier_threshold_quantile, res.size)
    outlier_threshold = self.outlier_thresholds[res.size]
    if mahalanobis < outlier_threshold:
        n = x + np.dot(K, (res - np.dot(H, x - n)))
        outlier_flag = False
    else:
        outlier_flag = True
    return n, K, outlier_flag
```
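The S and K lines are the textbook (extended) Kalman filter update: innovation covariance S = H·P·Hᵀ + R and gain K = P·Hᵀ·S⁻¹, with the explicit inverse replaced by a least-squares solve. For reference, a plain sketch with an explicit inverse (fine for small, well-conditioned S; the solve-based form above is numerically preferable):

```python
import numpy as np

def kalman_gain(P, H, R):
    S = np.dot(np.dot(H, P), H.T) + R             # innovation covariance
    K = np.dot(np.dot(P, H.T), np.linalg.inv(S))  # Kalman gain
    return S, K

P = np.eye(3) * 0.1        # state covariance (3 position states)
H = np.random.rand(4, 3)   # measurement Jacobian (4 range measurements)
R = np.eye(4) * 0.01       # measurement noise covariance
S, K = kalman_gain(P, H, R)
print(K.shape)             # (3, 4)
```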

Example 8

```python
import numpy as np

def normalized_distance(_a, _b):
    """Compute normalized distance between two points.

    Computes 1 - a * b / ( ||a|| * ||b|| )

    Args:
        _a (numpy.ndarray): array of size m
        _b (numpy.ndarray): array of size m

    Returns:
        normalized distance between signatures (float)

    Examples:
        >>> a = gis.generate_signature('https://upload.wikimedia.org/wikipedia/commons/thumb/e/ec/Mona_Lisa,_by_Leonardo_da_Vinci,_from_C2RMF_retouched.jpg/687px-Mona_Lisa,_by_Leonardo_da_Vinci,_from_C2RMF_retouched.jpg')
        >>> b = gis.generate_signature('https://pixabay.com/static/uploads/photo/2012/11/28/08/56/mona-lisa-67506_960_720.jpg')
        >>> gis.normalized_distance(a, b)
        0.0332806110382
    """
    # Note: the docstring describes the distance 1 - cos(a, b), but the active
    # return statement computes the cosine similarity itself; the distance
    # form is kept commented out, as in the original source.
    # return (1.0 - np.dot(_a, _b) / (np.linalg.norm(_a) * np.linalg.norm(_b)))
    return np.dot(_a, _b) / (np.linalg.norm(_a) * np.linalg.norm(_b))
```
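A small sanity check of the cosine formula with hand-checkable vectors:

```python
import numpy as np

a = np.array([1.0, 0.0])
b = np.array([1.0, 1.0])

cos_sim = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
print(cos_sim)        # 0.7071... (cos 45 degrees)
print(1.0 - cos_sim)  # 0.2928... -- the distance form from the docstring
```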

Example 9

```python
def observed_perplexity(self, counts):
    """Compute perplexity = exp(entropy) of observed variables.

    Perplexity is an information-theoretic measure of the number of
    clusters or latent classes. Perplexity is a real number in the range
    [1, M], where M is model_num_clusters.

    Args:
        counts: A [V]-shap
```

(The source cuts this example off mid-docstring.)
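The docstring already states the key relationship, perplexity = exp(entropy). A minimal, self-contained sketch of that computation (an illustration, not the library's implementation), using np.dot as a weighted sum:

```python
import numpy as np

def perplexity(counts):
    """exp(entropy) of a discrete distribution given by counts."""
    p = np.asarray(counts, dtype=float)
    p = p / p.sum()                    # normalize counts to probabilities
    nz = p[p > 0]                      # 0 * log(0) is taken as 0
    entropy = -np.dot(nz, np.log(nz))  # np.dot as a weighted sum
    return np.exp(entropy)

print(perplexity([1, 1, 1, 1]))   # 4.0 -- uniform over 4 classes
print(perplexity([1, 0, 0, 0]))   # 1.0 -- a single certain class
```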

To close, here is an annotated three-layer neural network that uses numpy.dot throughout:

```python
import numpy
import scipy.special

class NeuralNetwork():
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        # Store the node counts of the input, hidden, and output layers,
        # and the learning rate
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        self.lr = learningrate
        # Randomly initialize the input-to-hidden and hidden-to-output weights,
        # drawn from a normal distribution with mean 0 and standard deviation
        # equal to the node count raised to the power -0.5
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        # Use the sigmoid function as the activation function
        self.activation_function = lambda x: scipy.special.expit(x)

    def train(self, input_list, target_list):
        # Convert the input and target lists to 2-D column vectors
        inputs = numpy.array(input_list, ndmin=2).T
        targets = numpy.array(target_list, ndmin=2).T
        # Compute the hidden layer's inputs and outputs
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        # Compute the output layer's inputs and outputs
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        # Compute the output-layer and hidden-layer errors
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Update the hidden-to-output and input-to-hidden weights
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))

    def query(self, input_list):
        # Convert the input list to a 2-D column vector
        inputs = numpy.array(input_list, ndmin=2).T
        # Compute the hidden layer's inputs and outputs
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        # Compute the output layer's inputs and outputs
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        # Return the final outputs
        return final_outputs

# Create a neural network instance
input_nodes = 3
hidden_nodes = 3
output_nodes = 3
learning_rate = 0.3
n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

# Train on the training data
training_data = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]
for data in training_data:
    n.train(data, data)

# Query with the test data
test_data = [0.2, 0.5, 0.8]
print(n.query(test_data))
```

This network implements a simple autoencoder: the training and test data consist of a few small numbers, and the network is trained to reproduce its own input.