Python numpy.dot() Usage Examples

This article walks through numpy's dot() function in detail through a series of examples, covering application scenarios such as matrix multiplication, PCA dimensionality reduction, and the backpropagation algorithm, to give readers a deeper understanding of numpy.dot().

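Before working through the examples, note that the behavior of np.dot depends on the dimensionality of its arguments. Here is a minimal, self-contained sketch (made-up arrays) of the three cases the examples below rely on:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([4.0, 5.0, 6.0])
M = np.arange(6.0).reshape(2, 3)

print(np.dot(a, b))    # 1-D with 1-D: inner product -> 32.0
print(np.dot(M, a))    # 2-D with 1-D: matrix-vector product -> [ 8. 26.]
print(np.dot(M, M.T))  # 2-D with 2-D: matrix multiplication -> 2x2 array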

Example 1

def rhoA(self):
    # rhoA
    rhoA = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
    for i in range(self.lenlatent):
        weights = pd.DataFrame(self.outer_weights[self.latent[i]])
        weights = weights[(weights.T != 0).any()]
        result = pd.DataFrame.dot(weights.T, weights)
        result_ = pd.DataFrame.dot(weights, weights.T)
        S = self.data_[self.Variables['measurement'][
            self.Variables['latent'] == self.latent[i]]]
        S = pd.DataFrame.dot(S.T, S) / S.shape[0]
        numerador = (
            np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
        denominador = (
            np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights))
        rhoA_ = ((result) ** 2) * (numerador / denominador)
        if np.isnan(rhoA_.values):
            rhoA[self.latent[i]] = 1
        else:
            rhoA[self.latent[i]] = rhoA_.values
    return rhoA.T

Example 2

def PCA(data, num_components=None):
    # mean center the data
    data -= data.mean(axis=0)
    # calculate the covariance matrix
    R = np.cov(data, rowvar=False)
    # calculate eigenvectors & eigenvalues of the covariance matrix
    # use 'eigh' rather than 'eig' since R is symmetric,
    # the performance gain is substantial
    V, E = np.linalg.eigh(R)
    # sort eigenvalues in decreasing order
    idx = np.argsort(V)[::-1]
    E = E[:, idx]
    # sort eigenvectors according to the same index
    V = V[idx]
    # select the first n eigenvectors (n is the desired dimension
    # of the rescaled data array, or dims_rescaled_data)
    E = E[:, :num_components]
    # carry out the transformation on the data using eigenvectors
    # and return the re-scaled data, eigenvalues, and eigenvectors
    return np.dot(E.T, data.T).T, V, E
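A quick, hedged sanity check of this PCA helper on synthetic data (hypothetical shapes, assuming numpy is imported as np). Note that the function mean-centers data in place, so pass a copy if the original is still needed:

data = np.random.randn(100, 5)                  # 100 samples, 5 features
reduced, eigvals, eigvecs = PCA(data.copy(), num_components=2)
print(reduced.shape)                            # (100, 2)
print(eigvals[0] >= eigvals[1])                 # eigenvalues sorted decreasing: True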

Example 3

def backPropagate(Z1, Z2, y, W2, b2):
    ## YOUR CODE HERE ##
    E2 = 0
    E1 = 0
    Eb1 = 0
    # E2 is the error in the output layer. To find it, subtract the actual
    # output from the estimated value. There are 5 errors because there are
    # 5 nodes in the output layer.
    E2 = Z2 - y
    ## E1 is the error in the hidden layer. To find it, use the error found in
    ## the output layer and the weights between the output and hidden layers.
    ## There are 30 errors because there are 30 nodes in the hidden layer.
    E1 = np.dot(W2, np.transpose(E2))
    ## Eb1 is the bias error for the hidden layer. To find it, use the error
    ## found in the output layer and the weights between the output and bias
    ## layers. There is 1 error because there is 1 bias node in the hidden layer.
    Eb1 = np.dot(b2, np.transpose(E2))
    ####################
    return E2, E1, Eb1

# calculate the gradients for weights between units and the bias weights

Example 4

def get_nodal_differentiation_matrix(order,
                                     s2c=None, c2s=None,
                                     Dmodal=None):
    """
    Returns the differentiation matrix for the first derivative
    in the nodal basis.

    It goes without saying that this differentiation matrix is for the
    reference cell.
    """
    if Dmodal is None:
        Dmodal = get_modal_differentiation_matrix(order)
    if s2c is None or c2s is None:
        s2c, c2s = get_vandermonde_matrices(order)
    return np.dot(s2c, np.dot(Dmodal, c2s))

# ======================================================================
# Operators Outside Reference Cell
# ======================================================================

Example 5

def differentiate(self, grid_func, orderx, ordery):
    """Given a grid function defined on the colocation points,
    differentiate it up to the appropriate order in each direction.
    """
    assert type(orderx) is int
    assert type(ordery) is int
    assert orderx >= 0
    assert ordery >= 0
    if orderx > 0:
        df = np.dot(self.stencil_x.PD, grid_func)
        return self.differentiate(df, orderx - 1, ordery)
    if ordery > 0:
        df = np.dot(grid_func, self.stencil_y.PD.transpose())
        return self.differentiate(df, orderx, ordery - 1)
    # if orderx == 0 and ordery == 0:
    return grid_func

Example 6

def fit(self, graphs, y=None):
    rnd = check_random_state(self.random_state)
    n_samples = len(graphs)
    # get basis vectors
    if self.n_components > n_samples:
        n_components = n_samples
    else:
        n_components = self.n_components
    n_components = min(n_samples, n_components)
    inds = rnd.permutation(n_samples)
    basis_inds = inds[:n_components]
    basis = []
    for ind in basis_inds:
        basis.append(graphs[ind])
    basis_kernel = self.kernel(basis, basis, **self._get_kernel_params())
    # sqrt of kernel matrix on basis vectors
    U, S, V = svd(basis_kernel)
    S = np.maximum(S, 1e-12)
    self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
    self.components_ = basis
    self.component_indices_ = inds
    return self

Example 7

def _ikf_iteration(self, x, n, ranges, h, H, z, estimate, R):
    """Update tracker based on a multi-range message.

    Args:
        multi_range_msg (uwb.msg.UWBMultiRangeWithOffsets): ROS multi-range message.

    Returns:
        new_estimate (StateEstimate): Updated position estimate.
    """
    new_position = n[0:3]
    self._compute_measurements_and_jacobians(ranges, new_position, h, H, z)
    res = z - h
    S = np.dot(np.dot(H, estimate.covariance), H.T) + R
    K = np.dot(estimate.covariance, self._solve_equation_least_squares(S.T, H).T)
    mahalanobis = np.sqrt(np.dot(self._solve_equation_least_squares(S.T, res).T, res))
    if res.size not in self.outlier_thresholds:
        self.outlier_thresholds[res.size] = scipy.stats.chi2.isf(
            self.outlier_threshold_quantile, res.size)
    outlier_threshold = self.outlier_thresholds[res.size]
    if mahalanobis < outlier_threshold:
        n = x + np.dot(K, (res - np.dot(H, x - n)))
        outlier_flag = False
    else:
        outlier_flag = True
    return n, K, outlier_flag

Example 8

def normalized_distance(_a, _b):
    """Compute normalized distance between two points.

    Computes 1 - a * b / ( ||a|| * ||b||)

    Args:
        _a (numpy.ndarray): array of size m
        _b (numpy.ndarray): array of size m

    Returns:
        normalized distance between signatures (float)

    Examples:
        >>> a = gis.generate_signature('https://upload.wikimedia.org/wikipedia/commons/thumb/e/ec/Mona_Lisa,_by_Leonardo_da_Vinci,_from_C2RMF_retouched.jpg/687px-Mona_Lisa,_by_Leonardo_da_Vinci,_from_C2RMF_retouched.jpg')
        >>> b = gis.generate_signature('https://pixabay.com/static/uploads/photo/2012/11/28/08/56/mona-lisa-67506_960_720.jpg')
        >>> gis.normalized_distance(a, b)
        0.0332806110382
    """
    # NOTE: the commented-out line implements the distance described in the
    # docstring; as written, the function returns the cosine *similarity*.
    # return (1.0 - np.dot(_a, _b) / (np.linalg.norm(_a) * np.linalg.norm(_b)))
    return np.dot(_a, _b) / (np.linalg.norm(_a) * np.linalg.norm(_b))

Example 9

def observed_perplexity(self, counts):
    """Compute perplexity = exp(entropy) of observed variables.

    Perplexity is an information theoretic measure of the number of
    clusters or latent classes. Perplexity is a real number in the range
    [1, M], where M is model_num_clusters.

    Args:
        counts: A [V]-shaped array of multinomial counts.

    Returns:
        A [V]-shaped numpy array of perplexity.
    """
    V, E, M, R = self._VEMR
    if counts is not None:
        counts = np.ones(V, dtype=np.int8)
    assert counts.shape == (V, )
    assert counts.dtype == np.int8
    assert np.all(counts > 0)
    observed_entropy = np.empty(V, dtype=np.float32)
    for v in range(V):
        beg, end = self._ragged_index[v:v + 2]
        probs = np.dot(self._feat_cond[beg:end, :], self._vert_probs[v, :])
        observed_entropy[v] = multinomial_entropy(probs, counts[v])
    return np.exp(observed_entropy)

Example 10

def get_covariance(self):
    """Compute data covariance with the generative model.

    ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``

    where S**2 contains the explained variances.

    Returns
    -------
    cov : array, shape=(n_features, n_features)
        Estimated covariance of data.
    """
    components_ = self.components_
    exp_var = self.explained_variance_
    if self.whiten:
        components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    cov = np.dot(components_.T * exp_var_diff, components_)
    cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
    return cov

Example 11

def transform(self, X):
    """Apply the dimensionality reduction on X.

    X is projected on the first principal components previously extracted
    from a training set.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        New data, where n_samples is the number of samples
        and n_features is the number of features.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    check_is_fitted(self, 'mean_')
    X = check_array(X)
    if self.mean_ is not None:
        X = X - self.mean_
    X_transformed = np.dot(X, self.components_.T)
    if self.whiten:
        X_transformed /= np.sqrt(self.explained_variance_)
    return X_transformed

Example 12

def score_samples(self, X):
    """Return the log-likelihood of each sample.

    See "Pattern Recognition and Machine Learning"
    by C. Bishop, 12.2.1 p. 574
    or http://www.miketipping.com/papers/met-mppca.pdf

    Parameters
    ----------
    X : array, shape(n_samples, n_features)
        The data.

    Returns
    -------
    ll : array, shape (n_samples,)
        Log-likelihood of each sample under the current model
    """
    check_is_fitted(self, 'mean_')
    X = check_array(X)
    Xr = X - self.mean_
    n_features = X.shape[1]
    log_like = np.zeros(X.shape[0])
    precision = self.get_precision()
    log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
    log_like -= .5 * (n_features * log(2. * np.pi)
                      - fast_logdet(precision))
    return log_like

Example 13

def optSigma2(self, U, s, y, covars, logdetXX, reml, ldeltamin=-5, ldeltamax=5):
    # Prepare required matrices
    Uy = U.T.dot(y).flatten()
    UX = U.T.dot(covars)
    if (U.shape[1] < U.shape[0]):
        UUX = covars - U.dot(UX)
        UUy = y - U.dot(Uy)
        UUXUUX = UUX.T.dot(UUX)
        UUXUUy = UUX.T.dot(UUy)
        UUyUUy = UUy.T.dot(UUy)
    else:
        UUXUUX, UUXUUy, UUyUUy = None, None, None
    numIndividuals = U.shape[0]
    ldeltaopt_glob = optimize.minimize_scalar(
        self.negLLevalLong, bounds=(-5, 5), method='Bounded',
        args=(s, Uy, UX, logdetXX, UUXUUX, UUXUUy, UUyUUy, numIndividuals, reml)).x
    ll, sig2g, beta, r2 = self.negLLevalLong(
        ldeltaopt_glob, s, Uy, UX, logdetXX, UUXUUX, UUXUUy, UUyUUy,
        numIndividuals, reml, returnAllParams=True)
    sig2e = np.exp(ldeltaopt_glob) * sig2g
    return sig2g, sig2e, beta, ll

Example 14

def BorthogonalityTest(B, U):
    """
    Test the Frobenius norm of U^T B U - I_k
    """
    BU = np.zeros(U.shape)
    Bu = Vector()
    u = Vector()
    B.init_vector(Bu, 0)
    B.init_vector(u, 1)
    nvec = U.shape[1]
    for i in range(0, nvec):
        u.set_local(U[:, i])
        B.mult(u, Bu)
        BU[:, i] = Bu.get_local()
    UtBU = np.dot(U.T, BU)
    err = UtBU - np.eye(nvec, dtype=UtBU.dtype)
    print("|| UtBU - I ||_F = ", np.linalg.norm(err, 'fro'))

Example 15

def _ireduce_linalg(arrays, func, **kwargs):
    """
    Yield the cumulative reduction of a linear algebra function
    """
    arrays = iter(arrays)
    first = next(arrays)
    second = next(arrays)
    func = partial(func, **kwargs)
    accumulator = func(first, second)
    yield accumulator
    for array in arrays:
        # For some reason, np.dot(..., out = accumulator) did not produce results
        # that were equal to numpy.linalg.multi_dot, so the accumulator is rebound
        # rather than written in place.
        accumulator = func(accumulator, array)
        yield accumulator

Example 16

def idot(arrays):
    """
    Yields the cumulative array inner product (dot product) of arrays.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.

    Yields
    ------
    online_dot : ndarray

    See Also
    --------
    numpy.linalg.multi_dot : Compute the dot product of two or more arrays
        in a single function call, while automatically selecting the fastest
        evaluation order.
    """
    yield from _ireduce_linalg(arrays, np.dot)
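A short usage sketch for idot (hypothetical matrices, assuming numpy is imported as np): each yielded value is the running matrix product, and the last one should agree with numpy.linalg.multi_dot up to rounding:

mats = [np.random.randn(3, 3) for _ in range(4)]
partials = list(idot(mats))            # 3 partial products for 4 matrices
print(np.allclose(partials[-1], np.linalg.multi_dot(mats)))  # expected: True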

Example 17

def itensordot(arrays, axes=2):
    """
    Yields the cumulative array inner product (dot product) of arrays.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    axes : int or (2,) array_like
        * integer_like: If an int N, sum over the last N axes of a
          and the first N axes of b in order. The sizes of the corresponding
          axes must match.
        * (2,) array_like: Or, a list of axes to be summed over, first
          sequence applying to a, second to b. Both elements array_like must
          be of the same length.

    Yields
    ------
    online_tensordot : ndarray

    See Also
    --------
    numpy.tensordot : Compute the tensordot on two tensors.
    """
    yield from _ireduce_linalg(arrays, np.tensordot, axes=axes)

Example 18

def _convert(matrix, arr):
    """Do the color space conversion.

    Parameters
    ----------
    matrix : array_like
        The 3x3 matrix to use.
    arr : array_like
        The input array.

    Returns
    -------
    out : ndarray, dtype=float
        The converted array.
    """
    arr = _prepare_colorarray(arr)
    arr = np.swapaxes(arr, 0, -1)
    oldshape = arr.shape
    arr = np.reshape(arr, (3, -1))
    out = np.dot(matrix, arr)
    out.shape = oldshape
    out = np.swapaxes(out, -1, 0)
    return np.ascontiguousarray(out)

Example 19

def rotate_point_cloud(batch_data):
    """ Randomly rotate the point clouds to augment the dataset.
        Rotation is per shape, about the up direction.
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        rotation_angle = np.random.uniform() * 2 * np.pi
        cosval = np.cos(rotation_angle)
        sinval = np.sin(rotation_angle)
        rotation_matrix = np.array([[cosval, 0, sinval],
                                    [0, 1, 0],
                                    [-sinval, 0, cosval]])
        shape_pc = batch_data[k, ...]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
    return rotated_data

Example 20

def rotate_point_cloud_by_angle(batch_data, rotation_angle):
    """ Rotate the point cloud about the up direction by a given angle.
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        # rotation_angle = np.random.uniform() * 2 * np.pi
        cosval = np.cos(rotation_angle)
        sinval = np.sin(rotation_angle)
        rotation_matrix = np.array([[cosval, 0, sinval],
                                    [0, 1, 0],
                                    [-sinval, 0, cosval]])
        shape_pc = batch_data[k, ...]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
    return rotated_data
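A hedged sketch of how the two rotation helpers above might be called on a hypothetical batch (assumes numpy is imported as np):

batch = np.random.rand(8, 1024, 3).astype(np.float32)         # 8 clouds, 1024 points each
augmented = rotate_point_cloud(batch)                         # random rotation per shape
quarter_turn = rotate_point_cloud_by_angle(batch, np.pi / 2)  # fixed 90-degree rotation
print(augmented.shape, quarter_turn.shape)                    # both (8, 1024, 3)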

Example 21

def computeStep(X, y, theta):
    '''YOUR CODE HERE'''
    function_result = np.array([0, 0], dtype=float)
    m = float(len(X))
    d1 = 0.0
    d2 = 0.0
    for i in range(len(X)):
        h1 = np.dot(theta.transpose(), X[i])
        c1 = h1 - y[i]
        d1 = d1 + c1
    j1 = d1 / m
    for u in range(len(X)):
        h2 = np.dot(theta.transpose(), X[u])
        c2 = (h2 - y[u]) * X[u][1]
        d2 = d2 + c2
    j2 = d2 / m
    function_result[0] = j1
    function_result[1] = j2
    return function_result

# Part 4: Implement the cost function calculation

Example 22

def computeCost(X, y, theta):
    '''YOUR CODE HERE'''
    m = float(len(X))
    d = 0
    for i in range(len(X)):
        h = np.dot(theta.transpose(), X[i])
        c = (h - y[i])
        c = (c ** 2)
        d = (d + c)
    j = (1.0 / (2 * m)) * d
    return j

# Part 5: Prepare the data so that the input X has two columns: first a column of ones to accommodate theta0, and then a column of city population data
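Taken together, computeStep and computeCost are the pieces of batch gradient descent for linear regression. A hedged driver loop on made-up data (the learning rate alpha and the iteration count are arbitrary choices, not from the original exercise):

X = np.column_stack([np.ones(5), np.arange(1.0, 6.0)])  # ones column for theta0, then the feature
y = 2.0 * np.arange(1.0, 6.0)                           # targets on the line y = 2x
theta = np.zeros(2)
alpha = 0.01
for _ in range(2000):
    theta = theta - alpha * computeStep(X, y, theta)
print(theta, computeCost(X, y, theta))                  # theta approaches [0, 2], cost near 0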

Example 23

def forwardPropagate(X, W1, b1, W2, b2):
    ## YOUR CODE HERE ##
    Z1 = 0
    Z2 = 0
    S1 = 0
    S2 = 0
    ## We compute two results: first for the input and hidden layers, then for
    ## the hidden and output layers.
    ## First, find the result for every node in the hidden layer, then put
    ## them through the activation function.
    S1 = np.dot(X, W1) + b1
    Z1 = calcActivation(S1)
    ## Second, find the result for every node in the output layer, then put
    ## them through the activation function.
    S2 = np.dot(Z1, W2) + b2
    Z2 = calcActivation(S2)
    ####################
    return Z1, Z2

# calculate the cost

Example 24

def calcGrads(X, Z1, Z2, E1, E2, Eb1):
    ## YOUR CODE HERE ##
    d_W1 = 0
    d_b1 = 0
    d_W2 = 0
    d_b2 = 0
    ## Here we compute the derivatives for the gradients.
    # d_W2 is the derivative for the weights between the hidden layer and the output layer.
    d_W2 = np.dot(np.transpose(E2), Z1)
    # d_W1 is the derivative for the weights between the hidden layer and the input layer.
    d_W1 = np.dot(E1, X)
    # d_b2 is the derivative for the weights between the hidden layer bias and the output layer.
    d_b2 = np.dot(np.transpose(E2), Eb1)
    # d_b1 is the derivative for the weights between the hidden layer bias and the input layer.
    d_b1 = np.dot(np.transpose(E1), 1)
    ####################
    return d_W1, d_W2, d_b1, d_b2

# update the weights between units and the bias weights using a learning rate of alpha
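Examples 3, 23, and 24 come from the same toy-network exercise. A hedged sketch of how they chain together on hypothetical shapes (calcActivation is not shown in the snippets, so a sigmoid is assumed here):

def calcActivation(s):
    return 1.0 / (1.0 + np.exp(-s))  # assumed activation, not from the original code

X = np.random.randn(1, 4)                                 # 1 sample, 4 inputs
y = np.random.randn(1, 5)                                 # 5 outputs
W1, b1 = np.random.randn(4, 30), np.random.randn(1, 30)   # 30 hidden units
W2, b2 = np.random.randn(30, 5), np.random.randn(1, 5)

Z1, Z2 = forwardPropagate(X, W1, b1, W2, b2)
E2, E1, Eb1 = backPropagate(Z1, Z2, y, W2, b2)
d_W1, d_W2, d_b1, d_b2 = calcGrads(X, Z1, Z2, E1, E2, Eb1)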

Example 25

def doesnt_match(self, words):
    """
    Which word from the given list doesn't go with the others?

    Example::

      >>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
      'cereal'
    """
    words = [word for word in words if word in self.vocab]  # filter out OOV words
    logger.debug("using words %s" % words)
    if not words:
        raise ValueError("cannot select a word from an empty list")
    # which word vector representation is furthest away from the mean?
    selection = self.syn0norm[[self.vocab[word].index for word in words]]
    mean = np.mean(selection, axis=0)
    sim = np.dot(selection, mean / np.linalg.norm(mean))
    return words[np.argmin(sim)]

Example 26

def __mul__(self, other):
    """
    Left-multiply RigidTransform with another rigid transform.

    Two variants:
        RigidTransform: Identical to oplus operation
        ndarray: transform [N x 3] point set (X_2 = p_21 * X_1)
    """
    if isinstance(other, DualQuaternion):
        return DualQuaternion.from_dq(other.real * self.real,
                                      other.dual * self.real + other.real * self.dual)
    elif isinstance(other, float):
        return DualQuaternion.from_dq(self.real * other, self.dual * other)
    # elif isinstance(other, np.ndarray):
    #     X = np.hstack([other, np.ones((len(other), 1))]).T
    #     return (np.dot(self.matrix, X).T)[:, :3]
    else:
        raise TypeError('__mul__ typeerror {:}'.format(type(other)))

Example 27

def interpolate(self, other, this_weight):
    q0, q1 = np.roll(self.q, shift=1), np.roll(other.q, shift=1)
    u = 1 - this_weight
    assert(u >= 0 and u <= 1)
    cos_omega = np.dot(q0, q1)
    if cos_omega < 0:
        result = -q0[:]
        cos_omega = -cos_omega
    else:
        result = q0[:]
    cos_omega = min(cos_omega, 1)
    omega = math.acos(cos_omega)
    sin_omega = math.sin(omega)
    a = math.sin((1 - u) * omega) / sin_omega
    b = math.sin(u * omega) / sin_omega
    if abs(sin_omega) < 1e-6:
        # direct linear interpolation for numerically unstable regions
        result = result * this_weight + q1 * u
        result /= math.sqrt(np.dot(result, result))
    else:
        result = result * a + q1 * b
    return Quaternion(np.roll(result, shift=-1))

# To conversions

Example 28

def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947])
    >>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0)))
    True
    """
    q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True)
    nq = numpy.dot(q, q)
    if nq < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / nq)
    q = numpy.outer(q, q)
    return numpy.array((
        (1.0-q[1, 1]-q[2, 2],     q[0, 1]-q[2, 3],     q[0, 2]+q[1, 3], 0.0),
        (    q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2],     q[1, 2]-q[0, 3], 0.0),
        (    q[0, 2]-q[1, 3],     q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0),
        (                0.0,                 0.0,                 0.0, 1.0)
        ), dtype=numpy.float64)
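A minimal check in the module's context (it assumes the module-level _EPS constant and the [x, y, z, w] component ordering implied by the doctest): the identity quaternion should map to the 4x4 identity matrix:

R = quaternion_matrix([0.0, 0.0, 0.0, 1.0])  # identity rotation
print(numpy.allclose(R, numpy.identity(4)))  # expected: True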

Example 29

def sampson_error(F, pts1, pts2):
    """
    Computes the Sampson error for F and points pts1, pts2. Sampson
    error is the first-order approximation to the geometric error.
    Remember that this is a squared error.

                         (x'^T * F * x)^2
    -----------------------------------------------------------
    (F * x)_1^2 + (F * x)_2^2 + (F^T * x')_1^2 + (F^T * x')_2^2

    where (F * x)_i^2 is the square of the i-th entry of the vector F x.
    """
    x1, x2 = unproject_points(pts1).T, unproject_points(pts2).T
    Fx1 = np.dot(F, x1)
    Fx2 = np.dot(F, x2)
    # Sampson distance as error measure
    denom = Fx1[0]**2 + Fx1[1]**2 + Fx2[0]**2 + Fx2[1]**2
    return (np.diag(x1.T.dot(Fx2)))**2 / denom

Example 30

def getMedianDistanceBetweenSamples(self, sampleSet=None):
    """
    Jaakkola's heuristic method for setting the width parameter of the Gaussian
    radial basis function kernel is to pick a quantile (usually the median) of
    the distribution of Euclidean distances between points having different
    labels.

    Reference:
    T. Jaakkola, M. Diekhans, and D. Haussler. Using the Fisher kernel method
    to detect remote protein homologies. In T. Lengauer, R. Schneider, P. Bork,
    D. Brutlag, J. Glasgow, H.-W. Mewes, and R. Zimmer, editors, Proceedings of
    the Seventh International Conference on Intelligent Systems for Molecular
    Biology.
    """
    numrows = sampleSet.shape[0]
    samples = sampleSet
    # squared pairwise distances via ||a||^2 + ||b||^2 - 2 a.b
    G = sum((samples * samples), 1)
    Q = numpy.tile(G[:, None], (1, numrows))
    R = numpy.tile(G, (numrows, 1))
    distances = Q + R - 2 * numpy.dot(samples, samples.T)
    distances = distances - numpy.tril(distances)
    distances = distances.reshape(numrows**2, 1, order="F").copy()
    return numpy.sqrt(0.5 * numpy.median(distances[distances > 0]))

Example 31

def refit_model(self):
    """Learns a new surrogate model using the data observed so far.
    """
    # only fit the model if there is data for it.
    if len(self.known_models) > 0:
        self._build_feature_maps(self.known_models, self.ngram_maxlen, self.thres)
        X = sp.vstack([self._compute_features(mdl)
                       for mdl in self.known_models], "csr")
        y = np.array(self.known_scores, dtype='float64')
        # A = np.dot(X.T, X) + lamb * np.eye(X.shape[1])
        # b = np.dot(X.T, y)
        self.surr_model = lm.Ridge(self.lamb_ridge)
        self.surr_model.fit(X, y)

# NOTE: if the search space has holes, it breaks. needs try/except module.

Example 32

def _update_ps(self, es):
    if not self.is_initialized:
        self.initialize(es)
    if self._ps_updated_iteration == es.countiter:
        return
    z = es.sm.transform_inverse((es.mean - es.mean_old) / es.sigma_vec.scaling)
    # works unless a re-parametrisation has been done
    # assert Mh.vequals_approximately(z, np.dot(es.B, (1. / es.D) *
    #     np.dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec)))
    z *= es.sp.weights.mueff**0.5 / es.sigma / es.sp.cmean
    if es.opts['CSA_clip_length_value'] is not None:
        vals = es.opts['CSA_clip_length_value']
        min_len = es.N**0.5 + vals[0] * es.N / (es.N + 2)
        max_len = es.N**0.5 + vals[1] * es.N / (es.N + 2)
        act_len = sum(z**2)**0.5
        new_len = Mh.minmax(act_len, min_len, max_len)
        if new_len != act_len:
            z *= new_len / act_len
            # z *= (es.N / sum(z**2))**0.5  # ==> sum(z**2) == es.N
            # z *= es.const.chiN / sum(z**2)**0.5
    self.ps = (1 - self.cs) * self.ps + np.sqrt(self.cs * (2 - self.cs)) * z
    self._ps_updated_iteration = es.countiter

Example 33

def isotropic_mean_shift(self):
    """normalized last mean shift, under random selection N(0,I)
    distributed.

    Caveat: while it is finite and close to sqrt(n) under random
    selection, the length of the normalized mean shift under
    *systematic* selection (e.g. on a linear function) tends to
    infinity for mueff -> infty. Hence it must be used with great
    care for large mueff.
    """
    z = self.sm.transform_inverse((self.mean - self.mean_old) /
                                  self.sigma_vec.scaling)
    # works unless a re-parametrisation has been done
    # assert Mh.vequals_approximately(z, np.dot(es.B, (1. / es.D) *
    #     np.dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec)))
    z /= self.sigma * self.sp.cmean
    z *= self.sp.weights.mueff**0.5
    return z

Example 34

def norm(self, x):
    """compute the Mahalanobis norm that is induced by the
    statistical model / sample distribution, specifically by
    covariance matrix ``C``. The expected Mahalanobis norm is
    about ``sqrt(dimension)``.

    Example
    -------
    >>> import cma, numpy as np
    >>> sm = cma.sampler.GaussFullSampler(np.ones(10))
    >>> x = np.random.randn(10)
    >>> d = sm.norm(x)

    `d` is the norm "in" the true sample distribution,
    sampled points have a typical distance of ``sqrt(2*sm.dim)``,
    where ``sm.dim`` is the dimension, and an expected distance of
    close to ``dim**0.5`` to the sample mean zero. In the example,
    `d` is the Euclidean distance, because C = I.
    """
    return sum((np.dot(self.B.T, x) / self.D)**2)**0.5

Example 35

def ask(self):
    """sample lambda candidate solutions

    distributed according to::

        m + sigma * Normal(0,C) = m + sigma * B * D * Normal(0,I)
                                = m + B * D * sigma * Normal(0,I)

    and return a `list` of the sampled "vectors".
    """
    self.C.update_eigensystem(self.counteval,
                              self.params.lazy_gap_evals)
    candidate_solutions = []
    for k in range(self.params.lam):  # repeat lam times
        z = [self.sigma * eigenval**0.5 * self.randn(0, 1)
             for eigenval in self.C.eigenvalues]
        y = dot(self.C.eigenbasis, z)
        candidate_solutions.append(plus(self.xmean, y))
    return candidate_solutions

Example 36

def __init__(self, dimension, randn=np.random.randn, debug=False):
    """pass dimension of the underlying sample space
    """
    try:
        self.N = len(dimension)
        std_vec = np.array(dimension, copy=True)
    except TypeError:
        self.N = dimension
        std_vec = np.ones(self.N)
    if self.N < 10:
        print('Warning: Not advised to use VD-CMA for dimension < 10.')
    self.randn = randn
    self.dvec = std_vec
    self.vvec = self.randn(self.N) / math.sqrt(self.N)
    self.norm_v2 = np.dot(self.vvec, self.vvec)
    self.norm_v = np.sqrt(self.norm_v2)
    self.vn = self.vvec / self.norm_v
    self.vnn = self.vn**2
    self.pc = np.zeros(self.N)
    self._debug = debug  # plot covariance matrix

Example 37

def _get_params(self, weights, k):
    """Return the learning rates cone, cmu, cc depending on k

    Parameters
    ----------
    weights : list of float
        the weight values for vectors used to update the distribution
    k : int
        the number of vectors for covariance matrix

    Returns
    -------
    cone, cmu, cc : float in [0, 1]. Learning rates for rank-one, rank-mu,
        and the cumulation factor for rank-one.
    """
    w = np.array(weights)
    mueff = np.sum(w[w > 0.])**2 / np.dot(w[w > 0.], w[w > 0.])
    return self._get_params2(mueff, k)

Example 38

def covariance_matrix(self):
    if self._debug:
        # return None
        ka = self.k_active
        if ka > 0:
            C = np.eye(self.N) + np.dot(self.V[:ka].T * self.S[:ka],
                                        self.V[:ka])
            C = (C * self.D).T * self.D
        else:
            C = np.diag(self.D**2)
        C *= self.sigma**2
    else:
        # Fake Covariance Matrix for Speed
        C = np.ones(1)
        self.B = np.ones(1)
    return C

Example 39

def _evalfull(self, x):
    fadd = self.fopt
    curshape, dim = self.shape_(x)
    # it is assumed x are row vectors
    if self.lastshape != curshape:
        self.initwithsize(curshape, dim)
    # TRANSFORMATION IN SEARCH SPACE
    x = x - self.arrxopt  # cannot be replaced with x -= arrxopt!
    # COMPUTATION core
    ftrue = dot(monotoneTFosc(x)**2, self.scales)
    fval = self.noise(ftrue)  # without noise
    # FINALIZE
    ftrue += fadd
    fval += fadd
    return fval, ftrue

Example 40

def _evalfull(self, x):
    fadd = self.fopt
    curshape, dim = self.shape_(x)
    # it is assumed x are row vectors
    if self.lastshape != curshape:
        self.initwithsize(curshape, dim)
    # TRANSFORMATION IN SEARCH SPACE
    x = x - self.arrxopt  # cannot be replaced with x -= arrxopt!
    x = dot(x, self.linearTF)  # TODO: check
    # COMPUTATION core
    idx = (x * self.arrxopt) > 0
    x[idx] = self.alpha * x[idx]
    ftrue = monotoneTFosc(np.sum(x**2, -1))**.9
    fval = self.noise(ftrue)
    # FINALIZE
    ftrue += fadd
    fval += fadd
    return fval, ftrue

Example 41

def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = self.condition ** linspace(0, 1, dim)
        self.linearTF = dot(compute_rotation(self.rseed, dim),
                            diag(((self.condition / 10.)**.5) ** linspace(0, 1, dim)))
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)

Example 42

def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        scale = max(1, dim**.5 / 8.)  # note: different from scales in F8
        self.linearTF = scale * compute_rotation(self.rseed, dim)
        self.xopt = np.hstack(dot(.5 * np.ones((1, dim)), self.linearTF.T)) / scale**2
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)

Example 43

def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = (self.condition**.5) ** linspace(0, 1, dim)
        self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
        self.linearTF = dot(self.linearTF, self.rotation)
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)

Example 44

def _evalfull(self, x):
    fadd = self.fopt
    curshape, dim = self.shape_(x)
    # it is assumed x are row vectors
    if self.lastshape != curshape:
        self.initwithsize(curshape, dim)
    # BOUNDARY HANDLING
    # TRANSFORMATION IN SEARCH SPACE
    x = x - self.arrxopt  # cannot be replaced with x -= arrxopt!
    x = dot(x, self.linearTF)
    # COMPUTATION core
    try:
        ftrue = x[:, 0]**2 + self.alpha * np.sqrt(np.sum(x[:, 1:]**2, -1))
    except IndexError:
        ftrue = x[0]**2 + self.alpha * np.sqrt(np.sum(x[1:]**2, -1))
    fval = self.noise(ftrue)
    # FINALIZE
    ftrue += fadd
    fval += fadd
    return fval, ftrue

Example 45

def distance(v1, v2, normalised_vectors=False):
    """
    Returns the cosine distance between two vectors.
    If the vectors are normalised, there is no need for the denominator,
    which is always one.
    """
    if normalised_vectors:
        return 1 - dot(v1, v2)
    else:
        return 1 - dot(v1, v2) / (norm(v1) * norm(v2))
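A small check of the cosine-distance helper, assuming dot and norm are numpy.dot and numpy.linalg.norm as the snippet implies:

from numpy import dot
from numpy.linalg import norm
import numpy as np

v1 = np.array([1.0, 0.0])
v2 = np.array([0.0, 1.0])
print(distance(v1, v1))  # 0.0 for identical vectors
print(distance(v1, v2))  # 1.0 for orthogonal vectors
print(distance(v1 / norm(v1), v2 / norm(v2), normalised_vectors=True))  # also 1.0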

Example 46

def convert_to_num(Ybin, verbose=True):
    '''Convert binary targets to numeric vector (typically classification target values)'''
    if verbose:
        print("\tConverting to numeric vector")
    Ybin = np.array(Ybin)
    if len(Ybin.shape) == 1:
        return Ybin
    classid = range(Ybin.shape[1])
    Ycont = np.dot(Ybin, classid)
    if verbose:
        print(Ycont)
    return Ycont

Example 47

def build_2D_cov_matrix(sigmax, sigmay, angle, verbose=True):
    """
    Build a covariance matrix for a 2D multivariate Gaussian

    --- INPUT ---
    sigmax    Standard deviation of the x-component of the multivariate Gaussian
    sigmay    Standard deviation of the y-component of the multivariate Gaussian
    angle     Angle to rotate matrix by in degrees (clockwise) to populate covariance cross terms
    verbose   Toggle verbosity

    --- EXAMPLE OF USE ---
    import tdose_utilities as tu
    covmatrix = tu.build_2D_cov_matrix(3,1,35)
    """
    if verbose:
        print(' - Build 2D covariance matrix with variances (x,y) = (' + str(sigmax) + ',' + str(sigmay) +
              ') and then rotated ' + str(angle) + ' degrees')
    cov_orig = np.zeros([2, 2])
    cov_orig[0, 0] = sigmay**2.0
    cov_orig[1, 1] = sigmax**2.0
    angle_rad = (180.0 - angle) * np.pi / 180.0  # the angle shift makes sure the same convention as DS9 is used
    c, s = np.cos(angle_rad), np.sin(angle_rad)
    rotmatrix = np.matrix([[c, -s], [s, c]])
    cov_rot = np.dot(np.dot(rotmatrix, cov_orig), np.transpose(rotmatrix))  # performing rot * cov * rot^T
    return cov_rot

# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =

Example 48

def get_continuous_object(grid_func,
                          xmin=LOCAL_XMIN, xmax=LOCAL_XMAX,
                          c2s=None):
    """
    Maps the grid function grid_func, which is any field defined
    on the colocation points, to a continuous function that can
    be evaluated.

    Parameters
    ----------
    xmin -- the minimum value of the domain
    xmax -- the maximum value of the domain
    c2s  -- the Vandermonde matrix that maps the colocation representation
            to the spectral representation

    Returns
    -------
    A numpy polynomial object which can be called to be evaluated
    """
    order = len(grid_func) - 1
    if c2s is None:
        s2c, c2s = get_vandermonde_matrices(order)
    spec_func = np.dot(c2s, grid_func)
    my_interp = poly(spec_func, domain=[xmin, xmax])
    return my_interp

# ======================================================================
# A convenience class that generates everything and can be called
# ======================================================================

Example 49

def differentiate(self, grid_func, order=1):
    """
    Given a grid function defined on the colocation points,
    returns its derivative of the appropriate order
    """
    assert type(order) == int
    assert order >= 0
    if order == 0:
        return grid_func
    else:
        return self.differentiate(np.dot(self.PD, grid_func), order - 1)

Example 50

def to_continuum(self, grid_func):
    coeffs_x = np.dot(self.stencil_x.c2s, grid_func)
    coeffs_xy = np.dot(coeffs_x, self.stencil_y.c2s.transpose())
    def f(x, y):
        mx, my = [s._coord_global_to_ref(c)
                  for c, s in zip([x, y], self.stencils)]
        return pval2d(mx, my, coeffs_xy)
    return f
