Example 1
def rhoA(self):
    # rho_A reliability coefficient (Dijkstra-Henseler), one value per latent variable
    rhoA = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
    for i in range(self.lenlatent):
        weights = pd.DataFrame(self.outer_weights[self.latent[i]])
        weights = weights[(weights.T != 0).any()]
        result = pd.DataFrame.dot(weights.T, weights)
        result_ = pd.DataFrame.dot(weights, weights.T)
        # empirical covariance matrix of this block's indicators
        S = self.data_[self.Variables['measurement'][
            self.Variables['latent'] == self.latent[i]]]
        S = pd.DataFrame.dot(S.T, S) / S.shape[0]
        numerador = (
            np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
        denominador = (
            np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights))
        rhoA_ = ((result)**2) * (numerador / denominador)
        if np.isnan(rhoA_.values).any():
            rhoA[self.latent[i]] = 1
        else:
            rhoA[self.latent[i]] = rhoA_.values
    return rhoA.T
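Stripped of the class plumbing, each loop iteration evaluates the rho_A formula directly. A minimal standalone sketch (the function name and toy inputs below are illustrative, not part of the original class):
import numpy as np

def rho_a(w, S):
    # w: (m,) outer weights of one block; S: (m, m) covariance of its indicators
    w = w.reshape(-1, 1)
    ww = w @ w.T
    num = w.T @ (S - np.diag(np.diag(S))) @ w
    den = w.T @ (ww - np.diag(np.diag(ww))) @ w
    return ((w.T @ w) ** 2 * num / den).item()

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 3))
print(rho_a(np.array([0.5, 0.4, 0.3]), X.T @ X / len(X)))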
Example 2
def PCA(data, num_components=None):
    # mean-center a copy of the data so the caller's array is not mutated
    data = data - data.mean(axis=0)
    # calculate the covariance matrix
    R = np.cov(data, rowvar=False)
    # calculate eigenvectors & eigenvalues of the covariance matrix;
    # use 'eigh' rather than 'eig' since R is symmetric,
    # and the performance gain is substantial
    V, E = np.linalg.eigh(R)
    # sort eigenvalues in decreasing order
    idx = np.argsort(V)[::-1]
    V = V[idx]
    # sort eigenvectors according to the same index
    E = E[:, idx]
    # select the first num_components eigenvectors (num_components is the
    # desired dimensionality of the projected data)
    E = E[:, :num_components]
    # project the data onto the eigenvectors and return the projected data,
    # the eigenvalues, and the eigenvectors
    return np.dot(E.T, data.T).T, V, E
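A quick usage sketch for PCA on synthetic 2-D data (the data and names are illustrative):
import numpy as np

rng = np.random.default_rng(1)
x = rng.standard_normal(200)
pts = np.column_stack([x, 2 * x + 0.1 * rng.standard_normal(200)])
proj, eigvals, eigvecs = PCA(pts, num_components=1)
print(proj.shape, eigvecs.shape)  # (200, 1) scores, (2, 1) leading axis
print(eigvals)                    # variance concentrated in the first eigenvalue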
Example 3
def backPropagate(Z1, Z2, y, W2, b2):
    ## YOUR CODE HERE ##
    # E2 is the error in the output layer. To find it we subtract the actual
    # output from the estimated value. There are 5 errors because there are
    # 5 nodes in the output layer.
    E2 = Z2 - y
    ## E1 is the error in the hidden layer. To find it we use the error found in
    ## the output layer and the weights between the hidden and output layers.
    ## There are 30 errors because there are 30 nodes in the hidden layer.
    E1 = np.dot(W2, np.transpose(E2))
    ## Eb1 is the bias error for the hidden layer. To find it we use the error
    ## found in the output layer and the bias weights between the hidden and
    ## output layers. There is 1 error because there is 1 bias node in the
    ## hidden layer.
    Eb1 = np.dot(b2, np.transpose(E2))
    ####################
    return E2, E1, Eb1

# Next step: calculate the gradients for the unit weights and the bias weights.
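A toy invocation of backPropagate, assuming the shapes implied by the comments (5 output nodes, 30 hidden nodes) and a single sample; all arrays are random placeholders:
import numpy as np

rng = np.random.default_rng(2)
Z1 = rng.standard_normal((1, 30))  # hidden-layer activations (unused here)
Z2 = rng.standard_normal((1, 5))   # output-layer activations
y = rng.standard_normal((1, 5))    # target outputs
W2 = rng.standard_normal((30, 5))  # hidden-to-output weights
b2 = rng.standard_normal((1, 5))   # output bias weights
E2, E1, Eb1 = backPropagate(Z1, Z2, y, W2, b2)
print(E2.shape, E1.shape, Eb1.shape)  # (1, 5), (30, 1), (1, 1)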
Example 4
def get_nodal_differentiation_matrix(order,
                                     s2c=None, c2s=None,
                                     Dmodal=None):
    """
    Returns the differentiation matrix for the first derivative
    in the nodal basis.
    It goes without saying that this differentiation matrix is for the
    reference cell.
    """
    if Dmodal is None:
        Dmodal = get_modal_differentiation_matrix(order)
    if s2c is None or c2s is None:
        s2c, c2s = get_vandermonde_matrices(order)
    return np.dot(s2c, np.dot(Dmodal, c2s))
# ======================================================================
# Operators Outside Reference Cell
# ======================================================================
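The composition above is the modal derivative conjugated by the Vandermonde matrices. A self-contained check for the linear (order 1) case on the reference cell [-1, 1], with every matrix written out by hand instead of coming from the helpers above:
import numpy as np

s2c = np.array([[1.0, -1.0],    # evaluate the modal basis {1, x} at node x = -1
                [1.0,  1.0]])   # ... and at node x = +1
c2s = np.linalg.inv(s2c)        # nodal values -> modal coefficients
Dmodal = np.array([[0.0, 1.0],  # d/dx (a + b*x) = b, i.e. coefficients (b, 0)
                   [0.0, 0.0]])
D = np.dot(s2c, np.dot(Dmodal, c2s))
f = np.array([1.0, 5.0])        # nodal values of f(x) = 3 + 2*x at x = -1, +1
print(np.dot(D, f))             # [2. 2.]: the exact slope at both nodes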
Example 5
def differentiate(self, grid_func, orderx, ordery):
    """Given a grid function defined on the collocation points,
    differentiate it up to the appropriate order in each direction.
    """
    assert type(orderx) is int
    assert type(ordery) is int
    assert orderx >= 0
    assert ordery >= 0
    if orderx > 0:
        df = np.dot(self.stencil_x.PD, grid_func)
        return self.differentiate(df, orderx - 1, ordery)
    if ordery > 0:
        df = np.dot(grid_func, self.stencil_y.PD.transpose())
        return self.differentiate(df, orderx, ordery - 1)
    # orderx == 0 and ordery == 0: nothing left to differentiate
    return grid_func
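The recursion applies a 1-D differentiation matrix along one axis at a time: left-multiplication differentiates along x (axis 0), right-multiplication by the transpose differentiates along y (axis 1). A standalone sketch with a hand-built stand-in for the stencil's PD matrix:
import numpy as np

PD = np.array([[-0.5, 0.5],    # exact first-derivative matrix for linear
               [-0.5, 0.5]])   # functions on the two nodes x = -1, +1
x = y = np.array([-1.0, 1.0])
F = np.outer(x, y)             # grid function f(x, y) = x * y
print(np.dot(PD, F))           # df/dx = y in every row: [[-1.  1.] [-1.  1.]]
print(np.dot(F, PD.T))         # df/dy = x in every column: [[-1. -1.] [ 1.  1.]]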
Example 6
def fit(self, graphs, y=None):
    rnd = check_random_state(self.random_state)
    n_samples = len(graphs)
    # sample at most n_samples graphs as the basis set
    n_components = min(n_samples, self.n_components)
    inds = rnd.permutation(n_samples)
    basis_inds = inds[:n_components]
    basis = [graphs[ind] for ind in basis_inds]
    basis_kernel = self.kernel(basis, basis, **self._get_kernel_params())
    # inverse square root of the kernel matrix on the basis vectors,
    # via SVD; clip tiny singular values for numerical stability
    U, S, V = svd(basis_kernel)
    S = np.maximum(S, 1e-12)
    self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
    self.components_ = basis
    self.component_indices_ = inds
    return self
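The normalization_ computed above is the inverse square root of the basis kernel matrix (the Nystrom whitening step). A quick standalone check on a small symmetric positive-definite matrix:
import numpy as np

rng = np.random.default_rng(3)
A = rng.standard_normal((10, 10))
K = A @ A.T + np.eye(10)                # well-conditioned PSD stand-in for basis_kernel
U, S, V = np.linalg.svd(K)
S = np.maximum(S, 1e-12)
N = np.dot(U * 1. / np.sqrt(S), V)      # same formula as normalization_ above
print(np.allclose(N @ K @ N.T, np.eye(10)))  # True: N acts as K**(-1/2)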
Example 7
def _ikf_iteration(self, x, n, ranges, h, H, z, estimate, R):
    """Run one iterated-Kalman-filter iteration on a multi-range measurement.
    Returns:
        Tuple of the updated state vector n, the Kalman gain K, and a flag
        that is True when the measurement was rejected as an outlier.
    """
    new_position = n[0:3]
    self._compute_measurements_and_jacobians(ranges, new_position, h, H, z)
    res = z - h  # measurement residual (innovation)
    # innovation covariance and Kalman gain
    S = np.dot(np.dot(H, estimate.covariance), H.T) + R
    K = np.dot(estimate.covariance, self._solve_equation_least_squares(S.T, H).T)
    # Mahalanobis distance of the residual, used to gate outliers
    mahalanobis = np.sqrt(np.dot(self._solve_equation_least_squares(S.T, res).T, res))
    if res.size not in self.outlier_thresholds:
        self.outlier_thresholds[res.size] = scipy.stats.chi2.isf(
            self.outlier_threshold_quantile, res.size)
    outlier_threshold = self.outlier_thresholds[res.size]
    if mahalanobis < outlier_threshold:
        # iterated-EKF state update
        n = x + np.dot(K, (res - np.dot(H, x - n)))
        outlier_flag = False
    else:
        outlier_flag = True
    return n, K, outlier_flag
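A standalone sketch of the chi-square outlier gate used above, in its squared-distance form; gate and its inputs are illustrative:
import numpy as np
import scipy.stats

def gate(res, S, quantile=0.05):
    # squared Mahalanobis distance of the residual under innovation covariance S
    d2 = res @ np.linalg.solve(S, res)
    return d2 < scipy.stats.chi2.isf(quantile, res.size)

S = np.eye(3)
print(gate(np.array([0.1, 0.2, 0.1]), S))  # True: small residual is kept
print(gate(np.array([5.0, 4.0, 6.0]), S))  # False: rejected as an outlier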
Example 8
def normalized_distance(_a, _b):
    """Compute normalized distance between two points.
    Computes 1 - a * b / ( ||a|| * ||b|| )
    Args:
        _a (numpy.ndarray): array of size m
        _b (numpy.ndarray): array of size m
    Returns:
        normalized distance between signatures (float)
    Examples:
        >>> a = gis.generate_signature('https://upload.wikimedia.org/wikipedia/commons/thumb/e/ec/Mona_Lisa,_by_Leonardo_da_Vinci,_from_C2RMF_retouched.jpg/687px-Mona_Lisa,_by_Leonardo_da_Vinci,_from_C2RMF_retouched.jpg')
        >>> b = gis.generate_signature('https://pixabay.com/static/uploads/photo/2012/11/28/08/56/mona-lisa-67506_960_720.jpg')
        >>> gis.normalized_distance(a, b)
        0.0332806110382
    """
    # 1 - cosine similarity, matching the docstring and the doctest above
    return 1.0 - np.dot(_a, _b) / (np.linalg.norm(_a) * np.linalg.norm(_b))
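A quick sanity check with plain vectors (no image signatures required): parallel vectors give a distance of about 0 and orthogonal vectors give 1:
import numpy as np

a = np.array([1.0, 2.0, 3.0])
print(normalized_distance(a, 2 * a))   # ~0.0: same direction
print(normalized_distance(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 1.0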
Example 9
def observed_perplexity(self, counts):
    """Compute perplexity = exp(entropy) of observed variables.
    Perplexity is an information theoretic measure of the number of
    clusters or latent classes. Perplexity is a real number in the range
    [1, M], where M is model_num_clusters.
    Args:
        counts: A [V]-shap