Usage of ix_ in Python: numpy.ix_() usage examples

The following are code examples showing how to use numpy.ix_(). They are extracted from open source Python projects. You can vote up the examples you like or vote down the examples you don't like. You can also save this page to your account.
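
As a quick orientation before the extracted examples: np.ix_ builds an "open mesh" from one-dimensional index sequences, so that indexing with the result selects the cross-product of the given rows and columns (a submatrix) rather than element pairs. A minimal sketch of this standard NumPy behavior:

import numpy as np

a = np.arange(16).reshape(4, 4)
rows = [0, 2]
cols = [1, 3]
ixgrid = np.ix_(rows, cols)   # tuple of arrays with shapes (2, 1) and (1, 2)
sub = a[ixgrid]               # 2x2 submatrix: rows 0 and 2 crossed with columns 1 and 3
pair = a[rows, cols]          # contrast: plain fancy indexing gives just a[0, 1] and a[2, 3]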

Example 1

def test_large_fancy_indexing(self, level=rlevel):
    # Large enough to fail on 64-bit.
    nbits = np.dtype(np.intp).itemsize * 8
    thesize = int((2**nbits)**(1.0/5.0)+1)

    def dp():
        n = 3
        a = np.ones((n,)*5)
        i = np.random.randint(0, n, size=thesize)
        a[np.ix_(i, i, i, i, i)] = 0

    def dp2():
        n = 3
        a = np.ones((n,)*5)
        i = np.random.randint(0, n, size=thesize)
        a[np.ix_(i, i, i, i, i)]

    self.assertRaises(ValueError, dp)
    self.assertRaises(ValueError, dp2)

Example 2

def graphlet_kernel(graphs, num_samples):
    N = len(graphs)
    Phi = np.zeros((N, 2**15))
    P = generate_permutation_matrix()
    for i in range(len(graphs)):
        n = graphs[i].number_of_nodes()
        if n >= 6:
            A = nx.to_numpy_matrix(graphs[i])
            A = np.asarray(A, dtype=np.uint8)
            for j in range(num_samples):
                r = np.random.permutation(n)
                window = A[np.ix_(r[:6], r[:6])]
                Phi[i, graphlet_type(window)] += 1
            Phi[i,:] /= num_samples
    K = np.dot(Phi, np.dot(P, np.transpose(Phi)))
    return K

Example 3

def MakeEquationSystem_volumeControl_extendedFP(w_lst_tmstp, wTip, EltChannel, EltTip, C, dt, Q, ElemArea):
    Ccc = C[np.ix_(EltChannel, EltChannel)]
    Cct = C[np.ix_(EltChannel, EltTip)]
    A = np.hstack((Ccc, -np.ones((EltChannel.size, 1), dtype=np.float64)))
    A = np.vstack((A, np.ones((1, EltChannel.size + 1), dtype=np.float64)))
    A[-1, -1] = 0
    S = -np.dot(Ccc, w_lst_tmstp[EltChannel]) - np.dot(Cct, wTip)
    S = np.append(S, Q * dt / ElemArea - (sum(wTip) - sum(w_lst_tmstp[EltTip])))
    return A, S

#-----------------------------------------------------------------------------------------------------------------------

Example 4

def jw_number_restrict_operator(operator, n_electrons, n_qubits=None):
    """Restrict a Jordan-Wigner encoded operator to a given particle number

    Args:
        sparse_operator(ndarray or sparse): Numpy operator acting on
            the space of n_qubits.
        n_electrons(int): Number of particles to restrict the operator to
        n_qubits(int): Number of qubits defining the total state

    Returns:
        new_operator(ndarray or sparse): Numpy operator restricted to
            acting on states with the same particle number.
    """
    if n_qubits is None:
        n_qubits = int(numpy.log2(operator.shape[0]))
    select_indices = jw_number_indices(n_electrons, n_qubits)
    return operator[numpy.ix_(select_indices, select_indices)]

Example 5

def _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y, weights=None):
    """ 2nd self-symmetric moment matrix exploiting zero input columns

    Computes X'X + Y'Y and X'Y + Y'X
    """
    assert len(mask_X) == len(mask_Y), 'X and Y need to have equal sizes for symmetrization'
    Cxxyy = np.zeros((len(mask_X), len(mask_Y)))
    Cxxyy[np.ix_(mask_X, mask_X)] = _M2_dense(Xvar, Xvar, weights=weights)
    Cxxyy[np.ix_(mask_Y, mask_Y)] += _M2_dense(Yvar, Yvar, weights=weights)
    Cxyyx = np.zeros((len(mask_X), len(mask_Y)))
    Cxy = _M2_dense(Xvar, Yvar, weights=weights)
    Cyx = _M2_dense(Yvar, Xvar, weights=weights)
    Cxyyx[np.ix_(mask_X, mask_Y)] = Cxy
    Cxyyx[np.ix_(mask_Y, mask_X)] += Cyx
    return Cxxyy, Cxyyx
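
This example writes dense blocks back into a larger zero matrix through np.ix_, which also accepts boolean masks (a mask is converted to the integer indices of its True entries). A minimal self-contained sketch of that scatter pattern, with made-up values standing in for _M2_dense:

import numpy as np

C = np.zeros((4, 4))
mask = np.array([True, False, True, False])
block = np.full((2, 2), 7.0)     # stands in for a dense moment block
C[np.ix_(mask, mask)] = block    # writes only the selected 2x2 sub-block of C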

Example 6

def _cartesian_product(*arrays):
    """
    Get the cartesian product of a number of arrays.

    Parameters
    ----------
    arrays : Iterable[np.ndarray]
        The arrays to get a cartesian product of. Always sorted with respect
        to the original array.

    Returns
    -------
    out : np.ndarray
        The overall cartesian product of all the input arrays.
    """
    broadcastable = np.ix_(*arrays)
    broadcasted = np.broadcast_arrays(*broadcastable)
    rows, cols = np.prod(broadcasted[0].shape), len(broadcasted)
    dtype = np.result_type(*arrays)
    out = np.empty(rows * cols, dtype=dtype)
    start, end = 0, rows
    for a in broadcasted:
        out[start:end] = a.reshape(-1)
        start, end = end, end + rows
    return out.reshape(cols, rows)
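
A short usage sketch for the helper above, with hypothetical inputs; the result has one row per input array, and the columns enumerate the combinations:

import numpy as np

x = np.array([1, 2])
y = np.array([10, 20, 30])
out = _cartesian_product(x, y)
# out.shape == (2, 6); out[:, 0] is [1, 10], out[:, 1] is [1, 20], and so on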

Example 7

def _find_motif(self, data, row_indices):
    """Finds the largest xMOTIF (this is the direct implementation of the
    pseudocode of the FindMotif() procedure described in the original paper).
    """
    num_rows, num_cols = data.shape
    best_motif = Bicluster([], [])
    seeds = np.random.choice(num_cols, self.num_seeds, replace=False)
    for s in seeds:
        seed_col = data[row_indices, s][:, np.newaxis]
        for i in range(self.num_sets):
            cols_set = np.random.choice(num_cols, self.set_size, replace=False)
            rows_comp_data = seed_col == data[np.ix_(row_indices, cols_set)]
            selected_rows = np.array([y for x, y in enumerate(row_indices) if np.all(rows_comp_data[x])], np.int)
            seed_values = data[selected_rows, s][:, np.newaxis]
            cols_comp_data = seed_values == data[selected_rows]
            selected_cols = np.array([k for k in range(num_cols) if np.all(cols_comp_data[:, k])])
            if len(selected_cols) >= self.alpha * num_cols and len(selected_rows) > len(best_motif.rows):
                best_motif = Bicluster(selected_rows, selected_cols)
    return best_motif

Example 8

def _find_constrained_bicluster(self, data):
    """Find a k x l bicluster."""
    num_rows, num_cols = data.shape
    k = random.randint(1, math.ceil(num_rows / 2))
    l = random.randint(1, math.ceil(num_cols / 2))
    cols = np.random.choice(num_cols, size=l, replace=False)
    old_avg, avg = float('-inf'), 0.0
    while abs(avg - old_avg) > self.tol:
        old_avg = avg
        row_sums = np.sum(data[:, cols], axis=1)
        rows = bn.argpartition(row_sums, num_rows - k)[-k:]  # usually faster than rows = np.argsort(row_sums)[-k:]
        col_sums = np.sum(data[rows, :], axis=0)
        cols = bn.argpartition(col_sums, num_cols - l)[-l:]  # usually faster than cols = np.argsort(col_sums)[-l:]
        avg = np.mean(data[np.ix_(rows, cols)])
    return Bicluster(rows, cols)

Example 9

def compute_activity_matrix(self, xywrap, thwrap, wdim, pcw):
    """Compute the activation of pose cells. Taken from Renato de Pontes Pereira"""
    # The goal is to return an update matrix that can be added/subtracted
    # from the posecell matrix
    pca_new = np.zeros([PC_DIM_XY, PC_DIM_XY, PC_DIM_TH])
    # for nonzero posecell values
    indices = np.nonzero(self.posecells)
    for i, j, k in itertools.izip(*indices):
        pca_new[np.ix_(xywrap[i:i+wdim],
                       xywrap[j:j+wdim],
                       thwrap[k:k+wdim])] += self.posecells[i, j, k] * pcw
    return pca_new

Example 10

def section_by_index(array, index, axis=0):
    """
    Take the slice of `array` indexed by entries of `index`
    along the specified `axis`.
    """
    # alternative `axisindex` implementation
    # that avoids the index arithmetic
    # uses `numpy` fancy indexing instead

    # possible index values for each dimension represented
    # as `numpy` arrays all having the shape of `index`
    indices = np.ix_(*[np.arange(dim) for dim in index.shape])
    # the slice is taken along `axis`
    # except for the array `index` itself, the other indices
    # do nothing except trigger `numpy` fancy indexing
    fancy_index = indices[:axis] + (index,) + indices[axis:]
    # result has the same shape as `index`
    return array[fancy_index]
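
A brief sketch of how the helper above behaves, on hypothetical data: `index` has the shape of `array` with the `axis` dimension removed, and each entry picks one element along `axis`:

import numpy as np

a = np.array([[10, 20, 30],
              [40, 50, 60]])
idx = np.array([1, 0, 1])           # one row choice per column
section_by_index(a, idx, axis=0)    # -> array([40, 20, 60])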

Example 11

def get_element_type_subset_indices(self):
    """
    It is currently required that the element of two matching atoms is the same.
    This constructs indices to e.g. the carbon-carbon submatrix.
    """
    # TODO: this is redundant if the elements do not have to match
    unique_elements = np.unique(self.reactants_elements)
    subset_indices = np.empty(unique_elements.size, dtype=object)
    for i, element in enumerate(unique_elements):
        rows = np.where(self.reactants_elements == element)[0]
        cols = np.where(self.products_elements == element)[0]
        subset_indices[i] = np.ix_(rows, cols)
    return subset_indices

Example 12

def test_regression_1(self):
    # Test empty inputs create outputs of indexing type, gh-5804
    # Test both lists and arrays
    for func in (range, np.arange):
        a, = np.ix_(func(0))
        assert_equal(a.dtype, np.intp)

Example 13

def test_shape_and_dtype(self):
    sizes = (4, 5, 3, 2)
    # Test both lists and arrays
    for func in (range, np.arange):
        arrays = np.ix_(*[func(sz) for sz in sizes])
        for k, (a, sz) in enumerate(zip(arrays, sizes)):
            assert_equal(a.shape[k], sz)
            assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
            assert_(np.issubdtype(a.dtype, int))

Example 14

def test_bool(self):
    bool_a = [True, False, True, True]
    int_a, = np.nonzero(bool_a)
    assert_equal(np.ix_(bool_a)[0], int_a)

Example 15

def test_1d_only(self):
    idx2d = [[1, 2, 3], [4, 5, 6]]
    assert_raises(ValueError, np.ix_, idx2d)

Example 16

def _gaus_condition(self, xi):
    if np.ma.count_masked(xi) == 0:
        return xi
    a = xi.mask
    b = ~xi.mask
    xb = xi[b].data
    Laa = self.prec[np.ix_(a, a)]
    Lab = self.prec[np.ix_(a, b)]
    xfill = np.empty_like(xi)
    xfill[b] = xb
    xfill[a] = self.mean[a] - solve(Laa, Lab.dot(xb - self.mean[b]))
    return xfill

Example 17

def gacPathCondEntropy(IminuszW, cluster_i, cluster_j):
    # Compute conditional complexity from the subpart of the weighted adjacency matrix
    # Inputs:
    #   - IminuszW: the matrix (I - z*P)
    #   - cluster_i: index vector of cluster i
    #   - cluster_j: index vector of cluster j
    # Output:
    #   - L_ij: the sum of conditional complexities of cluster i and j after merging.
    # by Wei Zhang (wzhang009 at gmail.com), June 8, 2011
    num_i = np.size(cluster_i)
    num_j = np.size(cluster_j)
    # detecting cross elements (this check costs much and is unnecessary)
    ijGroupIndex = np.append(cluster_i, cluster_j)
    y_ij = np.zeros((num_i + num_j, 2))  # [y_i, y_j]
    y_ij[:num_i, 0] = 1
    y_ij[num_i:, 1] = 1
    idx = np.ix_(ijGroupIndex, ijGroupIndex)
    L_ij = scipy.linalg.inv(IminuszW[idx]).dot(y_ij)
    L_ij = sum(L_ij[:num_i, 0]) / (num_i * num_i) + sum(L_ij[num_i:, 1]) / (num_j * num_j)
    return L_ij

Example 18

def reconstruct_original_mat(self, thresh, intracluster_weight=0):
    """
    Reconstruct a similarity matrix with the same size as the original one, from the reduced similarity matrix.
    :param thresh: a threshold parameter to prune the edges of the graph
    :param intracluster_weight: the weight to assign to each connection generated by the expansion of a cluster
    :return: the reconstructed graph
    """
    reconstructed_mat = np.zeros((self.N, self.N))
    r_nodes = self.classes > 0
    reconstructed_mat[np.ix_(r_nodes, r_nodes)] = intracluster_weight
    for r in range(2, self.k + 1):
        r_nodes = self.classes == r
        reconstructed_mat[np.ix_(r_nodes, r_nodes)] = intracluster_weight
        for s in range(1, r):
            if self.is_weighted:
                cl_pair = WeightedClassesPair(self.sim_mat, self.adj_mat, self.classes, r, s, self.epsilon)
            else:
                cl_pair = ClassesPair(self.adj_mat, self.classes, r, s, self.epsilon)
            s_nodes = self.classes == s
            if cl_pair.bip_density > thresh:
                reconstructed_mat[np.ix_(r_nodes, s_nodes)] = reconstructed_mat[np.ix_(s_nodes, r_nodes)] = cl_pair.bip_density
    np.fill_diagonal(reconstructed_mat, 0.0)
    return reconstructed_mat

Example 19

def __init__(self, adj_mat, classes, r, s, epsilon):
    self.r = r
    self.s = s
    self.index_map = np.where(classes == r)[0]
    self.index_map = np.vstack((self.index_map, np.where(classes == s)[0]))
    self.bip_adj_mat = adj_mat[np.ix_(self.index_map[0], self.index_map[1])]
    self.n = self.bip_adj_mat.shape[0]
    self.bip_avg_deg = self.bip_avg_degree()
    self.bip_density = self.compute_bip_density()
    self.epsilon = epsilon

Example 20

def __init__(self, sim_mat, adj_mat, classes, r, s, epsilon):
    self.r = r
    self.s = s
    self.index_map = np.where(classes == r)[0]
    self.index_map = np.vstack((self.index_map, np.where(classes == s)[0]))
    self.bip_sim_mat = sim_mat[np.ix_(self.index_map[0], self.index_map[1])]
    self.bip_adj_mat = adj_mat[np.ix_(self.index_map[0], self.index_map[1])]
    self.n = self.bip_sim_mat.shape[0]
    self.bip_avg_deg = self.bip_avg_degree()
    self.bip_density = self.compute_bip_density()
    self.epsilon = epsilon

Example 21

def bin_sizes(self):
    sizes1 = np.cos(self.get_bin_left_edges(0)) - np.cos(self.get_bin_right_edges(0))
    sizes2 = self.get_bin_widths(1)
    return reduce(np.multiply, np.ix_(sizes1, sizes2))

Example 22

def bin_sizes(self):
    sizes1 = (self.get_bin_right_edges(0) ** 3 - self.get_bin_left_edges(0) ** 3) / 3
    sizes2 = np.cos(self.get_bin_left_edges(1)) - np.cos(self.get_bin_right_edges(1))
    sizes3 = self.get_bin_widths(2)
    # Hopefully correct
    return reduce(np.multiply, np.ix_(sizes1, sizes2, sizes3))
    #return np.outer(sizes, sizes2, self.get_bin_widths(2)) # Correct

Example 23

def bin_sizes(self):
    sizes1 = 0.5 * (self.get_bin_right_edges(0) ** 2 - self.get_bin_left_edges(0) ** 2)
    sizes2 = self.get_bin_widths(1)
    sizes3 = self.get_bin_widths(2)
    return reduce(np.multiply, np.ix_(sizes1, sizes2, sizes3))
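
The three bin_sizes variants above share one idiom: reducing np.multiply over the open mesh from np.ix_ forms the outer product of any number of 1-D size vectors. A minimal sketch with made-up bin widths:

from functools import reduce
import numpy as np

sizes1 = np.array([1.0, 2.0])
sizes2 = np.array([3.0, 4.0, 5.0])
vol = reduce(np.multiply, np.ix_(sizes1, sizes2))
# vol[i, j] == sizes1[i] * sizes2[j]; for two inputs this matches np.outer(sizes1, sizes2)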

Example 24

def reduce_distmat(full_dist_mat,
                   gal_templateids,
                   probe_templateids,
                   reduce_type=ReduceType.MeanMin):
    # Get unique template indices and their positions, keeping the initial order
    #gal_tuids,gal_tuind=np.unique(gal_templateids,return_index=True)
    #probe_tuids,probe_tuind=np.unique(probe_templateids,return_index=True)
    gal_tuids, gal_tuind = np.unique(
        [str(x) for x in gal_templateids], return_index=True)
    probe_tuids, probe_tuind = np.unique(
        [str(x) for x in probe_templateids], return_index=True)
    red_dist_mat = np.zeros((len(gal_tuids), len(probe_tuids)))
    # Loop on gallery
    for g, gtupos in enumerate(gal_tuind):
        gutid = gal_templateids[gtupos]
        gt_pos = np.where(gal_templateids == gutid)[0]
        # Loop on probe
        for p, ptupos in enumerate(probe_tuind):
            putid = probe_templateids[ptupos]
            pt_pos = np.where(probe_templateids == putid)[0]
            # Get appropriate distance
            #print g,p
            dist_val = 0.0
            # TO BE FIXED
            if reduce_type == ReduceType.MeanMin:
                dist_val = np.mean(np.min(full_dist_mat[np.ix_(gt_pos, pt_pos)]))
            else:
                dist_val = np.amin(full_dist_mat[np.ix_(gt_pos, pt_pos)])
            red_dist_mat[g, p] = dist_val
    return red_dist_mat, gal_tuind, probe_tuind

Example 25

def _compute_log_likelihood(self, X):
    seq_len = X.shape[0]
    n_states = self.n_components
    n_dim = X.shape[1]
    p = np.zeros((seq_len, n_states))
    for i in range(seq_len):
        miss = np.isnan(X[i])
        p[i] = np.sum(miss * np.log(self.miss_probs_) + (1-miss) * np.log(1-self.miss_probs_), axis=1)
        if not np.all(miss):
            for state in range(n_states):
                mean = self.means_[state][miss==0]
                cov = self.covars_[state][np.ix_(miss==0, miss==0)]
                p[i][state] = p[i][state] + np.log(multivariate_normal.pdf(X[i][miss==0], mean=mean, cov=cov))
    return p

Example 26

def split_data(self, X, y, i):
    sub_dict = {}
    unique_val = np.unique(X[:, i])
    c = range(i) + range(i + 1, X.shape[1])
    for val in unique_val:
        indice = np.where(X[:, i] == val)[0]
        # print indice.shape
        sub_dict[val] = (X[np.ix_(indice, c)], y[indice])
    return sub_dict  # sub_data, sub_target

Example 27

def _extract_pairwise(self, X, y, n, is_train=True):
    if self.cache is not None and (n, True, is_train) in self.cache:
        return self.cache[n, True, is_train]
    if not hasattr(X, "shape"):
        raise ValueError("Precomputed kernels or affinity matrices have "
                         "to be passed as arrays or sparse matrices.")
    if X.shape[0] != X.shape[1]:
        raise ValueError("X should be a square kernel matrix")
    train, test = self.splits[n]
    result = X[np.ix_(train if is_train else test, train)]
    if self.cache is not None:
        self.cache[n, True, is_train] = result
    return result

Example 28

def compute_new_medoid(self, cluster, distances):
    mask = np.ones(distances.shape)
    mask[np.ix_(cluster, cluster)] = 0.
    cluster_distances = np.ma.masked_array(data=distances, mask=mask, fill_value=10e9)
    costs = cluster_distances.sum(axis=1)
    return costs.argmin(axis=0, fill_value=10e9)

Example 29

def get_global_stiffness(self, msz):
    pass
    #~ ni, nj = self.get_nodes()
    #~ self.keg = np.zeros((msz,msz))
    #~ idx = np.ix_([ni.label, nj.label],[ni.label, nj.label])
    #~ row = np.array([ni.label, ni.label, nj.label, nj.label])
    #~ col = np.array([ni.label, nj.label, ni.label, nj.label])
    #~ data = self.get_element_stiffness().reshape(-1)
    #~ print data, row, col
    #~ self.keg = csr_matrix((data, (row, col)), shape=(msz,msz)).toarray()
    #~ return self.keg

Example 30

def generate_permutation_matrix():
    P = np.zeros((2**15, 2**15), dtype=np.uint8)
    for a in range(2):
        for b in range(2):
            for c in range(2):
                for d in range(2):
                    for e in range(2):
                        for f in range(2):
                            for g in range(2):
                                for h in range(2):
                                    for i in range(2):
                                        for j in range(2):
                                            for k in range(2):
                                                for l in range(2):
                                                    for m in range(2):
                                                        for n in range(2):
                                                            for o in range(2):
                                                                A = np.array([[0,a,b,c,d,e],[a,0,f,g,h,i],[b,f,0,j,k,l],[c,g,j,0,m,n],[d,h,k,m,0,o],[e,i,l,n,o,0]])
                                                                perms = multiset_permutations(np.array(range(6), dtype=np.uint8))
                                                                Per = np.zeros((factorial(6), 6), dtype=np.uint8)
                                                                ind = 0
                                                                for permutation in perms:
                                                                    Per[ind,:] = permutation
                                                                    ind += 1
                                                                for p in range(factorial(6)):
                                                                    A_per = A[np.ix_(Per[p,:], Per[p,:])]
                                                                    P[graphlet_type(A), graphlet_type(A_per)] = 1
    return P
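
The key line above, A[np.ix_(Per[p,:], Per[p,:])], relabels the graph's vertices: passing the same permutation to both axes of np.ix_ reorders rows and columns together. A tiny sketch:

import numpy as np

A = np.arange(9).reshape(3, 3)
perm = np.array([2, 0, 1])
A_per = A[np.ix_(perm, perm)]    # A_per[i, j] == A[perm[i], perm[j]]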

Example 31

def MakeEquationSystem_mechLoading_sameFP(w_LoadedElts, EltCrack, EltLoaded, C):
    C_Crack = C[np.ix_(EltCrack, EltCrack)]
    A = np.hstack((C_Crack, -np.ones((EltCrack.size, 1), dtype=np.float64)))
    A = np.vstack((A, np.zeros((1, EltCrack.size + 1), dtype=np.float64)))
    A[-1, np.where(EltCrack == EltLoaded)[0]] = 1
    S = np.zeros((EltCrack.size + 1), dtype=np.float64)
    S[-1] = w_LoadedElts
    return A, S

#-----------------------------------------------------------------------------------------------------------------------

Example 32

def MakeEquationSystem_mechLoading_extendedFP(wTip, EltChannel, EltTip, C, EltLoaded, w_loaded):
    Ccc = C[np.ix_(EltChannel, EltChannel)]
    Cct = C[np.ix_(EltChannel, EltTip)]
    A = np.hstack((Ccc, -np.ones((EltChannel.size, 1), dtype=np.float64)))
    A = np.vstack((A, np.zeros((1, EltChannel.size + 1), dtype=np.float64)))
    A[-1, np.where(EltChannel == EltLoaded)[0]] = 1
    S = -np.dot(Cct, wTip)
    S = np.append(S, w_loaded)
    return A, S

#-----------------------------------------------------------------------------------------------------------------------

Example 33

def MakeEquationSystem_volumeControl_sameFP(w, EltCrack, C, dt, Q, ElemArea):
    C_Crack = C[np.ix_(EltCrack, EltCrack)]
    A = np.hstack((C_Crack, -np.ones((EltCrack.size, 1), dtype=np.float64)))
    A = np.vstack((A, np.ones((1, EltCrack.size + 1), dtype=np.float64)))
    A[-1, -1] = 0
    S = -np.dot(C_Crack, w[EltCrack])
    S = np.append(S, Q * dt / ElemArea)
    return A, S

#-----------------------------------------------------------------------------------------------------------------------

Example 34

def maybe_convert_ix(*args):
    """
    We likely want to take the cross-product.
    """
    ixify = True
    for arg in args:
        if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):
            ixify = False
    if ixify:
        return np.ix_(*args)
    else:
        return args

Example 35

def fit(self, X):
    """Sample a training set.

    Parameters
    ----------
    X: array-like
        training set to sample observations from.

    Returns
    ----------
    self: obj
        fitted instance with stored sample.
    """
    self.train_shape = X.shape
    sample_idx = {}
    for i in range(2):
        dim_size = min(X.shape[i], self.size)
        sample_idx[i] = permutation(X.shape[i])[:dim_size]
    sample = X[ix_(sample_idx[0], sample_idx[1])]
    self.sample_idx_ = sample_idx
    self.sample_ = sample
    return self

Example 36

def is_train(self, X):
    """Check if an array is the training set.

    Parameters
    ----------
    X: array-like
        training set to sample observations from.

    Returns
    ----------
    self: obj
        fitted instance with stored sample.
    """
    if not hasattr(self, "train_shape"):
        raise NotFittedError("This IdTrain instance is not fitted yet.")
    if not self._check_shape(X):
        return False
    idx = self.sample_idx_
    try:
        # Grab sample from `X`
        sample = X[ix_(idx[0], idx[1])]
        return array_equal(sample, self.sample_)
    except IndexError:
        # If index is out of bounds, X.shape < training_set.shape
        # -> X is not the training set
        return False

Example 37

def _M2_sparse(Xvar, mask_X, Yvar, mask_Y, weights=None):
    """ 2nd moment matrix exploiting zero input columns """
    C = np.zeros((len(mask_X), len(mask_Y)))
    C[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights)
    return C

Example 38

def cartesian_product(arrays):
    """ Returns Cartesian product of given arrays (x and y): cartesian_product([x,y]) """
    broadcastable = np.ix_(*arrays)
    broadcasted = np.broadcast_arrays(*broadcastable)
    rows, cols = reduce(np.multiply, broadcasted[0].shape), len(broadcasted)
    out = np.empty(rows * cols, dtype=broadcasted[0].dtype)
    start, end = 0, rows
    for a in broadcasted:
        out[start:end] = a.reshape(-1)
        start, end = end, end + rows
    # Return value(s)
    return out.reshape(cols, rows).T
