How to use floor in Python: numpy.floor() usage examples

This article introduces the floor() function from Python's numpy library, which rounds values down toward negative infinity. A series of examples shows the function in different settings, including the short-time Fourier transform of audio signals, filter design, image processing, dataset splitting, and stock dividend handling, illustrating its role in numerical computation and data processing.

The following are code examples showing how to use numpy.floor(). They are extracted from open source Python projects. You can vote up the examples you like or vote down the examples you don't like. You can also save this page to your account.
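
As a quick refresher before the examples: np.floor() rounds element-wise toward negative infinity and returns floating-point values, which is why the snippets below so often wrap it in int(). A minimal illustration:

import numpy as np

x = np.array([-1.7, -0.2, 0.5, 1.5, 2.0])
print(np.floor(x))         # [-2. -1.  0.  1.  2.]  rounded toward -inf, still floats
print(x.astype(int))       # [-1  0  0  1  2]       truncation toward zero differs for negatives
print(int(np.floor(2.9)))  # 2, cast to int when an array index or shape is needed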

Example 1

def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
    """ short time fourier transform of audio signal """
    win = window(frameSize)
    hopSize = int(frameSize - np.floor(overlapFac * frameSize))
    # zeros at beginning (thus center of 1st window should be for sample nr. 0)
    # samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
    samples = np.array(sig, dtype='float64')
    # cols for windowing (cast to int so it can be used as an array dimension)
    cols = int(np.ceil((len(samples) - frameSize) / float(hopSize)) + 1)
    # zeros at end (thus samples can be fully covered by frames)
    # samples = np.append(samples, np.zeros(frameSize))
    frames = stride_tricks.as_strided(
        samples,
        shape=(cols, frameSize),
        strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
    frames *= win
    return np.fft.rfft(frames)

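For instance, with the default overlapFac=0.75 and frameSize=1024 the hop between frames works out to 256 samples; the arithmetic below simply mirrors the hopSize line above:

import numpy as np

frameSize, overlapFac = 1024, 0.75
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
print(hopSize)  # 256, i.e. consecutive frames overlap by 75%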

Example 2

def fftfilt(b, x, *n):
    N_x = len(x)
    N_b = len(b)
    N = 2**np.arange(np.ceil(np.log2(N_b)), np.floor(np.log2(N_x)))
    cost = np.ceil(N_x / (N - N_b + 1)) * N * (np.log2(N) + 1)
    N_fft = int(N[np.argmin(cost)])
    N_fft = int(N_fft)
    # Compute the block length:
    L = int(N_fft - N_b + 1)
    # Compute the transform of the filter:
    H = np.fft.fft(b, N_fft)
    y = np.zeros(N_x, x.dtype)
    i = 0
    while i <= N_x:
        il = np.min([i + L, N_x])
        k = np.min([i + N_fft, N_x])
        yt = np.fft.ifft(np.fft.fft(x[i:il], N_fft) * H, N_fft)  # Overlap..
        y[i:k] = y[i:k] + yt[:k-i]                               # and add
        i += L
    return y
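
The candidate FFT lengths N are powers of two between the filter length and the signal length, and the cheapest one (by the cost formula above) is kept. For a 32-tap filter and 10000 samples the candidates are (note that np.arange excludes the upper endpoint):

import numpy as np

N_b, N_x = 32, 10000
N = 2 ** np.arange(np.ceil(np.log2(N_b)), np.floor(np.log2(N_x)))
print(N)  # [32. 64. 128. 256. 512. 1024. 2048. 4096.]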

Example 3

def __call__(self, batch):
    images, labels = zip(*batch)
    imgH = self.imgH
    imgW = self.imgW
    if self.keep_ratio:
        ratios = []
        for image in images:
            w, h = image.size
            ratios.append(w / float(h))
        ratios.sort()
        max_ratio = ratios[-1]
        imgW = int(np.floor(max_ratio * imgH))
        imgW = max(imgH * self.min_ratio, imgW)  # assure imgH >= imgW
    transform = resizeNormalize((imgW, imgH))
    images = [transform(image) for image in images]
    images = torch.cat([t.unsqueeze(0) for t in images], 0)
    return images, labels
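
Only the width is data dependent here: it is floored from the widest aspect ratio in the batch. With imgH=32 and a widest ratio of 2.8 (made-up numbers for illustration):

import numpy as np

imgH, min_ratio, max_ratio = 32, 1, 2.8
imgW = int(np.floor(max_ratio * imgH))  # floor(89.6) -> 89
imgW = max(imgH * min_ratio, imgW)      # never narrower than imgH * min_ratio
print(imgW)  # 89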

Example 4

def split_episodes(self, episode_paths, n_train, n_valid, n_test, seed=None, use_all=True):
    """Split episodes between training, validation and test sets.

    seed: random seed (have split performed consistently every time)"""
    if seed is not None:
        random_state = np.random.get_state()
        np.random.seed(seed)
        np.random.shuffle(episode_paths)
        np.random.set_state(random_state)
    else:
        np.random.shuffle(episode_paths)
    if use_all:
        multiplier = float(len(episode_paths)) / float(n_train + n_valid + n_test)
        n_train = int(math.floor(multiplier * n_train))
        n_valid = int(math.floor(multiplier * n_valid))
        n_test = int(math.floor(multiplier * n_test))
        assert n_train + n_valid + n_test <= len(episode_paths)
    return (episode_paths[:n_train],
            episode_paths[n_train:n_train + n_valid],
            episode_paths[n_train + n_valid:n_train + n_valid + n_test])
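
The math.floor calls scale the requested split sizes up to the number of episodes actually available; e.g. 100 episode files with a requested 6/2/2 split become 60/20/20:

import math

episode_count, n_train, n_valid, n_test = 100, 6, 2, 2
multiplier = float(episode_count) / float(n_train + n_valid + n_test)
print(int(math.floor(multiplier * n_train)),
      int(math.floor(multiplier * n_valid)),
      int(math.floor(multiplier * n_test)))  # 60 20 20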

Example 5

def split_episodes(self, episode_paths, n_train, n_valid, n_test, seed=None, use_all=True):
    """Split episodes between training, validation and test sets.

    seed: random seed (have split performed consistently every time)"""
    if seed is not None:
        random_state = np.random.get_state()
        np.random.seed(seed)
        np.random.shuffle(episode_paths)
        np.random.set_state(random_state)
    else:
        np.random.shuffle(episode_paths)
    if use_all:
        multiplier = float(len(episode_paths)) / float(n_train + n_valid + n_test)
        n_train = int(math.floor(multiplier * n_train))
        n_valid = int(math.floor(multiplier * n_valid))
        n_test = int(math.floor(multiplier * n_test))
        assert n_train + n_valid + n_test <= len(episode_paths)
    return (episode_paths[:n_train],
            episode_paths[n_train:n_train + n_valid],
            episode_paths[n_train + n_valid:n_train + n_valid + n_test])

Example 6

def split_episodes(self, episode_paths, n_train, n_valid, n_test, seed=None, use_all=True):
    """Split episodes between training, validation and test sets.

    seed: random seed (have split performed consistently every time)"""
    if seed is not None:
        random_state = np.random.get_state()
        np.random.seed(seed)
        np.random.shuffle(episode_paths)
        np.random.set_state(random_state)
    else:
        np.random.shuffle(episode_paths)
    if use_all:
        multiplier = float(len(episode_paths)) / float(n_train + n_valid + n_test)
        n_train = int(math.floor(multiplier * n_train))
        n_valid = int(math.floor(multiplier * n_valid))
        n_test = int(math.floor(multiplier * n_test))
        assert n_train + n_valid + n_test <= len(episode_paths)
    return (episode_paths[:n_train],
            episode_paths[n_train:n_train + n_valid],
            episode_paths[n_train + n_valid:n_train + n_valid + n_test])

Example 7

def split_episodes(self, episode_paths, n_train, n_valid, n_test, seed=None, use_all=True):
    """Split episodes between training, validation and test sets.

    seed: random seed (have split performed consistently every time)"""
    if seed is not None:
        random_state = np.random.get_state()
        np.random.seed(seed)
        np.random.shuffle(episode_paths)
        np.random.set_state(random_state)
    else:
        np.random.shuffle(episode_paths)
    if use_all:
        multiplier = float(len(episode_paths)) / float(n_train + n_valid + n_test)
        n_train = int(math.floor(multiplier * n_train))
        n_valid = int(math.floor(multiplier * n_valid))
        n_test = int(math.floor(multiplier * n_test))
        assert n_train + n_valid + n_test <= len(episode_paths)
    return (episode_paths[:n_train],
            episode_paths[n_train:n_train + n_valid],
            episode_paths[n_train + n_valid:n_train + n_valid + n_test])

Example 8

def earn_dividend(self, dividend):
    """
    Register the number of shares we held at this dividend's ex date so
    that we can pay out the correct amount on the dividend's pay date.
    """
    assert dividend['sid'] == self.sid
    out = {'id': dividend['id']}
    # stock dividend
    if dividend['payment_sid']:
        out['payment_sid'] = dividend['payment_sid']
        out['share_count'] = np.floor(self.amount
                                      * float(dividend['ratio']))
    # cash dividend
    if dividend['net_amount']:
        out['cash_amount'] = self.amount * dividend['net_amount']
    elif dividend['gross_amount']:
        out['cash_amount'] = self.amount * dividend['gross_amount']
    payment_owed = zp.dividend_payment(out)
    return payment_owed
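
For the stock-dividend branch, np.floor drops fractional shares; e.g. holding 103 shares with a dividend ratio of 0.15 pays out 15 whole shares (illustrative numbers):

import numpy as np

amount, ratio = 103, 0.15
print(np.floor(amount * ratio))  # 15.0, fractional shares are dropped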

Example 9

def handle_data(self, data):
    if self.target_shares == 0:
        assert 0 not in self.portfolio.positions
        self.order(self.sid(0), 10)
        self.target_shares = 10
        return
    else:
        assert self.portfolio.positions[0]['amount'] == \
            self.target_shares, "Orders not filled immediately."
        assert self.portfolio.positions[0]['last_sale_price'] == \
            data[0].price, "Orders not filled at current price."

    self.order_percent(self.sid(0), .001)

    if isinstance(self.sid(0), Equity):
        self.target_shares += np.floor(
            (.001 * self.portfolio.portfolio_value) / data[0].price
        )
    if isinstance(self.sid(0), Future):
        self.target_shares += np.floor(
            (.001 * self.portfolio.portfolio_value) /
            (data[0].price * self.sid(0).multiplier)
        )

Example 10

def int_bilin_MT(f, x, y):
    # assume x, y are in pixel
    fint = np.zeros(len(x))
    for i in range(len(x)):
        t = y[i] - np.floor(y[i])
        u = x[i] - np.floor(x[i])
        # np.int was removed in recent NumPy releases; the built-in int works the same here
        y0 = f[int(np.floor(y[i])), int(np.floor(x[i]))]
        y1 = f[int(np.floor(y[i])) + 1, int(np.floor(x[i]))]
        y2 = f[int(np.floor(y[i])) + 1, int(np.floor(x[i])) + 1]
        y3 = f[int(np.floor(y[i])), int(np.floor(x[i])) + 1]
        fint[i] = t * u * (y0 - y1 + y2 - y3)
        fint[i] += t * (y1 - y0)
        fint[i] += u * (y3 - y0)
        fint[i] += y0
    return fint
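
A quick sanity check, assuming the function above is in scope: at the centre of a 2x2 grid, bilinear interpolation returns the mean of the four corner values:

import numpy as np

f = np.array([[0.0, 1.0],
              [2.0, 3.0]])
print(int_bilin_MT(f, np.array([0.5]), np.array([0.5])))  # [1.5]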

Example 11

def expand_to_chunk_size(self, chunk_size, offset=Vec(0,0,0, dtype=int)):
    """
    Align a potentially non-axis aligned bbox to the grid by growing it
    to the nearest grid lines.

    Required:
      chunk_size: arraylike (x,y,z), the size of chunks in the
        dataset e.g. (64,64,64)
    Optional:
      offset: arraylike (x,y,z), the starting coordinate of the dataset
    """
    chunk_size = np.array(chunk_size, dtype=np.float32)
    result = self.clone()
    result = result - offset
    result.minpt = np.floor(result.minpt / chunk_size) * chunk_size
    result.maxpt = np.ceil(result.maxpt / chunk_size) * chunk_size
    return result + offset

Example 12

def shrink_to_chunk_size(self, chunk_size, offset=Vec(0,0,0, dtype=int)):
    """
    Align a potentially non-axis aligned bbox to the grid by shrinking it
    to the nearest grid lines.

    Required:
      chunk_size: arraylike (x,y,z), the size of chunks in the
        dataset e.g. (64,64,64)
    Optional:
      offset: arraylike (x,y,z), the starting coordinate of the dataset
    """
    chunk_size = np.array(chunk_size, dtype=np.float32)
    result = self.clone()
    result = result - offset
    result.minpt = np.ceil(result.minpt / chunk_size) * chunk_size
    result.maxpt = np.floor(result.maxpt / chunk_size) * chunk_size
    return result + offset
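
Both methods reduce to the same floor/ceil arithmetic; for a 64-voxel grid, a lower corner at 70 snaps down to 64 when growing, and the upper corner is pushed up to the next grid line. Just the expand case, on plain arrays:

import numpy as np

chunk_size = np.array([64.0, 64.0, 64.0])
minpt = np.array([70.0, 10.0, 5.0])
maxpt = np.array([130.0, 100.0, 60.0])
print(np.floor(minpt / chunk_size) * chunk_size)  # [64.  0.  0.]
print(np.ceil(maxpt / chunk_size) * chunk_size)   # [192. 128.  64.]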

Example 13

def resize_image(image, target_shape, pad_value=0):
    assert isinstance(target_shape, list) or isinstance(target_shape, tuple)
    add_shape, subs_shape = [], []
    image_shape = image.shape
    shape_difference = np.asarray(target_shape, dtype=int) - np.asarray(image_shape, dtype=int)
    for diff in shape_difference:
        if diff < 0:
            subs_shape.append(np.s_[int(np.abs(np.ceil(diff/2))):int(np.floor(diff/2))])
            add_shape.append((0, 0))
        else:
            subs_shape.append(np.s_[:])
            add_shape.append((int(np.ceil(1.0*diff/2)), int(np.floor(1.0*diff/2))))
    output = np.pad(image, tuple(add_shape), 'constant', constant_values=(pad_value, pad_value))
    output = output[subs_shape]
    return output

Example 14

def get_mask_boundaries(self, image_shape, mask_shape, ROI_mask):
    half_segment_dimensions = np.zeros((len(image_shape), 2), dtype='int32')
    for index, dim in enumerate(image_shape):
        if dim % 2 == 0:
            half_segment_dimensions[index, :] = [dim / 2 - 1, dim / 2]
        else:
            half_segment_dimensions[index, :] = [np.floor(dim / 2)] * 2
    mask_boundaries = np.zeros(mask_shape, dtype='int32')
    mask_boundaries[half_segment_dimensions[0][0]:-half_segment_dimensions[0][1],
                    half_segment_dimensions[1][0]:-half_segment_dimensions[1][1],
                    half_segment_dimensions[2][0]:-half_segment_dimensions[2][1]] = 1
    if ROI_mask is None:
        return mask_boundaries
    else:
        return mask_boundaries * ROI_mask

Example 15

def logTickValues(self, minVal, maxVal, size, stdTicks):
    ## start with the tick spacing given by tickValues().
    ## Any level whose spacing is < 1 needs to be converted to log scale
    ticks = []
    for (spacing, t) in stdTicks:
        if spacing >= 1.0:
            ticks.append((spacing, t))
    if len(ticks) < 3:
        v1 = int(np.floor(minVal))
        v2 = int(np.ceil(maxVal))
        #major = list(range(v1+1, v2))
        minor = []
        for v in range(v1, v2):
            minor.extend(v + np.log10(np.arange(1, 10)))
        minor = [x for x in minor if x > minVal and x < maxVal]
        ticks.append((None, minor))
    return ticks

Example 16

def logTickValues(self, minVal, maxVal, size, stdTicks):
    ## start with the tick spacing given by tickValues().
    ## Any level whose spacing is < 1 needs to be converted to log scale
    ticks = []
    for (spacing, t) in stdTicks:
        if spacing >= 1.0:
            ticks.append((spacing, t))
    if len(ticks) < 3:
        v1 = int(np.floor(minVal))
        v2 = int(np.ceil(maxVal))
        #major = list(range(v1+1, v2))
        minor = []
        for v in range(v1, v2):
            minor.extend(v + np.log10(np.arange(1, 10)))
        minor = [x for x in minor if x > minVal and x < maxVal]
        ticks.append((None, minor))
    return ticks

Example 17

def get_batch_idx(self, idx, **kwargs):
    if self.mode == 'train':
        new_idx = []
        # self.log.info('Label IDX: {}'.format(idx))
        if self.stats_provider is None:
            label_ids = [ii % self._real_size for ii in idx]
        else:
            # print idx, self.stats_provider.get_size()
            stats_batch = self.stats_provider.get_batch_idx(idx)
            label_ids = []
            for ii in xrange(len(idx)):
                label_ids.append(np.argmax(stats_batch['y_gt'][ii]))
        for ii in label_ids:
            data_group = self.data_provider.label_idx[ii]
            num_ids = len(data_group)
            kk = int(np.floor(self.rnd.uniform(0, num_ids)))
            new_idx.append(data_group[kk])
    else:
        new_idx = idx
    return self.data_provider.get_batch_idx(new_idx)

Example 18

def transform(self, images):
    if self._aug_flag:
        transformed_images = \
            np.zeros([images.shape[0], self._imsize, self._imsize, 3])
        ori_size = images.shape[1]
        for i in range(images.shape[0]):
            # cast the random crop offsets to int so they can be used as slice bounds
            h1 = int(np.floor((ori_size - self._imsize) * np.random.random()))
            w1 = int(np.floor((ori_size - self._imsize) * np.random.random()))
            cropped_image = \
                images[i][w1: w1 + self._imsize, h1: h1 + self._imsize, :]
            if random.random() > 0.5:
                transformed_images[i] = np.fliplr(cropped_image)
            else:
                transformed_images[i] = cropped_image
        return transformed_images
    else:
        return images

Example 19

def gaussian_kernel(kernel_shape, sigma=None):
    """
    Get 2D Gaussian kernel
    :param kernel_shape: kernel size
    :param sigma: sigma of Gaussian distribution
    :return: 2D Gaussian kernel
    """
    kern = numpy.zeros((kernel_shape, kernel_shape), dtype='float32')
    # get sigma from kernel size
    if sigma is None:
        sigma = 0.3*((kernel_shape-1.)*0.5 - 1.) + 0.8

    def gauss(x, y, s):
        Z = 2. * numpy.pi * s ** 2.
        return 1. / Z * numpy.exp(-(x ** 2. + y ** 2.) / (2. * s ** 2.))

    mid = numpy.floor(kernel_shape / 2.)
    for i in xrange(0, kernel_shape):
        for j in xrange(0, kernel_shape):
            kern[i, j] = gauss(i - mid, j - mid, sigma)
    return kern / kern.sum()

Example 20

def _hpd_interval(self, x, width):
    """
    Code adapted from pymc3.stats.calc_min_interval:
    https://github.com/pymc-devs/pymc3/blob/master/pymc3/stats.py
    """
    x = np.sort(x)
    n = len(x)
    interval_idx_inc = int(np.floor(width * n))
    n_intervals = n - interval_idx_inc
    interval_width = x[interval_idx_inc:] - x[:n_intervals]
    if len(interval_width) == 0:
        raise ValueError('Too few elements for interval calculation')
    min_idx = np.argmin(interval_width)
    hdi_min = x[min_idx]
    hdi_max = x[min_idx + interval_idx_inc]
    index = ['hpd{}_{}'.format(width, x) for x in ['lower', 'upper']]
    return pd.Series([hdi_min, hdi_max], index=index)
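
With 1000 posterior samples and width=0.95, exactly floor(0.95 * 1000) = 950 samples must fall inside the interval, leaving 50 candidate intervals to scan for the narrowest one:

import numpy as np

n, width = 1000, 0.95
interval_idx_inc = int(np.floor(width * n))
print(interval_idx_inc, n - interval_idx_inc)  # 950 50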

Example 21

def SLdshear(inputArray, k, axis):
    """
    Computes the discretized shearing operator for a given inputArray, shear
    number k and axis.
    This version is adapted such that the MATLAB indexing can be used here in the
    Python version.
    """
    axis = axis - 1
    if k == 0:
        return inputArray
    rows = np.asarray(inputArray.shape)[0]
    cols = np.asarray(inputArray.shape)[1]
    shearedArray = np.zeros((rows, cols), dtype=inputArray.dtype)
    if axis == 0:
        for col in range(cols):
            shearedArray[:, col] = np.roll(inputArray[:, col], int(k * np.floor(cols/2-col)))
    else:
        for row in range(rows):
            shearedArray[row, :] = np.roll(inputArray[row, :], int(k * np.floor(rows/2-row)))
    return shearedArray
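
The shift applied to each column (for axis 0) is k * floor(cols/2 - col); e.g. for a 5-column array and k=1 the per-column shifts are:

import numpy as np

cols, k = 5, 1
print([int(k * np.floor(cols / 2 - col)) for col in range(cols)])  # [2, 1, 0, -1, -2]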

Example 22

def value_to_bin_index(val, **kwargs):
    """Convert value to bin index

    Convert a numeric or timestamp column to an integer bin index.

    :param bin_width: bin_width value needed to convert column to an integer bin index
    :param bin_offset: bin_offset value needed to convert column to an integer bin index
    """
    try:
        # NOTE this notation also works for timestamps
        bin_width = kwargs.get('bin_width', 1)
        bin_offset = kwargs.get('bin_offset', 0)
        bin_index = int(np.floor((val - bin_offset) / bin_width))
        return bin_index
    except BaseException:
        pass
    return val
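
Assuming the function above and numpy are in scope, a value of 7.3 with bin_width=2 and bin_offset=1 falls in bin 3, and floor keeps small negatives in the bin below zero:

print(value_to_bin_index(7.3, bin_width=2, bin_offset=1))  # floor(6.3 / 2) -> 3
print(value_to_bin_index(-0.1, bin_width=0.5))             # floor(-0.2)    -> -1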

Example 23

def value_to_bin_center(val, **kwargs):
    """Convert value to bin center

    Convert a numeric or timestamp column to a common bin center value.

    :param bin_width: bin_width value needed to convert column to a common bin center value
    :param bin_offset: bin_offset value needed to convert column to a common bin center value
    """
    try:
        # NOTE this notation also works for timestamps, and does not change the
        # unit
        bin_width = kwargs.get('bin_width', 1)
        bin_offset = kwargs.get('bin_offset', 0)
        bin_index = int(np.floor((val - bin_offset) / bin_width))
        obj_type = type(bin_width)
        return bin_offset + obj_type((bin_index + 0.5) * bin_width)
    except BaseException:
        pass
    return val
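
The companion of the previous function: the same value maps back to the centre of its bin, 8 for the bin [7, 9) in this case (again assuming the function above is in scope):

print(value_to_bin_center(7.3, bin_width=2, bin_offset=1))  # 8, the centre of bin [7, 9)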

Example 24

def save_fft(fil, audio_in):
    samples = len(audio_in)
    fft_size = 2**int(floor(log(samples)/log(2.0)))
    freq = fft(audio_in[0:fft_size])
    s_data = numpy.zeros(fft_size/2)
    x_data = numpy.zeros(fft_size/2)
    peak = 0
    for j in xrange(fft_size/2):
        if (abs(freq[j]) > peak):
            peak = abs(freq[j])
    for j in xrange(fft_size/2):
        x_data[j] = log(2.0*(j+1.0)/fft_size)
        if (x_data[j] < -10):
            x_data[j] = -10
        s_data[j] = 10.0*log(abs(freq[j])/peak)/log(10.0)
    plt.ylim([-50, 0])
    plt.plot(x_data, s_data)
    plt.title('fft log power')
    plt.grid()
    fields = fil.split('.')
    plt.savefig(fields[0]+'_fft.png', bbox_inches="tight")
    plt.clf()
    plt.close()
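
The floor/log combination picks the largest power of two that fits into the recording, e.g. for one second of 44.1 kHz audio (np.log2 used here in place of the log ratio above):

import numpy as np

samples = 44100
fft_size = 2 ** int(np.floor(np.log2(samples)))
print(fft_size)  # 32768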

Example 25

def _gene_embed_space(self, vec):
    shape = vec.shape
    vec = vec.flatten()
    combo_neg_idx = np.array([1 if vec[i] < 0 else 0 for i in range(len(vec))])
    vec_pos = np.abs(vec)
    int_part = np.floor(vec_pos)
    frac_part = np.round(vec_pos - int_part, 2)
    bi_int_part = []  # 16-bit binary representation of each integer part; the signature bit is read out below
    for i in range(len(int_part)):
        bi = list(bin(int(int_part[i]))[2:])
        bie = [0] * (16 - len(bi))
        bie.extend(bi)
        bi_int_part.append(np.array(bie, dtype=np.uint16))
    bi_int_part = np.array(bi_int_part)
    sig = []
    for i in range(len(bi_int_part)):
        sig.append(bi_int_part[i][10])
    sig = np.array(sig).reshape(shape)
    return np.array(bi_int_part), frac_part.reshape(shape), combo_neg_idx.reshape(shape), sig

Example 26

def _gene_embed_space(self, vec):
    shape = vec.shape
    vec = vec.flatten()
    combo_neg_idx = np.array([1 if vec[i] < 0 else 0 for i in range(len(vec))])
    vec_pos = np.abs(vec)
    int_part = np.floor(vec_pos)
    frac_part = np.round(vec_pos - int_part, 2)
    bi_int_part = []  # 16-bit binary representation of each integer part; the signature bit is read out below
    for i in range(len(int_part)):
        bi = list(bin(int(int_part[i]))[2:])
        bie = [0] * (16 - len(bi))
        bie.extend(bi)
        bi_int_part.append(np.array(bie, dtype=np.uint16))
    bi_int_part = np.array(bi_int_part)
    sig = []
    for i in range(len(bi_int_part)):
        sig.append(bi_int_part[i][10])
    sig = np.array(sig).reshape(shape)
    return np.array(bi_int_part), frac_part.reshape(shape), combo_neg_idx.reshape(shape), sig

Example 27

def M(self):
    """Returns the :math:`M` matrix of integers that determine points at which the
    functions are sampled in the unit cell.

    Examples:
        For `S = [2, 2, 1]`, the returned matrix is:

        .. code-block:: python

            np.ndarray([[0,0,0],
                        [1,0,0],
                        [0,1,0],
                        [1,1,0]], dtype=int)
    """
    if self._M is None:
        ms = np.arange(np.prod(self.S, dtype=int))
        m1 = np.fmod(ms, self.S[0])
        m2 = np.fmod(np.floor(ms/self.S[0]), self.S[1])
        m3 = np.fmod(np.floor(ms/(self.S[0]*self.S[1])), self.S[2])
        # Make sure we explicitly use an integer array; it's faster.
        self._M = np.asarray(np.vstack((m1, m2, m3)).T, dtype=int)
    return self._M
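
Reproducing the docstring example with plain numpy: for S = [2, 2, 1] the fmod/floor pattern enumerates every grid point exactly once:

import numpy as np

S = [2, 2, 1]
ms = np.arange(np.prod(S))
m1 = np.fmod(ms, S[0])
m2 = np.fmod(np.floor(ms / S[0]), S[1])
m3 = np.fmod(np.floor(ms / (S[0] * S[1])), S[2])
print(np.vstack((m1, m2, m3)).T.astype(int))
# [[0 0 0]
#  [1 0 0]
#  [0 1 0]
#  [1 1 0]]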

Example 28

def _latvec_plot(self, R=True, withpts=False, legend=False):
    """Plots the lattice vectors (for real or reciprocal space).
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    vecs = self.R if R else self.K
    for i in range(3):
        steps = np.linspace(0, 1, np.floor(10*np.linalg.norm(vecs[:,i])))
        Ri = vecs[:,i]
        Ri.shape = (1, 3)
        steps.shape = (len(steps), 1)
        line = np.dot(steps, Ri)
        ax.plot(line[:,0], line[:,1], line[:,2], label="R{0:d}".format(i+1))
    if withpts:
        pts = self.r if R else self.G
        ax.scatter(pts[:,0], pts[:,1], pts[:,2], color='k')
    if legend:
        ax.legend()
    return (fig, ax)

Example 29

def cumultativesumstest(binin):
    '''The focus of this test is the maximal excursion (from zero) of the random walk defined by the cumulative sum of adjusted (-1, +1) digits in the sequence. The purpose of the test is to determine whether the cumulative sum of the partial sequences occurring in the tested sequence is too large or too small relative to the expected behavior of that cumulative sum for random sequences. This cumulative sum may be considered as a random walk. For a random sequence, the random walk should be near zero. For non-random sequences, the excursions of this random walk away from zero will be too large.'''
    n = len(binin)
    ss = [int(el) for el in binin]
    sc = map(sumi, ss)
    cs = np.cumsum(sc)
    z = max(abs(cs))
    ra = 0
    start = int(np.floor(0.25 * np.floor(-n / z) + 1))
    stop = int(np.floor(0.25 * np.floor(n / z) - 1))
    pv1 = []
    for k in xrange(start, stop + 1):
        pv1.append(sst.norm.cdf((4 * k + 1) * z / np.sqrt(n)) - sst.norm.cdf((4 * k - 1) * z / np.sqrt(n)))
    start = int(np.floor(0.25 * np.floor(-n / z - 3)))
    stop = int(np.floor(0.25 * np.floor(n / z) - 1))
    pv2 = []
    for k in xrange(start, stop + 1):
        pv2.append(sst.norm.cdf((4 * k + 3) * z / np.sqrt(n)) - sst.norm.cdf((4 * k + 1) * z / np.sqrt(n)))
    pval = 1
    pval -= reduce(su, pv1)
    pval += reduce(su, pv2)
    return pval

Example 30

def rel_crop(im, rel_cx, rel_cy, crop_size):
    map_size = im.shape[1]
    r = crop_size / 2
    abs_cx = rel_cx * map_size
    abs_cy = rel_cy * map_size
    na = np.floor([abs_cy-r, abs_cy+r, abs_cx-r, abs_cx+r]).astype(np.int32)
    a = np.clip(na, 0, map_size)
    px0 = a[2] - na[2]
    px1 = na[3] - a[3]
    py0 = a[0] - na[0]
    py1 = na[1] - a[1]
    crop = im[a[0]:a[1], a[2]:a[3]]
    crop = np.pad(crop, ((py0, py1), (px0, px1), (0, 0)),
                  mode='reflect')
    assert crop.shape == (crop_size, crop_size, im.shape[2])
    return crop

Example 31

def _downsample_mask(X, pct):
    """ Create a boolean mask indicating which subset of X should be
        evaluated.
    """
    if pct < 1.0:
        Mask = np.zeros(X.shape, dtype=np.bool)
        m = X.shape[-2]
        n = X.shape[-1]
        nToEval = np.round(pct*m*n).astype(np.int32)
        idx = sobol(2, nToEval, 0)
        idx[0] = np.floor(m*idx[0])
        idx[1] = np.floor(n*idx[1])
        idx = idx.astype(np.int32)
        Mask[:, :, idx[0], idx[1]] = True
    else:
        Mask = np.ones(X.shape, dtype=np.bool)
    return Mask

Example 32

def update_output(self, x):
    N, C, H, W = x.shape
    pool_height, pool_width = self.kW, self.kH
    stride = self.dW
    assert (H - pool_height) % stride == 0 or H == pool_height, 'Invalid height'
    assert (W - pool_width) % stride == 0 or W == pool_width, 'Invalid width'
    out_height = int(np.floor((H - pool_height) / stride + 1))
    out_width = int(np.floor((W - pool_width) / stride + 1))
    x_split = x.reshape(N * C, 1, H, W)
    x_cols = im2col_cython(
        x_split, pool_height, pool_width, padding=0, stride=stride)
    x_cols_avg = np.mean(x_cols, axis=0)
    out = x_cols_avg.reshape(
        out_height, out_width, N, C).transpose(2, 3, 0, 1)
    self.x_shape = x.shape
    self.x_cols = x_cols
    self.output = out
    return self.output
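
The floor in the output-size formula is the usual pooling arithmetic; e.g. a 32-pixel side pooled with a 2x2 window at stride 2 gives 16 output rows:

import numpy as np

H, pool_height, stride = 32, 2, 2
print(int(np.floor((H - pool_height) / stride + 1)))  # 16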

Example 33

def extract_top_plane_nodes(nodefile, top_face):
    """
    :param nodefile:
    :param top_face:
    :return: planeNodeIDs
    """
    import numpy as np
    import fem_mesh

    top_face = np.array(top_face)
    nodeIDcoords = fem_mesh.load_nodeIDs_coords(nodefile)
    [snic, axes] = fem_mesh.SortNodeIDs(nodeIDcoords)
    # extract spatially-sorted node IDs on a the top z plane
    axis = int(np.floor(np.divide(top_face.nonzero(), 2)))
    if np.mod(top_face.nonzero(), 2) == 1:
        plane = (axis, axes[axis].max())
    else:
        plane = (axis, axes[axis].min())
    planeNodeIDs = fem_mesh.extractPlane(snic, axes, plane)
    return planeNodeIDs

Example 34

def timer(s, v='', nloop=500, nrep=3):
    units = ["s", "ms", "µs", "ns"]
    scaling = [1, 1e3, 1e6, 1e9]
    print("%s : %-50s : " % (v, s), end=' ')
    varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz']
    setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames)
    Timer = timeit.Timer(stmt=s, setup=setup)
    best = min(Timer.repeat(nrep, nloop)) / nloop
    if best > 0.0:
        order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3)
    else:
        order = 3
    print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep,
                                                      3,
                                                      best * scaling[order],
                                                      units[order]))

Example 35

def dec_round(num, dprec=4, rnd='down', rto_zero=False):
    """
    Round up/down numeric ``num`` at specified decimal ``dprec``.

    Parameters
    ----------
    num: float
    dprec: int
        Decimal position for truncation.
    rnd: str (default: 'down')
        Set as 'up' or 'down' to return a rounded-up or rounded-down value.
    rto_zero: bool (default: False)
        Use a *round-towards-zero* method, e.g., ``floor(-3.5) == -3``.

    Returns
    ----------
    float (default: rounded-up)
    """
    dprec = 10**dprec
    if rnd == 'up' or (rnd == 'down' and rto_zero and num < 0.):
        return np.ceil(num*dprec)/dprec
    elif rnd == 'down' or (rnd == 'up' and rto_zero and num < 0.):
        return np.floor(num*dprec)/dprec
    return np.round(num, dprec)
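
A few calls, assuming dec_round and numpy (as np) are both in scope:

print(dec_round(3.14159, dprec=3, rnd='down'))                   # 3.141
print(dec_round(3.14159, dprec=3, rnd='up'))                     # 3.142
print(dec_round(-3.14159, dprec=3, rnd='down', rto_zero=True))   # -3.141, rounds toward zero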

Example 36

def __call__(self, batch):
    images, labels = zip(*batch)
    imgH = self.imgH
    imgW = self.imgW
    if self.keep_ratio:
        ratios = []
        for image in images:
            w, h = image.size
            ratios.append(w / float(h))
        ratios.sort()
        max_ratio = ratios[-1]
        imgW = int(np.floor(max_ratio * imgH))
        imgW = max(imgH * self.min_ratio, imgW)  # assure imgH >= imgW
    transform = resizeNormalize((imgW, imgH))
    images = [transform(image) for image in images]
    images = torch.cat([t.unsqueeze(0) for t in images], 0)
    return images, labels

Example 37

def shuffle_to_training_data(self, expert_data, on_policy_data, expert_fail_data):
    data = np.vstack([expert_data['data'], on_policy_data['data'], expert_fail_data['data']])
    classes = np.vstack([expert_data['classes'], on_policy_data['classes'], expert_fail_data['classes']])
    domains = np.vstack([expert_data['domains'], on_policy_data['domains'], expert_fail_data['domains']])
    sample_range = data.shape[0]*data.shape[1]
    all_idxs = np.random.permutation(sample_range)
    t_steps = data.shape[1]
    data_matrix = np.zeros(shape=(sample_range, self.im_height, self.im_width, self.im_channels))
    data_matrix_two = np.zeros(shape=(sample_range, self.im_height, self.im_width, self.im_channels))
    class_matrix = np.zeros(shape=(sample_range, 2))
    dom_matrix = np.zeros(shape=(sample_range, 2))
    for one_idx, iter_step in zip(all_idxs, range(0, sample_range)):
        # cast the trajectory index to int so it can be used for array indexing
        traj_key = int(np.floor(one_idx/t_steps))
        time_key = one_idx % t_steps
        time_key_plus_one = min(time_key + 3, t_steps-1)
        data_matrix[iter_step, :, :, :] = data[traj_key, time_key, :, :, :]
        data_matrix_two[iter_step, :, :, :] = data[traj_key, time_key_plus_one, :, :, :]
        class_matrix[iter_step, :] = classes[traj_key, time_key, :]
        dom_matrix[iter_step, :] = domains[traj_key, time_key, :]
    return data_matrix, data_matrix_two, dom_matrix, class_matrix

Example 38

def transform_to_yolo_labels(self, labels):
    """
    Transform voc_label_parser' result to yolo label.
    :param labels: [is_obj, x, y, w, h, class_probs..], ...
    :return: yolo label
    """
    yolo_label = np.zeros([self.side, self.side, (1 + self.coords) + self.classes]).astype(np.float32)
    shuffle(labels)
    for label in labels:
        yolo_box = self.convert_to_yolo_box(self.ori_im_shape[::-1], list(label[2:]))
        assert np.max(yolo_box) < 1
        [loc_y, loc_x] = [int(np.floor(yolo_box[1] * self.side)), int(np.floor(yolo_box[0] * self.side))]
        yolo_label[loc_y][loc_x][0] = 1.0      # is obj
        yolo_label[loc_y][loc_x][1:5] = yolo_box  # bbox
        yolo_label[loc_y][loc_x][5:] = 0       # only one obj in one grid
        yolo_label[loc_y][loc_x][4+label[0]] = 1.0  # class
    return yolo_label
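
The floor of the normalised box centre times the grid size picks the responsible grid cell; e.g. with a 7x7 grid a centre at (0.43, 0.66) lands in column 3, row 4 (illustrative numbers):

import numpy as np

side = 7
box_x, box_y = 0.43, 0.66
print(int(np.floor(box_x * side)), int(np.floor(box_y * side)))  # 3 4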

Example 39

def julian_date(hUTC, dayofyear, year):
    """ Julian calendar date

    Args:
        hUTC: fractional hour (UTC time)
        dayofyear (int):
        year (int):

    Returns:
        the julian date

    Details:
        World Meteorological Organization (2006).Guide to meteorological
        instruments and methods of observation. Geneva, Switzerland.
    """
    delta = year - 1949
    leap = numpy.floor(delta / 4.)
    return 2432916.5 + delta * 365 + leap + dayofyear + hUTC / 24.
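
A quick check against a known value, assuming the function above and numpy are imported: noon UTC on 1 January 2000 (day-of-year 1) is the J2000.0 reference epoch, JD 2451545.0:

print(julian_date(12.0, 1, 2000))  # 2451545.0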

Example 40

def unknown_feature_extractor(x, sr, win_len, shift_len, barks, inner_win, inner_shift, win_type, method_version):
    x_spectrum = stft_extractor(x, win_len, shift_len, win_type)
    coef = get_fft_bark_mat(sr, win_len, barks, 20, sr//2)
    bark_spect = np.matmul(coef, x_spectrum)
    ams = np.zeros((barks, inner_win//2+1, (bark_spect.shape[1] - inner_win)//inner_shift))
    for i in range(barks):
        channel_stft = stft_extractor(bark_spect[i, :], inner_win, inner_shift, 'hanning')
        if method_version == 'v1':
            ams[i, :, :] = 20 * np.log(np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift]))
        elif method_version == 'v2':
            channel_amplitude = np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = np.angle(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = channel_angle - (np.floor(channel_angle / (2.*np.pi)) * (2.*np.pi))
            ams[i, :, :] = np.power(channel_amplitude, 1./3.) * channel_angle
        else:
            ams[i, :, :] = np.abs(channel_stft)
    return ams
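
The floor trick in the 'v2' branch wraps phase angles into the range [0, 2*pi); on a few sample values:

import numpy as np

angle = np.array([-0.5, 3.5, 7.0])
print(angle - np.floor(angle / (2. * np.pi)) * (2. * np.pi))
# approximately [5.7832 3.5 0.7168]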

Example 41

def ams_extractor(x, sr, win_len, shift_len, barks, inner_win, inner_shift, win_type, method_version):
    x_spectrum = stft_extractor(x, win_len, shift_len, win_type)
    coef = get_fft_bark_mat(sr, win_len, barks, 20, sr//2)
    bark_spect = np.matmul(coef, x_spectrum)
    ams = np.zeros((barks, inner_win//2+1, (bark_spect.shape[1] - inner_win)//inner_shift))
    for i in range(barks):
        channel_stft = stft_extractor(bark_spect[i, :], inner_win, inner_shift, 'hanning')
        if method_version == 'v1':
            ams[i, :, :] = 20 * np.log(np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift]))
        elif method_version == 'v2':
            channel_amplitude = np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = np.angle(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = channel_angle - (np.floor(channel_angle / (2.*np.pi)) * (2.*np.pi))
            ams[i, :, :] = np.power(channel_amplitude, 1./3.) * channel_angle
        else:
            ams[i, :, :] = np.abs(channel_stft)
    return ams

Example 42

def getTimeDerivative(I, Win):
    dw = np.floor(Win/2)
    t = np.arange(-dw, dw+1)
    sigma = 0.4*dw
    xgaussf = t*np.exp(-t**2 / (2*sigma**2))
    # Normalize by L1 norm to control for length of window
    xgaussf = xgaussf/np.sum(np.abs(xgaussf))
    xgaussf = xgaussf[:, None]
    IRet = scipy.signal.convolve2d(I, xgaussf, 'valid')
    validIdx = np.arange(dw, I.shape[0]-dw, dtype='int64')
    return [IRet, validIdx]

#############################################################
####          FAST TIME DELAY EMBEDDING, Tau = 1         ####
#############################################################
# Input: I: P x N Video with frames along the columns
#        W: Windows
# Output: Mu: P x W video with mean frames along the columns

Example 43

def mu_law(x, mu=255, int8=False):
    """A TF implementation of Mu-Law encoding.

    Args:
        x: The audio samples to encode.
        mu: The Mu to use in our Mu-Law.
        int8: Use int8 encoding.

    Returns:
        out: The Mu-Law encoded int8 data.
    """
    out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
    out = tf.floor(out * 128)
    if int8:
        out = tf.cast(out, tf.int8)
    return out

Example 44

def impad_gpu(y_gpu, sf):
    sf = np.array(sf)
    shape = (np.array(y_gpu.shape) + sf).astype(np.uint32)
    dtype = y_gpu.dtype
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(float(shape[1])/block_size[0])),
                 int(np.ceil(float(shape[0])/block_size[1])))
    preproc = _generate_preproc(dtype, shape)
    mod = SourceModule(preproc + kernel_code, keep=True)
    padded_gpu = cua.empty((int(shape[0]), int(shape[1])), dtype)
    impad_fun = mod.get_function("impad")
    upper_left = np.uint32(np.floor(sf / 2.))
    original_size = np.uint32(np.array(y_gpu.shape))
    impad_fun(padded_gpu.gpudata, y_gpu.gpudata,
              upper_left[1], upper_left[0],
              original_size[0], original_size[1],
              block=block_size, grid=grid_size)
    return padded_gpu

Example 45

def laplace_stack_gpu(y_gpu, mode='valid'):
    """
    This function computes the Laplacian of each slice of a stack of images
    """
    shape = np.array(y_gpu.shape).astype(np.uint32)
    dtype = y_gpu.dtype
    block_size = (6, int(np.floor(512./6./float(shape[0]))), int(shape[0]))
    grid_size = (int(np.ceil(float(shape[1])/block_size[0])),
                 int(np.ceil(float(shape[0])/block_size[1])))
    shared_size = int((2+block_size[0])*(2+block_size[1])*(2+block_size[2])
                      * dtype.itemsize)
    preproc = _generate_preproc(dtype, (shape[1], shape[2]))
    mod = SourceModule(preproc + kernel_code, keep=True)
    laplace_fun_gpu = mod.get_function("laplace_stack_same")
    laplace_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1], y_gpu.shape[2]),
                            y_gpu.dtype)
    laplace_fun_gpu(laplace_gpu.gpudata, y_gpu.gpudata,
                    block=block_size, grid=grid_size, shared=shared_size)
    return laplace_gpu

Example 46

def laplace3d_gpu(y_gpu):
    shape = np.array(y_gpu.shape).astype(np.uint32)
    dtype = y_gpu.dtype
    block_size = (6, int(np.floor(512./6./float(shape[0]))), int(shape[0]))
    grid_size = (int(np.ceil(float(shape[1])/block_size[0])),
                 int(np.ceil(float(shape[0])/block_size[1])))
    shared_size = int((2+block_size[0])*(2+block_size[1])*(2+block_size[2])
                      * dtype.itemsize)
    preproc = _generate_preproc(dtype, (shape[1], shape[2]))
    mod = SourceModule(preproc + kernel_code, keep=True)
    laplace_fun_gpu = mod.get_function("laplace3d_same")
    laplace_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1], y_gpu.shape[2]),
                            y_gpu.dtype)
    laplace_fun_gpu(laplace_gpu.gpudata, y_gpu.gpudata,
                    block=block_size, grid=grid_size, shared=shared_size)
    return laplace_gpu

Example 47

def wsparsify(w_gpu, percentage):
    """
    Keeps only as many entries nonzero as specified by percentage.
    """
    w = w_gpu.get()
    vals = sort(w)[::-1]
    idx = floor(prod(w.shape()) * percentage/100)
    zw_gpu = cua.zeros_like(w_gpu)  # gpu array filled with zeros
    tw_gpu = cua.empty_like(w_gpu)  # gpu array containing threshold
    tw_gpu.fill(vals[idx])
    w_gpu = cua.if_positive(w_gpu > tw_gpu, w_gpu, zw_gpu)
    del zw_gpu
    del tw_gpu
    return w_gpu

Example 48

def sparsify(x, percentage):
    """
    Keeps only as many entries nonzero as specified by percentage.
    Note that only the largest values are kept.
    --------------------------------------------------------------------------
    Usage:
      Call:   y = sparsify(x, percentage)
      Input:  x           input ndarray x
              percentage  percentage of nonzero entries in y
      Output: sparsified version of x
    --------------------------------------------------------------------------
    Copyright (C) 2011 Michael Hirsch
    """
    vals = np.sort(x.flatten())[::-1]
    idx = int(np.floor(np.prod(x.shape) * percentage/100))
    x[x < vals[idx]] = 0
    return x
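
The floor of count * percentage / 100 gives the position of the threshold in the descending sort; e.g. keeping roughly 25% of 20 entries:

import numpy as np

x = np.arange(20, dtype=float)
percentage = 25
idx = int(np.floor(x.size * percentage / 100))
print(idx)  # 5, entries smaller than the 6th-largest value are zeroed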

Example 49

def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
    """ short time fourier transform of audio signal """
    win = window(frameSize)
    hopSize = int(frameSize - np.floor(overlapFac * frameSize))
    # zeros at beginning (thus center of 1st window should be for sample nr. 0)
    # samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
    samples = np.array(sig, dtype='float64')
    # cols for windowing (cast to int so it can be used as an array dimension)
    cols = int(np.floor((len(samples) - frameSize) / float(hopSize)))
    # zeros at end (thus samples can be fully covered by frames)
    # samples = np.append(samples, np.zeros(frameSize))
    frames = stride_tricks.as_strided(
        samples,
        shape=(cols, frameSize),
        strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
    frames *= win
    return np.fft.rfft(frames)

Example 50

def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
    """ short time fourier transform of audio signal """
    win = window(frameSize)
    hopSize = int(frameSize - np.floor(overlapFac * frameSize))
    # zeros at beginning (thus center of 1st window should be for sample nr. 0)
    # samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
    samples = np.array(sig, dtype='float64')
    # cols for windowing (cast to int so it can be used as an array dimension)
    cols = int(np.ceil((len(samples) - frameSize) / float(hopSize)) + 1)
    # zeros at end (thus samples can be fully covered by frames)
    # samples = np.append(samples, np.zeros(frameSize))
    frames = stride_tricks.as_strided(
        samples,
        shape=(cols, frameSize),
        strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
    frames *= win
    return np.fft.rfft(frames)
