The following are code examples showing how to use . They are extracted from open source Python projects.
Example 1
def preprocess(image):
    """Takes an image and applies preprocessing"""
    # resize to the network input size
    image = cv2.resize(image, (data_shape, data_shape))
    # swap BGR to RGB
    image = image[:, :, (2, 1, 0)]
    # convert to float before subtracting the mean
    image = image.astype(np.float32)
    # subtract the per-channel mean
    image -= np.array([123, 117, 104])
    # reorder to [batch-channel-height-width]
    image = np.transpose(image, (2, 0, 1))
    image = image[np.newaxis, :]
    # convert to ndarray
    image = nd.array(image)
    return image
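For context, a minimal usage sketch follows (not part of the original project). It assumes cv2, numpy, and mxnet.nd are imported in the same module as preprocess, and that the global data_shape (the network input size) is defined there; the value 512 is an assumption for illustration.

import cv2
import numpy as np
from mxnet import nd

data_shape = 512  # assumed input size; the real value comes from the project

img = cv2.imread('example.jpg')   # BGR image read with OpenCV
batch = preprocess(img)           # NDArray of shape (1, 3, data_shape, data_shape)
print(batch.shape)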
Example 2
def transform(self, img, lbl):
    img = img[:, :, ::-1]
    img = img.astype(np.float64)
    img -= self.mean
    img = m.imresize(img, (self.img_size[0], self.img_size[1]))
    # Resize scales images from 0 to 255, thus we need
    # to divide by 255.0
    img = img.astype(float) / 255.0
    # HWC -> CHW
    img = img.transpose(2, 0, 1)
    lbl[lbl == 255] = 0
    lbl = lbl.astype(float)
    lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
    lbl = lbl.astype(int)
    img = torch.from_numpy(img).float()
    lbl = torch.from_numpy(lbl).long()
    return img, lbl
Example 3
def backPropagate(Z1, Z2, y, W2, b2):
    ## YOUR CODE HERE ##
    E2 = 0
    E1 = 0
    Eb1 = 0
    # E2 is the error at the output layer. To find it we subtract the actual output y
    # from the estimated output Z2.
    # We should get 5 errors because there are 5 nodes in the output layer.
    E2 = Z2 - y
    ## E1 is the error at the hidden layer. To find it we use the error found at the
    ## output layer and the weights between the output and hidden layers.
    ## We should get 30 errors because there are 30 nodes in the hidden layer.
    E1 = np.dot(W2, np.transpose(E2))
    ## Eb1 is the bias error for the hidden layer. To find it we use the error found at
    ## the output layer and the weights between the output and bias layers.
    ## We should get 1 error because there is 1 bias node in the hidden layer.
    Eb1 = np.dot(b2, np.transpose(E2))
    ####################
    return E2, E1, Eb1

# calculate the gradients for the weights between units and the bias weights
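As a quick check of the shapes implied by the comments above (400 inputs, 30 hidden nodes, 5 output nodes), here is a hedged sketch with random values; the array sizes are assumptions used only for illustration.

import numpy as np

Z1 = np.random.rand(1, 30)   # hidden-layer activations for one example
Z2 = np.random.rand(1, 5)    # output-layer activations
y  = np.zeros((1, 5))        # one-hot target
W2 = np.random.rand(30, 5)   # hidden-to-output weights
b2 = np.random.rand(1, 5)    # output-layer bias weights

E2, E1, Eb1 = backPropagate(Z1, Z2, y, W2, b2)
print(E2.shape, E1.shape, Eb1.shape)   # (1, 5) (30, 1) (1, 1)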
Example 4
def format_img(img, C):
    img_min_side = float(C.im_size)
    (height, width, _) = img.shape

    if width <= height:
        f = img_min_side / width
        new_height = int(f * height)
        new_width = int(img_min_side)
    else:
        f = img_min_side / height
        new_width = int(f * width)
        new_height = int(img_min_side)
    fx = width / float(new_width)
    fy = height / float(new_height)
    img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
    img = img[:, :, (2, 1, 0)]
    img = img.astype(np.float32)
    img[:, :, 0] -= C.img_channel_mean[0]
    img[:, :, 1] -= C.img_channel_mean[1]
    img[:, :, 2] -= C.img_channel_mean[2]
    img /= C.img_scaling_factor
    img = np.transpose(img, (2, 0, 1))
    img = np.expand_dims(img, axis=0)
    return img, fx, fy
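The returned fx and fy are the width and height ratios between the original and resized image. A hedged follow-up sketch (not from the project; C is the project's config object and the box coordinates are made up) shows how a box predicted on the resized image could be mapped back to original-image coordinates.

import numpy as np

img = np.random.randint(0, 255, (600, 800, 3), dtype=np.uint8)   # dummy BGR image
img_batch, fx, fy = format_img(img, C)                            # C is assumed to exist
x1, y1, x2, y2 = 10, 20, 110, 220                                 # box on the resized image
orig_box = (int(round(x1 * fx)), int(round(y1 * fy)),
            int(round(x2 * fx)), int(round(y2 * fy)))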
Example 5
def transform(self, img, lbl):
    img = img[:, :, ::-1]
    img = img.astype(np.float64)
    img -= self.mean
    img = m.imresize(img, (self.img_size[0], self.img_size[1]))
    # Resize scales images from 0 to 255, thus we need
    # to divide by 255.0
    img = img.astype(float) / 255.0
    # HWC -> CHW
    img = img.transpose(2, 0, 1)
    lbl = self.encode_segmap(lbl)
    classes = np.unique(lbl)
    lbl = lbl.astype(float)
    lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
    lbl = lbl.astype(int)
    assert(np.all(classes == np.unique(lbl)))
    img = torch.from_numpy(img).float()
    lbl = torch.from_numpy(lbl).long()
    return img, lbl
Example 6
def calcGrads(X, Z1, Z2, E1, E2, Eb1):
    ## YOUR CODE HERE ##
    d_W1 = 0
    d_b1 = 0
    d_W2 = 0
    d_b2 = 0
    ## Here we compute the derivatives (gradients). To find each derivative, we multiply
    ## the layer error by the corresponding activations.
    # d_W2 is the derivative for the weights between the hidden layer and the output layer.
    d_W2 = np.dot(np.transpose(E2), Z1)
    # d_W1 is the derivative for the weights between the hidden layer and the input layer.
    d_W1 = np.dot(E1, X)
    # d_b2 is the derivative for the weights between the hidden-layer bias and the output layer.
    d_b2 = np.dot(np.transpose(E2), Eb1)
    # d_b1 is the derivative for the weights between the hidden-layer bias and the input layer.
    d_b1 = np.dot(np.transpose(E1), 1)
    ####################
    return d_W1, d_W2, d_b1, d_b2
# update the weights between units and the bias weights using a learning rate of alpha
Example 7
def updateWeights(W1, b1, W2, b2, alpha, d_W1, d_W2, d_b1, d_b2):
    ## YOUR CODE HERE ##
    # W1 = 0
    # b1 = 0
    # W2 = 0
    # b2 = 0
    ## Here we update the weights using the gradients found in the calcGrads function.
    ## W1 holds the weights between the input and the hidden layer.
    W1 = W1 - alpha * (np.transpose(d_W1))  # 400*30
    ## W2 holds the weights between the hidden layer and the output layer.
    W2 = W2 - alpha * (np.transpose(d_W2))  # 30*5
    ## b1 holds the weights between the input bias and the hidden layer.
    b1 = b1 - alpha * d_b1
    ## b2 holds the weights between the hidden-layer bias and the output layer.
    b2 = b2 - alpha * (np.transpose(d_b2))
    ####################
    return W1, b1, W2, b2
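To show how Examples 3, 6, and 7 fit together, here is a hedged sketch of one training step. The forward helper is hypothetical (it does not appear on this page), and the 400-30-5 layer sizes and learning rate follow the comments in the examples above.

import numpy as np

def forward(X, W1, b1, W2, b2):
    # hypothetical forward pass with sigmoid activations
    sigmoid = lambda a: 1.0 / (1.0 + np.exp(-a))
    Z1 = sigmoid(np.dot(X, W1) + b1)     # (1, 30)
    Z2 = sigmoid(np.dot(Z1, W2) + b2)    # (1, 5)
    return Z1, Z2

X  = np.random.rand(1, 400)
y  = np.eye(5)[[2]]                      # one-hot target, shape (1, 5)
W1 = np.random.rand(400, 30) * 0.01
b1 = np.random.rand(1, 30) * 0.01
W2 = np.random.rand(30, 5) * 0.01
b2 = np.random.rand(1, 5) * 0.01

Z1, Z2 = forward(X, W1, b1, W2, b2)
E2, E1, Eb1 = backPropagate(Z1, Z2, y, W2, b2)
d_W1, d_W2, d_b1, d_b2 = calcGrads(X, Z1, Z2, E1, E2, Eb1)
W1, b1, W2, b2 = updateWeights(W1, b1, W2, b2, 0.1, d_W1, d_W2, d_b1, d_b2)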
Example 8
def make_heatmaps_from_joints(input_size, heatmap_size, gaussian_variance, batch_joints):
    # Generate ground-truth heatmaps from ground-truth 2d joints
    scale_factor = input_size // heatmap_size
    batch_gt_heatmap_np = []
    for i in range(batch_joints.shape[0]):
        gt_heatmap_np = []
        invert_heatmap_np = np.ones(shape=(heatmap_size, heatmap_size))
        for j in range(batch_joints.shape[1]):
            cur_joint_heatmap = make_gaussian(heatmap_size,
                                              gaussian_variance,
                                              center=(batch_joints[i][j] // scale_factor))
            gt_heatmap_np.append(cur_joint_heatmap)
            invert_heatmap_np -= cur_joint_heatmap
        gt_heatmap_np.append(invert_heatmap_np)
        batch_gt_heatmap_np.append(gt_heatmap_np)
    batch_gt_heatmap_np = np.asarray(batch_gt_heatmap_np)
    batch_gt_heatmap_np = np.transpose(batch_gt_heatmap_np, (0, 2, 3, 1))
    return batch_gt_heatmap_np
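make_gaussian is not defined in this snippet. Below is a plausible sketch (an assumption modeled on common heatmap code, not the project's actual helper): it returns a heatmap_size x heatmap_size array containing a 2-D Gaussian bump centred on the given joint.

import numpy as np

def make_gaussian(size, variance, center=None):
    # hypothetical helper: grid coordinates for a size x size heatmap
    x = np.arange(0, size, 1, np.float32)
    y = x[:, np.newaxis]
    x0, y0 = (size // 2, size // 2) if center is None else (center[0], center[1])
    # Gaussian bump, treating `variance` as the spread parameter
    return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2.0 * float(variance) ** 2))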
Example 9
def af_h5_to_np(input_path, outpath):
    files = tables.open_file(input_path, mode='r+')
    speaker_nodes = files.root._f_list_nodes()
    for spk in speaker_nodes:
        file_nodes = spk._f_list_nodes()
        for fls in file_nodes:
            file_name = fls._v_name
            af_nodes = fls._f_list_nodes()
            af_list = []
            for fts in af_nodes:
                features = fts[:]
                mean = numpy.mean(features, 1)
                normalised_feats = list(numpy.transpose(features) / mean)
                af_list += normalised_feats
            numpy.save(outpath + file_name, numpy.array(af_list))
Example 10
def mahalanobis_distance(difference, num_random_features):
    num_samples, _ = np.shape(difference)
    sigma = np.cov(np.transpose(difference))
    mu = np.mean(difference, 0)

    if num_random_features == 1:
        stat = float(num_samples * mu ** 2) / float(sigma)
    else:
        try:
            linalg.inv(sigma)
        except LinAlgError:
            print('covariance matrix is singular. Pvalue returned is 1.1')
            warnings.warn('covariance matrix is singular. Pvalue returned is 1.1')
            return 0
        stat = num_samples * mu.dot(linalg.solve(sigma, np.transpose(mu)))

    return chi2.sf(stat, num_random_features)
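A minimal usage sketch follows (an assumption, since the original module's imports are not shown here): it presumes numpy, scipy.linalg, scipy.stats.chi2, warnings, and LinAlgError are imported in the same module as the function above.

import warnings
import numpy as np
from numpy.linalg import LinAlgError
from scipy import linalg
from scipy.stats import chi2

difference = np.random.randn(100, 10)            # 100 samples of 10 random features
p_value = mahalanobis_distance(difference, 10)   # p-value from the chi-squared survival function
print(p_value)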
Example 11
def sumIntensitiesMeme(
    self,
    t,
    m,
    node_vec,
    etimes,
    filterlatertimes=True,
):
    if filterlatertimes: