The following are code examples showing how to use `numpy.zeros_like`. They are extracted from open source Python projects. You can vote up the examples you like or vote down the examples you don't like. You can also save this page to your account.
Example 1
def roi(img,vertices):
    """Keep only the region of *img* inside the polygon(s) *vertices*.

    Pixels inside the polygon retain their original values; every pixel
    outside is zeroed out.
    """
    # Start from an all-black mask of the same shape as the input image.
    polygon_mask = np.zeros_like(img)
    # Paint the polygon interior with the fill value 255.
    cv2.fillPoly(polygon_mask, vertices, 255)
    # AND image and mask: only pixels under the painted polygon survive.
    return cv2.bitwise_and(img, polygon_mask)
Example 2
def roll_zeropad(a, shift, axis=None):
    """Roll array elements along an axis, zero-padding the vacated slots.

    Unlike ``np.roll``, elements shifted past the edge are discarded
    instead of wrapping around; zeros fill the space they leave behind.
    With ``axis=None`` the array is rolled as if flattened, then the
    original shape is restored.
    """
    a = np.asanyarray(a)
    if shift == 0:
        return a
    # axis=None means operate on the flattened array; remember to
    # restore the original shape on the way out.
    if axis is None:
        length, restore_shape = a.size, True
    else:
        length, restore_shape = a.shape[axis], False
    if np.abs(shift) > length:
        # Every element is shifted out of view: the result is all zeros.
        out = np.zeros_like(a)
    elif shift < 0:
        shift += length
        pad = np.zeros_like(a.take(np.arange(length - shift), axis))
        kept = a.take(np.arange(length - shift, length), axis)
        out = np.concatenate((kept, pad), axis)
    else:
        pad = np.zeros_like(a.take(np.arange(length - shift, length), axis))
        kept = a.take(np.arange(length - shift), axis)
        out = np.concatenate((pad, kept), axis)
    return out.reshape(a.shape) if restore_shape else out
Example 3
def dbFun(_x,_original_vals, f):
    """Cluster *_x* with DBSCAN and plot the resulting clusters.

    Parameters kept as in the original API: ``_x`` is the feature
    matrix, ``_original_vals`` is currently unused here (retained for
    the commented-out characteristics step), and ``f`` is forwarded to
    ``plotCluster``.
    """
    db = DBSCAN(eps=0.3, min_samples=20).fit(_x)
    # Mark which points DBSCAN considers core samples.
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_
    # Bug fix: the original expression `(1 if -1 else 0)` always
    # evaluated to 1 because the constant -1 is truthy. The intent is to
    # discount the noise label (-1) only when it actually occurs.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    # gettingCharacteristics(_x, core_samples_mask, labels, n_clusters_,
    #                        _original_vals)
    print("Wait plotting clusters.....")
    plotCluster(_x, labels, core_samples_mask, n_clusters_, f)
    return
##############################################################################################
# Plotting the cluster after the result of DBSCAN
Example 4
def test_op(self):
    """Check that cross_entropy_sequence_loss masks positions past each
    example's sequence length: losses inside the length are strictly
    positive, losses beyond it are exactly zero."""
    logits = np.random.randn(self.sequence_length, self.batch_size,
                             self.vocab_size).astype(np.float32)
    sequence_length = np.array([1, 2, 3, 4])
    targets = np.random.randint(0, self.vocab_size,
                                [self.sequence_length, self.batch_size])
    losses = seq2seq_losses.cross_entropy_sequence_loss(logits, targets,
                                                        sequence_length)
    with self.test_session() as sess:
        losses_ = sess.run(losses)
    # For the first three examples: positions before the sequence length
    # must have loss > 0, positions at or after it must be exactly 0.
    for col in range(3):
        length = sequence_length[col]
        np.testing.assert_array_less(np.zeros_like(losses_[:length, col]),
                                     losses_[:length, col])
        np.testing.assert_array_equal(losses_[length:, col],
                                      np.zeros_like(losses_[length:, col]))
Example 5
def __init__(self, input_shape, output_shape):
    """Allocate the layer's buffers and randomly initialise its weights.

    input_shape:  3-element shape of one input sample (flattened below).
    output_shape: (batch, units) shape of the layer output.
    """
    self.input_shape = input_shape
    batch = output_shape[0]
    flat_inputs = input_shape[0] * input_shape[1] * input_shape[2]
    # Flattened input batch plus output working buffers.
    self.input = np.zeros((batch, flat_inputs), dtype=np.float32)
    self.output = np.zeros(output_shape, dtype=np.float32)
    self.output_raw = np.zeros_like(self.output)
    self.output_error = np.zeros_like(self.output)
    self.output_average = np.zeros(self.output.shape[1], dtype=np.float32)
    # Gaussian init with std sqrt(2 / (fan_in + fan_out)).
    fan_sum = self.output.shape[1] + self.input.shape[1]
    self.weights = np.random.normal(
        0, np.sqrt(2.0 / fan_sum),
        size=(self.input.shape[1], self.output.shape[1])).astype(np.float32)
    # Per-weight accumulators, all matching the weight matrix shape.
    self.gradient = np.zeros_like(self.weights)
    self.reconstruction = np.zeros_like(self.weights)
    self.errors = np.zeros_like(self.weights)
    self.output_ranks = np.zeros(self.output.shape[1], dtype=np.int32)
    self.learning_rate = 1
    self.norm_limit = 0.1
Example 6
def gen_batches(data, n_seqs, n_steps):
    """Yield (input, target) batches of shape n_seqs x n_steps.

    Targets are the inputs shifted left by one position; the final
    target column wraps around to the first input column of the batch.
    """
    batch_span = n_seqs * n_steps
    total_batches = len(data) // batch_span
    # Drop the tail so the data divides into whole batches, then lay it
    # out with one row per sequence.
    trimmed = data[:total_batches * batch_span].reshape([n_seqs, -1])
    for start in range(0, trimmed.shape[1], n_steps):
        inputs = trimmed[:, start:start + n_steps]
        targets = np.zeros_like(inputs)
        targets[:, :-1] = inputs[:, 1:]
        targets[:, -1] = inputs[:, 0]
        yield inputs, targets
#-------------------------------------------------------------------------------
# Parse commandline
#-------------------------------------------------------------------------------
Example 7
def filter(self, p):
    """
    Parameters
    ----------
    p: NDArray
        Filtering input which is 2D or 3D with format HW or HWC

    Returns
    -------
    ret: NDArray
        Filtering output whose shape is same with input
    """
    p = to_32F(p)
    # Grayscale (HW): filter the single plane directly.
    if len(p.shape) == 2:
        return self._Filter.filter(p)
    # Multi-channel (HWC): filter each channel independently and
    # reassemble into an output of the same shape.
    if len(p.shape) == 3:
        out = np.zeros_like(p, dtype=np.float32)
        for ch in range(p.shape[2]):
            out[:, :, ch] = self._Filter.filter(p[:, :, ch])
        return out
Example 8
def _process_label(self, fn):
    """Load a segmentation label .mat file and remap names to class ids.

    TODO: Fix one-indexing to zero-index;
    retained one-index due to uint8 constraint

    Parameters
    ----------
    fn : str
        Path to a .mat file containing 'seglabel' (uint8 label image)
        and 'names' (per-label class name strings).

    Returns
    -------
    The remapped label image, padded via ``self._pad_image``.
    """
    mat = loadmat(fn, squeeze_me=True)
    _labels = mat['seglabel'].astype(np.uint8)
    # _labels -= 1 # (move to zero-index)
    labels = np.zeros_like(_labels)
    for (idx, name) in enumerate(mat['names']):
        try:
            value = SUNRGBDDataset.target_hash[name]
        except KeyError:
            # Bug fix: the original bare `except:` swallowed every
            # exception (including KeyboardInterrupt). Only an unknown
            # class name should fall back to the background value 0.
            value = 0
        # Segment labels in the file are one-indexed.
        mask = _labels == idx + 1
        labels[mask] = value
    return self._pad_image(labels)
Example 9
def recall_from_IoU(IoU, samples=500):
    """Compute recall as a function of IoU threshold.

    Parameters
    ----------
    IoU : list or 1-D array of floats
        Intersection-over-union score for each detection.
    samples : int
        Number of threshold values, evenly spaced over [0, 1].

    Returns
    -------
    (recall, IoU_thresholds) : tuple of 1-D arrays
        recall[i] is the fraction of detections whose IoU is at least
        IoU_thresholds[i].

    Raises
    ------
    ValueError
        If IoU is neither a list nor a 1-D array.
    """
    if not (isinstance(IoU, list) or IoU.ndim == 1):
        raise ValueError('IoU needs to be a list or 1-D')
    iou = np.float32(IoU)
    # Plot intersection over union
    IoU_thresholds = np.linspace(0.0, 1.0, samples)
    recall = np.zeros_like(IoU_thresholds)
    # Fix: removed the `tp, relevant = 0, 0` counters from the original
    # loop body — they were assigned every iteration but never used.
    for idx, IoU_th in enumerate(IoU_thresholds):
        inds, = np.where(iou >= IoU_th)
        recall[idx] = len(inds) * 1.0 / len(IoU)
    return recall, IoU_thresholds
# =====================================================================
# Generic utility functions for object recognition
# ---------------------------------------------------------------------
Example 10
def reset_index(self):
    """Reset index to range based
    """
    # Materialise the partition list and compute every partition's
    # length in one parallel pass.
    dfs = self.to_delayed()
    sizes = np.asarray(compute(*map(delayed(len), dfs)))
    # prefixes[i] = total rows in all partitions before partition i,
    # i.e. the exclusive cumulative sum of the partition sizes.
    prefixes = np.zeros_like(sizes)
    prefixes[1:] = np.cumsum(sizes[:-1])

    @delayed
    def fix_index(df, startpos):
        # Give this partition a contiguous integer index starting at its
        # global row offset.
        return df.set_index(np.arange(start=startpos,
                                      stop=startpos + len(df),
                                      dtype=np.intp))

    # Rebuild the collection from the re-indexed delayed partitions.
    outdfs = [fix_index(df, startpos)
              for df, startpos in zip(dfs, prefixes)]
    return from_delayed(outdfs)
Example 11
def recoded_features(self, inputs, layer=-1, inverse_fn=ielu):
hidden = self.get_hidden_values(inputs, store=True, layer=layer).eval()
bench = self.get_reconstructed_input(np.zeros_like(hidden),
layer=layer).eval().squeeze()
if inverse_fn: ibench = inverse_fn(bench)
results = []
for h in range(hidden.shape[-1]):
hidden_h = np.zeros_like(hidden)
hidden_h[..., h] = hidden[..., h]
feature = self.get_reconstructed_input(hidden_h, layer=layer).eva