[Repost] Python numpy.true_divide() usage examples

Reference link: numpy.bitwise_xor in Python

The following code examples show how to use numpy.true_divide(). They are extracted from open source Python projects.
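
Before the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic behavior: np.true_divide() always performs element-wise floating-point division, even on integer inputs, whereas np.floor_divide() (the // operator) floors the result.

import numpy as np

a = np.array([1, 2, 3, 10])
b = np.array([2, 2, 4, 4])

# true_divide always returns floating-point results, even for integer inputs
print(np.true_divide(a, b))   # [0.5  1.   0.75 2.5 ]
print(a / b)                  # the / operator dispatches to the same ufunc

# floor_divide (the // operator) floors the quotient instead
print(np.floor_divide(a, b))  # [0 1 0 2]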

 Example 1

def test_zero_safe_divide(self):
    from blmath.numerics.operations import zero_safe_divide

    numerator = np.ones((5, 5))
    numerator[3, 3] = 0.

    denominator = np.ones((5, 5))
    denominator[2, 2] = 0.
    denominator[3, 3] = 0.

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        true_divide = np.true_divide(numerator, denominator)
        safe_divide = zero_safe_divide(numerator, denominator)

    self.assertTrue(np.isinf(true_divide[2, 2]))
    self.assertEqual(safe_divide[2, 2], 0.)
    self.assertTrue(np.isnan(true_divide[3, 3]))
    self.assertEqual(safe_divide[3, 3], 0.)

 Example 2

def zero_safe_divide(a, b, default_error_value=0.):
    """Element-wise division that accounts for floating point errors.

    Both invalid floating-point (e.g. 0. / 0.) and divide-by-zero errors are
    suppressed. Resulting values (NaN and Inf respectively) are replaced with
    `default_error_value`.
    """
    import numpy as np

    with np.errstate(invalid='ignore', divide='ignore'):
        quotient = np.true_divide(a, b)
        bad_value_indices = np.logical_or(
            np.isnan(quotient), np.isinf(quotient))
        quotient[bad_value_indices] = default_error_value

    return quotient
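
A short usage sketch of the zero_safe_divide helper above; the array values here are illustrative and not from the source project.

import numpy as np

numerator = np.array([1., 0., 4.])
denominator = np.array([0., 0., 2.])

# np.true_divide alone would give [inf, nan, 2.] (with runtime warnings);
# zero_safe_divide replaces the inf and nan with default_error_value (0. by default)
print(zero_safe_divide(numerator, denominator))  # [0. 0. 2.]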

 Example 3

def V_short(self, eta):
    sum0 = np.zeros(7, dtype=float)
    sum1 = np.zeros(7, dtype=float)
    for n1, n2 in product(range(self.N1+1), range(self.N2+1)):
        wdo = comb(self.N1, n1, exact=True)*comb(self.N2, n2, exact=True)
        wdox10 = comb(self.N1-1, n1, exact=True)*comb(self.N2, n2, exact=True)
        wdox11 = comb(self.N1-1, n1-1, exact=True)*comb(self.N2, n2, exact=True)
        wdox20 = comb(self.N1, n1, exact=True)*comb(self.N2-1, n2, exact=True)
        wdox21 = comb(self.N1, n1, exact=True)*comb(self.N2-1, n2-1, exact=True)
        w = np.asarray([wdox10, wdox20, wdox11, wdox21, wdo, wdo, wdo])
        pz0, pz1 = self.p_n_given_z(n1, n2)
        counts = [self.N1-n1, self.N2-n2, n1, n2, 1, 1, 1]
        Q = (eta*pz0*counts*(1-self.pZgivenA) + eta*pz1*counts*self.pZgivenA).sum()
        ratio = np.nan_to_num(np.true_divide(pz0*(1-self.pZgivenA)+pz1*self.pZgivenA, Q))
        sum0 += np.asfarray(w*pz0*ratio)
        sum1 += np.asfarray(w*pz1*ratio)
    result = self.pZgivenA*sum1 + (1-self.pZgivenA)*sum0
    return result

 Example 4

def run(self, T, model):
    if T <= model.K:  # result is not defined if the horizon is shorter than the number of actions
        self.best_action = None
        return np.nan
    actions = range(0, model.K)
    self.trials = np.ones(model.K)
    self.success = model.sample_multiple(actions, 1)
    for t in range(model.K, T):
        arm = argmax_rand(self.upper_bound(t))
        self.trials[arm] += 1
        self.success[arm] += model.sample_multiple(arm, 1)
    mu = np.true_divide(self.success, self.trials)
    self.best_action = argmax_rand(mu)
    return max(model.expected_rewards) - model.expected_rewards[self.best_action]

 Example 5

def fit(self, x, y, verbose=True):
    # setting data attributes for the model instance
    X = tfidf_to_counts(x)

    # splitting by target class so we can calculate the log-count ratio
    X_pos = X[np.where(y == 1)]
    X_neg = X[np.where(y == 0)]
    self.r = log_count_ratio(X_pos, X_neg)

    # setting the npos and nneg variables
    n_pos = X_pos.shape[0]
    n_neg = X_neg.shape[0]

    # getting the bias for the MNB model
    self.nb_bias = np.log(np.true_divide(n_pos, n_neg))

    # trains, tests, and assesses the performance of the model

 Example 6

def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod
    ]

    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)

 Example 7

def __itruediv__(self, other):
    """
    True divide self by other in-place.
    """
    other_data = getdata(other)
    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
    other_mask = getmask(other)
    new_mask = mask_or(other_mask, dom_mask)
    # The following 3 lines control the domain filling
    if dom_mask.any():
        (_, fval) = ufunc_fills[np.true_divide]
        other_data = np.where(dom_mask, fval, other_data)
    self._mask |= new_mask
    self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    return self

 Example 8

def load_files(avg_file, std_file):
    # load files
    with open(avg_file) as f:
        avg = simplejson.load(f)
    with open(std_file) as f:
        std = simplejson.load(f)

    std = np.array(std)
    print std
    std = np.true_divide(std, 2.)
    print std
    avg = np.array(avg)
    avg_upper = avg + std
    avg_lower = avg - std
    return avg, avg_upper, avg_lower

 Example 9

def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod
    ]

    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)

 Example 10

def __itruediv__(self, other):
    """
    True divide self by other in-place.
    """
    other_data = getdata(other)
    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
    other_mask = getmask(other)
    new_mask = mask_or(other_mask, dom_mask)
    # The following 3 lines control the domain filling
    if dom_mask.any():
        (_, fval) = ufunc_fills[np.true_divide]
        other_data = np.where(dom_mask, fval, other_data)
    self._mask |= new_mask
    self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    return self

 Example 11

def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod
    ]

    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)

 Example 12

def __itruediv__(self, other):
    """
    True divide self by other in-place.
    """
    other_data = getdata(other)
    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
    other_mask = getmask(other)
    new_mask = mask_or(other_mask, dom_mask)
    # The following 3 lines control the domain filling
    if dom_mask.any():
        (_, fval) = ufunc_fills[np.true_divide]
        other_data = np.where(dom_mask, fval, other_data)
    self._mask |= new_mask
    self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    return self

 Example 13

def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod
    ]

    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)

 Example 14

def __itruediv__(self, other):
    """
    True divide self by other in-place.
    """
    other_data = getdata(other)
    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
    other_mask = getmask(other)
    new_mask = mask_or(other_mask, dom_mask)
    # The following 3 lines control the domain filling
    if dom_mask.any():
        (_, fval) = ufunc_fills[np.true_divide]
        other_data = np.where(dom_mask, fval, other_data)
    self._mask |= new_mask
    self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    return self

 Example 15

def _entropy(self, y, return_class_counts=False):
    """ Entropy for the classes in the array y
    :math: \sum_{x \in X} p(x) \log_{2}(1/p(x)) :math: from
    https://en.wikipedia.org/wiki/ID3_algorithm

    Parameters
    ----------
    y : nparray of shape [n remaining attributes]
        containing the class names

    Returns
    -------
    : float
        information for remaining examples given feature
    """
    n = y.shape[0]
    if n <= 0:
        return 0
    classes, count = unique(y)
    p = np.true_divide(count, n)
    res = np.abs(np.sum(np.multiply(p, np.log2(p))))
    if return_class_counts:
        return res, np.vstack((classes, count)).T
    else:
        return res

 Example 16

def _info_nominal(self, x, y):
    """ Info for nominal feature feature_values
    :math: p(a)H(a) :math: from
    https://en.wikipedia.org/wiki/ID3_algorithm

    Parameters
    ----------
    x : np.array of shape [n remaining examples]
        containing feature values
    y : np.array of shape [n remaining examples]
        containing relevant class

    Returns
    -------
    : float
        information for remaining examples given feature
    """
    info = 0
    n = x.shape[0]
    items, count = unique(x)
    for value, p in zip(items, count):
        info += p * self._entropy(y[x == value])
    return CalcRecord(CalcRecord.NOM,
                      info * np.true_divide(1, n),
                      attribute_counts=count)

 Example 17

def bilinearResize(images, ratiox, ratioy):
    '''
    images: 4D image batch
    ratiox, ratioy: magnification ratio. Positive integer.
    '''
    b, h, w, c = [v.value for v in images.get_shape()]
    sidex = 2 * ratiox - 1
    sidey = 2 * ratioy - 1
    interpolatex = np.true_divide((ratiox - np.abs(np.arange(sidex) - ratiox + 1)), ratiox)
    interpolatey = np.true_divide((ratioy - np.abs(np.arange(sidey) - ratioy + 1)), ratioy)
    weight = np.outer(interpolatex, interpolatey).astype(np.float32)
    weights = np.zeros((sidex, sidey, c, c), dtype=np.float32)
    for i in range(c):
        weights[:, :, i, i] = weight
    out_shape = [b, h*ratiox, w*ratioy, c]
    strides = [1, ratiox, ratioy, 1]
    kernel = tf.constant(weights, name='bilinear_convt_weights')
    return tf.nn.conv2d_transpose(images, weights,
                                  out_shape, strides=strides, padding='SAME')

 Example 18

def test_cumsum(mock_np, arr, normalize, expected_result):
    mock_np.cumsum = mock.Mock(side_effect=lambda *a, **k: np.cumsum(*a, **k))
    mock_np.square = mock.Mock(side_effect=lambda *a, **k: np.square(*a, **k))
    mock_np.max = mock.Mock(side_effect=lambda *a, **k: np.max(*a, **k))
    # mock_np.true_divide = mock.Mock(side_effect=lambda *a, **k: np.true_divide(*a, **k))
    mock_np.isnan = mock.Mock(side_effect=lambda *a, **k: np.isnan(*a, **k))

    r = cumsum(arr, normalize=normalize)

    assert len(r) == len(arr)
    assert (r == np.array(expected_result)).all()
    assert mock_np.cumsum.called
    assert mock_np.square.called
    assert mock_np.isnan.called == normalize
    assert mock_np.max.called == normalize
    # if normalize:
    #     assert mock_np.isnan.called
    #     assert mock_np.max.called
    #     assert mock_np.true_divide.called
    # else:
    #     assert not mock_np.max.called
    #     assert not mock_np.true_divide.called

 Example 19

def Quadrify(contour):
    epsilon = 10
    for i in range(1, 10):
        quad = cv2.approxPolyDP(contour, epsilon, True)
        length = len(quad)
        randomVar = np.random.random()
        epsilon = np.multiply(epsilon, np.true_divide(np.add(length, randomVar), np.add(4, randomVar)))
        # print epsilon, length
        if length == 4:
            return np.multiply(i, 0.01)
    return 1

 Example 20

def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod
    ]

    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)

 Example 21

def test_true_divide(self):
    # True_divide has a non uniform signature, see #3484.
    # This also tests type_tuple_type_resolver.
    a = np.full(5, 12.5)
    b = np.full(5, 10.0)
    tgt = np.full(5, 1.25)
    assert_almost_equal(np.true_divide(a, b, dtype=np.float64), tgt)
    assert_almost_equal(np.true_divide(a, b, dtype=np.float32), tgt)
    assert_raises(TypeError, np.true_divide, a, b, dtype=np.int)

 Example 22

def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod
    ]

    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)

 Example 23

def _hist_bin_doane(x):
    """
    Doane's histogram bin estimator.

    Improved version of Sturges' formula which works better for
    non-normal data. See
    http://stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    if x.size > 2:
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            return x.ptp() / (1.0 + np.log2(x.size) +
                              np.log2(1.0 + np.absolute(g1) / sg1))

    return 0.0

 Example 24

def __truediv__(self, other):
    """
    Divide other into self, and return a new masked array.
    """
    if self._delegate_binop(other):
        return NotImplemented
    return true_divide(self, other)

 Example 25

def __rtruediv__(self, other):
    """
    Divide self into other, and return a new masked array.
    """
    return true_divide(other, self)

 Example 26

def divide(x, y):
    with np.errstate(divide='ignore', invalid='ignore'):
        z = np.true_divide(x, y)
        z[~np.isfinite(z)] = 0
    return z

 Example 27

def __truediv__(self, other):
    return true_divide(self, other)

 Example 28

def __itruediv__(self, other):
    return true_divide(self, other, self)

 Example 29

def __rtruediv__(self, other):
    return true_divide(other, self)

 Example 30

def div0(a, b):
    """ ignore / 0, div0( [-1, 0, 1], 0 ) -> [0, 0, 0] """
    with np.errstate(divide='ignore', invalid='ignore'):
        c = np.true_divide(a, b)
        c[~np.isfinite(c)] = 0  # -inf inf NaN
    return c
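
A quick check of the behavior described in the docstring above (a sketch that assumes div0 is defined as shown and numpy is imported as np):

import numpy as np

print(div0(np.array([-1., 0., 1.]), 0.))              # [0. 0. 0.]
print(div0(np.array([4., 9.]), np.array([2., 0.])))   # [2. 0.]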

 Example 31

def fullness(self):
    potential_leaves = np.prod(np.ceil(np.true_divide(self.bounds[1] - self.bounds[0], self.leaf_shape)))
    return self.root_node.count_leaves() / float(potential_leaves)

 Example 32

def test_true_divide(self):
    # True_divide has a non uniform signature, see #3484.
    # This also tests type_tuple_type_resolver.
    a = np.full(5, 12.5)
    b = np.full(5, 10.0)
    tgt = np.full(5, 1.25)
    assert_almost_equal(np.true_divide(a, b, dtype=np.float64), tgt)
    assert_almost_equal(np.true_divide(a, b, dtype=np.float32), tgt)
    assert_raises(TypeError, np.true_divide, a, b, dtype=np.int)

 Example 33

def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod
    ]

    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)

 Example 34

def _hist_bin_doane(x):
    """
    Doane's histogram bin estimator.

    Improved version of Sturges' formula which works better for
    non-normal data. See
    http://stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    if x.size > 2:
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            return x.ptp() / (1.0 + np.log2(x.size) +
                              np.log2(1.0 + np.absolute(g1) / sg1))

    return 0.0

 Example 35

def __truediv__(self, other):
    """
    Divide other into self, and return a new masked array.
    """
    if self._delegate_binop(other):
        return NotImplemented
    return true_divide(self, other)

 Example 36

def __rtruediv__(self, other):
    """
    Divide self into other, and return a new masked array.
    """
    return true_divide(other, self)

 Example 37

def __itruediv__(self, other):
    """ See __div__. """
    oth = sanitize_units_mul(self, other)
    np.true_divide(self, oth, out=self)
    return self

 Example 38

def one_shot_classification(test_data, num_shots, num_classes, compute_similarities, k_neighbours=1,
                            num_episodes=10000):
    data_shape = np.prod(test_data[0][0].shape)
    episode_length = num_shots * num_classes + 1
    batch = np.zeros([num_classes, episode_length, data_shape], dtype=np.float32)
    accuracy = 0.
    votes = np.zeros(num_classes)
    for episode in xrange(num_episodes):
        classes = np.random.choice(test_data.shape[0], num_classes, False)
        classes_idx = np.repeat(classes[:, np.newaxis], num_shots, 1).flatten()
        idx = []
        for k in xrange(num_classes):
            idx.append(np.random.choice(test_data.shape[1], num_shots + 1, False))
        idx = np.vstack(idx)
        y = np.repeat(np.arange(num_classes)[:, np.newaxis], num_shots, 1).flatten()
        # print batch[:, :-1, :].shape, idx[:, :-1].flatten().shape
        batch[:, :-1, :] = test_data[classes_idx, idx[:, :-1].flatten(), :]
        batch[:, -1, :] = test_data[classes, idx[:, -1].flatten(), :]
        # np.true_divide(batch, 255., out=batch, casting='unsafe')

        # sim[i, j] -- similarity between batch[i, -1] and batch[i, j]
        sim = compute_similarities(batch)
        for k in xrange(num_classes):
            votes[:] = 0.
            nearest = sim[k].argsort()[-k_neighbours:]
            for j in nearest:
                votes[y[j]] += sim[k, j]
            y_hat = votes.argmax()
            if y_hat == k:
                accuracy += 1
        status = 'episode: %d, accuracy: %f' % (episode, accuracy / num_classes / (episode + 1))
        sys.stdout.write('\r' + status)
        sys.stdout.flush()
    return accuracy / num_episodes / num_classes

 Example 39

def load_data(path):
    raw_data = np.load(path)
    data = []
    min_size = min([raw_data[f].shape[0] for f in raw_data.files])
    max_value = max([raw_data[f].max() for f in raw_data.files])
    for cl in raw_data.files:
        class_data = raw_data[cl][:min_size]
        class_data = class_data.reshape(min_size, np.prod(class_data.shape[1:]))
        np.true_divide(class_data, max_value, out=class_data, casting='unsafe')
        # reverse_data = class_data.copy()
        # reverse_data[class_data > 0.] = 0.
        # reverse_data[class_data <= 0.95] = 1.
        # data.append(reverse_data[None, :, :])
        data.append(class_data[None, :, :])
    return np.concatenate(data, axis=0)

 Example 40

def _prepare_network_input(self, states):
    """ Normalizes the states from one minibatch.

    Args:
        states (numpy.ndarray): Mini-batch of states, shape=(batch_size,sequence_length,frame_width,frame_height)

    Returns:
        normalized_states (numpy.ndarray): State values divided by the maximum state value, shape=(batch_size,sequence_length,frame_width,frame_height)
    """
    _logger.debug("Normalizing input")
    return np.true_divide(states, self.grayscales)

 Example 41

def prod_all_but_j(vector):
    """ returns a vector where the jth term is the product of all the entries except the jth one """
    zeros = np.where(vector == 0)[0]
    if len(zeros) > 1:
        return np.zeros(len(vector))
    if len(zeros) == 1:
        result = np.zeros(len(vector))
        j = zeros[0]
        result[j] = np.prod(vector[np.arange(len(vector)) != j])
        return result
    joint = np.prod(vector)
    return np.true_divide(joint, vector)

 Example 42

def R(self, pa, eta):
    """ returns the ratio of the probability of the given assignment under each action to the probability under the eta weighted sum of actions. """
    Q = (eta*pa).sum()
    ratio = np.true_divide(pa, Q)
    ratio[np.isnan(ratio)] = 0  # we get nan when 0/0 but should just be 0 in this case
    return ratio

 Example 43

def V(self, eta):
    """ returns a vector of length K with the expected value of R (over x sampled from p(x|a)) for each action a """
    # with np.errstate(divide='ignore'):
    u = np.true_divide(1.0, np.dot(self.A, eta))
    u = np.nan_to_num(u)  # converts infinities to very large numbers such that multiplying by 0 gives 0
    v = np.dot(self.A2T, u)
    return v

 Example 44

def P(self, x):
    """ calculate vector of P_a for each action a """
    indx = np.arange(len(x))
    ps = self.pX[x, indx]  # probability of P(X_i = x_i) for each i given do()
    joint = ps.prod()  # probability of x given do()
    pi = np.true_divide(joint, ps)  # will be nan for elements for which ps is 0
    for j in np.where(np.isnan(pi))[0]:
        pi[j] = np.prod(ps[indx != j])
    pij = np.vstack((pi, pi))
    pij[1-x, indx] = 0  # now this is the probability of x given do(x_i=j)
    pij = pij.reshape((len(x)*2,))  # flatten: first N-1 will be px=0, 2nd px=1
    result = np.hstack((pij, joint))
    return result

 Example 45

def estimate_infrequent(self, h):
    qij_hat = np.true_divide(self.trials, h)
    s_indx = np.argsort(qij_hat)  # indexes of elements from s in sorted(s)
    m_hat = Parallel.calculate_m(qij_hat[s_indx])
    infrequent = s_indx[0:m_hat]
    return infrequent

 Example 46

def run(self, T, model):
    self.trials = np.full(model.K, 2, dtype=int)
    self.success = np.full(model.K, 1, dtype=int)
    for t in xrange(T):
        fails = self.trials - self.success
        theta = np.random.beta(self.success, fails)
        arm = argmax_rand(theta)
        self.trials[arm] += 1
        self.success[arm] += model.sample_multiple(arm, 1)
    mu = np.true_divide(self.success, self.trials)
    self.best_action = argmax_rand(mu)
    return max(model.expected_rewards) - model.expected_rewards[self.best_action]

 Example 47

def upper_bound(self, t):
    mu = np.true_divide(self.success, self.trials)
    interval = np.sqrt(self.alpha*np.log(t)/(2.0*self.trials))
    return mu + interval

 Example 48

def allocate(self, T, K):
    logK = .5 + np.true_divide(1, range(2, K+1)).sum()
    n = np.zeros((K), dtype=int)
    n[1:] = np.ceil((1.0/logK)*np.true_divide((T - K), range(K, 1, -1)))
    allocations = np.diff(n)
    return allocations

 Example 49

def run(self, T, model):
    self.trials = np.zeros(model.K)
    self.success = np.zeros(model.K)
    for t in xrange(T):
        x, y = model.sample(model.K-1)
        xij = np.hstack((1-x, x, 1))  # first N actions represent x_i = 0, 2nd N x_i = 1, last do()
        self.trials += xij
        self.success += y*xij
    self.u = np.true_divide(self.success, self.trials)
    self.best_action = argmax_rand(self.u)
    return max(model.expected_rewards) - model.expected_rewards[self.best_action]

 Example 50

def run(self, T, model):
    trials_per_action = T/model.K
    success = model.sample_multiple(range(model.K), trials_per_action)
    self.u = np.true_divide(success, trials_per_action)
    self.best_action = argmax_rand(self.u)
    return max(model.expected_rewards) - model.expected_rewards[self.best_action]
