Python numpy.save() usage examples

This is a collection of code examples showing how to save data with Python's numpy.save() function. The examples cover loading and processing data from different sources and then storing the results with numpy.save(), including image processing, feature extraction, and matrix manipulation scenarios.

The following are code examples showing how to use numpy.save(). They are extracted from open-source Python projects.
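
Before the project-level examples below, here is a minimal sketch of the round trip they all rely on; the file name data.npy is only a placeholder:

import numpy as np

data = np.arange(12).reshape(3, 4)
np.save('data.npy', data)        # writes the array to a binary .npy file
restored = np.load('data.npy')   # reads it back as an ndarray
assert np.array_equal(data, restored)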

Example 1

def gen_pruned_features(name):
    print name
    feature_dir = 'data/feature_' + args.domain + \
        '_' + str(args.n_boxes) + 'boxes/' + name + '/'
    n_clips = len(glob.glob(feature_dir + BOX_FEATURE + '*.npy'))
    for clip in xrange(1, n_clips + 1):
        pruned_boxes = np.load(feature_dir + BOX_FEATURE + '{:04d}.npy'.format(clip))  # (50, args.n_boxes, 4)
        roisavg = np.load(feature_dir + 'roisavg{:04d}.npy'.format(clip))  # (50, args.n_boxes, 512)
        pruned_roisavg = np.zeros((50, args.n_boxes, 512))
        for frame in xrange(50):
            for box_id in xrange(args.n_boxes):
                if not np.array_equal(pruned_boxes[frame][box_id], np.zeros((4))):
                    pruned_roisavg[frame][box_id] = roisavg[frame][box_id]
        np.save('{}pruned_roisavg{:04d}'.format(feature_dir, clip), pruned_roisavg)
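
A side note on the final call above, illustrated with placeholder names: when the path passed to np.save() has no extension, NumPy appends .npy automatically, but np.load() expects the full file name.

import numpy as np

pruned_roisavg = np.zeros((50, 5, 512))          # placeholder array
np.save('pruned_roisavg0001', pruned_roisavg)    # written as pruned_roisavg0001.npy
restored = np.load('pruned_roisavg0001.npy')     # the .npy suffix must be spelled out when loading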

Example 2

def encode_jpeg(arr):
    assert arr.dtype == np.uint8
    # simulate multi-channel array for single channel arrays
    if len(arr.shape) == 3:
        arr = np.expand_dims(arr, 3)  # add channels to end of x,y,z
    arr = arr.transpose((3, 2, 1, 0))  # channels, z, y, x
    reshaped = arr.reshape(arr.shape[3] * arr.shape[2], arr.shape[1] * arr.shape[0])
    if arr.shape[0] == 1:
        img = Image.fromarray(reshaped, mode='L')
    elif arr.shape[0] == 3:
        img = Image.fromarray(reshaped, mode='RGB')
    else:
        # after the transpose, the channel count is arr.shape[0]
        raise ValueError("Number of image channels should be 1 or 3. Got: {}".format(arr.shape[0]))
    f = io.BytesIO()
    img.save(f, "JPEG")
    return f.getvalue()

Example 3

def visualize(self, zv, path):
    self.ax1.clear()
    self.ax2.clear()
    z, v = zv
    if path:
        np.save(path + '/trajectory.npy', z)
    z = np.reshape(z, [-1, 2])
    self.ax1.hist2d(z[:, 0], z[:, 1], bins=400)
    self.ax1.set(xlim=self.xlim(), ylim=self.ylim())
    v = np.reshape(v, [-1, 2])
    self.ax2.hist2d(v[:, 0], v[:, 1], bins=400)
    self.ax2.set(xlim=self.xlim(), ylim=self.ylim())
    if self.display:
        import matplotlib.pyplot as plt
        plt.show()
        plt.pause(0.1)
    elif path:
        self.fig.savefig(path + '/visualize.png')

Example 4

def load_rec(self):
    # first try and see if anything with the save data exists, since obviously
    # we don't want to keep loading from the original load location if some work has
    # already been done
    load = self.load_from_db({'exp_id': self.exp_id},
                             cache_filters=True)
    # if not, try loading from the loading location
    if not load and not self.sameloc:
        load = self.load_from_db(self.load_query,
                                 cache_filters=True,
                                 collfs=self.load_collfs,
                                 collfs_recent=self.load_collfs_recent)
        if load is None:
            raise Exception('You specified load parameters but no '
                            'record was found with the given spec.')
    self.load_data = load

Example 5

def get_feature_mat_from_video(video_filename, output_dir='output'):
    yt_vid, extension = video_filename.split('/')[-1].split('.')
    assert extension in ['webm', 'mp4', '3gp']
    mkdir_if_not_exist(output_dir, False)
    output_filename = output_dir + '/' + yt_vid + '.npy'
    vid_reader = imageio.get_reader(video_filename, 'ffmpeg')
    img_list = get_img_list_from_vid_reader(vid_reader, extension)
    base_model = InceptionV3(include_top=True, weights='imagenet')
    model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
    feature_mat = get_feature_mat(model, img_list)
    np.save(output_filename, feature_mat)
    return feature_mat

Example 6

def compute_dt_dist(docs, labels, tags, model, max_len, batch_size, pad_id, idxvocab, output_file):
    # generate batches
    num_batches = int(math.ceil(float(len(docs)) / batch_size))
    dt_dist = []
    t = []
    combined = []
    docid = 0
    for i in xrange(num_batches):
        x, _, _, t, s = get_batch_doc(docs, labels, tags, i, max_len, cf.tag_len, batch_size, pad_id)
        attention, mean_topic = sess.run([model.attention, model.mean_topic], {model.doc: x, model.tag: t})
        dt_dist.extend(attention[:s])
        if debug:
            for si in xrange(s):
                d = x[si]
                print "\n\nDoc", docid, "=", " ".join([idxvocab[item] for item in d if (item != pad_id)])
                sorted_dist = matutils.argsort(attention[si], reverse=True)
                for ti in sorted_dist:
                    print "Topic", ti, "=", attention[si][ti]
                docid += 1
    np.save(open(output_file, "w"), dt_dist)
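
One caveat about the last line: it passes an open file object to np.save(). That is supported, but on Python 3 the handle must be opened in binary mode. A small sketch with placeholder data:

import numpy as np

dt_dist = [[0.2, 0.8], [0.5, 0.5]]    # placeholder attention distributions
with open('dt_dist.npy', 'wb') as f:  # binary mode, not "w"
    np.save(f, dt_dist)               # the list is converted to an ndarray before writing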

Example 7

def predictPL(self):
    B = self.flags.batch_size
    W, H, C = self.flags.width, self.flags.height, self.flags.color
    inputs = tf.placeholder(dtype=tf.float32, shape=[None, H, W, C])
    #with open(self.flags.pred_path,'w') as f:
    #    pass
    self._build(inputs, resize=False)
    counter = 0
    with tf.Session() as sess:
        self.sess = sess
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        for imgs, imgnames in self.DATA.test_generator():
            pred = sess.run(self.logit, feed_dict={inputs: imgs})
            np.save("%s/%d.npy" % (self.flags.pred_path, counter), {"pred": pred, "name": imgnames})
            counter += len(imgs)
            if counter / B % 10 == 0:
                print_mem_time("%d images predicted" % counter)

# train with placeholders

Example 8

def show_embedding(self, name, save_model="model.ckpt", meta_path='metadata.tsv'):
    self._build()
    self._write_meta()
    from tensorflow.contrib.tensorboard.plugins import projector
    # Use the same LOG_DIR where you stored your checkpoint.
    with tf.Session() as sess:
        self.sess = sess
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(self.flags.log_path, sess.graph)
        saver = tf.train.Saver()
        saver.save(sess, os.path.join(self.flags.log_path, save_model), 0)
        # Format: tensorflow/contrib/tensorboard/plugins/projector/projector_config.proto
        config = projector.ProjectorConfig()
        # You can add multiple embeddings. Here we add only one.
        embedding = config.embeddings.add()
        embedding.tensor_name = name
        # Link this tensor to its metadata file (e.g. labels).
        embedding.metadata_path = os.path.join(self.flags.log_path, meta_path)
        # Saves a configuration file that TensorBoard will read during startup.
        projector.visualize_embeddings(summary_writer, config)

Example 9

def split(flags):
    if os.path.exists(flags.split_path):
        return np.load(flags.split_path).item()
    folds = flags.folds
    path = flags.input_path
    random.seed(6)
    img_list = ["%s/%s" % (path, img) for img in os.listdir(path)]
    random.shuffle(img_list)
    dic = {}
    n = len(img_list)
    num = (n + folds - 1) // folds
    for i in range(folds):
        s, e = i * num, min(i * num + num, n)
        dic[i] = img_list[s:e]
    np.save(flags.split_path, dic)
    return dic
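
Because split() saves a plain Python dict, np.save() pickles it into a zero-dimensional object array. On current NumPy versions, reading it back requires allow_pickle=True plus .item(), as this sketch (with a placeholder file name and split) shows:

import numpy as np

folds = {0: ['img_0.png', 'img_1.png'], 1: ['img_2.png']}  # placeholder split
np.save('split.npy', folds)                                # the dict is pickled into an object array
restored = np.load('split.npy', allow_pickle=True).item()  # .item() unwraps the dict again
assert restored == folds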

Example 10

def save(self, filename):
    """Saves the collection to a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to save the collection to.

    Raises
    ------
    ValueError
        If the file extension is not .npy or .npz.
    """
    file_root, file_ext = os.path.splitext(filename)
    if file_ext == '.npy':
        np.save(filename, self._data)
    elif file_ext == '.npz':
        np.savez_compressed(filename, self._data)
    else:
        raise ValueError('Extension %s not supported.' % file_ext)
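
For reference, a minimal sketch (file names are placeholders) of the two branches above: np.save() writes a single array to a .npy file and np.load() returns it directly, while np.savez_compressed() writes a zipped .npz archive whose arrays are looked up by name when loaded.

import numpy as np

data = np.arange(6).reshape(2, 3)

np.save('collection.npy', data)                   # one array per .npy file
arr = np.load('collection.npy')

np.savez_compressed('collection.npz', data=data)  # several named arrays per .npz archive
with np.load('collection.npz') as archive:
    arr2 = archive['data']                        # arrays are retrieved by key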
