This article collects typical usage examples of Python's multiprocessing.pool.Pool. If you are wondering what pool.Pool does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse the other usage examples from the multiprocessing.pool module.
A total of 27 code examples of pool.Pool are listed below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
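Before the collected examples, here is a minimal, hypothetical sketch (not taken from any of the projects below) of the pattern most of them follow: create the Pool, ideally as a context manager, hand it a picklable function plus an iterable of arguments, and collect the results.

from multiprocessing.pool import Pool

def square(x):
    # Worker function; defined at module level so it can be pickled.
    return x * x

if __name__ == '__main__':
    with Pool(processes=4) as pool:              # start four worker processes
        results = pool.map(square, range(10))    # fan the work out, block until done
    print(results)                               # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]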
Example 1: save_tfrecord
Votes: 6

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def save_tfrecord(filename, dataset, verbose=False):
    observations = len(dataset['length'])

    # Serialize the observations in parallel; imap preserves input order.
    serialized = []
    with Pool(processes=4) as pool:
        for serialized_string in tqdm(pool.imap(
            tfrecord_serializer,
            zip(dataset['length'], dataset['source'], dataset['target']),
            chunksize=10
        ), total=observations, disable=not verbose):
            serialized.append(serialized_string)

    # Save serialized dataset
    writer = tf.python_io.TFRecordWriter(
        filename,
        options=tf.python_io.TFRecordOptions(
            tf.python_io.TFRecordCompressionType.ZLIB
        )
    )
    for serialized_string in tqdm(serialized, disable=not verbose):
        writer.write(serialized_string)
    writer.close()

Developer: distillpub | Project: post--memorization-in-rnns | Lines of code: 26
Example 2: shuffled_analysis
Votes: 6

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def shuffled_analysis(iterations: int, meta: pd.DataFrame, counts: pd.DataFrame, interactions: pd.DataFrame,
                      cluster_interactions: list, base_result: pd.DataFrame, threads: int, separator: str,
                      suffixes: tuple = ('_1', '_2'), counts_data: str = 'ensembl') -> list:
    """
    Shuffles meta, calculates the means for each iteration and saves them in a list.
    Runs across multiple workers to make it faster.
    """
    core_logger.info('Running Statistical Analysis')
    with Pool(processes=threads) as pool:
        # Bind the arguments that stay constant across iterations.
        statistical_analysis_thread = partial(_statistical_analysis,
                                              base_result,
                                              cluster_interactions,
                                              counts,
                                              interactions,
                                              meta,
                                              separator,
                                              suffixes,
                                              counts_data=counts_data)
        results = pool.map(statistical_analysis_thread, range(iterations))
    return results

Developer: Teichlab | Project: cellphonedb | Lines of code: 25
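The functools.partial call in this example is the usual way to bind the arguments that do not change between iterations, so that pool.map only has to vary one parameter. A stripped-down, hypothetical illustration of the same pattern:

from functools import partial
from multiprocessing.pool import Pool

def analyse(base, separator, iteration):
    # Only 'iteration' varies between tasks; the other arguments are bound by partial().
    return '{}{}{}'.format(base, separator, iteration)

if __name__ == '__main__':
    worker = partial(analyse, 'run', '-')        # fix the constant arguments up front
    with Pool(processes=4) as pool:
        print(pool.map(worker, range(3)))        # ['run-0', 'run-1', 'run-2']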
Example 3: fill_queue
Votes: 6

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def fill_queue(self):
    if self.results is None:
        self.results = queue.deque(maxlen=self.max_queue)
    if self.num_workers > 0:
        if self.pool is None:
            self.pool = Pool(processes=self.num_workers)
    while len(self.results) < self.max_queue:
        if self.distinct_levels is not None and self.idx >= self.distinct_levels:
            break
        elif not self.repeat_levels and self.idx >= len(self.file_data):
            break
        else:
            data = self.get_next_parameters()
            if data is None:
                break
            self.idx += 1
            kwargs = {'seed': self._seed.spawn(1)[0]}
            if self.num_workers > 0:
                # Dispatch to the pool; the deque stores AsyncResult objects.
                result = self.pool.apply_async(_game_from_data, data, kwargs)
            else:
                result = _game_from_data(*data, **kwargs)
            self.results.append((data, result))

Developer: PartnershipOnAI | Project: safelife | Lines of code: 25
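When num_workers > 0, apply_async returns immediately, so the deque above holds AsyncResult objects rather than finished games; the consumer is expected to call .get() on them later. A hypothetical sketch of that asynchronous pattern:

from multiprocessing.pool import Pool

def slow_double(x):
    return x * 2

if __name__ == '__main__':
    with Pool(processes=2) as pool:
        pending = [pool.apply_async(slow_double, (i,)) for i in range(4)]  # AsyncResult objects, returned immediately
        values = [result.get() for result in pending]                      # block only when a value is actually needed
    print(values)                                                          # [0, 2, 4, 6]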
Example 4: __init__
Votes: 6

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def __init__(self, configer=None, num_classes=None, boundary_threshold=0.00088, num_proc=15):
    assert configer is not None or num_classes is not None

    self.configer = configer
    if configer is not None:
        self.n_classes = self.configer.get('data', 'num_classes')
    else:
        self.n_classes = num_classes

    self.ignore_index = -1
    self.boundary_threshold = boundary_threshold
    # Process pool kept for the lifetime of the object and reused for parallel metric computation.
    self.pool = Pool(processes=num_proc)
    self.num_proc = num_proc

    self._Fpc = 0
    self._Fc = 0
    self.seg_map_cache = []
    self.gt_map_cache = []

Developer: openseg-group | Project: openseg.pytorch | Lines of code: 21
Example 5: create_features_from_path
Votes: 6

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def create_features_from_path(self, train_path: str, test_path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
    column_pairs = self.get_column_pairs()

    col1s = []
    col2s = []
    latent_vectors = []
    gc.collect()
    with Pool(4) as p:
        # Compute the latent vector for each column pair in a separate worker.
        for col1, col2, latent_vector in p.map(
                partial(self.compute_latent_vectors, train_path=train_path, test_path=test_path),
                column_pairs):
            col1s.append(col1)
            col2s.append(col2)
            latent_vectors.append(latent_vector.astype(np.float32))
    gc.collect()
    return self.get_feature(train_path, col1s, col2s, latent_vectors), \
           self.get_feature(test_path, col1s, col2s, latent_vectors)