Python request.urlretrieve 方法代码示例

本文整理汇总了Python中six.moves.urllib.request.urlretrieve方法的典型用法代码示例。如果您正苦于以下问题:Python request.urlretrieve方法的具体用法?Python request.urlretrieve怎么用?Python request.urlretrieve使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块six.moves.urllib.request的用法示例。

在下文中一共展示了request.urlretrieve方法的28个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: maybe_download

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def maybe_download(url, filename, prefix, num_bytes=None):
    """Download ``url + filename`` into ``prefix`` unless it already exists.

    Args:
        url: Base URL; the file is fetched from ``url + filename``.
        filename: Name of the file to download.
        prefix: Local directory the file is stored in.
        num_bytes: Expected size in bytes; ``None`` disables the size check.

    Returns:
        The path of the freshly downloaded file, or ``None`` when the file
        was already present (original behavior preserved).

    Raises:
        Exception: If the on-disk size does not match ``num_bytes``.
    """
    local_filename = None
    # Compute the target path once instead of re-joining it three times.
    local_path = os.path.join(prefix, filename)
    if not os.path.exists(local_path):
        try:
            # Converted from Python 2 print statements to print() calls so the
            # function runs under Python 3 like the rest of the examples.
            print("Downloading file {}...".format(url + filename))
            with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
                local_filename, _ = urlretrieve(url + filename, local_path,
                                                reporthook=reporthook(t))
        except AttributeError as e:
            print("An error occurred when downloading the file! Please get the dataset using a browser.")
            raise e
    # We have a downloaded file; check the stats and make sure they are ok.
    file_stats = os.stat(local_path)
    if num_bytes is None or file_stats.st_size == num_bytes:
        print("File {} successfully loaded".format(filename))
    else:
        raise Exception("Unexpected dataset size. Please get the dataset using a browser.")
    return local_filename

开发者ID:abisee,项目名称:cs224n-win18-squad,代码行数:24,

示例2: maybe_download

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def maybe_download(filename, expected_bytes, force=False):
    """Fetch ``filename`` from the module-level ``url`` unless it is already
    on disk, then verify its size equals ``expected_bytes``."""
    needs_fetch = force or not os.path.exists(filename)
    if needs_fetch:
        print('Attempting to download:', filename)
        filename, _ = urlretrieve(url + filename, filename,
                                  reporthook=download_progress_hook)
        print('\nDownload Complete!')
    size_on_disk = os.stat(filename).st_size
    if size_on_disk != expected_bytes:
        raise Exception(
            'Failed to verify ' + filename +
            '. Can you get to it with a browser?')
    print('Found and verified', filename)
    return filename

开发者ID:igormq,项目名称:ctc_tensorflow_example,代码行数:18,

示例3: retrieve_file_from_url

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def retrieve_file_from_url(url):
    """
    Retrieve a file from an URL

    Args:
        url: The URL to retrieve the file from.

    Returns:
        The absolute path of the downloaded file.
    """
    try:
        alias_source, _ = urlretrieve(url)
        # Check for HTTPError in Python 2.x: an error body starts with the
        # numeric status code.
        with open(alias_source, 'r') as downloaded:
            body = downloaded.read()
        if body[:3].isdigit():
            raise CLIError(ALIAS_FILE_URL_ERROR.format(url, body.strip()))
    except CLIError:
        # Re-raise our own error untouched.
        raise
    except Exception as exception:
        # Python 3.x surfaces download failures as generic exceptions.
        raise CLIError(ALIAS_FILE_URL_ERROR.format(url, exception))
    return alias_source

开发者ID:Azure,项目名称:azure-cli-extensions,代码行数:27,

示例4: maybe_download

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def maybe_download(filename, expected_bytes, force=False):
    """Download ``filename`` into ``data_root`` if absent, then verify that
    the file on disk has exactly ``expected_bytes`` bytes."""
    dest_filename = os.path.join(data_root, filename)
    if force or not os.path.exists(dest_filename):
        print('Attempting to download:', filename)
        filename, _ = urlretrieve(url + filename, dest_filename,
                                  reporthook=download_progress_hook)
        print('\nDownload Complete!')
    statinfo = os.stat(dest_filename)
    if statinfo.st_size != expected_bytes:
        raise Exception(
            'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
    print('Found and verified', dest_filename)
    return dest_filename

#num_classes = 10

开发者ID:PacktPublishing,项目名称:Neural-Network-Programming-with-TensorFlow,代码行数:19,

示例5: download

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def download(url, dst_file_path):
    """Download ``url`` to ``dst_file_path``, showing a progress bar."""
    # Mutable cell so the hook can lazily create and keep the bar
    # (pre-`nonlocal` idiom, kept for Python 2 compatibility).
    state = {'bar': None}

    def reporthook(count, block_size, total_size):
        bar = state['bar']
        if bar is None:
            widgets = [
                progressbar.Percentage(),
                ' ',
                progressbar.Bar(),
                ' ',
                progressbar.FileTransferSpeed(),
                ' | ',
                progressbar.ETA(),
            ]
            bar = progressbar.ProgressBar(maxval=total_size, widgets=widgets)
            bar.start()
            state['bar'] = bar
        # Clamp: the final block may overshoot the reported total.
        bar.update(min(count * block_size, total_size))

    request.urlretrieve(url, dst_file_path, reporthook=reporthook)

开发者ID:chainer,项目名称:chainer,代码行数:25,

示例6: load_bert_vocab

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def load_bert_vocab(vocab_file):
    """Load a BERT vocabulary (local path or URL) into an ordered
    token -> index mapping, caching it in the module-level ``BERT_VOCAB``."""
    global BERT_VOCAB
    if BERT_VOCAB is not None:
        # Already loaded once in this process; reuse the cached mapping.
        return BERT_VOCAB
    if validate_url(vocab_file):
        print(f'Downloading {vocab_file}')
        vocab_file, _ = urlretrieve(vocab_file)
    vocab = collections.OrderedDict()
    with open(vocab_file, "r") as rf:
        index = 0
        for line in rf:
            token = convert_to_unicode(line)
            if not token:
                # Stop at the first empty token, as the original did.
                break
            vocab[token.strip()] = index
            index += 1
    BERT_VOCAB = vocab
    return vocab

开发者ID:dpressel,项目名称:mead-baseline,代码行数:23,

示例7: web_downloader

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def web_downloader(url, path_to_save=None):
    """Download ``url`` to ``path_to_save`` (default: a /tmp file named after
    the current pid), showing a progress bar, and return the saved path.

    Raises:
        RuntimeError: If the download fails for any reason; the original
            exception is attached as ``__cause__``.
    """
    # Use a class to simulate the nonlocal keyword in 2.7
    class Context: pg = None

    def _report_hook(count, block_size, total_size):
        if Context.pg is None:
            # Number of blocks, rounded up; 1 when the size is unknown (-1).
            length = int((total_size + block_size - 1) / float(block_size)) if total_size != -1 else 1
            Context.pg = create_progress_bar(length)
        Context.pg.update()

    if not path_to_save:
        path_to_save = "/tmp/data.dload-{}".format(os.getpid())
    try:
        path_to_save, _ = urlretrieve(url, path_to_save, reporthook=_report_hook)
        # Bug fix: the hook may never have fired (empty response), in which
        # case Context.pg is still None and .done() would raise.
        if Context.pg is not None:
            Context.pg.done()
    except Exception as e:  # this is too broad but there are too many exceptions to handle separately
        # Bug fix: chain the original exception so the real cause is visible.
        raise RuntimeError("failed to download data from [url]: {} [to]: {}".format(url, path_to_save)) from e
    return path_to_save

开发者ID:dpressel,项目名称:mead-baseline,代码行数:20,

示例8: download_exchange_symbols

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def download_exchange_symbols(exchange_name):
    """
    Downloads the exchange's symbols.json from the repository.

    Parameters
    ----------
    exchange_name: str

    Returns
    -------
    The ``request.urlretrieve`` result for the fetched symbols file.
    """
    target = get_exchange_symbols_filename(exchange_name)
    source = SYMBOLS_URL.format(exchange=exchange_name)
    return request.urlretrieve(url=source, filename=target)

开发者ID:enigmampc,项目名称:catalyst,代码行数:20,

示例9: download_file

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def download_file(file_url, output_file_dir, expected_size, FORCE=False):
    """Download ``file_url`` into ``output_file_dir`` and verify its size.

    A file already present with the expected size is reused unless ``FORCE``
    is true. Raises ``Exception`` when the downloaded size is wrong.
    """
    name = file_url.split('/')[-1]
    file_output_path = os.path.join(output_file_dir, name)
    print('Attempting to download ' + file_url)
    print('File output path: ' + file_output_path)
    print('Expected size: ' + str(expected_size))
    if not os.path.isdir(output_file_dir):
        os.makedirs(output_file_dir)
    already_complete = (
        os.path.isfile(file_output_path)
        and os.stat(file_output_path).st_size == expected_size
        and not FORCE)
    if already_complete:
        print('File already downloaded completely!')
        return file_output_path
    print(' ')
    filename, _ = urlretrieve(file_url, file_output_path, download_hook_function)
    print(' ')
    if os.stat(filename).st_size != expected_size:
        raise Exception('Could not download ' + filename)
    print('Found and verified', filename)
    return filename

开发者ID:PacktPublishing,项目名称:Machine-Learning-with-TensorFlow-1.x,代码行数:24,

示例10: download_file

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def download_file(file_url, output_file_dir, expected_size, FORCE=False):
    """Download ``file_url`` into ``output_file_dir``, echoing the downloaded
    size, and verify it against ``expected_size``.

    An existing file of the right size short-circuits the download unless
    ``FORCE`` is true.
    """
    name = file_url.split('/')[-1]
    file_output_path = os.path.join(output_file_dir, name)
    print('Attempting to download ' + file_url)
    print('File output path: ' + file_output_path)
    print('Expected size: ' + str(expected_size))
    if not os.path.isdir(output_file_dir):
        os.makedirs(output_file_dir)
    reuse_existing = (not FORCE
                      and os.path.isfile(file_output_path)
                      and os.stat(file_output_path).st_size == expected_size)
    if reuse_existing:
        print('File already downloaded completely!')
        return file_output_path
    print(' ')
    filename, _ = urlretrieve(file_url, file_output_path, download_hook_function)
    print(' ')
    statinfo = os.stat(filename)
    print(statinfo.st_size)
    if statinfo.st_size != expected_size:
        raise Exception('Could not download ' + filename)
    print('Found and verified', filename)
    return filename

开发者ID:PacktPublishing,项目名称:Machine-Learning-with-TensorFlow-1.x,代码行数:25,

示例11: maybe_download

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def maybe_download(url, filename, prefix, num_bytes=None):
    """Download ``url + filename`` into ``prefix`` when missing; optionally
    verify the on-disk size against ``num_bytes`` (``None`` skips the check).

    Returns the freshly downloaded path, or ``None`` when the file was
    already present.
    """
    fetched = None
    output_path = os.path.join(prefix, filename)
    if not os.path.exists(output_path):
        try:
            print("Downloading file {} to {}...".format(url + filename, output_path))
            with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as progress:
                fetched, _ = urlretrieve(url + filename, output_path,
                                         reporthook=reporthook(progress))
        except AttributeError as err:
            print("An error occurred when downloading the file! Please get the dataset using a browser.")
            raise err
    # Sanity-check whatever is now on disk.
    size_ok = num_bytes is None or os.stat(output_path).st_size == num_bytes
    if not size_ok:
        raise Exception("Unexpected dataset size. Please get the dataset using a browser.")
    print("File {} successfully downloaded to {}.".format(filename, output_path))
    return fetched

开发者ID:chrischute,项目名称:squad-transformer,代码行数:25,

示例12: download_data_url

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def download_data_url(url, download_dir):
    """Download ``url`` into ``download_dir`` (skipping files already there)
    and unpack ``.zip`` / ``.tar.gz`` / ``.tgz`` archives in place."""
    archive_name = url.split('/')[-1]
    file_path = os.path.join(download_dir, archive_name)
    if os.path.exists(file_path):
        # Already downloaded; nothing to do.
        return
    os.makedirs(download_dir, exist_ok=True)
    print('Download %s to %s' % (url, file_path))
    file_path, _ = request.urlretrieve(
        url=url,
        filename=file_path,
        reporthook=report_download_progress)
    print('\nExtracting files')
    if file_path.endswith('.zip'):
        zipfile.ZipFile(file=file_path, mode='r').extractall(download_dir)
    elif file_path.endswith(('.tar.gz', '.tgz')):
        tarfile.open(name=file_path, mode='r:gz').extractall(download_dir)

开发者ID:LGE-ARC-AdvancedAI,项目名称:auptimizer,代码行数:20,

示例13: urlretrieve

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def urlretrieve(url, filename, reporthook=None, data=None):
    """Stream ``url`` into ``filename`` in 8 KiB chunks.

    Args:
        url: URL to retrieve.
        filename: Local destination path.
        reporthook: Optional callable ``(count, block_size, total_size)``
            invoked after each non-empty chunk.
        data: Optional request body passed through to ``urlopen``.
    """
    def chunk_read(response, chunk_size=8192, reporthook=None):
        # Bug fix: a missing Content-Length header crashed on None.strip();
        # report -1 (size unknown) instead, matching urllib's convention.
        header = response.info().get('Content-Length')
        total_size = int(header.strip()) if header is not None else -1
        count = 0
        while 1:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            count += 1
            if reporthook:
                reporthook(count, chunk_size, total_size)
            yield chunk

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
        for chunk in chunk_read(response, reporthook=reporthook):
            fd.write(chunk)

开发者ID:GUR9000,项目名称:KerasNeuralFingerprint,代码行数:20,

示例14: urlretrieve

​点赞 6

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def urlretrieve(url, filename, reporthook=None, data=None):
    """Stream ``url`` into ``filename``; the final hook call reports completion
    as ``(count, total_size, total_size)``.

    Fixes two defects in the original: ``reporthook`` was invoked on the empty
    final chunk even when it was ``None`` (TypeError), and a missing
    Content-Length header crashed on ``None.strip()``.

    Args:
        url: URL to retrieve.
        filename: Local destination path.
        reporthook: Optional callable ``(count, block_size, total_size)``.
        data: Optional request body passed through to ``urlopen``.
    """
    def chunk_read(response, chunk_size=8192, reporthook=None):
        header = response.info().get('Content-Length')
        # -1 signals "size unknown", matching urllib's convention.
        total_size = int(header.strip()) if header is not None else -1
        count = 0
        while 1:
            chunk = response.read(chunk_size)
            count += 1
            if not chunk:
                if reporthook:
                    # Completion signal: processed bytes == total bytes.
                    reporthook(count, total_size, total_size)
                break
            if reporthook:
                reporthook(count, chunk_size, total_size)
            yield chunk

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
        for chunk in chunk_read(response, reporthook=reporthook):
            fd.write(chunk)

开发者ID:ECP-CANDLE,项目名称:Benchmarks,代码行数:21,

示例15: cached_download

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def cached_download(url):
    """Downloads a file and caches it.

    This is different from the original
    :func:`~chainer.dataset.cached_download` in that the download
    progress is reported.

    It downloads a file from the URL if there is no corresponding cache.
    If there is already a cache for the given URL, it just returns the
    path to the cache without downloading the same file.

    Args:
        url (string): URL to download from.

    Returns:
        string: Path to the downloaded file.
    """
    cache_root = os.path.join(get_dataset_root(), '_dl_cache')
    try:
        os.makedirs(cache_root)
    except OSError:
        # A concurrent process may have created it; anything else (e.g.
        # permissions) is a genuine failure.
        if not os.path.exists(cache_root):
            raise
    lock_path = os.path.join(cache_root, '_dl_lock')
    url_digest = hashlib.md5(url.encode('utf-8')).hexdigest()
    cache_path = os.path.join(cache_root, url_digest)

    # Fast path: another process already cached this URL.
    with filelock.FileLock(lock_path):
        if os.path.exists(cache_path):
            return cache_path

    # Download into a private temp dir, then move into place under the lock
    # so readers never observe a partially written cache entry.
    temp_root = tempfile.mkdtemp(dir=cache_root)
    try:
        temp_path = os.path.join(temp_root, 'dl')
        print('Downloading ...')
        print('From: {:s}'.format(url))
        print('To: {:s}'.format(cache_path))
        request.urlretrieve(url, temp_path, _reporthook)
        with filelock.FileLock(lock_path):
            shutil.move(temp_path, cache_path)
    finally:
        shutil.rmtree(temp_root)
    return cache_path

开发者ID:chainer,项目名称:chainerrl,代码行数:43,

示例16: load_word_vectors

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def load_word_vectors(root, wv_type, dim):
    """Load word vectors from a path, trying .pt, .txt, and .zip extensions."""
    if isinstance(dim, int):
        # Normalize an integer dimension to the "<n>d" suffix used in names.
        dim = str(dim) + 'd'
    fname = os.path.join(root, wv_type + '.' + dim)

    cached_pt = fname + '.pt'
    if os.path.isfile(cached_pt):
        print('loading word vectors from', cached_pt)
        return torch.load(cached_pt)

    if os.path.isfile(fname + '.txt'):
        fname_txt = fname + '.txt'
        cm = open(fname_txt, 'rb')
        # NOTE(review): `cm` is unused below — this excerpt appears truncated
        # upstream; the consuming code is not visible here.
        cm = [line for line in cm]
    elif os.path.basename(wv_type) in URL:
        url = URL[wv_type]
        print('downloading word vectors from {}'.format(url))
        filename = os.path.basename(fname)
        if not os.path.exists(root):
            os.makedirs(root)
        with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
            fname, _ = urlretrieve(url, fname, reporthook=reporthook(t))
        with zipfile.ZipFile(fname, "r") as zf:
            print('extracting word vectors into {}'.format(root))
            zf.extractall(root)
        if not os.path.isfile(fname + '.txt'):
            raise RuntimeError('no word vectors of requested dimension found')
        # Retry now that the archive has been unpacked into .txt files.
        return load_word_vectors(root, wv_type, dim)
    else:
        raise RuntimeError('unable to load word vectors')

#######

开发者ID:uwnlp,项目名称:verb-attributes,代码行数:32,

示例17: download_url

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def download_url(url, destination=None, progress_bar=True):
    """Download a URL to a local file.

    Parameters
    ----------
    url : str
        The URL to download.
    destination : str, None
        The destination of the file. If None is given the file is saved to a temporary directory.
    progress_bar : bool
        Whether to show a command-line progress bar while downloading.

    Returns
    -------
    filename : str
        The location of the downloaded file.

    Notes
    -----
    Progress bar use/example adapted from tqdm documentation: https://github.com/tqdm/tqdm
    """
    def my_hook(t):
        # Closure cell: byte count seen by the previous hook invocation.
        last_b = [0]

        def inner(b=1, bsize=1, tsize=None):
            if tsize is not None:
                t.total = tsize
            if b > 0:
                t.update((b - last_b[0]) * bsize)
                last_b[0] = b

        return inner

    if progress_bar:
        with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
            filename, _ = urlretrieve(url, filename=destination, reporthook=my_hook(t))
    else:
        filename, _ = urlretrieve(url, filename=destination)
    # Bug fix: the documented return value was missing entirely.
    return filename

开发者ID:alexandonian,项目名称:pretorched-x,代码行数:41,

示例18: download

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def download(number=-1, name="", save_dir='./'):
    """Download pre-trained word vector

    :param number: integer, default ``-1`` (select by row number)
    :param name: str, default ``""`` (select by dataset name)
    :param save_dir: str, default './'
    :return: file path for downloaded file, or ``None`` when the requested
        vector could not be found
    :raises ValueError: if neither ``number`` nor ``name`` is given
    """
    df = load_datasets()
    if number > -1:
        row = df.iloc[[number]]
    elif name:
        row = df.loc[df["Name"] == name]
    else:
        # Bug fix: with neither selector given, `row` was never assigned and
        # the next line crashed with NameError.
        raise ValueError('Specify a word vector by number or by name.')
    url = ''.join(row.URL)
    if not url:
        # Bug fix: the original printed this message and then proceeded to
        # download from an empty URL, crashing anyway; bail out instead.
        print('The word vector you specified was not found. Please specify correct name.')
        return None
    widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()), ' ', ETA(), ' ', FileTransferSpeed()]
    pbar = ProgressBar(widgets=widgets)

    def dlProgress(count, blockSize, totalSize):
        if pbar.max_value is None:
            pbar.max_value = totalSize
            pbar.start()
        pbar.update(min(count * blockSize, totalSize))

    file_name = url.split('/')[-1]
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_path = os.path.join(save_dir, file_name)
    path, _ = urlretrieve(url, save_path, reporthook=dlProgress)
    pbar.finish()
    return path

开发者ID:chakki-works,项目名称:chakin,代码行数:36,

示例19: maybe_download

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def maybe_download(filename, expected_bytes, force=False):
    """Download a file if not present, and make sure it's the right size."""
    dest_filename = os.path.join(data_root, filename)
    missing = force or not os.path.exists(dest_filename)
    if missing:
        print('Attempting to download:', filename)
        filename, _ = urlretrieve(url + filename, dest_filename,
                                  reporthook=download_progress_hook)
        print('\nDownload Complete!')
    if os.stat(dest_filename).st_size == expected_bytes:
        print('Found and verified', dest_filename)
        return dest_filename
    raise Exception(
        'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')

开发者ID:PacktPublishing,项目名称:Neural-Network-Programming-with-TensorFlow,代码行数:16,

示例20: download_mnist_data

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def download_mnist_data():
    """Download the four MNIST archives, convert them to arrays, and pickle
    the combined dataset to ``mnist.pkl``."""
    # The four archive names are module-level globals; fetch each in turn
    # (collapses four copy-pasted download stanzas into one loop).
    for archive in (train_images, train_labels, test_images, test_labels):
        print('Downloading {:s}...'.format(archive))
        request.urlretrieve('{:s}/{:s}'.format(parent, archive), archive)
        print('Done')
    print('Converting training data...')
    data_train, target_train = load_mnist(train_images, train_labels,
                                          num_train)
    print('Done')
    print('Converting test data...')
    data_test, target_test = load_mnist(test_images, test_labels, num_test)
    mnist = {'data': np.append(data_train, data_test, axis=0),
             'target': np.append(target_train, target_test, axis=0)}
    print('Done')
    print('Save output...')
    with open('mnist.pkl', 'wb') as output:
        six.moves.cPickle.dump(mnist, output, -1)
    print('Done')
    print('Convert completed')

开发者ID:lanpa,项目名称:tensorboardX,代码行数:30,

示例21: download_file

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def download_file(url, filename, quiet=True, reporthook_kwargs=None):
    """Downloads a file with optional progress report.

    Args:
        url: Fully qualified http/https/ftp URL.
        filename: Local destination path (also used as the progress label).
        quiet: When true, download without any progress reporting.
        reporthook_kwargs: Extra keyword arguments for the progress hook.

    Raises:
        ValueError: If the URL has no scheme or an unsupported one.
    """
    if '://' not in url:
        raise ValueError("fully qualified URL required: %s" % url)
    if url.partition('://')[0] not in ('https', 'http', 'ftp'):
        raise ValueError("unsupported URL schema: %s" % url)
    # ftp goes through urllib; http(s) through the requests-based helper.
    if url.startswith('ftp://'):
        retrieve = _urlretrieve
    else:
        retrieve = _urlretrieve_requests
    if quiet:
        return retrieve(url, filename)
    reporthook_kwargs = reporthook_kwargs or {}
    if filename:
        reporthook_kwargs.setdefault('desc', filename)
    reporthook_kwargs.setdefault('unit', 'b')
    reporthook_kwargs.setdefault('unit_scale', True)
    reporthook = _ReportHook(**reporthook_kwargs)
    # Bug fix: `retrieve` was redundantly recomputed here with a duplicate
    # of the scheme check above; the earlier assignment is authoritative.
    with contextlib.closing(reporthook):
        retrieve(url, filename, reporthook)

开发者ID:rmax,项目名称:databrewer,代码行数:28,

示例22: main

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def main():
    """Fetch the bAbI tasks archive into the current working directory."""
    archive = 'tasks_1-20_v1-2.tar.gz'
    request.urlretrieve(
        'http://www.thespermwhale.com/jaseweston/babi/' + archive, archive)

开发者ID:chainer,项目名称:chainer,代码行数:6,

示例23: urlretrieve

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrive` for Python 2.

    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.

    # Arguments
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once
            on establishment of the network connection and once
            after each block read thereafter.
            The hook will be passed three arguments;
            a count of blocks transferred so far,
            a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """
    def chunk_read(response, chunk_size=8192, reporthook=None):
        header = response.info().get('Content-Length')
        # -1 signals "size unknown" to the hook, mirroring urllib.
        total_size = int(header.strip()) if header is not None else -1
        count = 0
        while True:
            chunk = response.read(chunk_size)
            count += 1
            if reporthook is not None:
                reporthook(count, chunk_size, total_size)
            if not chunk:
                break
            yield chunk

    with closing(urlopen(url, data)) as response, open(filename, 'wb') as fd:
        for chunk in chunk_read(response, reporthook=reporthook):
            fd.write(chunk)

开发者ID:Relph1119,项目名称:GraphicDesignPatternByPython,代码行数:39,

示例24: urlretrieve

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrive` for Python 2.

    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.

    # Arguments
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once
            on establishment of the network connection and once
            after each block read thereafter.
            The hook will be passed three arguments;
            a count of blocks transferred so far,
            a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """
    def chunk_read(response, chunk_size=8192, reporthook=None):
        content_type = response.info().get('Content-Length')
        if content_type is None:
            # Size unknown: report -1, like urllib does.
            total_size = -1
        else:
            total_size = int(content_type.strip())
        count = 0
        chunk = response.read(chunk_size)
        while True:
            count += 1
            if reporthook is not None:
                reporthook(count, chunk_size, total_size)
            if not chunk:
                return
            yield chunk
            chunk = response.read(chunk_size)

    with closing(urlopen(url, data)) as response, open(filename, 'wb') as fd:
        for piece in chunk_read(response, reporthook=reporthook):
            fd.write(piece)

开发者ID:videoflow,项目名称:videoflow,代码行数:37,

示例25: maybe_download

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def maybe_download(filename, expected_bytes, force=False):
    """Download a file if not present, and make sure it's the right size."""
    if force or not os.path.exists(filename):
        print('Attempting to download:', filename)
        filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
        print('\nDownload Complete!')
    actual_size = os.stat(filename).st_size
    if actual_size != expected_bytes:
        raise Exception(
            'Failed to verify ' + filename + '. Can you get to it with a browser?')
    print('Found and verified', filename)
    return filename

开发者ID:eliben,项目名称:deep-learning-samples,代码行数:15,

示例26: setUpClass

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def setUpClass(cls):
    """Fetch the pre-computed COCO detection fixtures and attach them to the class."""
    base_url = 'https://chainercv-models.preferred.jp/tests'
    pairs = (
        ('dataset', 'eval_detection_coco_dataset_2017_10_16.npz'),
        ('result', 'eval_detection_coco_result_2017_10_16.npz'),
    )
    for attr, archive in pairs:
        local_path = request.urlretrieve(os.path.join(base_url, archive))[0]
        setattr(cls, attr, np.load(local_path, allow_pickle=True))

开发者ID:chainer,项目名称:chainercv,代码行数:11,

示例27: setUpClass

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def setUpClass(cls):
    """Fetch the pre-computed VOC instance-segmentation fixtures onto the class."""
    base_url = 'https://chainercv-models.preferred.jp/tests'
    archives = (
        'eval_instance_segmentation_voc_dataset_2018_04_04.npz',
        'eval_instance_segmentation_voc_result_2018_04_04.npz',
    )
    loaded = []
    for archive in archives:
        local_path = request.urlretrieve(os.path.join(base_url, archive))[0]
        loaded.append(np.load(local_path,
                              allow_pickle=True,
                              encoding='latin1'))
    cls.dataset, cls.result = loaded

示例28: setUpClass

​点赞 5

# 需要导入模块: from six.moves.urllib import request [as 别名]

# 或者: from six.moves.urllib.request import urlretrieve [as 别名]

def setUpClass(cls):
    """Fetch the pre-computed COCO instance-segmentation fixtures onto the class."""
    base_url = 'https://chainercv-models.preferred.jp/tests'
    pairs = (
        ('dataset', 'eval_instance_segmentation_coco_dataset_2018_07_06.npz'),
        ('result', 'eval_instance_segmentation_coco_result_2019_02_12.npz'),
    )
    for attr, archive in pairs:
        local_path = request.urlretrieve(os.path.join(base_url, archive))[0]
        setattr(cls, attr, np.load(local_path,
                                   encoding='latin1',
                                   allow_pickle=True))

注:本文中的six.moves.urllib.request.urlretrieve方法示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值