You can use multiple threads or processes. Just make sure to create a new ftplib.FTP object in each thread (a sketch that uses ftplib directly is given at the end of this answer). The simplest approach, code-wise, is to use multiprocessing.Pool:
#!/usr/bin/env python
from multiprocessing.dummy import Pool  # use threads

try:
    from urllib import urlretrieve
except ImportError:  # Python 3
    from urllib.request import urlretrieve

def download(url):
    url = url.strip()
    try:
        return urlretrieve(url, url2filename(url)), None
    except Exception as e:
        return None, e

if __name__ == "__main__":
    p = Pool(20)  # specify number of concurrent downloads
    print(p.map(download, open('urls')))  # perform parallel downloads
where the urls file contains the FTP URLs of the files to download, e.g., ftp://example.com/path/to/file, and url2filename() extracts the filename part from a URL, e.g.:
import os
import posixpath

try:
    from urlparse import urlsplit
    from urllib import unquote
except ImportError:  # Python 3
    from urllib.parse import urlsplit, unquote

def url2filename(url, encoding='utf-8'):
    """Return basename corresponding to url.

    >>> print(url2filename('http://example.com/path/to/dir%2Ffile%C3%80?opt=1'))
    fileÀ
    """
    urlpath = urlsplit(url).path
    basename = posixpath.basename(unquote(urlpath))
    if os.path.basename(basename) != basename:
        raise ValueError(url)  # reject 'dir%5Cbasename.ext' on Windows
    return basename
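
Each download() call returns a (result, error) tuple, so the list returned by p.map() can be inspected to see which downloads failed. A minimal sketch, assuming download and Pool from the first snippet are in scope:

if __name__ == "__main__":
    urls = [line.strip() for line in open('urls')]
    results = Pool(20).map(download, urls)  # same parallel download as above
    for url, (result, error) in zip(urls, results):
        if error is not None:
            print('failed to download %s: %s' % (url, error))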
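
If you want to talk to the FTP server directly instead of going through urlretrieve(), the same rule applies: give every worker its own ftplib.FTP connection, because FTP objects must not be shared between threads. Below is a minimal sketch under that assumption; the helper name download_with_ftplib and the anonymous login are illustrative, not part of the code above:

import ftplib
from multiprocessing.dummy import Pool  # use threads

try:
    from urlparse import urlsplit
except ImportError:  # Python 3
    from urllib.parse import urlsplit

def download_with_ftplib(url):
    """Download a single ftp url over a dedicated FTP connection."""
    url = url.strip()
    parts = urlsplit(url)
    try:
        ftp = ftplib.FTP(parts.hostname)  # new connection per call, hence per thread
        try:
            ftp.login()  # anonymous login; pass user/password here if needed
            with open(url2filename(url), 'wb') as file:
                ftp.retrbinary('RETR ' + parts.path, file.write)
        finally:
            ftp.quit()
        return url, None
    except Exception as e:
        return None, e

if __name__ == "__main__":
    pool = Pool(20)  # number of concurrent FTP connections
    print(pool.map(download_with_ftplib, open('urls')))

multiprocessing.dummy.Pool reuses its worker threads, so at most 20 FTP connections are open at any moment; opening the connection inside download_with_ftplib keeps each connection confined to a single thread.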