# Simple web-page crawler using multiple processes (one process per URL).
from urllib import request
from multiprocessing import Process,Queue
def downloader(url_queue):
    """Fetch one URL taken from *url_queue* and save its body to a local file.

    The output file name is the last path component of the URL,
    e.g. ``http://host/index.html`` -> ``index.html``.

    Args:
        url_queue: a multiprocessing.Queue holding URL strings.
    """
    url = url_queue.get()  # blocks until a URL is available
    # Use a context manager so the HTTP response is closed deterministically
    # (the original leaked the response object).
    with request.urlopen(url) as response:
        content = response.read().decode("utf-8")
    file_name = url.split('/')[-1]
    with open(file_name, "w", encoding="utf-8") as f:
        f.write(content)
if __name__ == "__main__":
    url_queue = Queue()
    urls = [
        "http://www.langlang2017.com/index.html",
        "http://www.langlang2017.com/route.html",
        "http://www.langlang2017.com/FAQ.html",
    ]
    for url in urls:
        url_queue.put(url)

    # One worker process per URL; each worker consumes exactly one queue item.
    workers = [Process(target=downloader, args=(url_queue,)) for _ in urls]
    for worker in workers:
        worker.start()
    # Wait for every child to finish so the parent does not exit while
    # downloads are still in flight (the original never joined its children).
    for worker in workers:
        worker.join()
# 兄弟连学python (Brotherhood Python training — promotional footer)
# Python学习交流、资源共享群:563626388 QQ (QQ study/resource-sharing group)