# -*- coding:utf-8 -*-
import urllib2
import re
import threading
import random
import multiprocessing
from multiprocessing import Lock
lock=Lock()  # module-level lock shared by every downloader thread; a Lock() created inside go() would be private to each call and protect nothing
def process_crawler(url):
    process_url=[]
    process=[]
    num_cpu=multiprocessing.cpu_count()  # number of CPU cores
    try:
        for i in range(1,6):
            manyurl=url%i  # substitute the page number i for the %d placeholder in url
            process_url.append(manyurl)  # collect the URLs of the five pages
        for a in range(min(num_cpu,len(process_url))):  # one process per core, but never pop more URLs than were collected
            p=multiprocessing.Process(target=thread_crawler,args=(process_url.pop(),))  # create a process; args is a tuple holding the arguments for the target function thread_crawler
            p.start()
            process.append(p)
        for p1 in process:
            p1.join()  # join(timeout) blocks the parent here until the child exits (or timeout seconds pass); it ensures every crawler process finishes before the code below runs; omit timeout to wait indefinitely
            print p1
    except Exception as e:
        print 'exception!!!',e
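# A minimal sketch (hypothetical, not called by the crawler) of what join() buys you:
# without it the parent would run past this point before the child has finished.
def _join_demo():
    import time
    child=multiprocessing.Process(target=time.sleep,args=(1,))
    child.start()
    child.join()  # blocks here until the child exits
    print 'child finished with exitcode',child.exitcode  # 0 on a clean exit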
def thread_crawler(url):
    html=downhtml(url)
    if html is None:  # the page download failed, nothing to parse
        return
    # findall returns a list; with capturing groups each element is a tuple of the () contents.
    # re.compile() compiles the regular expression into a pattern instance.
    regex_img=re.compile(r'<img[^>]*?(src|data-url)=["\']http://(.*?\.png|.*?0\.jpg)')
    mgsz=regex_img.findall(html)
    threads=[]
    while True:
        if mgsz:
            if len(threads)<10:  # len() gives the length of a string or a list; cap the pool at 10 threads
                thread=threading.Thread(target=go,args=(mgsz.pop(),))  # starting a thread works much like starting a process
                thread.setDaemon(True)  # a daemon thread is treated as unimportant: the main thread does not wait for it to finish
                thread.start()
                threads.append(thread)
        else:
            break
        for thread in threads[:]:  # iterate over a copy so removal is safe; reaping dead threads frees slots for new ones
            if not thread.is_alive():
                threads.remove(thread)
    for thread in threads:
        thread.join()  # wait for the last downloads; daemon threads would otherwise be killed when the process exits
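# A hypothetical sketch of what regex_img.findall returns (the sample tag is made up):
# each match is a tuple of the two capturing groups, which is why go() reads url[1].
def _findall_demo():
    pattern=re.compile(r'<img[^>]*?(src|data-url)=["\']http://(.*?\.png|.*?0\.jpg)')
    sample='<img class="x" src="http://img.example.com/pic0.jpg">'
    print pattern.findall(sample)  # [('src', 'img.example.com/pic0.jpg')]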
def go(url):
    imghtml=downhtml('http://'+url[1])  # url is an (attribute, address) tuple from findall; the second group is the image address
    if imghtml is None:  # the download failed, nothing to write
        return
    lock.acquire()  # take the shared lock so writes and prints from different threads do not interleave; the download happens outside the lock, otherwise the threads would run one at a time
    try:
        with open('G:\\python2.7.14\\images\\%d.jpg'%random.randint(1,10000000000),'wb') as th:
            th.write(imghtml)  # write the image bytes to a randomly named file
        print 'write successful\n'
    except:
        print 'Error'
    finally:
        lock.release()  # always release the lock, even after an error
def downhtml(url,num_retires=2,user_agent='wswp'):
##    print 'downloading:',url,'\n'
    header={'User-agent':user_agent}
    request=urllib2.Request(url,headers=header)
    try:
        html=urllib2.urlopen(request).read()
    except urllib2.URLError as e:
        print 'download error:',e.reason
        html=None
        if num_retires>0:
            if hasattr(e,'code') and 500<=e.code<600:
                return downhtml(url,num_retires-1,user_agent)  # retry the download on 5xx server errors
    return html
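# A quick usage sketch for downhtml (the URL here is a hypothetical example): a
# successful fetch returns the page bytes, an unrecoverable failure returns None.
def _downhtml_demo():
    html=downhtml('http://example.com',num_retires=1,user_agent='wswp')
    if html is None:
        print 'download failed'
    else:
        print 'got',len(html),'bytes'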
if __name__ == "__main__":
    # When this file is run directly, __name__ is set to "__main__", so process_crawler executes.
    # When the file is imported from another module, __name__ is the module's own name instead,
    # so the guarded code does not run; the idiom is mostly used for test code.
    process_crawler('http://ibaotu.com/guanggao/1-0-0-0-0-%d.html')  # %d stands for an integer: 'xxxx%dxx'%2 replaces %d with 2
# Takeaway: learned how multiprocessing and multithreading work, and that CPython's
# default interpreter has the GIL, which lets only one thread per process execute
# Python bytecode at a time, so threads do not give truly parallel execution.
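# A minimal sketch of the GIL effect described above (hypothetical demo, never called
# by the crawler; on Windows, call it only from under the __main__ guard): a CPU-bound
# job gains nothing from two threads but speeds up with two processes on two free cores.
def _count(n):  # CPU-bound busy loop used by the demo
    while n>0:
        n-=1
def _gil_demo():
    import time
    start=time.time()
    workers=[threading.Thread(target=_count,args=(10**7,)) for _ in range(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print 'two threads:',time.time()-start,'s'  # roughly serial speed, because of the GIL
    start=time.time()
    procs=[multiprocessing.Process(target=_count,args=(10**7,)) for _ in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print 'two processes:',time.time()-start,'s'  # about half, given two free cores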