A Multithreaded Web Crawler in Python
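The script below is a small breadth-first crawler: starting from one seed URL, each round downloads every queued page with a batch of worker threads, saves each page to a numbered .html file, extracts fresh links from the downloaded HTML with a regular expression, and repeats until the queue is empty. All shared state lives in module-level globals guarded by a single lock.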

#!/usr/bin/env python
# coding=utf-8
import threading
import urllib
import re
import time

g_mutex = threading.Condition()
g_pages = []      # raw HTML of downloaded pages; new links are parsed out of these
g_queueURL = []   # URLs waiting to be crawled
g_existURL = []   # URLs that have already been crawled
g_failedURL = []  # URLs that failed to download
g_totalcount = 0  # number of pages downloaded so far
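# Note on the shared state above: both the Crawler driver and the worker
# threads mutate these lists, so every write happens while holding g_mutex.
# threading.Condition() carries its own internal lock, and its acquire()/
# release() delegate to that lock, so it serves here as a plain mutex; a bare
# threading.Lock() would do the same job.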

class Crawler:
    def __init__(self, crawlername, url, threadnum):
        self.crawlername = crawlername
        self.url = url
        self.threadnum = threadnum
        self.threadpool = []
        self.logfile = open("log.txt", 'w')

    def craw(self):
        global g_queueURL
        g_queueURL.append(self.url)  # seed the queue with the entry URL
        depth = 0
        print self.crawlername + " started..."
        while len(g_queueURL) != 0:
            depth += 1
            print 'Searching depth', depth, '...\n\n'
            self.logfile.write("URL:" + g_queueURL[0] + "........")
            self.downloadAll()
            self.updateQueueURL()
            content = '\n>>>Depth ' + str(depth) + ':\n'
            self.logfile.write(content)
            # log the queue that the next depth will process
            i = 0
            while i < len(g_queueURL):
                content = str(g_totalcount + i) + '->' + g_queueURL[i] + '\n'
                self.logfile.write(content)
                i += 1

    def downloadAll(self):
        global g_queueURL
        global g_totalcount
        i = 0
        while i < len(g_queueURL):
            # launch at most threadnum download threads per batch
            j = 0
            while j < self.threadnum and i + j < len(g_queueURL):
                g_totalcount += 1
                threadresult = self.download(g_queueURL[i + j], str(g_totalcount) + '.html', j)
                if threadresult != None:
                    print 'Thread started:', i + j, '--File number =', g_totalcount
                j += 1
            i += j
            # wait up to 30 seconds for each thread in the batch to finish
            for thread in self.threadpool:
                thread.join(30)
            self.threadpool = []  # reset the pool for the next batch
        g_queueURL = []
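    # A caveat on the batching above: thread.join(30) waits at most 30 seconds,
    # so a straggling download is simply abandoned by the loop; the thread
    # itself (non-daemon by default) keeps running until its fetch completes.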

    def download(self, url, filename, tid):
        crawthread = CrawlerThread(url, filename, tid)
        self.threadpool.append(crawthread)
        crawthread.start()
        return crawthread  # let the caller confirm the thread was started

    def updateQueueURL(self):
        global g_queueURL
        global g_existURL
        newUrlList = []
        for content in g_pages:
            newUrlList += self.getUrl(content)
        # the next depth crawls every newly seen URL that is not already done
        g_queueURL = list(set(newUrlList) - set(g_existURL))

    def getUrl(self, content):
        reg = r'"(http://.+?)"'
        regob = re.compile(reg, re.DOTALL)
        urllist = regob.findall(content)
        return urllist
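# For example, getUrl on '<a href="http://example.com/a">a</a>' returns
# ['http://example.com/a']. The pattern captures every double-quoted
# http:// string, so it also picks up stylesheets, scripts and images,
# not just anchor hrefs.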

class CrawlerThread(threading.Thread):
    def __init__(self, url, filename, tid):
        threading.Thread.__init__(self)
        self.url = url
        self.filename = filename
        self.tid = tid

    def run(self):
        global g_mutex, g_existURL, g_failedURL, g_pages
        try:
            # fetch the page and save it under its sequence-numbered filename
            page = urllib.urlopen(self.url)
            html = page.read()
            fout = open(self.filename, 'w')
            fout.write(html)
            fout.close()
        except Exception, e:
            # record the failure so the URL is not retried at the next depth
            g_mutex.acquire()
            g_existURL.append(self.url)
            g_failedURL.append(self.url)
            g_mutex.release()
            print 'Failed downloading and saving', self.url
            print e
            return None
        # publish the page and mark the URL done, under the shared lock
        g_mutex.acquire()
        g_pages.append(html)
        g_existURL.append(self.url)
        g_mutex.release()

if __name__ == "__main__":
    url = raw_input("Enter the entry URL:\n")
    threadnum = int(raw_input("Number of threads: "))
    crawlername = "little crawler"
    crawler = Crawler(crawlername, url, threadnum)
    crawler.craw()
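The listing targets Python 2 (print statements, raw_input, urllib.urlopen, the except Exception, e syntax). As a minimal sketch of what the worker's fetch-and-save step would look like on Python 3, assuming the standard-library urllib.request and a hypothetical helper name fetch_and_save:

    import urllib.request

    def fetch_and_save(url, filename, timeout=30):
        # in Python 3 the response is a context manager and read() returns bytes
        with urllib.request.urlopen(url, timeout=timeout) as page:
            html = page.read()
        # write the raw bytes so no encoding guesswork is needed
        with open(filename, 'wb') as fout:
            fout.write(html)
        return html

The rest of the script ports mechanically: print becomes a function, raw_input becomes input, and the threading API is unchanged.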
