#!/usr/bin/env python
# coding: utf-8
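"""A small depth-first web crawler: starting from a seed URL it fetches each
page, saves the raw HTML to Save_HTML.txt, extracts absolute <a href="http...">
links, and queues unvisited ones (up to depth 200) while logging them to
Save_URL.txt."""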
import re
from collections import deque
from urllib.request import urlopen
class Data(object):
    """A URL paired with its depth in the crawl tree."""

    def __init__(self, startURL, depth=1):
        self.URL = startURL
        self.depth = depth
class Crawler(object):
    """Depth-first crawler that records visited URLs and fetched HTML."""

    def __init__(self, args):
        self.startURL = args.URL
        self.unvisitedHrefs = deque()  # stack of Data objects still to fetch
        self.visitedHrefs = set()      # URLs that have already been fetched
        self.unvisitedHrefs.append(args)
    def startCrawler(self):
        save_url = open("Save_URL.txt", "w")
        save_files = open("Save_HTML.txt", "w")
        while len(self.unvisitedHrefs):
            da = self.unvisitedHrefs.pop()  # pop from the right: depth-first
            if da.depth > 200:
                break
            try:
                self.visitedHrefs.add(da.URL)
                req = urlopen(da.URL, data=None, timeout=2)
                content = req.read()
                # Guess the charset declared in the page head; default to UTF-8.
                match = re.findall(b'charset=["\']?([-\w]+)', content[:500], re.I)
                charset = match[0].decode("ascii") if match else "utf-8"
                content = content.decode(charset, errors="replace")
                save_files.write(da.URL + "\t" + content + "\n")
            except Exception:
                print("error fetching URL:", da.URL)
                continue
            # Queue every absolute link on the page that we have not visited.
            urls = re.findall('<a href="(http[^"]*)"', content, re.I)
            depth = da.depth + 1
            for u in urls:
                if u not in self.visitedHrefs:
                    self.unvisitedHrefs.append(Data(u, depth))
                    save_url.write(u + "\n")
                    print(u)
        print("Finished crawling!")
        save_url.close()
        save_files.close()
    def addUnvisitedHrefs(self, url, depth):
        """Queue a URL for crawling unless it has already been visited."""
        if url not in self.visitedHrefs:
            self.unvisitedHrefs.append(Data(url, depth))
if __name__=="__main__":
da=Data("http://www.hrbeu.edu.cn/")
cra=crawler(da)
cra.startCrawler()
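# Example invocation (assumes this file is saved as crawler.py; it writes
# Save_URL.txt and Save_HTML.txt into the current working directory):
#
#   python crawler.py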