import re,urllib,urllib2,socket
import os
import random
import socket
import urllib2
import cookielib
# Human-readable error messages keyed by error-code string.
# NOTE(review): this table is not referenced anywhere in the visible code;
# kept for forward compatibility.  Typos in the messages fixed
# ("checck" -> "check", "Creat" -> "Create", "faild" -> "failed").
ERROR = {
    '0': 'Can not open the url, check your net',
    '1': 'Create download dir error',
    '2': 'The image links is empty',
    '3': 'Download failed',
    '4': 'Build soup error, the html is empty',
    '5': 'Can not save the image to your disk',
}
# Counter used to produce sequential .java output file names
# (incremented by BrowserBase.openurl via `global FILENAME`).
FILENAME = 0
class BrowserBase(object):
def __init__(self):
socket.setdefaulttimeout(20)
def speak(self,name,content):
print '[%s]%s' %(name,content)
def openurl(self,url):
"""
打开网页
"""
cookie_support= urllib2.HTTPCookieProcessor(cookielib.CookieJar())
self.opener = urllib2.build_opener(cookie_support,urllib2.HTTPHandler)
urllib2.install_opener(self.opener)
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 ",
]
agent = random.choice(user_agents)
self.opener.addheaders = [("User-agent",agent),("Accept","*/*"),('Referer','http://www.google.com')]
try:
res = self.opener.open(url)
urlSource= res.read()
print urlSource
pattern = re.compile(r'package.*?
match = pattern.findall(urlSource)
length = len(match)
for i in range(0,length):
print "##############"
print match[i]
print len(match[i])
global FILENAME
f= open(savepath+`FILENAME`+".java",'wb')
f.write(match[i][0:len(match[i])-1])
f.close()
FILENAME +=1
except Exception,e:
self.speak(str(e) + url,'content')
raise Exception
else:
return res
def get_code(url):
urlSource = urllib.urlopen(url).read()
print urlSource
#pattern = re.compile(r'package(?!
pattern = re.compile(r'package')
match = pattern.findall(urlSource)
if __name__ == '__main__':
    splider = BrowserBase()
    # Destination directory for the extracted .java files; read as a
    # global by BrowserBase.openurl.  (Unused counter `i` removed.)
    savepath = 'e:\\picture\\'
    splider.openurl('http://blog.csdn.net/m13666368773/article/details/7691871')
1 写作缘由:
看这个页面上的《java与设计模式》
会把代码复制下来,再跑一下,看看结果。(他的代码很规范)但是手动复制会很麻烦。于是就想写一个脚本爬下来。
2
遇到的问题总结:
2.1
问题: 用抓图片那个脚本来抓。报403错误。
2.2
问题:写好了正则表达式以后。不知道findall返回的是什么。
解决:findall返回的是一个list,直接用 match[i] 来访问对应元素。
2.3
问题:list长度
解决:len(match)
2.4
问题:写流的时候,希望文件名每次+1
解决:python 全局变量。
CONSTANT = 0
def modifyGlobal():
global CONSTANT
print(CONSTANT)
CONSTANT += 1
if __name__ == '__main__':
modifyGlobal()
print(CONSTANT)
一开始写成 global CONSTANT += 1,这是语法错误:global 声明和赋值必须分开写。汗。
2.5
问题: string indices must be integers, not tuple
解决:用 Tab 键上面那个字符(反引号 `,Python 2 的 repr 简写)把整数文件名转成字符串。
2.6
问题:正则后面多一个“
解决:在流写的时候,对字符串进行截取。match[i][0:len(match[i])-1]
总结完毕