Python Web Crawler

1. Fetching page content the simple way

import urllib2

# Fetch the page and write the raw HTML to a local file
content = urllib2.urlopen('http://www.hao123.com').read()
f = open("1.html", 'w')
f.write(content)
f.close()
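Note that urllib2 only exists in Python 2; in Python 3 the same functionality lives in urllib.request. A minimal equivalent sketch, assuming you simply want the raw bytes written to disk without decoding:

# Python 3 equivalent (sketch): urllib2 was merged into urllib.request
from urllib.request import urlopen

content = urlopen('http://www.hao123.com').read()   # raw bytes
with open("1.html", 'wb') as f:                     # write the bytes unchanged
    f.write(content)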



However, some sites protect themselves against this kind of bare request, so we need to disguise it as a normal request coming from a browser.
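The core of the disguise is simply attaching browser-like headers to the request. A minimal sketch of the idea (the User-Agent string is only an example) before the fuller class below:

import urllib2

# Sketch: replace the default "Python-urllib" User-Agent with a browser one
req = urllib2.Request('http://www.hao123.com', headers={
    "User-Agent": "Mozilla/5.0 (Windows NT 5.1; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13"
})
content = urllib2.urlopen(req).read()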

2. Disguising the request as a browser

# -*- coding: cp936 -*-
import urllib, urllib2, cookielib

class Dawn:
    '''A simple browser-like HTTP client. Written just for fun; it has been a year since I last wrote Python, so most of it has probably been forgotten.'''
    timeout = 30

    def __init__(self):
        '''Initialise the opener and add cookie support.'''
        httpHandler = urllib2.HTTPHandler()
        httpsHandler = urllib2.HTTPSHandler()
        cookie = cookielib.CookieJar()
        cookie_support = urllib2.HTTPCookieProcessor(cookie)
        opener = urllib2.build_opener(cookie_support, httpHandler, httpsHandler)
        urllib2.install_opener(opener)

    def getHeader(self):
        '''Return a set of browser-like request headers.'''
        header = {
            "User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13",
            #"User-Agent" = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.13) Gecko/20101206 Ubuntu/10.10 (maverick) Firefox/3.6.13",
            "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language":"zh-cn,zh;q=0.5",
            #"Accept-Encoding":"gzip,deflate",
            "Accept-Charset":"GB2312,utf-8;q=0.7,*;q=0.7",
            "Keep-Alive":"115",
            "Connection":"keep-alive"
            }
        return header

    def request(self, url, headers=None, data=None):
        '''Send the request and return the page source, or None on failure.'''
        if headers is None:
            headers = self.getHeader()

        # Build the request with the browser-like headers
        req = urllib2.Request(
            url = url,
            headers = headers
            )
        if data is not None:
            data = urllib.urlencode(data)

        # Send the request
        try:
            request = urllib2.urlopen(req, data, self.timeout)
            source = request.read()
            request.close()
        except Exception:
            source = None
            #print "connect failed..."
            
        return source

if __name__ == "__main__":
    dawn = Dawn()
    urls = ["www.baidu.com", "www.163.com", "oschina.net", "www.sina.com"]

    for item in urls:
        url = "http://" + item
        fileName = item + ".html"
        content = dawn.request(url)
        if content is None:
            continue            # skip sites that could not be fetched
        f = open(fileName, "w")
        f.write(content)
        f.close()
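Because request() url-encodes the optional data dict and passes it as the request body, the same class can also issue POST requests. A hypothetical sketch (the URL and form fields are made-up placeholders):

# Hypothetical POST usage: the form dict is url-encoded and sent as the request body
dawn = Dawn()
form = {"username": "test", "password": "123456"}             # placeholder fields
source = dawn.request("http://example.com/login", data=form)
if source is not None:
    print source[:200]                                        # peek at the response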
        
        
   
 

