A first try at a Python crawler

Note: the reason for constructing the HTTP request by hand is that if you simply call
urllib.urlopen(url)
to fetch the HTML source, the site will start answering 403 Forbidden after enough requests. Building the request with browser-like headers avoids the 403 error.
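For a one-off request the same idea can be shown in miniature: attach a browser-style User-Agent to a urllib2.Request instead of calling urlopen on a bare URL. A minimal sketch; the URL and header value here are placeholders, not taken from the post:

import urllib2

req = urllib2.Request('https://www.douban.com/',          # placeholder URL
                      headers={'User-Agent': 'Mozilla/5.0'})
html = urllib2.urlopen(req).read()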
The full script is as follows:
#-*- coding:utf-8 -*-
import urllib2
import re
import sys
import cookielib


reload(sys)
sys.setdefaultencoding("utf-8")  # Python 2 hack so utf-8 titles can be written to file without explicit encoding


def getHtml(url):
    # Build an opener that keeps cookies and sends browser-like headers,
    # so repeated requests are less likely to come back 403 Forbidden.
    cookie_support = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
    opener = urllib2.build_opener(cookie_support)
    user_agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36']
    # addheaders expects string values, so pick an entry from the list
    # rather than passing the whole list as the header value.
    opener.addheaders = [("User-Agent", user_agents[0]), ("Accept", "*/*"), ('Referer', 'http://www.douban.com')]
    response = opener.open(url)
    html = response.read().decode("utf-8")
    return html


def getTitle(html):
    # Pull (link, title, like-count) tuples out of the listing page;
    # "喜欢" is the "likes" label that follows the count in the markup.
    reg = u'class="note-item">.*?<a.*?href=(.*?)class="title".*?target="_blank">(.*?)</a>.*?<span>(.*?)喜欢</span>'
    titleRe = re.compile(reg, re.S)
    titlelist = re.findall(titleRe, html)
    return titlelist
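# Sketch of what getTitle() yields on a fragment shaped like the pattern
# above (hypothetical markup, not copied from douban):
#
#   <div class="note-item"><a href="https://example/1" class="title" target="_blank">Some title</a><span>1800喜欢</span></div>
#
#   -> [('"https://example/1" ', 'Some title', '1800')]
#
# Note that group 1 keeps the quotes and trailing space around the URL,
# which is why the "Link:" lines in the output file contain quotes.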


page_num = 0
filePath = r'C:\Users\Administrator\tmp\DoubanTop250.txt'
fileTop250 = open(filePath, 'w')   # open once; each run overwrites the previous result
while page_num < 10:
    # Each listing page holds 15 articles; "start" is the item offset.
    # The tag segment %E8%A3%85%E4%BF%AE is the URL-encoded "装修".
    html_url = 'https://www.douban.com/tag/%E8%A3%85%E4%BF%AE/article?start=' + str(page_num * 15)
    page_num = page_num + 1
    html = getHtml(html_url)
    Contents = getTitle(html)
    for Content in Contents:
        if int(Content[2]) >= 1500:   # keep only articles with at least 1500 likes
            fileTop250.write(Content[2] + ' likes' + '\n')
            fileTop250.write('Title:' + Content[1] + '\n')
            fileTop250.write('Link:' + Content[0] + '\n')
            fileTop250.write('from page ' + str(page_num) + '\n\n')
    print 'Read page ' + str(page_num) + ' successfully...'
fileTop250.close()
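The script above is Python 2 only (urllib2, cookielib, print statement). Under Python 3, urllib2 became urllib.request and cookielib became http.cookiejar; here is a minimal sketch of the equivalent getHtml, renamed only to mark it as not part of the original script:

import urllib.request
import http.cookiejar

def get_html(url):
    # Same idea as getHtml above: cookie-aware opener with browser-like headers.
    cookie_support = urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar())
    opener = urllib.request.build_opener(cookie_support)
    opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'),
                         ('Accept', '*/*'),
                         ('Referer', 'http://www.douban.com')]
    return opener.open(url).read().decode('utf-8')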

                