1.自定义
a)当前更新微博头条内容,邮件通知
2.分析页面
a)微博主页:https://weibo.com/,记录cookies
b)登陆链接
https://login.sina.com.cn/sso/login.php
c)头条Url
https://d.weibo.com/623751_1
3.实现思路
a)微博模拟登陆,保存cookie信息
b)关注实时头条
c)每个小时,更新一次,新发布头条,邮件通知
4.编码实现
Spider组件
# -*- coding: utf-8 -*-
from scrapy.spiders import Spider
from scrapy import Spider, Request
from lxml import etree
import re
import datetime,time
import uuid
from SinaTopic.items import SinatopicItem
class SinaspiderSpider(Spider):
name = 'SinaSpider'
allowed_domains = ['weibo.com']
start_urls = ['https://d.weibo.com/623751_1']
def start_requests(self):
for i in range(10):
yield Request(url=self.start_urls[0], callback=self.parse, dont_filter=True)
def parse(self, response):
res = response.text.replace('\r','').replace('\\n','').replace('\t','').replace('\\','').strip()
#with open('html.txt','w') as f:
# f.write(res)
lists = re.findall(r'<li class="pt_li pt_li_2 S_bg2"(.*)</li>', res)[0]
selector = etree.HTML(str(lists))
titles = selector.xpath('//div[@class="text_box"]/div[@class="title W_autocut"]/a/text()')
hrefs = selector.xpath('//div[@class="text_box"]/div[@class="title W_autocut"]/a/@href')
subs = selector.xpath('//div[@class="text_box"]/div[@class="text text_cut S_txt2"]/text()')
picmuls = selector.xpath('//ul[@class="pic_m3 clearfix"]/li/img/@src')
accounts = selector.xpath('//div[@class="subinfo_box clearfix"]/a/span[@class="subinfo S_txt2"]/text()')
datatimes = selector.xpath('//div[@class="subinfo_box clearfix"]/span[@class="subinfo S_txt2"]/text()')
refercounts = selector.xpath('//em[@class="W_fico