Requirements:
- Store each article's title, author, publish time, body text, image links from the body, article URL, and article category
- Collect according to the site's real-time updates (one-minute cycle)
- Save timestamps in the "yyyy-mm-dd HH:MM:SS" format
- Store the data in a MySQL database
The implementation is as follows.
Spider file for the Sina rolling-news channel:
# spiders/sina_gundong.py
import time

import js2py

from scrapy_plus.core.spider import Spider
from scrapy_plus.http.request import Request
from scrapy_plus.item import Item


class SinaGunDong(Spider):

    name = "sina_gundong"

    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Cookie": "UOR=www.google.com,www.sina.com.cn,; SGUID=1520816292777_83076650; SINAGLOBAL=211.103.136.242_1520816292.736990; SUB=_2AkMt-V_2f8NxqwJRmPEQy2vmZYx_zwjEieKbpa4tJRMyHRl-yD83qnIJtRB6BnlxGSLw2fy6O04cZUKTsCZUeiiFEsZE; SUBP=0033WrSXqPxfM72-Ws9jqgMF55529P9D9WhpFUZmqbYYLueonGrZIL2c; U_TRS1=0000001a.e268c0.5aaa0d39.35b0731a; lxlrttp=1521688012; Apache=223.72.62.219_1522208561.132697; ULV=1522208952476:6:6:3:223.72.62.219_1522208561.132697:1522208561158; U_TRS2=000000db.81c2323e.5abca69b.ad269c11; ArtiFSize=14; rotatecount=1; hqEtagMode=1",
        # "Host": "roll.news.sina.com.cn",  # the Host header must stay disabled here
        "Pragma": "no-cache",
        "Referer": "http://roll.news.sina.com.cn/s/channel.php?ch=01",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
    }

    def start_requests(self):
        while True:
            # This request returns the list-page data, delivered as a JS statement
            url = "http://roll.news.sina.com.cn/interface/rollnews_ch_out_interface.php?col=89&spec=&type=&ch=&k=&offset_page=0&offset_num=0&num=120&asc=&page=1&r=0.5559616678192825"
            yield Request(url, parse='parse', filter=False)
            time.sleep(60)  # issue the request once every 60 seconds

    def parse(self, response):
        '''The response body is JS code'''
        # Execute the JS code with js2py to extract the data; inspection of the
        # site shows the body is GBK-encoded, so decode it first
        ret = js2py.eval_js(response.body.decode("gbk"))
        for news in ret.list:
            yield Request(news["url"], headers=self.headers, parse='parse_detail',
                          meta={"type": news["channel"]["title"]})

    def parse_detail(self, response):
        response.body = response.body.decode("utf-8")  # some pages are not decoded correctly, so decode manually here
        title = response.xpath("//h1[@class='main-title']/text()")[0]
        pub_date = response.xpath("//span[@class='date']/text()")[0]
        try:
            # The author is marked up in two different ways, so catch the
            # IndexError from the first XPath and fall back to the second
            author = response.xpath("//div[@class='date-source']//a/text()")[0]
        except IndexError:
            author = response.xpath("//div[@class='date-source']//span[contains(@class,'source')]/text()")[0]
        content = response.xpath("//div[@class='article']//text()")        # multiple nodes, one per paragraph
        image_links = response.xpath("//div[@class='article']//img/@src")  # there may be several image links
        yield Item({
            "content": content,                     # body text
            "image_links": image_links,             # image links
            "title": title,                         # title
            "pub_date": pub_date,                   # publish date
            "author": author,                       # author
            "url": response.url,                    # article URL
            "type": response.request.meta["type"],  # article category
        })
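The list interface returns a JS statement rather than JSON, and the body is GBK-encoded, which is why js2py is used above. To see what the interface actually returns before wiring it into the framework, a standalone check is enough. The following is a minimal sketch; the requests call and the trimmed headers are only for illustration and are not part of the project code:
import requests
import js2py

url = ("http://roll.news.sina.com.cn/interface/rollnews_ch_out_interface.php"
       "?col=89&spec=&type=&ch=&k=&offset_page=0&offset_num=0"
       "&num=120&asc=&page=1&r=0.5559616678192825")
body = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}).content
ret = js2py.eval_js(body.decode("gbk"))    # execute the JS statement and get back a JS object wrapper
for news in ret.list:                      # same fields the spider reads above
    print(news["channel"]["title"], news["url"])
    break                                  # only show the first entry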
Create a new db.py in the project:
# db.py in the project directory
# Dependencies: sqlalchemy, pymysql
from sqlalchemy import Column, Integer, Text, DateTime, String
from sqlalchemy.ext.declarative import declarative_base

# Base class for the ORM models:
Base = declarative_base()


class Model(Base):
    __tablename__ = 'sina_news'

    id = Column(Integer, primary_key=True, autoincrement=True)  # primary key
    title = Column(String(100), nullable=False)                 # title
    author = Column(String(20), nullable=False)                 # author
    pub_date = Column(DateTime, nullable=False)                 # publish time
    content = Column(Text, nullable=False)                      # body text
    image_links = Column(Text, nullable=False)                  # image links (stored as a JSON string)
    url = Column(String(500), nullable=False)                   # article URL
    type = Column(String(6), nullable=False)                    # article category
    news_tag = Column(String(40), nullable=False)               # dedup key for the article
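To confirm that the model maps onto the MySQL table as expected, the table can also be created and queried outside the pipeline. This is a minimal sketch; it reuses the connection string from MysqlPipeline below, where the credentials and database name are placeholders for your own environment:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from db import Base, Model

engine = create_engine("mysql+pymysql://root:ryoma@127.0.0.1/test3?charset=utf8")
Base.metadata.create_all(bind=engine)  # creates sina_news if it does not exist yet

Session = sessionmaker(bind=engine)
session = Session()
print(session.query(Model).count())    # number of articles stored so far
session.close()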
Pipeline file:
# pipelines.py in the project directory
import json
from datetime import datetime
from hashlib import sha1

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from scrapy_plus.core.scheduler import utf8_string
from db import Base, Model


class Pipeline(object):
    '''Clean and format the data before it is written to the database'''

    def process_item(self, item, spider):
        # Convert the publish-time string into a datetime object
        item.data['pub_date'] = datetime.strptime(item.data['pub_date'], '%Y年%m月%d日 %H:%M')
        # Drop whitespace-only segments from content, then join the paragraphs
        item.data['content'] = [i for i in item.data['content'] if i.strip()]
        item.data['content'] = "\n\n".join(item.data["content"])
        item.data['image_links'] = json.dumps(item.data['image_links'])  # serialize the list of links as a JSON string
        # Build the dedup key from the title, the author and the article URL
        s1 = sha1()
        s1.update(utf8_string(item.data['title']))
        s1.update(utf8_string(item.data['author']))
        s1.update(utf8_string(item.data['url']))
        item.data['news_tag'] = s1.hexdigest()  # dedup key
        return item


class MysqlPipeline(object):

    def __init__(self):
        # Create the database engine
        self.conn = create_engine("mysql+pymysql://root:ryoma@127.0.0.1/test3?charset=utf8")
        Base.metadata.create_all(bind=self.conn)  # create the table unless it already exists

    def _get_session(self):
        # Create a session object
        Session = sessionmaker(bind=self.conn)
        return Session()

    def process_item(self, item, spider):
        session = self._get_session()  # get a session
        # Check whether news_tag already exists: if it does, the record is a
        # duplicate and is skipped; otherwise it is inserted
        if not session.query(Model).filter_by(news_tag=item.data['news_tag']).all():
            obj = Model(**item.data)  # build the model object
            session.add(obj)          # insert the record
            session.commit()          # commit
        session.close()               # close the session
        return item
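The two transformations in Pipeline.process_item can be checked in isolation: strptime parses the Chinese date string into a datetime object, which MySQL then stores in the required "yyyy-mm-dd HH:MM:SS" form, and sha1 over title, author and URL yields the 40-character dedup key that matches String(40) in db.py. A quick sketch with made-up sample values, using a plain .encode('utf8') in place of the framework's utf8_string helper:
from datetime import datetime
from hashlib import sha1

pub_date = datetime.strptime("2018年03月28日 11:08", "%Y年%m月%d日 %H:%M")
print(pub_date)              # 2018-03-28 11:08:00

s1 = sha1()
for field in ("sample title", "sample author", "http://news.sina.com.cn/sample.html"):
    s1.update(field.encode("utf8"))
print(len(s1.hexdigest()))   # 40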
Project settings file:
# Override the default settings
DEFAULT_LOG_FILENAME = '滚动新闻采集.log'  # default log file name

SPIDERS = [
    "spiders.sina_gundong.SinaGunDong"
]

PIPELINES = [
    "pipelines.Pipeline",
    "pipelines.MysqlPipeline"
]

SPIDER_MIDS = []

DOWNLOADER_MIDS = []

# Maximum concurrency
MAX_ASYNC_NUMBER = 1

# Async mode: thread or coroutine
ASYNC_TYPE = 'thread'

'''Distributed-mode settings'''
# Role of this process:
# None   - non-distributed: issues the initial requests (_start_requests) and
#          processes requests (_execute_request_response_item)
# master - master: only issues the initial requests (_start_requests) and
#          maintains the request queue
# slave  - slave: only processes requests (_execute_request_response_item)
# ROLE = 'master'
# ROLE = 'slave'
ROLE = None

# Maximum number of retries
MAX_RETRY_TIMES = 3

# Redis request-queue settings
REDIS_QUEUE_NAME = 'request_queue'
REDIS_QUEUE_HOST = 'localhost'
REDIS_QUEUE_PORT = 6379
REDIS_QUEUE_DB = 10

# Redis set (request filter) settings
REDIS_SET_NAME = 'filter_container'
REDIS_SET_HOST = 'localhost'
REDIS_SET_PORT = 6379
REDIS_SET_DB = 10

# Redis request-backup settings
REDIS_BACKUP_NAME = 'request_backup'
REDIS_BACKUP_HOST = 'localhost'
REDIS_BACKUP_PORT = 6379
REDIS_BACKUP_DB = 10
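Finally, the project needs an entry point that starts the framework's engine. The following main.py is only a sketch: the Engine import path and the start() call are assumptions about how this scrapy_plus build is launched, so adapt it to your own framework code:
# main.py in the project directory (sketch; Engine and start() are assumed, not taken from this text)
from scrapy_plus.core.engine import Engine

if __name__ == '__main__':
    Engine().start()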