目录
1.安装依赖
pip install peewee -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install pymysql -i https://pypi.tuna.tsinghua.edu.cn/simple
2.构建模型:编辑items.py
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
# Model definitions for scraped items (peewee ORM over MySQL).
import dataclasses
from dataclasses import dataclass
from peewee import MySQLDatabase, TextField, Model

# Shared MySQL connection used by every model in this module (see Meta.database).
# NOTE(review): credentials are hard-coded — load from settings/env in real deployments.
db_mysql = MySQLDatabase('fast_generator', user='root', password='123456', host='localhost', port=3306)
class NoticeItem(Model):
    """Peewee model mapped to the MySQL ``notice`` table.

    Doubles as the Scrapy item yielded by spiders: peewee's ``Model.__init__``
    accepts field values as keyword arguments, so
    ``NoticeItem(title=..., content=...)`` works unchanged.

    Changes from the original: the ``@dataclass`` decorator is removed — it
    conflicts conceptually with peewee's metaclass-managed fields and was a
    no-op here (no annotated fields) — and the ``__init__`` that only forwarded
    to ``super().__init__`` is dropped as dead code.
    """

    # Notice title.
    title = TextField()
    # Notice body text.
    content = TextField()

    class Meta:
        # Module-level shared connection.
        database = db_mysql
        # Explicit table name; peewee would otherwise derive one from the
        # class name ("noticeitem"), so this line is needed unless the
        # table follows that convention.
        table_name = "notice"
3.传入数据:编辑Spider.py
import scrapy
from scrapy.http import HtmlResponse
from testproject.items import NoticeItem
class TestspiderSpider(scrapy.Spider):
    """Demo spider: requests each start URL and yields one fixed NoticeItem."""

    name = 'testspider'
    allowed_domains = ['ssr1.scrape.center']
    start_urls = ['https://ssr1.scrape.center/']

    def start_requests(self):
        """Schedule one request per configured start URL, parsed by ``parse``."""
        yield from (
            scrapy.Request(start_url, callback=self.parse)
            for start_url in self.start_urls
        )

    def parse(self, response: HtmlResponse, **kwargs):
        """Emit a demo item; the response content itself is not inspected."""
        yield NoticeItem(title='测试标题', content='测试内容')
4.保存数据:修改pipelines.py
from testproject.items import NoticeItem
class TestprojectPipeline:
    """Persist scraped NoticeItem objects to MySQL via peewee."""

    def process_item(self, item, spider):
        """Save NoticeItem instances; pass every item through unchanged."""
        if not isinstance(item, NoticeItem):
            return item
        item.save()
        return item
5.开启管道:修改settings.py
# Enable the persistence pipeline; 300 is its priority order
# (lower values run earlier, valid range 0-1000).
ITEM_PIPELINES = {
    'testproject.pipelines.TestprojectPipeline': 300,
}