1.创建工程
scrapy startproject csdnProject
2.创建spider
进入创建的工程目录下:
cd csdnProject
根据基础模块创建spider
scrapy genspider -t basic csdnSpider csdn.net
3.编写spider
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from csdnProject.items import CsdnprojectItem
class CsdnspiderSpider(scrapy.Spider):
    """Crawl the list pages of a CSDN blog and yield one item per article.

    Flow: start_requests() enqueues list pages 1..6 -> parse() follows each
    article link -> content_parse() extracts the title and yields the item.
    """

    name = 'csdn'
    allowed_domains = ['blog.csdn.net']
    # Browser-like User-Agent so CSDN serves the normal HTML page.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
    }

    def start_requests(self):
        """Yield requests for list pages 1 through 6."""
        for page_num in range(1, 7):
            url = 'http://blog.csdn.net/jeikerxiao/article/list/' + str(page_num)
            yield Request(url, headers=self.headers, callback=self.parse)

    def parse(self, response):
        """Parse a list page: follow each article's link to its content page."""
        base_url = "http://blog.csdn.net"  # hoisted: loop-invariant
        title_items = response.xpath("//div[@id='article_list']//span[@class='link_title']")
        for title_item in title_items:
            links = title_item.xpath("./a/@href").extract()
            if not links:
                # Skip title spans without an <a href>; extract()[0] would
                # otherwise raise IndexError and abort the whole callback.
                continue
            yield Request(base_url + links[0], headers=self.headers,
                          callback=self.content_parse)

    def content_parse(self, response):
        """Parse an article page and yield a CsdnprojectItem (title + url)."""
        item = CsdnprojectItem()
        titles = response.xpath(
            "//div[@class='article_title']/h1/span[@class='link_title']/a/text()"
        ).extract()
        # Guard against a missing title node instead of raising IndexError.
        title_str = titles[0] if titles else ''
        # Remove the newlines and spaces CSDN embeds inside the title markup.
        item['title'] = title_str.replace('\r\n', '').replace(' ', '')
        # Fix: the item declares a 'url' field that was never populated.
        item['url'] = response.url
        yield item
4.编写item
# -*- coding: utf-8 -*-
import scrapy
class CsdnprojectItem(scrapy.Item):
    """Container for one crawled CSDN article.

    Fields:
        title: the article headline text.
        url: the article page address.
    """

    url = scrapy.Field()
    title = scrapy.Field()
5.运行
查看蜘蛛列表
scrapy list
运行蜘蛛
scrapy crawl csdn