Crawling the NetEase News ranking list with the Scrapy framework

Code in wyxw.py:

# -*- coding: utf-8 -*-
import scrapy
from ..items import WyxwItem


class WyxwSpider(scrapy.Spider):
    name = 'wyxw'
    allowed_domains = ['news.163.com']
    start_urls = ['http://news.163.com/special/0001386F/rank_whole.html']

    def parse(self, response):
        # Each row of the ranking tables holds the article link in the
        # first cell (td[1]) and the click count in the second (td[2]).
        xq_title = response.xpath('//table/tr/td[1]/a/text()').extract()
        xq_url = response.xpath('//table/tr/td[1]/a/@href').extract()
        xq_djl = response.xpath('//table/tr/td[2]/text()').extract()

        # Build a fresh item for every row. Reusing a single item
        # instance across yields is a bug: each new assignment would
        # overwrite the values of items already handed to the pipeline.
        for title, url, dj in zip(xq_title, xq_url, xq_djl):
            item = WyxwItem()
            item['title'] = title
            item['xq_url'] = url
            item['dj'] = dj
            yield item
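
Before wiring up the database, the spider can be smoke-tested on its own from the project root; the optional -o flag dumps the scraped items to a file for inspection:

scrapy crawl wyxw
scrapy crawl wyxw -o rank.json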


Code in items.py:

import scrapy


class WyxwItem(scrapy.Item):
    title = scrapy.Field()   # article title
    xq_url = scrapy.Field()  # link to the article detail page
    dj = scrapy.Field()      # click count

    def get_insert_sql(self):
        # Keep the SQL next to the fields it serializes, so the
        # pipeline stays generic and simply executes whatever the
        # item hands back.
        sql = 'insert into wyxw_test(title, xq_url, dj) values (%s, %s, %s)'
        data = (self['title'], self['xq_url'], self['dj'])
        return (sql, data)
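
The INSERT statement assumes a matching wyxw_test table in MySQL. A minimal sketch of the schema (the column types and sizes are assumptions; adjust to your data):

CREATE TABLE wyxw_test (
    id     INT AUTO_INCREMENT PRIMARY KEY,
    title  VARCHAR(255),
    xq_url VARCHAR(255),
    dj     VARCHAR(32)
) DEFAULT CHARSET = utf8mb4;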

Code in pipelines.py:

from .mysql_helper import MysqlHelper  # module name assumed; use your helper's actual path


class MysqlProjectPipeline(object):
    def process_item(self, item, spider):
        # The item supplies its own parameterized INSERT statement,
        # so this pipeline works for any item that implements
        # get_insert_sql().
        (insert_sql, data) = item.get_insert_sql()
        myhelper = MysqlHelper()
        myhelper.execute_modify_sql(insert_sql, data)
        return item  # pass the item on to any later pipelines
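
MysqlHelper is referenced above but not shown in the original post. A minimal sketch using pymysql, assuming it lives in mysql_helper.py inside the project package (the connection parameters are placeholders):

# mysql_helper.py
import pymysql


class MysqlHelper(object):
    def __init__(self):
        # Placeholder credentials; adjust to your environment.
        self.conn_args = dict(host='localhost', user='root',
                              password='123456', db='news',
                              charset='utf8mb4')

    def execute_modify_sql(self, sql, data):
        # Open a short-lived connection, run the parameterized write,
        # and commit before closing.
        conn = pymysql.connect(**self.conn_args)
        try:
            with conn.cursor() as cursor:
                cursor.execute(sql, data)
            conn.commit()
        finally:
            conn.close()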

For the configuration of the remaining files, see the basic setup of a Scrapy project.
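
One setting worth spelling out is the pipeline registration in settings.py, without which process_item is never called. A minimal sketch (the package name wyxw_project is an assumption; substitute your own project package):

# settings.py
ROBOTSTXT_OBEY = False  # the ranking page may be disallowed for generic crawlers

ITEM_PIPELINES = {
    'wyxw_project.pipelines.MysqlProjectPipeline': 300,  # lower number = earlier in the chain
}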
