【代码备忘录】scrapy爬虫代码片段之xpath

def parse(self, response):
    """Parse the index page: find every article node and follow its link.

    Yields one ``scrapy.Request`` per article, dispatched to
    :meth:`parse_article`, which extracts all item fields from the
    detail page itself.

    Note: the previous version also extracted title/report/author/posttime
    into a ``GeekParkItem`` here, but that item was never yielded nor passed
    to the callback — dead work, removed.
    """
    articles = response.xpath('//*[@id="index"]/div[4]/div/div[1]/article')
    for article in articles:
        link = article.xpath('div[1]/a[2]/@href').extract_first()
        # Guard against a missing href: urljoin(None) would raise TypeError.
        if not link:
            continue
        url = response.urljoin(link)
        yield scrapy.Request(url, callback=self.parse_article)
def parse_article(self, response):
    """Parse an article detail page and yield one populated GeekParkItem.

    Extracts the title, author and post time from the post header, and
    joins all body paragraph text fragments into a single cleaned string.

    Fixes over the previous version:
    - ``GeekParkItem()`` is now created *inside* the loop; creating it once
      outside meant every yielded item was the same object, mutated on each
      iteration (wrong data if the XPath matched more than one article).
    - Removed the leftover debug ``print(item)``.
    """
    for article in response.xpath('//div[@class="main-wrap"]/article'):
        item = GeekParkItem()
        header = 'header[@class="post-header"]'
        item['title'] = article.xpath(
            header + '/h1/text()').extract_first()
        item['author'] = article.xpath(
            header + '/div[@class="user-info"]/a/span/text()').extract_first()
        item['posttime'] = article.xpath(
            header + '/div[@class="user-info"]/span/text()').extract_first()
        # Paragraphs come back as separate text nodes; join and trim them.
        paragraphs = article.xpath(
            'div[@id="article-body"]/div/p/text()').extract()
        item['text'] = "".join(paragraphs).strip()
        yield item

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值