# -*- coding: utf-8 -*-
import scrapy
from urllib import parse
class BaiduSoSpider(scrapy.Spider):
    """Crawl Baidu search results for a fixed query and print the hits.

    ``parse`` follows every pagination link on the first result page;
    ``parse_detail`` extracts the result titles from each page.
    """

    name = "baidu_so"
    allowed_domains = ["www.baidu.com"]
    # Query string is "磁力链接搜索器" ("magnet-link search engine"), URL-encoded.
    start_urls = ['http://www.baidu.com/s?wd=%E7%A3%81%E5%8A%9B%E9%93%BE%E6%8E%A5%E6%90%9C%E7%B4%A2%E5%99%A8']

    def parse(self, response):
        """Yield a request for every pagination link in the #page block.

        dont_filter=True because pagination hrefs are relative and may
        otherwise be dropped by the duplicate filter.
        """
        post_urls = response.xpath("//*[@id=\"page\"]/a/@href").extract()
        print(post_urls)
        for post_url in post_urls:
            # Pagination hrefs are relative; resolve against the current URL.
            yield scrapy.Request(url=parse.urljoin(response.url, post_url),
                                 callback=self.parse_detail, dont_filter=True)

    def parse_detail(self, response):
        """Print each result title with non-breaking spaces stripped."""
        my_urls = response.xpath("//*/div[2]/a[1]/text()").extract()
        print(my_urls)
        for my_url in my_urls:
            # BUG FIX: str.replace returns a new string; the original call
            # discarded its result, so "\xa0" was never actually removed.
            my_url = my_url.replace("\xa0", "")
            print(my_url)
# Source: CSDN blog post "Crawl Baidu search" (last recommended update 2024-04-23 18:06:15).