# -*- coding: utf-8 -*-
# China Industry Competitive Intelligence Network
# http://www.chinacir.com.cn/hyzx/
import scrapy
from urllib.parse import urljoin
class ZgcyjzqbwSpider(scrapy.Spider):
    """Spider for the industry-news section (hyzx) of chinacir.com.cn.

    Starts from the category index page, extracts each category link and
    its text label, and schedules one request per category, forwarding
    the label through the request ``meta`` dict.
    """

    name = 'zgcyjzqbw'
    allowed_domains = ['www.chinacir.com.cn']
    start_urls = ['http://www.chinacir.com.cn/hyzx/']
    # Default request headers (browser-like UA to avoid trivial blocking).
    headers = {
        "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'
    }
    # Number of pages to crawl.
    page_number = 1

    def parse(self, response):
        """Extract category hrefs and labels, yield one Request per category.

        The category label travels with the request via ``meta['industry']``
        so the listing callback knows which category it is handling.
        """
        hrefs = response.xpath('//*[@id="hy_index"]/ul/div/ul/li/a/@href').extract()
        labels = response.xpath('//*[@id="hy_index"]/ul/div/ul/li/a/text()').extract()
        # zip pairs each href with its label directly — no manual index
        # bookkeeping, and no IndexError if the two lists ever differ in length.
        for href, label in zip(hrefs, labels):
            absolute_url = urljoin(self.start_urls[0], href)
            # Pass the category label along in meta (a dict).
            yield scrapy.Request(url=absolute_url, meta={'industry': label}, callback=self.manage_list)

    def manage_list(self, response):
        """Handle one category listing page (currently debug-prints only)."""
        print(response.url)
        # Retrieve the category label passed through meta.
        print(response.meta.get('industry'))
if __name__ == '__main__':
    # Allow running this spider directly for local debugging.
    from scrapy import cmdline
    cmdline.execute(['scrapy', 'crawl', 'zgcyjzqbw'])