1.实例编写
并没有说这个页面是怎么找到的,2020.3.14需要加下划线
https://quote.eastmoney.com/stock_list.html
当查找雪球网的时候:比如sh501007
https://xueqiu.com/k?q=sh501007
点击进入页面后
https://xueqiu.com/S/SH501007
这里大小写都可以
请求头
真的麻烦:爬取成功,但出了一大堆问题。CSS选择器查不到对应标签(我还以为是我写错了,但最后连按标签name查都查不到,只能查到一部分标签,想要的查不到)。等问题解决后再补充。
# -*- coding: utf-8 -*-
import scrapy
import re
class StocksSpider(scrapy.Spider):
    """Crawl Eastmoney's stock-list page, extract stock codes, and fetch
    each stock's Xueqiu search page.

    Flow: start at the Eastmoney listing page, pull every ``sh``/``sz`` +
    6-digit code out of the anchors' hrefs, then request
    ``https://xueqiu.com/k?q=<code>`` (Xueqiu redirects the search to the
    stock page, e.g. ``https://xueqiu.com/S/SH501007``; case-insensitive).
    """

    name = 'stocks'
    allowed_domains = ['xueqiu.com']
    start_urls = ['https://quote.eastmoney.com/stock_list.html']

    # Matches a stock code like "sh501007" or "sz000001".
    # Compiled once instead of implicitly on every href.
    _STOCK_CODE = re.compile(r"[s][hz]\d{6}")

    # Request headers are identical for every request, so build them once
    # at class level instead of inside the per-href loop.
    # NOTE(review): the hard-coded cookie will expire; consider moving it
    # to project settings or a login flow.
    _HEADERS = {
        'cookie': 'miid=1296267545453648768; t=b4d385e2145f596a67961e4dd08e9a8f; cna=pqwcFXxbJjACAXWIA7AFEfA8; thw=cn; tracknick=tb487881011; lgc=tb487881011; _cc_=UIHiLt3xSw%3D%3D; tg=0; enc=%2FTqA3gAexHOKU0cyPYbSWM1pGS8vgnlEK3EMnkYd2T%2BlB%2BJh18hxryREG48c%2BYmdk7yfvbSMCBDQExP23eUm3w%3D%3D; hng=CN%7Czh-CN%7CCNY%7C156; x=e%3D1%26p%3D*%26s%3D0%26c%3D0%26f%3D0%26g%3D0%26t%3D0%26__ll%3D-1%26_ato%3D0; cookie2=19ef67fdfc3f433776e5e9cafaf6a8ea; v=0; _tb_token_=08b7e3e7e183; _m_h5_tk=62383241b06635c64b07942e50e47d9d_1562004576179; _m_h5_tk_enc=0465da475a8335f8fd8d9ef6bb280a71; unb=4235284520; sg=101; _l_g_=Ug%3D%3D; skt=c571ae590b7580cb; cookie1=AnQIvxj44XbyESoVNTVtwfJRB8W%2BbAPV%2BVZMWhAghjk%3D; csg=23f40375; uc3=vt3=F8dBy34cs3fc7ebsEqk%3D&id2=Vy67WD1MZomrsw%3D%3D&nk2=F5RBzeKtOazPVJc%3D&lg2=UtASsssmOIJ0bQ%3D%3D; existShop=MTU2MTk5NTE3MQ%3D%3D; dnk=tb487881011; _nk_=tb487881011; cookie17=Vy67WD1MZomrsw%3D%3D; mt=ci=21_1; uc1=cookie14=UoTaGdT0tHdY5w%3D%3D&lng=zh_CN&cookie16=VT5L2FSpNgq6fDudInPRgavC%2BQ%3D%3D&existShop=false&cookie21=VFC%2FuZ9aj3yE&tag=8&cookie15=UIHiLt3xD8xYTw%3D%3D&pas=0; whl=-1%260%260%261561995222497; isg=BHNzJqpkKgCWtOesccf13ZRUAnddACwkF8iwAyUQzxLJJJPGrXiXutG23hRvn19i; l=bBMxcfBPv539-OTkBOCanurza77OSIRYYuPzaNbMi_5K-6T_2qQOkAuQFF96Vj5Rs4YB4G2npwJ9-etkq',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
    }

    def parse(self, response):
        """Yield one Xueqiu request per stock code found on the list page.

        The original version used ``re.findall(...)[0]`` wrapped in a bare
        ``except: continue``; that silenced every exception (including real
        bugs), so the expected no-match case is now an explicit check.
        """
        for href in response.css('a::attr(href)').extract():
            match = self._STOCK_CODE.search(href)
            if match is None:
                # This anchor does not link to a stock page; skip it.
                continue
            url = 'https://xueqiu.com/k?q=' + match.group()
            # callback is the handler for the downloaded stock page.
            yield scrapy.Request(url, callback=self.parse_stock,
                                 headers=self._HEADERS)

    def parse_stock(self, response):
        """Append the page's <span>/<p> text to Xueqiu.txt for inspection.

        Debug-oriented dump: the intended field extraction is still TODO
        (see the notes at the top of the file about selectors failing).
        """
        spans = response.css('span::text').extract()
        paragraphs = response.css('p::text').extract()
        fname = 'Xueqiu.txt'
        # Explicit encoding: the scraped pages contain Chinese text and the
        # platform default (e.g. GBK on Windows) would raise on write.
        # The 'with' block closes the file; no manual close() needed.
        with open(fname, 'a', encoding='utf-8') as f:
            f.write(str(response) + '\n')
            f.write('span' + str(spans) + '\n')
            f.write('p' + str(paragraphs) + '\n')
        self.log('Saved file %s.' % str(response))