目录
导入库
import requests #发送网络请求
import csv
import pandas as pd
from pyecharts.charts import Bar
from pyecharts import options as opts
from pyecharts.charts import Line
from pyecharts.globals import ThemeType
from pyecharts.charts import Pie
对雪球网进行发送请求
按f12进入开发者模式
点击搜索选项随机搜索一个你需要的
这里以三六零为例
双击进入
在“标头”(Headers) 选项卡里可以找到我们需要的 url,并且可以看到请求方式是 get
同时通过https://stock.xueqiu.com/v5/stock/screener/quote/list.json?page=1&size=30&order=desc&order_by=amount&exchange=CN&market=CN&type=sha
我们可以知道这是第一页的地址,如果我们想要爬取多页的地址只需要将page = 1 修改成page={page},将page作为变量,利用for循环,循环去爬取
# Open the output CSV in append mode; newline='' prevents blank rows on Windows.
file = open('data.csv', mode='a', encoding='utf-8', newline='')
# Column names double as the dict keys written per stock row.
csv_write = csv.DictWriter(file, fieldnames=['股票代码', '股票名称', '当前价', '涨跌额', '涨跌幅', '年初至今', '成交量', '成交额', '换手率', '市盈率(TTM)', '股息率', '市值'])
csv_write.writeheader()

# Request headers are loop-invariant, so build them once instead of once per page.
# NOTE(review): the cookie is session-bound and will expire — refresh it before running.
headers = {
    'cookie': 's=bg12ceoud3; xq_a_token=92653cab19163fc842ad5747ac2c2cdee44c935e; xq_r_token=0f90d6ef86e3c742498591af7860096fe2e3fc86; xq_id_token=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJ1aWQiOi0xLCJpc3MiOiJ1YyIsImV4cCI6MTY4NzczOTYwMiwiY3RtIjoxNjg1ODU4NDc2MzcwLCJjaWQiOiJkOWQwbjRBWnVwIn0.I3Ca8N5CigCzk-zJnkhBtKDHdqzhs59GEF7kxrPPAOyB1yTp0SZLQVMnS5cE85WrklmzKkp1w1Pz8d_1pzo_rN1c99RKZhcDPupadTACmdZpleweIqLgNb_Z2vX9syJp_Kk2qizaEmU-qh_FPngHvk3UDzkUMp_1FqtyUnXl5E6v8byWdTXcv17hcygC9biOFECw2w22jiyHrYJub7OJYe5A6D85gCqzgNxeOyeOt0i18kQuGeyvDmc5yfCcYakTGtSf6v_-S8z7faFiXwZTqMWvZURvGWxG416xjc9H32gDqWFG-zUtx_HNZ3jdpn7SHOPGzv28g5xnkJk-uioGPQ; Hm_lvt_1db88642e346389874251b5a1eded6e3=1685858531; u=321685858530935; device_id=919dd2375604c02498437383a183e8c9; Hm_lpvt_1db88642e346389874251b5a1eded6e3=1685862751',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.57',
}
# Pages 1-55, 30 records per page, Shanghai A-share market.
for page in range(1, 56):
    url = f'https://stock.xueqiu.com/v5/stock/screener/quote/list.json?page={page}&size=30&order=desc&order_by=amount&exchange=CN&market=CN&type=sha'
    response = requests.get(url, headers=headers)
    # Bug fix: print the response object (status), not the requests module itself.
    print(response)
    print("正在爬取%d页" % page)
爬取想要的东西
我们需要爬取的是下面这些东西:
我们可以在响应里面找到参数值对应
例如
data1 = data['symbol']
data2 = data['name']
data3 = data['current']
这三个指的就是股票代码、股票名称和当前价
以此类推将我们所有参数找到后
使用csv进行保存,以上就是所有股票爬取的内容
csv_write.writerow(data_dict)
这部分的代码如下:
# Parse the JSON payload from the current page and append one CSV row per stock.
json_data = response.json()
data_list = json_data['data']['list']

# Map each CSV column name to its key in the API response item.
# This replaces the twelve repetitive data1..data12 temporaries, and the
# unused content_type lookup, with a single declarative table.
field_map = {
    '股票代码': 'symbol',
    '股票名称': 'name',
    '当前价': 'current',
    '涨跌额': 'chg',
    '涨跌幅': 'percent',
    '年初至今': 'current_year_percent',
    '成交量': 'volume',
    '成交额': 'amount',
    '换手率': 'turnover_rate',
    '市盈率(TTM)': 'pe_ttm',
    '股息率': 'dividend_yield',
    '市值': 'market_capital',
}
for data in data_list:
    # Build the row dict in the exact column order declared in the DictWriter.
    data_dict = {column: data[key] for column, key in field_map.items()}
    csv_write.writerow(data_dict)
file.close()
print("爬取完毕!")
对股票数据的预处理和可视化
# Load the scraped stock data from disk.
data_df = pd.read_csv('data.csv')
# Preprocessing: replace missing values (NaN) with 0.
df = data_df.fillna(0)
# Keep only the name + volume columns, then the first 50 rows for readability.
df1 = df[['股票名称', '成交量']]
df1_1 = df1.iloc[:50]
# Assemble the volume bar chart step by step (each call returns the chart,
# so this is equivalent to the chained-builder form).
c1 = Bar()
c1.add_xaxis(df1_1['股票名称'].values.tolist())
c1.add_yaxis('股票成交量情况', df1_1['成交量'].values.tolist())
c1.set_global_opts(
    title_opts=opts.TitleOpts(title="成交量图表"),
    datazoom_opts=opts.DataZoomOpts(),
)
# Write the interactive chart to an HTML file.
c1.render("chart.html")
print("成交量柱形图表生成成功!")
上述对数据进行了空值的填充,并且将成交量和股票划分出来,绘制了柱形图。