功能描述
• 目标:获取上证A股股票名称和交易信息
• 输出:保存到文件中
• 技术路线:requests + bs4 + re
此处选取股票信息静态存储在HTML页面中的页面进行爬取
程序结构设计
(1)首先得到股票代码,此处选取证券之星获得上证A股股票代码
(2)根据股票列表逐个到网易财经获取个股详细信息
(3)将结果存储到文件
代码实现
import re
import time
import traceback
import requests
from bs4 import BeautifulSoup as bs
# 获取网页html文本
def getHTMLText(url):
    """Fetch *url* and return its decoded HTML text.

    Returns "" on any network/HTTP failure so callers can treat a bad
    download as "no data" instead of handling exceptions themselves.
    """
    try:
        headers = {
            'user-agent': 'Mozilla/5.0'  # minimal UA so naive bot filters don't reject us
        }
        r = requests.get(url, headers=headers, timeout=30)
        r.raise_for_status()  # turn 4xx/5xx responses into exceptions
        # Pages here declare encodings unreliably; guess from content instead.
        r.encoding = r.apparent_encoding
        return r.text
    # Narrowed from a bare `except:` — only network/HTTP errors should be
    # swallowed, not KeyboardInterrupt or programming bugs.
    except requests.RequestException:
        return ""
# 获取股票代码数据存进列表中
def getStockList(lst, stockURL):
    """Append unique 6-digit stock codes found on *stockURL* to *lst*.

    Codes are extracted from the href of every <a> inside the
    <ul id="index_data_0"> element; duplicates and links without a
    6-digit code are skipped. Mutates *lst* in place.
    """
    html = getHTMLText(stockURL)
    if not html:
        # Download failed — nothing to parse.
        return
    soup = bs(html, 'html.parser')
    ul = soup.find('ul', attrs={"id": "index_data_0"})
    if ul is None:
        # Page layout changed (or error page): the code list is absent.
        return
    for a in ul('a'):
        href = a.attrs.get('href', '')
        match = re.search(r'\d{6}', href)  # stock codes are exactly 6 digits
        if match and match.group(0) not in lst:
            lst.append(match.group(0))
# 获取列表中每支股票的详细信息并存入文本文档
def getStockInfo(lst, stockURL, fpath):
    """Fetch the 163.com detail page for each code in *lst* and write one
    dict per stock (as its str() form, one per line) to *fpath*.

    The detail pages embed a small JS object; its fields are scraped with
    regexes. A progress percentage is printed in place on one line.
    """
    count = 0

    def _field(pattern, text):
        # Extract the quoted value via a capture group. Replaces the
        # original eval() call — never eval scraped (untrusted) content.
        return re.search(pattern, text).group(1)

    # Open once in 'w' mode: truncates any previous run's output and
    # avoids re-opening the file for every single stock.
    with open(fpath, 'w', encoding='utf-8') as obj:
        for stock in lst:
            url = stockURL + '0' + stock + '.html'  # '0' prefix selects the Shanghai market
            html = getHTMLText(url)
            try:
                if html == '':
                    continue
                infoDict = {}
                soup = bs(html, 'html.parser')
                div = soup.find('div', attrs={'class': 'relate_stock clearfix'})
                script = div('script')
                info = script[0].string.strip().split(',')
                infoDict['股票名称'] = _field(r"name: '(.*)'", info[0])
                infoDict['股票代码'] = _field(r"code: '(\d{6})'", info[1])
                infoDict['现价'] = _field(r"price: '(.*)'", info[2])
                infoDict['涨跌幅'] = re.search(r"change: '(.*%)", info[3]).group(1)
                # 'yesteday' is the site's own (misspelled) field name — do not "fix" it.
                infoDict['昨收'] = _field(r"yesteday: '(.*)'", info[4])
                infoDict['今开'] = _field(r"today: '(.*)'", info[5])
                infoDict['最高'] = _field(r"high: '(.*)'", info[6])
                infoDict['最低'] = _field(r"low: '(.*)'", info[7])
                obj.write(str(infoDict) + '\n')
                count += 1
                print("\r当前进度:{:.2f}%".format(count * 100 / len(lst)), end="")
            # Narrowed from a bare `except:`; a failed page is logged and skipped
            # so one malformed stock page doesn't abort the whole crawl.
            except Exception:
                traceback.print_exc()
                print("\r当前进度:{:.2f}%".format(count * 100 / len(lst)), end="")
                continue
def main():
    """Entry point: build the code list, crawl each stock, report elapsed time."""
    started = time.time()
    list_url = "http://quote.stockstar.com/stock/stock_index.htm"
    info_url = "http://quotes.money.163.com/"
    # Change this to your own output location before running.
    out_path = 'F:\\PythonCrawler\\study\\re\\stockData.txt'
    codes = []
    getStockList(codes, list_url)
    getStockInfo(codes, info_url, out_path)
    elapsed = time.time() - started
    if elapsed > 60:
        print("\n程序运行了{0:.2f}分钟".format(elapsed / 60))
    else:
        print("\n程序运行了{0:.2f}秒".format(elapsed))
# Guard the entry point so importing this module does not trigger the crawl.
if __name__ == '__main__':
    main()
如果需要将代码复制到本地测试,一定要将存储地址改为你自己的存储地址。最后PS:该程序由于使用的是bs4,所以爬取速度会较慢,亲测爬完一千多支股票花了40多分钟