# Import the libraries we need
import requests
from bs4 import BeautifulSoup
import traceback
import re

# First function: fetch the raw HTML of a page
def getHTMLText(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        # Return an empty string on failure so callers can skip this page
        return ""
# Second function: parse the listing page and collect stock codes
def getStockList(lst, stockURL):
    html = getHTMLText(stockURL)
    # BeautifulSoup parses the HTML into a traversable tag tree
    soup = BeautifulSoup(html, 'html.parser')
    # Find all <a> tags; find_all returns a list
    a = soup.find_all('a')
    # Walk the list of anchors
    for i in a:
        try:
            # Read the href attribute
            href = i.attrs['href']
            # Keep hrefs containing a code like sh123456 or sz123456
            lst.append(re.findall(r"[s][hz]\d{6}", href)[0])
        except:
            # Skip anchors without a matching href
            continue
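# For illustration (assumed link format): an anchor such as
#   <a href="http://quote.example.com/sh600000.html">...</a>
# matches the pattern above and appends "sh600000" to lst.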
# Third function: visit each stock's detail page and save its data to a file
def getStockInfo(lst, stockURL, fpath):
    count = 0
    # Loop over the collected stock codes
    for stock in lst:
        # Build the URL of this stock's detail page
        url = stockURL + stock + ".html"
        # Fetch the page
        html = getHTMLText(url)
        try:
            if html == '':
                continue
            # Start with an empty dict for this stock
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            # Find the first <div> whose class is 'stock-bets'
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            # find_all returns a list; take the first tag with class 'bets-name'
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            # Store the stock name. Note that .text and .string differ:
            # .string returns nothing when the tag has child nodes, .text does not
            infoDict.update({'Stock Name': name.text.split()[0]})
            # Collect all field names (<dt>) and values (<dd>)
            keyList = stockInfo.find_all('dt')
            valueList = stockInfo.find_all('dd')
            # Pair them up, add them to the dict, then append to the output file
            for i in range(len(keyList)):
                key = keyList[i].text
                val = valueList[i].text
                infoDict[key] = val
            with open(fpath, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
            # Progress indicator (visible when run from a terminal)
            count = count + 1
            print('\rProgress: {:.2f}%'.format(count * 100 / len(lst)), end='')
        except:
            count = count + 1
            print('\rProgress: {:.2f}%'.format(count * 100 / len(lst)), end='')
            traceback.print_exc()
            continue
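# Illustrative shape of one output line (the remaining field names come straight
# from the page's <dt> labels, so the exact keys depend on the site):
#   {'Stock Name': 'XYZ', 'Open': '10.00', 'High': '10.50', ...}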
def main():
    stock_list_url = ""
    stock_info_url = ""
    output_file = r'E:\pycharmProjects\pachonglianxi\StockInfo.txt'
    slist = []
    getStockList(slist, stock_list_url)
    getStockInfo(slist, stock_info_url, output_file)

if __name__ == '__main__':
    main()