import requests
from bs4 import BeautifulSoup
import traceback
import re
def getHTMLText(url):
    """Fetch a page and return its text, or "" if the request fails."""
    try:
        user_agent = 'your own browser User-Agent string'
        headers = {'User-Agent': user_agent}
        r = requests.get(url, headers=headers, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding  # so Chinese pages decode correctly
        return r.text
    except requests.RequestException:
        return ""
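getHTMLText deliberately returns an empty string instead of raising, so callers can test for "" to detect a failed fetch. A minimal usage sketch (example.com is a stand-in URL, not part of the original script):

page = getHTMLText('http://example.com')  # placeholder target
print(page[:80] if page else 'request failed')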
def getStockList(lst, stock_list_url):
    """Collect Shanghai stock codes ('sh' + 6 digits) from every link on the list page."""
    html = getHTMLText(stock_list_url)
    soup = BeautifulSoup(html, 'html.parser')
    a = soup.find_all('a')
    for i in a:
        try:
            href = i.attrs['href']
            lst.append(re.findall(r"sh\d{6}", href)[0])
        except (KeyError, IndexError):
            # the <a> tag has no href, or the href contains no stock code
            continue
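The extraction hinges entirely on the regex. As a quick offline check, it pulls the code out of a link in the format the East Money list page used (the href below is an illustrative sample, not taken from the site):

href = 'http://quote.eastmoney.com/sh600000.html'  # made-up sample link
print(re.findall(r"sh\d{6}", href))  # ['sh600000']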
def getStockInfo(lst, stock_info_url, fpath):
    """Fetch each stock's detail page, scrape its name and the dt/dd
    field pairs, and append one dict per stock to the output file."""
    for stock in lst:
        url = stock_info_url + stock + '.html'
        html = getHTMLText(url)
        try:
            if html == "":
                continue
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            if stockInfo is None:
                continue
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            infoDict.update({'股票名称': name.text.split()[0]})  # key means "stock name"
            # Each field on the page is laid out as a <dt>label</dt><dd>value</dd> pair.
            keyList = stockInfo.find_all('dt')
            valueList = stockInfo.find_all('dd')
            for i in range(len(keyList)):
                key = keyList[i].text
                val = valueList[i].text
                infoDict[key] = val
            with open(fpath, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
        except Exception:
            traceback.print_exc()
            continue
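The core of the parser is the assumption that each field sits in a <dt>label</dt><dd>value</dd> pair inside the stock-bets div. Here is a self-contained sketch of that pairing on a made-up snippet, using zip in place of the index loop above:

from bs4 import BeautifulSoup

html_snippet = '''
<div class="stock-bets">
  <dt>今开</dt><dd>10.50</dd>
  <dt>成交量</dt><dd>12.3万手</dd>
</div>
'''  # invented values, in the shape the Baidu page used
box = BeautifulSoup(html_snippet, 'html.parser').find('div', attrs={'class': 'stock-bets'})
info = {dt.text: dd.text for dt, dd in zip(box.find_all('dt'), box.find_all('dd'))}
print(info)  # {'今开': '10.50', '成交量': '12.3万手'}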
def main():
    stock_list_url = 'http://quote.eastmoney.com/stocklist.html'
    stock_info_url = 'http://gupiao.baidu.com/stock/'
    output_file = 'D://Postgraduate//Python//python项目//Python网络爬虫与信息提取-中国大学MOOC//3 网络爬虫之实战//BaiduStockInfo.txt'
    slist = []
    getStockList(slist, stock_list_url)
    getStockInfo(slist, stock_info_url, output_file)

if __name__ == '__main__':
    main()
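Both URLs date from the MOOC this example comes from, and gupiao.baidu.com appears to have been retired since, so treat the script as a template rather than a working downloader. Each output line is the str() of a dict, which can be read back with ast.literal_eval; a minimal reader sketch (load_records is a hypothetical helper, and the filename is a stand-in for the full output path):

import ast

def load_records(fpath):
    """Parse one dict per non-empty line from the file written by getStockInfo."""
    with open(fpath, encoding='utf-8') as f:
        return [ast.literal_eval(line) for line in f if line.strip()]

records = load_records('BaiduStockInfo.txt')  # stand-in path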