Python Crawler Exercise: Scraping Stock Data

Python version: 3.7

import requests
import re
import traceback
from bs4 import BeautifulSoup

def getHTMLText(url):
    # Fetch a page and return its text, or an empty string on any failure.
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding  # guess the encoding from the response content
        return r.text
    except:
        return ""

def getStockList(lst, stockURL):
    # Collect stock codes of the form sh/sz + 6 digits from the listing page's links.
    html = getHTMLText(stockURL)
    soup = BeautifulSoup(html, 'html.parser')
    a = soup.find_all('a')
    for i in a:
        try:
            href = i.attrs['href']
            lst.append(re.findall(r'[s][hz]\d{6}', href)[0])
        except:
            continue  # link without an href or without a stock code

def getStockInfo(lst, stockURL, fpath):
    # Scrape each stock's detail page and append the parsed fields to fpath.
    for stock in lst:
        url = stockURL + stock + '.html'
        html = getHTMLText(url)
        try:
            if html == "":
                continue
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            infoDict.update({'股票名称': name.text.split()[0]})  # '股票名称' = stock name
            keyList = stockInfo.find_all('dt')    # field names
            valueList = stockInfo.find_all('dd')  # field values
            for i in range(len(keyList)):
                key = keyList[i].text
                val = valueList[i].text
                infoDict[key] = val
            with open(fpath, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
        except:
            traceback.print_exc()
            continue

def main():
    stock_list_url = 'http://quote.eastmoney.com/stocklist.html'
    stock_info_url = 'https://gupiao.baidu.com/stock/'
    output_file = 'D://BaiduStockInfo.txt'
    slist = []
    getStockList(slist, stock_list_url)
    getStockInfo(slist, stock_info_url, output_file)
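The listing above defines main() but never calls it; a minimal entry point (assuming the code is saved and run as a standalone script) would be:

if __name__ == '__main__':
    main()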

Two optimizations can be made on top of this: 1. set the page encoding directly instead of detecting it with apparent_encoding on every request (the detection is relatively slow); 2. display scraping progress.

The code is modified as follows:

def getHTMLText(url, code='utf-8'):
    # Same as before, but the caller supplies the encoding instead of
    # having apparent_encoding detect it on every request.
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = code
        return r.text
    except:
        return ""

def getStockList(lst, stockURL):
    # The listing page's encoding ('GB2312') is passed explicitly.
    html = getHTMLText(stockURL, 'GB2312')
    soup = BeautifulSoup(html, 'html.parser')
    a = soup.find_all('a')
    for i in a:
        try:
            href = i.attrs['href']
            lst.append(re.findall(r'[s][hz]\d{6}', href)[0])
        except:
            continue

def getStockInfo(lst, stockURL, fpath):
    # Same scraping logic as before, plus a progress readout on the console.
    count = 0
    for stock in lst:
        url = stockURL + stock + '.html'
        html = getHTMLText(url)
        try:
            if html == "":
                continue
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            infoDict.update({'股票名称': name.text.split()[0]})
            keyList = stockInfo.find_all('dt')
            valueList = stockInfo.find_all('dd')
            for i in range(len(keyList)):
                key = keyList[i].text
                val = valueList[i].text
                infoDict[key] = val
            with open(fpath, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
            count = count + 1
            # '\r' returns the cursor to the start of the line and end=''
            # suppresses the newline, so the percentage ('当前进度' = current
            # progress) overwrites itself in place.
            print('\r当前进度:{:.2f}%'.format(count * 100 / len(lst)), end='')
        except:
            traceback.print_exc()
            count = count + 1
            print('\r当前进度:{:.2f}%'.format(count * 100 / len(lst)), end='')
            continue
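Since each record is written out as str(infoDict), one dictionary literal per line, the output file can later be loaded back with the standard-library ast.literal_eval. A minimal sketch, assuming D://BaiduStockInfo.txt was produced by the code above (loadStockInfo is just an illustrative helper name):

import ast

def loadStockInfo(fpath):
    # Each line is the str() of a dict, which literal_eval can parse safely.
    records = []
    with open(fpath, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line:
                records.append(ast.literal_eval(line))
    return records

stocks = loadStockInfo('D://BaiduStockInfo.txt')
print(len(stocks), 'records loaded')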
