Python中cookie登录基金账户获取账户金额以追加写入方式导出到excel【心血付出】

以下代码汇集了python学习以来的最高精华,百度、CSDN和51CTO里都翻遍了,不少代码在其他代码基础上测试了n遍才运行通过,看似很简单,但实际运行不是这里报错就是那里bug,熬了很多个通宵和日夜才弄出来,现在免费拿出来供大家学习参考使用!

此篇的推出,也是作为业余爱好的本人学习和实践的告一段落;

后续还会陆续推出涉及Excel中VBA、C#、ASP.net、python、Tableau的编程总结,感谢大家关注;编程这玩意上瘾,做了一个功能还想做更高级的,没完没了,太累了,还好不是这个行业的,不然非累趴下不可;

其实本人在二十四五岁的时候也曾想过转行做编程,哈哈,那会就算不发工资也愿意,被一句“程序员35岁的时候就失业了”这句江湖传言误导了,现在想想也是十多年前的事了,时光如箭、岁月如梭!

好买网完整代码:

"""Scrape the logged-in howbuy.com portfolio page and export holdings to Excel.

Auth is a cookie copied from a browser session (DevTools); it must be
refreshed by hand whenever it expires.
"""
import json

import requests
from bs4 import BeautifulSoup
import xlwings as xw

# fix: removed the leading space inside the URL literal.
url = 'https://i.howbuy.com/member/property/index.htm'
session = requests.session()

# fix: the value previously started with the literal "Cookie: " header name
# (copied straight from DevTools), which would have produced the malformed
# request header "Cookie: Cookie: __hutmz=...".
cookie = '__hutmz=268394641.1638712023.1.1.hutmcsr=(direct)|hutmccn=(direct)|hutmcmd=(none); __hutmmobile=6291F9B9-A916-44D6-BFFE-910AC7FDE2DF; _ga=GA1.2.2010464580.1638712024; USER_INFO_COOKIE=8000283184; USER_SALT_COOKIE=a3e87ddbaba546326a620d29b9433fda; SESSION=55fd09d5-eaf4-45bb-b968-6863a1bed9d7; __hutma=268394641.36887752.1638712023.1642219388.1643520565.3; __hutmc=268394641; _hb_pgid=; Hm_lvt_d7da8e068ed3fc9fddf36e1474d491a2=1642219388,1643520566; _gid=GA1.2.1215252369.1643520566; OZ_SI_1497=sTime=1643520564&sIndex=6; OZ_1U_1497=vid=v1acc2d7e57a04.0&ctime=1643520798&ltime=1643520585; OZ_1Y_1497=erefer=-&eurl=https%3A//i.howbuy.com/login/login.htm%3FtargetUrl%3Dhttps%3A//i.howbuy.com/member/property/index.htm&etime=1643520564&ctime=1643520798&ltime=1643520585&compid=1497; __hutmb=268394641.4.10.1643520565; Hm_lpvt_d7da8e068ed3fc9fddf36e1474d491a2=1643520799'

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
    'Cookie': cookie,
}
res = session.get(url=url, headers=headers)

# Dump the raw page so the selectors below can be debugged offline.
with open('rr.html', 'w', encoding='utf-8') as f:
    f.write(res.content.decode())

html = BeautifulSoup(res.text, 'html.parser')
# One 'typBox mt20' div per product group on the portfolio page.
holdings = html.find_all('div', class_='typBox mt20')

fund = []
for item in holdings:
    # fix: str.strip('公募') removes any of those CHARACTERS from both ends;
    # removeprefix/removesuffix (3.9+) drop only the exact substring.
    fundName = (item.find('p', class_='title').text
                .strip(',').removeprefix('公募').removesuffix('公募'))
    # Amounts are rendered with thousands separators, e.g. "12,345.67".
    # (fix: dropped the no-op "[0:]" slices that followed strip(',').)
    fundSum = item.find('div', class_='lt w160').p.text.strip(',').replace(',', '')
    # Profit is shown in a cRed element when positive and cGreen otherwise;
    # fix: replaced the try/bare-except fallback with an explicit lookup.
    profit_node = item.find('p', class_='cRed') or item.find('p', class_='cGreen')
    fundProfit = profit_node.text.strip(',').replace(',', '')

    print(fundName, fundSum, fundProfit, sep='/')
    fund.append({
        'fundName': fundName,
        'fundSum': float(fundSum),
        'fundProfit': float(fundProfit),
    })
    print("写入正常")

# Persist the scraped rows, then reload them so the Excel step works from the
# same JSON representation every run would use.
with open('.fund.json', 'w', encoding='utf-8') as f:
    json.dump(fund, f, indent=1, ensure_ascii=False)

with open('.fund.json', 'r', encoding='utf-8') as f:
    data = json.load(f)

# Drive Excel headless and silent through xlwings.
app = xw.App(visible=False, add_book=False)
app.display_alerts = False
app.screen_updating = False
try:
    new_workbook = xw.Book()
    new_worksheet = new_workbook.sheets.add('Worksheet')
    title = ["序号", "平台", "分类", "基金名称", "基金代码", "金额", "份额", "收益", "收益率", "投资日期", "记录"]
    new_worksheet['A1'].value = title
    # cells[] is 0-based: header sits on row 0, data rows start at i + 1.
    for i in range(len(data)):
        try:
            new_worksheet.cells[i + 1, 0].value = i + 1
            new_worksheet.cells[i + 1, 1].value = "好买网"
            new_worksheet.cells[i + 1, 3].value = data[i]['fundName']
            new_worksheet.cells[i + 1, 5].value = data[i]["fundSum"]
            new_worksheet.cells[i + 1, 7].value = data[i]["fundProfit"]
            print(str(i), "写入正常")
        except Exception:  # fix: was a bare except
            print(str(i), "写入异常")
    new_worksheet.autofit()
    new_workbook.save('Excel.xlsx')
    new_workbook.close()
finally:
    # fix: always quit, otherwise a failed run leaks a hidden Excel process.
    app.quit()

雪球网蛋卷基金追加数据写入Excel,为读取本地html

本地html是直接从网页里导出来的,因为他们家cookie一直变;

"""Parse a locally saved danjuanfunds (蛋卷基金) holdings page and append the
rows to Excel.xlsx.

The HTML is exported from the browser by hand because the site rotates its
cookies too quickly for a scripted login to be practical.
"""
import json

from bs4 import BeautifulSoup
import xlwings as xw
# fix: removed "from pip._vendor.colorama import win32" — an unused import of
# pip's PRIVATE vendored package that can break on any pip upgrade.

with open('eggfund.html', 'r', encoding='utf-8') as f:
    soup = BeautifulSoup(f, 'html.parser')
# One 'inner' div per holding card.
holdings = soup.find_all('div', class_='inner')

fund = []
for i, item in enumerate(holdings):
    try:
        fundName = item.find('div', class_='p-name').text.strip()
        # Third child of the first small-box div carries the amount text,
        # with thousands separators removed.
        fundSum = (item.find_all('div', class_='small-box')[0]
                   .contents[2].string.strip().replace(',', ''))
        # First child of the first col2 div is the profit; [1:] drops the
        # leading +/- sign character.
        fundProfit = item.find_all('div', class_='col2')[0].contents[0].string.strip()[1:]
        fund.append({
            'fundName': fundName,
            # fix: the original stored '' whenever the parsed value was 0.0
            # ("float(x) if float(x) else ''") — zero is a legitimate amount.
            'fundSum': float(fundSum),
            'fundProfit': float(fundProfit),
        })
    # fix: was a bare except that printed only the index; only the expected
    # "this card does not match the layout" failures are skipped now.
    except (AttributeError, IndexError, ValueError):
        print(i)

with open('.fund.json', 'w', encoding='utf-8') as f:
    json.dump(fund, f, indent=1, ensure_ascii=False)

# Reload from JSON so the Excel step uses the serialized representation.
with open(".fund.json", 'r', encoding='utf-8') as f:
    data = json.load(f)

app = xw.App(visible=False, add_book=False)
try:
    wb = app.books.open('Excel.xlsx')
    sh = wb.sheets['Worksheet']
    # used_range.rows.count is the number of occupied rows; with 0-based
    # cells[] indexing that is exactly the first free row, so new data is
    # appended below whatever earlier scripts wrote.
    j = sh.used_range.rows.count
    for i in range(len(data)):
        try:
            sh.cells[i + j, 0].value = i + j
            sh.cells[i + j, 1].value = "蛋卷基金"
            sh.cells[i + j, 3].value = data[i]['fundName']
            sh.cells[i + j, 5].value = data[i]["fundSum"]
            sh.cells[i + j, 7].value = data[i]["fundProfit"]
            print(str(i), "写入正常")
        except Exception:  # fix: was a bare except
            print(str(i), "写入异常")
    wb.save()
    wb.close()
finally:
    # fix: always quit, otherwise a failed run leaks a hidden Excel process.
    app.quit()

同花顺爱基金完整代码:


"""Parse a locally saved 爱基金 (10jqka) holdings page and append the rows to
Excel.xlsx below the data written by the earlier platform scripts."""
import json

from bs4 import BeautifulSoup
import xlwings as xw

with open('aijijinFund.html', 'r', encoding='utf-8') as f:
    soup = BeautifulSoup(f, 'html.parser')
# class_ matching is per-CSS-class, so 'box_cont_0' also matches the
# 'box_cont_0 cb' and trailing 'box_cont_0 cb nom' variants.
holdings = soup.find_all('div', class_='box_cont_0')

fund = []
for item in holdings:
    # Link text looks like "基金名称(123456)" with a full-width parenthesis.
    # (fix: dropped the no-op .strip('') calls and the duplicated lookup.)
    link_text = item.find('strong', class_='fund_name fl').a.text
    fundName = link_text.split('(')[0]
    fundCode = link_text.split('(')[1]  # fund code; trailing ")" still attached, currently unused
    row = item.find_all('tr')[1]  # second <tr> holds the current amounts
    fundSum = row.contents[1].string         # second cell: amount held
    fundProfit = row.contents[4].string[1:]  # fifth cell: profit, leading +/- sign dropped
    fund.append({
        'fundName': fundName,
        'fundSum': float(fundSum),  # store as numbers, not text
        'fundProfit': float(fundProfit),
    })

with open('.fund.json', 'w', encoding='utf-8') as f:
    json.dump(fund, f, indent=1, ensure_ascii=False)

# Reload from JSON so the Excel step uses the serialized representation.
with open(".fund.json", 'r', encoding='utf-8') as f:
    data = json.load(f)

app = xw.App(visible=False, add_book=False)
try:
    wb = app.books.open('Excel.xlsx')
    sh = wb.sheets['Worksheet']
    # used_range.rows.count is the number of occupied rows; with 0-based
    # cells[] indexing that is exactly the first free row.
    j = sh.used_range.rows.count
    for i in range(len(data)):
        try:
            sh.cells[i + j, 0].value = i + j
            sh.cells[i + j, 1].value = "爱基金"
            sh.cells[i + j, 3].value = data[i]['fundName']
            sh.cells[i + j, 5].value = data[i]["fundSum"]
            sh.cells[i + j, 7].value = data[i]["fundProfit"]
            print(str(i), "写入正常")
        except Exception:  # fix: was a bare except
            print(str(i), "写入异常")
    wb.save()
    wb.close()
finally:
    # fix: always quit, otherwise a failed run leaks a hidden Excel process.
    app.quit()
# fix: removed the dead '''...''' string holding the superseded xlwt variant.

--编程学习和实操告一段落--

天天基金网完整代码:

"""Parse a locally saved 天天基金 holdings page and append the rows (including
the fund code) to Excel.xlsx below the earlier platforms' data."""
import json

from bs4 import BeautifulSoup
import xlwings as xw

with open('tiantianFund.html', 'r', encoding='utf-8') as f:
    soup = BeautifulSoup(f, 'html.parser')
# Every <tr> on the page; rows that are not holdings fail the lookups below
# and are skipped by the except clause.
rows = soup.find_all('tr')

fund = []
for item in rows:
    try:
        link_text = item.find('p', class_='f16').a.text
        fundName = link_text.split('(')[0]                      # name before "(code)"
        fundCode = link_text.split('(')[1].replace(')', '')     # code with ")" removed
        # strip('有在途交易') removes the "pending transaction" marker
        # characters from the ends of the amount cell; digits are unaffected.
        fundSum = item.find('td', class_='tor f16 desc').text.strip('有在途交易').replace(',', '')
        # NOTE(review): replace('-', '') drops the sign, so a loss is stored
        # as a positive number — confirm this matches the other platforms.
        fundProfit = item.find('span', attrs={'class': 'f16'}).text.replace('-', '').replace(',', '')
        print(fundProfit)
        fund.append({
            'fundName': fundName,
            'fundCode': str(fundCode),
            'fundSum': float(fundSum),  # store as numbers, not text
            'fundProfit': float(fundProfit),
        })
    # fix: was a bare "except: continue" that hid every error (including
    # coding mistakes); only the expected parse failures are skipped now.
    except (AttributeError, IndexError, ValueError):
        continue

with open('.fund.json', 'w', encoding='utf-8') as f:
    json.dump(fund, f, indent=1, ensure_ascii=False)

# Reload from JSON so the Excel step uses the serialized representation.
with open(".fund.json", 'r', encoding='utf-8') as f:
    data = json.load(f)

app = xw.App(visible=False, add_book=False)
try:
    wb = app.books.open('Excel.xlsx')
    sh = wb.sheets['Worksheet']
    # used_range.rows.count is the number of occupied rows; with 0-based
    # cells[] indexing that is exactly the first free row.
    j = sh.used_range.rows.count
    for i in range(len(data)):
        try:
            sh.cells[i + j, 0].value = i + j
            sh.cells[i + j, 1].value = "天天基金"
            sh.cells[i + j, 3].value = data[i]['fundName']
            sh.cells[i + j, 4].value = data[i]['fundCode']
            sh.cells[i + j, 5].value = data[i]["fundSum"]
            sh.cells[i + j, 7].value = data[i]["fundProfit"]
            print(str(i), "写入正常")
        except Exception:  # fix: was a bare except
            print(str(i), "写入异常")
    wb.save()
    wb.close()
finally:
    # fix: always quit, otherwise a failed run leaks a hidden Excel process.
    app.quit()

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

品尚公益团队

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值