Finding tag attribute values with bs4

Searching with soup

from bs4 import BeautifulSoup

soup = BeautifulSoup(open('tencent.html', encoding='utf-8'), 'lxml')
# print(soup.prettify())

# Get all tr tags (the result is a list, so individual tags can be accessed by index)
# res1 = soup.find_all('tr')
# for i in res1:
#     print('='*50)
#     print(i)

#trs = soup.select('tr')


# Get the second tr tag
# trs = soup.find_all('tr',limit=2)[1]
# print(trs)

#tr = soup.select('tr:nth-of-type(2)')[0]
#print(tr)

# Get all tr tags whose class is 'even'
# rs = soup.find_all('tr',class_='even')
#rs = soup.find_all('tr',attrs={'class':'even'})
#print(rs)

#trs = soup.select('tr[class="even"]')
#print(trs)

# Get all a tags whose id and class are both 'test'
alist = soup.select('a[class="test"][id="test"]')
print(alist)
for a in alist:
    print(a)


# Get the href attribute of every a tag
# alist = soup.find_all('a')
# for a in alist:
#     href = a['href']
#     href = 'https://' + href
#     print(href)

# Get all job listing info as plain text
tq = soup.find_all('tr')[1:]
ms = []
# for tr in tq:
#     m = {}
#     tds = tr.find_all('td')
#
#     title = tds[0].string
#     category = tds[1].string
#     number = tds[2].string
#     city = tds[3].string
#     pubtime = tds[4].string
#
#     # use string keys so each dict has named fields
#     m['title'] = title
#     m['category'] = category
#     m['number'] = number
#     m['city'] = city
#     m['pubtime'] = pubtime
#     ms.append(m)
#
# print(ms)

# select returns all matching tags; select_one returns only the first match
# trs = soup.select('tr')
# for tr in trs:
#     infos = list(tr.stripped_strings)
#     print(infos)
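
To recap the two lookup styles used above: select returns a list of every matching tag, select_one returns only the first match, and an attribute can be read either with tag['href'] or tag.get('href'). A minimal, self-contained sketch (the tiny HTML snippet here is made up purely for illustration):

from bs4 import BeautifulSoup

demo_html = '<div><a class="test" id="test" href="example.com/a">A</a>' \
            '<a class="other" href="example.com/b">B</a></div>'
demo = BeautifulSoup(demo_html, 'lxml')

print(demo.select('a'))                        # list containing both a tags
print(demo.select_one('a'))                    # only the first a tag
print(demo.select_one('a.test')['href'])       # example.com/a
print(demo.select_one('a.other').get('href'))  # example.com/b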

Scraping weather data

Remember to install the required libraries first; the final result is visualized with pyecharts, as shown below.
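
If the dependencies are not installed yet, a single install command along these lines should cover everything imported in this post (these are the PyPI package names; versions are left unpinned here):

pip install requests beautifulsoup4 lxml html5lib pyecharts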

import requests
from bs4 import BeautifulSoup
from pyecharts.charts import Bar

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Referer": "http://www.weather.com.cn",
}
ALL_DATA = []
def parse_weather(url):
    response = requests.get(url,headers=headers)
    data = response.content.decode('utf-8')
    soup = BeautifulSoup(data,'html5lib')
    conMidtab = soup.find('div',class_='conMidtab')
    tables = conMidtab.find_all('table')
    for table in tables:
        trs = table.find_all('tr')[2:]
        for index,tr in enumerate(trs):
            tds = tr.find_all('td')
            # print(index,tds)
            city_td = tds[0]
            temp_td = tds[3]
            if index==0:
                # the first row of each table carries an extra province cell,
                # so the city and temperature columns shift one to the right
                city_td = tds[1]
                temp_td = tds[4]
            city_td = list(city_td.stripped_strings)[0]
            temp_td = list(temp_td.stripped_strings)[0]
            # print(city_td,temp_td)
            ALL_DATA.append({'city':city_td,'temp':temp_td})
urls = [
    'http://www.weather.com.cn/textFC/hb.shtml',
    'http://www.weather.com.cn/textFC/hd.shtml',
    'http://www.weather.com.cn/textFC/hz.shtml',
    'http://www.weather.com.cn/textFC/hn.shtml',
    'http://www.weather.com.cn/textFC/xn.shtml',
    'http://www.weather.com.cn/textFC/xb.shtml',
    'http://www.weather.com.cn/textFC/db.shtml',
    'http://www.weather.com.cn/textFC/gat.shtml',
]

for url in urls:
    parse_weather(url)

print(ALL_DATA)

# Visualization
# the temperatures were scraped as strings, so convert to int for a correct numeric (ascending) sort
ALL_DATA.sort(key=lambda x: int(x['temp']))
data = ALL_DATA[0:10]
# print(data)
cities = list(map(lambda x:x['city'], data))
max_temp = list(map(lambda x:x['temp'], data))

bar = Bar()
bar.add_xaxis(cities)
bar.add_yaxis('Max temperature', max_temp)
# render() writes a local HTML file; by default it creates render.html in the current directory
# a path can also be passed in, e.g. bar.render("mycharts.html")
bar.render('weather.html')
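
pyecharts 1.x also lets the same chart be built in a chained style, with a title attached via set_global_opts and options.TitleOpts. A hedged sketch under that assumption (the title text and output file name below are just illustrative):

from pyecharts import options as opts
from pyecharts.charts import Bar

chart = (
    Bar()
    .add_xaxis(cities)
    .add_yaxis('Max temperature', max_temp)
    .set_global_opts(title_opts=opts.TitleOpts(title='Ten coldest cities by daily max temperature'))
)
chart.render('weather_titled.html')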