Exception-handling scraper demo, the find and findAll functions, and finding elements with regular expressions

The code comes from chapters 1-2 of the book.

# from urllib.request import urlopen
# from bs4 import BeautifulSoup
# html = urlopen("http://pythonscraping.com/pages/page1.html")
# # "html.parser" is Python's built-in HTML parser
# bsObj = BeautifulSoup(html.read(), "html.parser")
# print(bsObj.h1)
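
Accessing a tag as an attribute (bsObj.h1) returns the first matching tag, the same as calling find("h1"). A minimal sketch of that equivalence (my addition), using a made-up inline HTML snippet instead of a live request:

# from bs4 import BeautifulSoup
# html_doc = "<html><body><h1>An Interesting Title</h1></body></html>"  # illustrative snippet
# demo = BeautifulSoup(html_doc, "html.parser")
# print(demo.h1)            # <h1>An Interesting Title</h1>
# print(demo.find("h1"))    # same tag, found via an explicit find() call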

# Scraper example with exception handling
# from urllib.error import HTTPError, URLError
# from urllib.request import urlopen
# from bs4 import BeautifulSoup
#
# def get_title(url):
#     try:
#         html = urlopen(url)
#     except (HTTPError, URLError) as e:
#         return None
#     try:
#         bsObj = BeautifulSoup(html.read(), "html.parser")
#         title = bsObj.body.h1
#     except AttributeError as e:
#         return None
#     return title
# title = get_title("http://pythonscraping.com/pages/page1.html")
# if title is None:
#     print("title could not be found")
# else:
#     print(title)
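
HTTPError is a subclass of URLError, so the combined except clause above works; catching the two separately shows which failure occurred. A small sketch of that variant (my addition, not from the book; the fetch helper is hypothetical):

# from urllib.request import urlopen
# from urllib.error import HTTPError, URLError
#
# def fetch(url):
#     try:
#         return urlopen(url)
#     except HTTPError as e:    # the server answered with an error status (404, 500, ...)
#         print("HTTP error:", e.code)
#     except URLError as e:     # the server could not be reached at all
#         print("URL error:", e.reason)
#     return None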

# The find and findAll functions
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
# html = urlopen("http://pythonscraping.com/pages/warandpeace.html")
# bsObj = BeautifulSoup(html.read(), "html.parser")
# nameList = bsObj.findAll("span", {"class": "green"})
# for name in nameList:
#     print(name.get_text())
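
findAll returns a list of every match, while find returns only the first match (or None); findAll can also filter on a tag's text content. A short sketch (my addition) against the same bsObj, kept commented out like the example above:

# first_green = bsObj.find("span", {"class": "green"})   # a single Tag, or None if absent
# princes = bsObj.findAll(text="the prince")             # list of matching text strings
# print(first_green.get_text(), len(princes))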

# Searching with the navigation tree
# html = urlopen("http://www.pythonscraping.com/pages/page3.html")
# bsObj = BeautifulSoup(html.read(), "html.parser")
# print(bsObj.find("img", {"src": "../img/gifts/img1.jpg"}).parent.previous_sibling.get_text())
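
Other navigation-tree properties work the same way: children iterates over a tag's direct children, and next_siblings over the tags that follow it. A brief sketch (my addition), assuming the gift table on page3.html has id "giftList" as in the book's example:

# for child in bsObj.find("table", {"id": "giftList"}).children:
#     print(child)
# for sibling in bsObj.find("table", {"id": "giftList"}).tr.next_siblings:
#     print(sibling)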

# Searching with regular expressions
html = urlopen("http://www.pythonscraping.com/pages/page3.html")
bsObj = BeautifulSoup(html.read(), "html.parser")
images = bsObj.findAll("img", {"src": re.compile(r"\.\./img/gifts/img.*\.jpg")})
for image in images:
    print(image["src"])
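
findAll also accepts a function as its filter. The sketch below (my addition) reuses the bsObj parsed above and keeps only the tags that carry exactly two attributes:

two_attr_tags = bsObj.findAll(lambda tag: len(tag.attrs) == 2)
print(len(two_attr_tags))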

 
