1 用法
1.解析内容
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc, 'lxml')
2.浏览数据
soup.title
soup.title.string
3.BeautifulSoup正则使用
soup.find_all(name='x', attrs={'xx': re.compile('xx')})
2 实例
这是一个爬百度贴吧的py
#!/usr/bin/env python
# coding=utf-8
"""Scrape one-line thread abstracts from a Baidu Tieba forum page into 1.txt."""
from bs4 import BeautifulSoup
import requests

TIEBA_URL = 'http://tieba.baidu.com/f?kw=%C9%CC%C7%F0%D2%BB%B8%DF&fr=ala0&tpl=5'

resp = requests.get(TIEBA_URL)
soup = BeautifulSoup(resp.content, 'lxml')
# Each matched div holds one thread's single-line abstract.
# NOTE: the trailing space in the class string is intentional — it must match
# the page's class attribute exactly for this attrs-based lookup.
posts = soup.find_all(name='div',
                      attrs={'class': "threadlist_abs threadlist_abs_onlyline "})

# Context manager closes the file even on error (the original leaked the handle).
# encoding/errors replicate the original .encode('gbk', 'ignore') behavior.
with open('1.txt', 'w', encoding='gbk', errors='ignore') as f:
    for post in posts:
        text = post.string or ''   # .string is None for divs with child tags
        print(text)
        f.write(text + '\n')       # newline added so abstracts don't run together
这是一个爬中国天气网的py
#!/usr/bin/env python
# coding=utf-8
"""Scrape the multi-day forecast from weather.com.cn and print 'day:weather' lines."""
from bs4 import BeautifulSoup
import requests

WEATHER_URL = 'http://www.weather.com.cn/weather/101180101.shtml'

resp = requests.get(WEATHER_URL)
soup = BeautifulSoup(resp.content, 'lxml')
descriptions = soup.find_all(name='p', attrs={'class': 'wea'})  # weather text per day
day_labels = soup.find_all(name='h1')                           # day heading per card

# zip pairs each day with its weather and stops at the shorter list,
# avoiding the original hard-coded range(0, 7) that raised IndexError
# whenever the page returned fewer than 7 forecast entries.
forecast = ['{}:{}'.format(day.string, wea.string)
            for day, wea in zip(day_labels, descriptions)]
for line in forecast:
    print(line)
这个爬出城市名
#!/usr/bin/env python
# coding=utf-8
"""Extract link texts (city names) from a weather.com.cn page with a regex."""
import re
import requests

URL = 'http://www.weather.com.cn/weather/101180101.shtml'
HEADERS = {
    # A desktop UA header; some sites serve different (or no) content to bare clients.
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0',
}

html = requests.get(url=URL, headers=HEADERS).content.decode('utf-8')
# Non-greedy capture of anchor text for links pointing at weather.com.cn.
# Raw string is used per regex convention; the pattern itself is unchanged.
cities = re.findall(r'weather.com.cn">(.*?)</a>', html)
for city in cities:      # iterate the matches directly instead of range(len(...))
    print(city)
获取标签中的内容
"""Demonstrate extracting plain text from an HTML fragment with BeautifulSoup."""
from bs4 import BeautifulSoup

# Deliberately malformed fragment (stray closing tags) — BeautifulSoup repairs it.
s = """
</span><span style= 'font-size:12.0pt;color:#CC3399'>714659079qqcom 2014/09/10 10:14</span></p></div>
"""
soup = BeautifulSoup(s, "html.parser")
print(soup)             # the repaired parse tree
print(soup.get_text())  # only the text content, all tags stripped