# 使用 Requests & BeautifulSoup 获取汽车之家新闻
import requests
from bs4 import BeautifulSoup
# Download the news listing page.
# Send a browser-like User-Agent (many sites reject the default
# "python-requests" UA) and set a timeout so the request cannot hang forever.
ret = requests.get(
    url="https://www.autohome.com.cn/news/",
    headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"},
    timeout=10,
)
ret.raise_for_status()  # fail fast on HTTP 4xx/5xx instead of parsing an error page
# The site does not always declare its encoding correctly; let requests
# detect it from the body so Chinese text is not mojibake.
ret.encoding = ret.apparent_encoding
# print(ret.text)
# Parse the page and print every news entry (title, link, summary),
# downloading each entry's thumbnail image into the working directory.
soup = BeautifulSoup(ret.text, 'html.parser')
div = soup.find(name='div', id='auto-channel-lazyload-article')
if div is None:
    # layout changed or we were served an anti-bot page — stop with a clear error
    raise RuntimeError('news container <div id="auto-channel-lazyload-article"> not found')
li_list = div.find_all(name='li')
for li in li_list:
    h3 = li.find(name='h3')
    if not h3:
        # placeholder <li> items carry no headline — skip them
        continue
    p = li.find(name='p')
    a = li.find(name='a')
    # guard: a stray entry may lack the link or the summary paragraph
    print(h3.text, a.get('href') if a else '')
    if p is not None:
        print(p.text)

    img = li.find('img')
    src = img.get('src') if img is not None else None
    if not src or '__' not in src:
        # no image, or an unexpected URL shape — nothing to download
        print('=' * 100)
        continue
    # thumbnail URLs look like ".../xxx__<file_name>"; keep the trailing part
    file_name = src.rsplit('__', maxsplit=1)[1]
    print(file_name)
    print('=' * 100)
    # src is usually protocol-relative ("//..."); prepend a scheme only when needed
    img_url = src if src.startswith('http') else 'https:' + src
    ret_img = requests.get(url=img_url, timeout=10)
    with open(file_name, 'wb') as f:
        # .content is raw bytes — required when writing binary files
        f.write(ret_img.content)
爬虫基础笔记
requests & BeautifulSoup
a.伪造浏览器向某个地址发送Http请求,获取返回的字符串
pip3 install requests
response = requests.get(url='url')
response.encoding = response.apparent_encoding
response.content #content返回的是bytes,二进制型的数据
response.text #text 返回的是unicode 型的数据,一般是在网页的header中定义的编码形式
#也就是说你如果想要提取文本就用text,但是如果你想要提取图片、文件,就要用到content
b. bs4,解析HTML格式的字符串
pip3 install beautifulsoup4
soup = BeautifulSoup('<html>....</html>',"html.parser")
div = soup.find(name='标签名')
div = soup.find(name='标签名',id='id')
div = soup.find(name='标签名',id='id',class_='xxx') or soup.find(name='标签名',attrs={'id':'id','class':'xxx'})
div.text #获取文本字符串
div.attrs #获取所有属性
div.get(name='href') #获取某一个属性
divs = soup.find_all(name='标签名')
divs = soup.find_all(name='标签名',id='id')
divs = soup.find_all(name='标签名',id='id',class_='xxx') or soup.find_all(name='标签名',attrs={'id':'id','class':'xxx'})
divs的属性为列表,获取列表内信息用for循环