爬取今日头条图片(bs4方法)
一,初步爬取
在这里我们不难看出该网页的请求方法是GET请求。(本来准备爬取美女,但是后续发现,会出现图片违规,后面改成了爬取风景图片,爬取美女图片只需要改变url即可)
对于爬取图片,我一开始便是直接想到用bs4解析器进行爬取。
import requests
from bs4 import BeautifulSoup
class Baidu_photo_get(object):
    """Scrape the first Toutiao image-search result page and save every image.

    Fetches the search page with browser-like headers, extracts the ``src`` of
    every ``<img>`` tag via BeautifulSoup, and writes each image to ``./photo/``
    under a sequential Chinese filename.
    """

    def __init__(self):
        # Sequential counter used to name the saved image files.
        self.number = 1
        # First result page (page_num=0) of the Toutiao image search.
        self.url = "https://so.toutiao.com/search?keyword=%E9%A3%8E%E6%99%AF&pd=atlas&dvpf=pc&aid=4916&page_num=0&search_json=%7B%22from_search_id%22%3A%22202311012132564BC1AED711837A44558F%22%2C%22origin_keyword%22%3A%22%E7%BE%8E%E5%A5%B3%22%2C%22image_keyword%22%3A%22%E7%BE%8E%E5%A5%B3%22%7D&source=input"
        # Browser UA + session cookie so the site serves the normal HTML page.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36 Edg/118.0.2088.76',
            'Cookie': 'msToken=inSIYQCMcgQBS_WUQq9eZGwZYXJ3aJH4oisRFRvTPXjWLetIE2Fgy0gygqV4YwjZyVchLEh6ublkb_9a1EB_ZTaFa52rRtlNlBJPIYc7; tt_webid=7295968954378421798; _ga_QEHZPBE5HH=GS1.1.1698725159.1.0.1698725159.0.0.0; _ga=GA1.1.4784048.1698725160; ttwid=1%7CZHZf2H84ODU8HGvNgE6R7ItycHQGup5OoD9-LskKIik%7C1698725160%7C91e339d248bafc5260a492036ed670625bbd96ea3d17febdcb9ccc16dfb39bb1; __ac_nonce=065407d2c00b3483a557b; __ac_signature=_02B4Z6wo00f017yueGAAAIDA6Szz.5tt6XO8jnzAAIp01c; __ac_referer=https://www.toutiao.com/; _tea_utm_cache_4916=undefined; _S_WIN_WH=1488_742; _S_DPR=1.25; _S_IPAD=0; s_v_web_id=verify_lodt3rdk_jZ1cBKNS_em8n_4wCG_8hSi_foqXnR2uODsR'
        }

    def data_get_index(self):
        """Request the search page and return its HTML, or None on a non-200 status."""
        resp = requests.get(url=self.url, headers=self.headers)
        if resp.status_code == 200:
            return resp.text
        return None

    def parse_data_index(self, response):
        """Extract every <img> src from *response* and download each image.

        BUG FIX: the original crashed in BeautifulSoup when the request
        failed (response is None) and passed None to requests.get for
        <img> tags that have no src attribute; both cases are now skipped.
        """
        if response is None:
            return
        soup = BeautifulSoup(response, 'lxml')
        for tag in soup.find_all('img'):
            url = tag.get('src')
            if url:  # some <img> tags carry no src — skip them
                self.save_photo_data(url)

    def save_photo_data(self, url):
        """Download one image and save it as ./photo/第N张照片.jpeg (dir must exist)."""
        file_data = f'第{self.number}张照片'
        with open('./photo/' + file_data + '.jpeg', 'wb') as f:
            img = requests.get(url).content
            f.write(img)
            print(f'{file_data}图片--保存完毕!')
        self.number += 1

    def run(self):
        """Fetch the page and save every image found on it."""
        response = self.data_get_index()
        self.parse_data_index(response)
if __name__ == '__main__':
    # Script entry point: build the scraper and download the first result page.
    spider = Baidu_photo_get()
    spider.run()
结果发现:只爬取到了40张图片,但是我们通过实际分析网页图片发现远不止40张图片,因此,我们需要接着对页面进行一个更加深度的分析。
二,进阶爬取
我们再次回到目标网站的页面,然后对页面进行一个分析,发现每次滑动到出现下一组照片时,这里的page_num会逐渐增加。
因此,如果要爬取到完整的页面所有图片,需要获取到这里所有的字符串参数,将这些参数转成url地址,此时才能获取到所有图片。
在这里我们通过初步的爬取,发现一组图片一共是40张,因此,在这里我们先简单爬取4组图片,所以在这里我们需要用到以下这个库
from urllib.parse import urlencode
它可以将负载的参数转化为对应的后续图片对应的地址,这个page_num需要我们自己去手动构建。进阶代码如下:
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlencode
class Toutiao_data_get(object):
    """Scrape several pages of Toutiao image-search results and save every image.

    Builds one URL per result page with urlencode, extracts each ``<img>``
    tag's ``src`` via BeautifulSoup, and writes the images to ``./photo/``.
    """

    def __init__(self):
        # Sequential counter used to name the saved image files.
        self.number = 1
        # Kept for reference only; per-page URLs are built by url_data_get().
        self.url = "https://so.toutiao.com/search?keyword=%E9%A3%8E%E6%99%AF&pd=atlas&dvpf=pc&aid=4916&page_num=0&search_json=%7B%22from_search_id%22%3A%22202311012132564BC1AED711837A44558F%22%2C%22origin_keyword%22%3A%22%E7%BE%8E%E5%A5%B3%22%2C%22image_keyword%22%3A%22%E7%BE%8E%E5%A5%B3%22%7D&source=input"
        # Browser UA + session cookie so the site serves the normal HTML page.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36 Edg/118.0.2088.76",
            "Cookie":"msToken=inSIYQCMcgQBS_WUQq9eZGwZYXJ3aJH4oisRFRvTPXjWLetIE2Fgy0gygqV4YwjZyVchLEh6ublkb_9a1EB_ZTaFa52rRtlNlBJPIYc7; tt_webid=7295968954378421798; _ga=GA1.1.4784048.1698725160; _tea_utm_cache_4916=undefined; _S_DPR=1.25; _S_IPAD=0; s_v_web_id=verify_lodt3rdk_jZ1cBKNS_em8n_4wCG_8hSi_foqXnR2uODsR; ttwid=1%7CZHZf2H84ODU8HGvNgE6R7ItycHQGup5OoD9-LskKIik%7C1698845574%7Ce105d7a136bdffc51f9cb61807cd7ff129a7e164783e088dc19e57e95b345b1f; _ga_QEHZPBE5HH=GS1.1.1698845574.2.0.1698845623.0.0.0; _S_WIN_WH=659_742; __ac_nonce=065425afa003b701c73a7; __ac_signature=_02B4Z6wo00f0196I-ogAAIDAiwpxFB5jZHPeqP4AAJLpd0; __ac_referer=https://so.toutiao.com/search?keyword=%E7%BE%8E%E5%A5%B3&pd=atlas&dvpf=pc&aid=4916&page_num=0&search_json=%7B%22from_search_id%22%3A%22202311012132564BC1AED711837A44558F%22%2C%22origin_keyword%22%3A%22%E7%BE%8E%E5%A5%B3%22%2C%22image_keyword%22%3A%22%E7%BE%8E%E5%A5%B3%22%7D&source=input"
        }

    def url_data_get(self, num):
        """Build and return the search URL for result page *num*.

        BUG FIX: the original set 'page_num' to a literal {} and called
        .format(num) on the urlencoded string — but urlencode percent-encodes
        the braces to %7B%7D, so .format() never substituted the page number
        and every page requested was identical. Passing num directly produces
        a correct page_num query parameter.
        """
        parms = {
            'keyword': '风景',
            'pd': 'atlas',
            'dvpf': 'pc',
            'aid': '4916',
            'page_num': num,
            'search_json': '{"from_search_id":"202311012132564BC1AED711837A44558F","origin_keyword":"美女","image_keyword":"美女"}',
            'source': 'input',
        }
        return "https://so.toutiao.com/search?" + urlencode(parms)

    def data_get_index(self, num):
        """Request result page *num*; return its HTML, or None on a non-200 status."""
        resp = requests.get(url=self.url_data_get(num), headers=self.headers)
        if resp.status_code == 200:
            return resp.text
        return None

    def parse_data_index(self, response):
        """Extract every <img> src from *response* and download each image."""
        if response is None:
            # Request failed (non-200); nothing to parse on this page.
            return
        soup = BeautifulSoup(response, 'lxml')
        for tag in soup.find_all('img'):
            url = tag.get('src')
            if url:  # some <img> tags carry no src — skip them
                self.save_data_get(url)

    def save_data_get(self, url):
        """Download one image and save it as ./photo/第N张照片.jpeg (dir must exist)."""
        file_data = f'第{self.number}张照片'
        with open('./photo/' + file_data + '.jpeg', 'wb') as f:
            img = requests.get(url).content
            f.write(img)
            print(f'{file_data}图片--保存完毕!')
        self.number += 1

    def run(self):
        """Fetch result pages 1-4 and save every image found on them."""
        for num in range(1, 5):
            response = self.data_get_index(num)
            self.parse_data_index(response)
if __name__ == '__main__':
    # Script entry point: build the scraper and download pages 1-4.
    spider = Toutiao_data_get()
    spider.run()
爬取的数据如下图所示:
最后,能够正确的实现图片的爬取。