学以致用
说明
我们学习 Python 这么久,是时候拿它来做些什么了:
- 爬虫 ,爬取网页图片;
- 本代码仅供广大互联网朋友参考学习,欢迎交流;
图片
图片: 下面是我们下载下来的部分截图
代码展示
下面是我们的所有代码 代码片
.
'''
Author:Dew
Time:2020/06/12 18:39
Content:爬取煎蛋网的图片(Crawl pictures from jandan.net)
'''
from bs4 import BeautifulSoup
import urllib.request
from urllib.request import urlretrieve
import requests
import os
import time
import re
import lxml
import random
# Running count of downloaded images; used to build unique output filenames.
i = 0

if __name__ == '__main__':
    # jandan.net rejects requests that carry no browser User-Agent, so one
    # shared header dict is reused for both the listing pages and the images.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
    }

    # ---- Phase 1: scrape the listing pages and collect direct image URLs ----
    list_url = []
    for num in range(127, 132):
        # NOTE(review): num is never 1 in range(127, 132), so this branch is
        # currently dead code; it is kept in case the page range changes.
        if num == 1:
            url = 'http://jandan.net/ooxx'
        else:
            url = 'http://jandan.net/ooxx/MjAyMDA2MTItMTI%d#comments' % (num - 124)

        req = requests.get(url=url, headers=headers)
        req.encoding = 'utf-8'
        bf = BeautifulSoup(req.text, 'lxml')
        # Each "view original" anchor holds a protocol-relative link
        # ("//img.example/..."); strip the slashes and prepend http://.
        for each in bf.find_all(class_='view_img_link'):
            list_url.append('http://' + each.get('href').replace('//', ''))
    print(list_url)

    # ---- Phase 2: download every collected image ----
    # exist_ok=True avoids the FileExistsError the original raised whenever
    # the script was run a second time with 'images' already present.
    os.makedirs('images', exist_ok=True)
    os.chdir('images')
    for target_url in list_url:
        # The original built a headers dict here but never attached it to the
        # request; wrap the URL in a Request so the browser UA is actually sent.
        img_request = urllib.request.Request(url=target_url, headers=headers)
        # Context manager guarantees the HTTP response is closed after reading
        # (the original leaked the connection).
        with urllib.request.urlopen(img_request) as response:
            img = response.read()
        i += 1
        with open('images%s.jpg' % i, 'wb') as f:
            f.write(img)
        time.sleep(1)  # be polite to the server: at most one request per second
    print('下载完成!')