最近看了一些 requests 和 bs4 模块的文档,学习中遇到很多问题,于是写了一个小爬虫练练手。虽然从文档上看用法很简单,但实际使用时还是遇到了很多困难,以至于有些地方改用正则表达式来实现。
爬虫实现了爬取虎扑爆照区前10页的照片
附上爬虫的代码:
import requests
import bs4
import re
import os
# Fetch a URL with the requests module, sending a browser-like User-Agent.
def open_url(url, timeout=30):
    """Return the requests.Response for *url*.

    A desktop-Chrome User-Agent header is sent with the request —
    presumably to avoid being served a bot-blocked page (TODO confirm
    the site actually requires it).

    *timeout* (seconds, default 30) was added so a single stalled
    connection cannot hang the whole crawl forever; the original call
    had no timeout at all.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'}
    res = requests.get(url, headers=headers, timeout=timeout)
    return res
# Download one image and write it into *folder*.
def save_pics(addrs, folder, user_name):
    """Download the image at URL *addrs* and save it to *folder*.

    The file is named ``id=<user_name>_<basename of the URL>`` so that
    images from different posts do not collide on disk.
    """
    name = addrs.split('/')[-1]
    # os.path.join works whether or not *folder* ends with a path
    # separator; the original plain concatenation silently produced a
    # wrong path when the trailing '/' was missing.
    path = os.path.join(folder, 'id=' + user_name + '_' + str(name))
    with open(path, 'wb') as f:
        img = open_url(addrs).content
        f.write(img)
# Collect the absolute URL of every thread listed on one index page.
def find_posts(res):
    """Return a list of absolute thread URLs parsed from *res*.

    Threads live in ``<div class="titlelink box">`` elements; the first
    ``<a>`` inside each holds a site-relative href.
    """
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    title_boxes = soup.find_all('div', class_='titlelink box')
    return ['https://bbs.hupu.com' + str(box.a['href']) for box in title_boxes]
# Scrape one thread page and save every .jpg image it embeds.
def find_pics(each, folder):
    """Download all .jpg images found in the thread at URL *each*.

    The text between the last '/' and the final '.' of the URL
    (presumably the thread id — TODO confirm the URL shape) is used as
    the ``user_name`` tag in the saved file names.
    """
    user_name = each.split('.')[-2].split('/')[-1]
    page = open_url(each)
    # Raw HTML is searched directly; each match yields a 2-tuple where
    # the first group is the full image URL ending in '.jpg'.
    jpg_link = re.compile(r'</p><p><img src="(([^"]+?)\.jpg)\?')
    matches = jpg_link.findall(page.text)
    print(' 找到%d张图片' % len(matches))
    for full_url, _ in matches:
        save_pics(full_url, folder, user_name)
def main():
    """Crawl pages 1-10 of the Hupu selfie board and save their images.

    Fixes over the original loop:
      * ``os.makedirs(..., exist_ok=True)`` — ``os.mkdir`` raised
        ``FileExistsError`` on every run after the first.
      * The original ``range(2, 10)`` loop fully processed only pages
        1-8 and fetched page 9 without ever scraping it; this version
        fetches *and* processes each of pages 1 through 10.
    """
    url = 'https://bbs.hupu.com/selfie'
    folder = 'E:/LF/BZ/'
    os.makedirs(folder, exist_ok=True)
    for i in range(1, 11):
        # Page 1 is the bare board URL; later pages append '-<n>'.
        host = url if i == 1 else url + '-' + str(i)
        print('正在爬取第%d页' % i)
        res = open_url(host)
        addrs = find_posts(res)
        print(' 找到%d张帖子' % len(addrs))
        for each in addrs:
            find_pics(each, folder)
    print('爬取完毕')

if __name__ == '__main__':
    main()