一、Web Crawler Preparation
Crawling in practice
How an ordinary user browses the web: open a browser --> send a request to the target site --> receive the response data --> render it on the page.
How a crawler works: simulate a browser --> send a request to the target site --> receive the response data --> extract the useful data --> save it locally.
The crawling workflow (a minimal sketch of these four steps follows the library notes below):
1. Send a request (the requests module)
2. Get the response data (returned by the server)
3. Parse and extract the data (BeautifulSoup lookups or re regular expressions)
4. Save the data
pip install beautifulsoup4 -t /home/library
(The code below also uses requests and the lxml parser, so install those as well: pip install requests lxml)
The requests module: requests is a simple, easy-to-use HTTP library implemented in Python.
Official documentation: http://cn.python-requests.org/zh_CN/latest/
The BeautifulSoup library: Beautiful Soup is a Python library for extracting data from HTML and XML files.
Documentation: https://beautifulsoup.readthedocs.io/zh_CN/v4.4.0/
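To make the four steps concrete, here is a minimal, self-contained sketch. The target URL https://example.com and the output filename title.txt are placeholders for illustration, not part of the project below:

import requests
from bs4 import BeautifulSoup

# 1. Send a request (identify as a browser via the User-Agent header)
resp = requests.get('https://example.com',
                    headers={'User-Agent': 'Mozilla/5.0'}, timeout=10)
# 2. Get the response data returned by the server
html = resp.text
# 3. Parse and extract the data
soup = BeautifulSoup(html, 'lxml')
title = soup.find('title').text
# 4. Save the data locally
with open('title.txt', 'w', encoding='UTF-8') as f:
    f.write(title)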
二、Crawler Example (Fetching Photos of the 青春有你2 Contestants)
"""
爬虫 青春有你
"""
import json
import os
import re
import requests
import datetime
from bs4 import BeautifulSoup
today=datetime.date.today().strftime('%Y%m%d')
def crawl_player_data():
    """Fetch the Baidu Baike page and return the '参赛学员' (contestants) table."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'
    }
    url = 'https://baike.baidu.com/item/青春有你第二季'
    try:
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.text, 'lxml')
        tables = soup.find_all('table')
        crawl_table_title = '参赛学员'
        # The page puts each table's heading in a <div> that precedes it;
        # return the table whose preceding <h3> contains '参赛学员'.
        for table in tables:
            heading_div = table.find_previous_sibling('div')
            if heading_div is None:
                continue
            for title in heading_div.find_all('h3'):
                if crawl_table_title in title.text:
                    return table
    except Exception as e:
        print(e)
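The table lookup above relies on BeautifulSoup's sibling navigation. A minimal sketch with hypothetical HTML (not taken from the live page) shows how find_previous_sibling pairs a table with the heading <div> before it:

from bs4 import BeautifulSoup

html = '<div><h3>参赛学员</h3></div><table><tr><td>name</td></tr></table>'
soup = BeautifulSoup(html, 'lxml')
table = soup.find('table')
# Walk backwards to the sibling <div> and read its <h3> heading
print(table.find_previous_sibling('div').find('h3').text)  # -> 参赛学员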
def parse_player_data(table_html):
    """
    Parse the contestant info out of the HTML returned from Baidu Baike and
    save it as a JSON file named with today's date in the current directory.
    """
    bs = BeautifulSoup(str(table_html), 'lxml')
    all_trs = bs.find_all('tr')
    error_list = ['\'', '\"']
    stars = []
    for tr in all_trs[1:]:  # skip the header row
        all_tds = tr.find_all('td')
        star = dict()
        star["name"] = all_tds[0].text.replace("\n", "")
        star["link"] = 'https://baike.baidu.com' + all_tds[0].find('a').get('href')
        star["zone"] = all_tds[1].text
        star["constellation"] = all_tds[2].text
        # Strip stray quote characters from the "flower word" column
        flower_word = all_tds[3].text
        for c in error_list:
            flower_word = flower_word.replace(c, '')
        star["flower_word"] = flower_word
        if all_tds[4].find('a') is not None:
            star["company"] = all_tds[4].find('a').text
        else:
            star["company"] = all_tds[4].text
        stars.append(star)
    # Dump the list of dicts directly; round-tripping through str() and
    # replace("'", '"') would break on any value containing a quote.
    with open(today + '.json', 'w', encoding='UTF-8') as f:
        json.dump(stars, f, ensure_ascii=False)
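Each entry in the resulting JSON file has this shape (the values here are placeholders for illustration, not real crawled data):

[
    {
        "name": "contestant name",
        "link": "https://baike.baidu.com/item/...",
        "zone": "birthplace",
        "constellation": "star sign",
        "flower_word": "flower word",
        "company": "agency"
    }
]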
def crawl_player_pics():
    """
    Crawl each contestant's Baidu Baike photo album and download the pictures.
    """
    with open(today + '.json', 'r', encoding='UTF-8') as fr:
        json_array = json.loads(fr.read())
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0'
    }
    for star in json_array:
        name = star['name']
        link = star['link']
        response = requests.get(link, headers=headers)
        soup = BeautifulSoup(response.text, 'lxml')
        # The summary picture on the profile page links to the photo album
        summary_div = soup.find('div', {'class': 'summary-pic'})
        pic_urls = []
        if summary_div and summary_div.find('a'):
            album_addr = summary_div.find('a').get('href')
            response = requests.get('https://baike.baidu.com' + album_addr, headers=headers)
            soup = BeautifulSoup(response.text, 'lxml')
            # Collect every image URL in the album, dropping the /resize
            # suffix to get the full-size picture
            for a in soup.find_all('a', {'class': 'pic-item'}):
                for img in a.find_all('img'):
                    img_addr = img.get('src')
                    if img_addr:
                        pic_urls.append(img_addr.split('/resize')[0])
        down_pic(name, pic_urls)
def down_pic(name, pic_urls):
    """
    Download every picture in pic_urls into a folder named after `name`.
    """
    path = './pics/' + name + '/'
    if not os.path.exists(path):
        os.makedirs(path)
    for i, pic_url in enumerate(pic_urls):
        try:
            pic = requests.get(pic_url, timeout=15)
            filename = str(i + 1) + '.jpg'
            with open(path + filename, 'wb') as f:
                f.write(pic.content)
            print('Downloaded picture %d: %s' % (i + 1, pic_url))
        except Exception as e:
            print('Failed to download picture %d: %s' % (i + 1, pic_url))
            print(e)
            continue
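down_pic can also be used on its own. A hypothetical stand-alone call (the URL is a placeholder, not a real photo address) would save the file as ./pics/test/1.jpg:

down_pic('test', ['https://example.com/photo.jpg'])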
def show_pic_path(path):
    """
    Walk every downloaded picture under `path` and print its file path.
    """
    pic_num = 0
    for (dirpath, dirnames, filenames) in os.walk(path):
        for filename in filenames:
            pic_num += 1
            name = os.path.join(dirpath, filename).replace('\\', '/')
            print("Picture %d: %s" % (pic_num, name))
    print("Crawled %d pictures of the 《青春有你2》 contestants in total" % pic_num)
if __name__ == '__main__':
    html = crawl_player_data()
    parse_player_data(html)
    crawl_player_pics()
    show_pic_path('./pics/')
    print("All data crawled!")