The requests module
- requests is a third-party Python library for sending network requests (installed with pip install requests). It is powerful, simple to use, and efficient.
- Purpose: simulate a browser sending requests.
- Workflow: specify the URL -> send the request -> get the response data -> persist the data.
- GET requests with the requests module:
#Crawl the Sogou homepage
import requests
if __name__ == "__main__":
    # specify the URL
    url = 'https://www.sogou.com/'
    # send the request; get() returns a response object
    response = requests.get(url=url)
    # get the response data; .text holds the response body as a string
    page_text = response.text
    print(page_text)
    # persist the data
    with open('./sogou.html', 'w', encoding='utf-8') as fp:
        fp.write(page_text)
    print('Crawl finished!')
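- If the saved page comes out garbled, requests may have guessed the wrong encoding from the response headers. A minimal sketch of setting it explicitly before reading .text (utf-8 is an assumption; check the page's actual charset):
# response.encoding is the encoding requests uses to decode .text
response.encoding = 'utf-8'  # assumed charset; verify against the page's <meta> tag
page_text = response.text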
- UA: User-Agent (identifies the client that carries the request)
- UA detection: an anti-crawling mechanism; the server checks whether the request's User-Agent looks like a real browser
- UA spoofing: the counter-measure; disguise the crawler's User-Agent as that of a real browser
#A simple web page collector
import requests
if __name__ == "__main__":
    # UA spoofing: pack the User-Agent into a dict and pass it as headers
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4039.400'
    }
    url = 'https://www.sogou.com/web'
    # handle the URL's query parameters: pack them into a dict
    kw = input('enter a word : ')
    param = {
        'query': kw
    }
    # the request goes to the URL with the parameters attached; requests encodes them into the query string
    response = requests.get(url=url, params=param, headers=header)
    page_text = response.text
    fileName = kw + '.html'
    with open(fileName, 'w', encoding='utf-8') as fp:
        fp.write(page_text)
    print(fileName, 'saved successfully!')
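- requests URL-encodes the params dict automatically; response.url shows the final URL that was actually requested, which is handy for verifying the query string:
# print the fully-encoded URL the request was sent to
print(response.url)  # e.g. https://www.sogou.com/web?query=... (depends on kw)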
- POST requests with the requests module:
#Scrape Baidu Translate's suggestion interface
import requests
import json
if __name__ == "__main__":
    # specify the URL
    post_url = 'https://fanyi.baidu.com/sug'
    # UA spoofing
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4039.400'
    }
    # POST parameters are handled the same way as GET parameters
    word = input('enter a word:')
    data = {
        'kw': word
    }
    # send the request
    response = requests.post(url=post_url, data=data, headers=header)
    # get the response data: json() returns the parsed object (only works if the server actually returns JSON)
    dic_json = response.json()
    # persist the data (with closes the file even if an error occurs)
    fileName = word + '.json'
    with open(fileName, 'w', encoding='utf-8') as fp:
        json.dump(dic_json, fp=fp, ensure_ascii=False)
    print('over!')
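- json() raises an exception when the response body is not valid JSON, for example when the server returns an error page. A defensive sketch:
# response.json() raises ValueError on a non-JSON body
# (requests.exceptions.JSONDecodeError in newer versions is a ValueError subclass)
try:
    dic_json = response.json()
except ValueError:
    print('response was not JSON:', response.text[:200])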
- AJAX GET requests with the requests module:
#Scrape a category of the Douban movie ranking
import requests
import json
if __name__ == "__main__":
    # specify the URL (the page loads this data through an AJAX GET request)
    url = 'https://movie.douban.com/j/chart/top_list'
    param = {
        'type': '24',            # category id
        'interval_id': '100:90',
        'action': '',
        'start': '0',            # offset of the first movie returned
        'limit': '2'             # number of movies returned
    }
    # UA spoofing
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36'
    }
    response = requests.get(url=url, params=param, headers=header)
    list_data = response.json()
    # persist the data
    with open('./douban.json', 'w', encoding='utf-8') as fp:
        json.dump(list_data, fp=fp, ensure_ascii=False)
    print('over!')
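- The start/limit pair pages through the ranking, so more data can be collected by advancing start. A sketch fetching three pages of 20 movies each (the page size is an arbitrary choice):
# the endpoint returns a JSON list, so the pages can simply be concatenated
all_movies = []
for start in range(0, 60, 20):
    param['start'] = str(start)
    param['limit'] = '20'
    all_movies.extend(requests.get(url=url, params=param, headers=header).json())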
- Scrape the cosmetics production licence data from the National Medical Products Administration (国家药品监督管理总局) site
  - The data is loaded dynamically
  - The company list on the home page is fetched through an AJAX request
  - Observing the detail-page URLs: the domain is always the same, only the id parameter differs
  - The id values can be taken from the JSON returned by the home page's AJAX request
  - Joining the domain with an id yields the full detail-page URL for a company
  - The detail-page data is also loaded dynamically
#Scrape the NMPA licence data
import requests
import json
if __name__ == "__main__":
    id_list = []         # company ids collected from the list interface
    all_data_list = []   # detail records of every company
    # URL of the AJAX request behind the company list
    list_url = 'http://125.35.6.84:81/xk/itownet/portalAction.do?method=getXkzsList'
    # UA spoofing
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; LCTE; rv:11.0) like Gecko Core/1.70.3756.400 QQBrowser/10.5.4039.400'
    }
    # collect the company ids from the first four pages of the list
    for page in range(1, 5):
        data = {
            'on': 'true',
            'page': str(page),
            'pageSize': '15',
            'productName': '',
            'conditionType': '1',
            'applyname': '',
            'applysn': ''
        }
        json_ids = requests.post(url=list_url, data=data, headers=header).json()
        for dic in json_ids['list']:
            id_list.append(dic['ID'])
    # URL of the AJAX request behind each detail page
    detail_url = 'http://125.35.6.84:81/xk/itownet/portalAction.do?method=getXkzsById'
    for company_id in id_list:
        post_data = {
            'id': company_id
        }
        detail_json = requests.post(url=detail_url, data=post_data, headers=header).json()
        all_data_list.append(detail_json)
    with open('./detail_data.json', 'w', encoding='utf-8') as fp:
        json.dump(all_data_list, fp=fp, ensure_ascii=False)
    print('over!')
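- Firing one request per id in a tight loop can get a crawler blocked; pausing between requests is a common courtesy. A sketch of the detail loop with a delay (the 0.5-second value is an arbitrary choice):
import time
for company_id in id_list:
    detail_json = requests.post(url=detail_url, data={'id': company_id}, headers=header).json()
    all_data_list.append(detail_json)
    time.sleep(0.5)  # assumed delay; tune to the site's tolerance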
Data parsing
- The text to extract is stored either between tags or in a tag's attributes
- Principle: locate the target tags, then extract (parse) the data held in the tags or their attributes (a minimal illustration follows the steps below)
- Steps: specify the URL -> send the request -> get the response data -> parse the data -> persist the data
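- A minimal illustration of the locate-then-extract principle on a made-up snippet of HTML (the full regex crawler appears further below):
import re
html = '<div class="pic"><img src="/images/demo.jpg" alt="demo"></div>'
# locate the img tag and extract the value of its src attribute
src = re.findall('<img src="(.*?)"', html)[0]
print(src)  # /images/demo.jpg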
#Download an image
import requests
if __name__ == "__main__":
    url = 'https://pic.qiushibaike.com/system/pictures/12302/123023649/medium/7PM2J1FJVCOUAKEY.jpg'
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4039.400'}
    # the content attribute returns the response body as raw bytes (binary image data)
    img_data = requests.get(url=url, headers=header).content
    with open('./tu.jpg', 'wb') as fp:
        fp.write(img_data)
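- .content buffers the whole body in memory; for large files requests can stream the download instead. A sketch using the same URL (stream=True and iter_content are standard requests features):
# download in 8 KB chunks instead of loading the whole file at once
with requests.get(url=url, headers=header, stream=True) as response:
    with open('./tu.jpg', 'wb') as fp:
        for chunk in response.iter_content(chunk_size=8192):
            fp.write(chunk)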
- Regex parsing:
#Regex parsing: scrape the images from qiushibaike's image ranking
import requests
import re
import os
if __name__ == "__main__":
    # create a folder to hold all the images
    if not os.path.exists('./qiutuLibs'):
        os.mkdir('./qiutuLibs')
    url = 'https://www.qiushibaike.com/imgrank/page/%d/'
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4039.400'}
    page_start = int(input('enter start page:'))
    page_end = int(input('enter end page:'))
    for pageNum in range(page_start, page_end + 1):  # +1 so the end page is included
        new_url = url % pageNum
        # get the page source as text
        page_text = requests.get(url=new_url, headers=header).text
        # re.S lets . match newlines, so the pattern can span several lines of HTML
        ex = '<div class="thumb".*?<img src="(.*?)" alt.*?</div>'
        img_src_list = re.findall(ex, page_text, re.S)
        for src in img_src_list:
            # the src values are protocol-relative, so prepend https: to complete them
            src = 'https:' + src
            # fetch the binary image data
            img_data = requests.get(url=src, headers=header).content
            # use the last path segment as the file name
            img_name = src.split('/')[-1]
            # storage path for the image
            imgPath = './qiutuLibs/' + img_name
            with open(imgPath, 'wb') as fp:
                fp.write(img_data)
            print(img_name, 'downloaded successfully')
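- Why re.S matters: without it, . stops at newlines, so a pattern that spans several lines of HTML never matches. A tiny demonstration:
import re
text = '<div>\n<img src="/a.jpg">\n</div>'
print(re.findall('<div>(.*?)</div>', text))         # [] since . does not cross the newlines
print(re.findall('<div>(.*?)</div>', text, re.S))   # ['\n<img src="/a.jpg">\n']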
Unfinished; to be continued when I have time.