Preface
I will keep updating these Python crawler notes. If you happen to have any good suggestions, please share them with me; I would be very grateful.
Fetching Pages
- Fetching a page that does not block crawlers
import requests

url = "https://item.jd.com/4140539.html"
try:
    r = requests.get(url)
    r.raise_for_status()  # raises an exception on a non-2xx response, jumping straight to except
    # print(r.status_code)  # uncomment to see 200, meaning the page responded correctly
    r.encoding = r.apparent_encoding  # guess the encoding from the page content
    print(r.text[:100])
except:
    print("fetch failed")
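The try/except pattern above can be factored into a reusable helper. Below is a minimal sketch (the function name getHTMLText and the 30-second timeout are my own choices, not from the original notes); the timeout ensures a hung connection fails fast instead of blocking forever:

import requests

def getHTMLText(url):
    """Fetch a page and return its text, or an error message on failure."""
    try:
        r = requests.get(url, timeout=30)  # fail fast if the server hangs
        r.raise_for_status()               # raise on non-2xx status codes
        r.encoding = r.apparent_encoding   # guess encoding from the content
        return r.text
    except requests.RequestException:
        return "fetch failed"

print(getHTMLText("https://item.jd.com/4140539.html")[:100])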
- Fetching a page that blocks crawlers (by checking headers)
import requests

url = "https://www.amazon.cn/dp/B079FLYB49/ref=cngwdyfloorv2_recs_0/460-7004898-2910845?pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=desktop-2&pf_rd_r=MTJFMQEKZR9180R1YDRV&pf_rd_r=MTJFMQEKZR9180R1YDRV&pf_rd_t=36701&pf_rd_p=7149a3bb-2ee6-4f99-92eb-d87852365f8c&pf_rd_p=7149a3bb-2ee6-4f99-92eb-d87852365f8c&pf_rd_i=desktop"
try:
    kv = {'user-agent': 'Mozilla/5.0'}  # pretend to be a regular browser
    r = requests.get(url, headers=kv)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    print(r.text[1000:2000])
except:
    print("fetch failed")
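To confirm that the custom User-Agent was actually sent, you can inspect the prepared request attached to the response object. A quick check, reusing the same kv dict as above:

import requests

kv = {'user-agent': 'Mozilla/5.0'}
r = requests.get("https://www.amazon.cn", headers=kv)
# r.request is the PreparedRequest that was sent; its headers show the final User-Agent
print(r.request.headers['User-Agent'])  # -> Mozilla/5.0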
- Submitting search keywords to Baidu and 360
import requests

url = "http://www.baidu.com/s"
keyword = "Python"
try:
    kv = {'wd': keyword}    # Baidu's search interface takes the keyword in the 'wd' parameter
    r = requests.get(url, params=kv)
    print(r.request.url)    # show the URL that was actually requested
    r.raise_for_status()
    print(len(r.text))      # length of the returned search page
except:
    print("fetch failed")
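The heading also mentions 360 search; the pattern is the same, except that 360's interface (so.com) takes the keyword in a q parameter instead of wd. A sketch, assuming that interface is unchanged:

import requests

url = "http://www.so.com/s"
keyword = "Python"
try:
    kv = {'q': keyword}     # 360 search uses 'q' as the keyword parameter
    r = requests.get(url, params=kv)
    print(r.request.url)    # show the URL that was actually requested
    r.raise_for_status()
    print(len(r.text))
except:
    print("fetch failed")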
- Downloading and saving images from the web
import requests
import os

url = "http://image.nationalgeographic.com.cn/2017/1010/20171010012304725.jpg"
root = "E://pics//"
path = root + url.split('/')[-1]  # name the file after the last segment of the URL
try:
    if not os.path.exists(root):
        os.mkdir(root)
    if not os.path.exists(path):
        r = requests.get(url)
        with open(path, 'wb') as f:
            f.write(r.content)    # r.content is the binary body of the response
        print("file saved")
    else:
        print("file already exists")
except:
    print("fetch failed")
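Note that r.content loads the whole file into memory at once, which is wasteful for large images. A variant using requests' streaming mode (stream=True with iter_content) writes the file in chunks; the 8 KB chunk size is an arbitrary choice of mine:

import requests
import os

url = "http://image.nationalgeographic.com.cn/2017/1010/20171010012304725.jpg"
root = "E://pics//"
path = root + url.split('/')[-1]
try:
    os.makedirs(root, exist_ok=True)    # create the directory tree if missing
    r = requests.get(url, stream=True)  # don't read the body into memory up front
    r.raise_for_status()
    with open(path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=8192):  # write 8 KB at a time
            f.write(chunk)
    print("file saved")
except:
    print("fetch failed")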
- Automatic lookup of an IP address's location
import requests

url = "http://m.ip138.com/ip.asp?ip="
try:
    r = requests.get(url + '202.204.80.112')  # append the IP to query its location
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    print(r.text[-500:])  # the location info is near the end of the page
except:
    print("fetch failed")
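The same query can be written with a params dict instead of string concatenation, which lets requests handle URL encoding. A sketch wrapping it in a hypothetical helper (lookup_ip is my name, and it assumes the ip138 interface still accepts an ip query parameter):

import requests

def lookup_ip(ip):
    """Query ip138 for the location of an IP address (hypothetical helper)."""
    url = "http://m.ip138.com/ip.asp"
    r = requests.get(url, params={'ip': ip}, timeout=30)  # requests builds ?ip=... itself
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    return r.text[-500:]

print(lookup_ip('202.204.80.112'))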