1.京东商品:
import requests


def getHTMLText(url):
    """Fetch *url* and return the first 1000 characters of the page text.

    On any request failure the fallback string "121212" is returned
    (sentinel kept from the original notes so callers see the same value).
    """
    try:
        # timeout so a hung connection cannot block forever
        r = requests.get(url, timeout=10)
        r.raise_for_status()  # raise HTTPError for 4xx/5xx status codes
        # decode with the encoding guessed from the body, not the header
        r.encoding = r.apparent_encoding
        return r.text[:1000]  # first 1000 characters only
    except requests.RequestException:
        # narrow except (the original bare "except:" would even swallow
        # KeyboardInterrupt); also note the original "///..." comment was
        # invalid Python syntax
        return "121212"


if __name__ == "__main__":
    url = 'https://miaosha.jd.com/#6468246'
    print(getHTMLText(url))
2.亚马逊商品(更换请求头)
import requests

# Plain GET with the default python-requests User-Agent; per the notes
# below, Amazon's anti-scraping check rejects this request.
amazon_url = ('https://www.amazon.cn/dp/B01N34KSKC/ref=sr_1_1'
              '?keywords=waterpik+%E6%B4%81%E7%A2%A7&qid=1583747842&sr=8-1')
r = requests.get(amazon_url)
r.status_code  # REPL-style: inspect the HTTP status code
请求该 API 返回网络错误:网站的反爬虫机制拦截了带默认请求头的请求
r.request.headers
可以看到爬虫很忠实地通过 "User-Agent" 请求头告诉亚马逊"我是一个爬虫"
更改请求头
kv = {'user-agent':'Mozilla/5.0' }
实际代码:
import requests

# Masquerade as a browser: the default User-Agent ("python-requests/x.y")
# is rejected by Amazon, so send a browser-like one instead.
kv = {'user-agent': 'Mozilla/5.0'}
url = 'https://www.amazon.cn/dp/B01N34KSKC/ref=sr_1_1?keywords=waterpik+%E6%B4%81%E7%A2%A7&qid=1583747842&sr=8-1'
# BUG in the original: requests.get(url, kv) passed kv as the second
# *positional* argument, which is params= (the query string), so the
# User-Agent header was never actually changed.  Headers must be passed
# with the headers= keyword.
r = requests.get(url, headers=kv)
r.status_code       # inspect the HTTP status code
r.request.headers   # confirm the User-Agent that was actually sent
3.百度/360搜索关键词提取
百度代码
import requests

kv = {'wd': 'python'}  # Baidu puts the search keyword in the "wd" parameter
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36'}
# "?" separates the path from the query string; /s is the search endpoint.
# requests appends kv to the URL automatically, while headers= replaces
# the default request headers so the site sees a browser User-Agent.
url = 'http://www.baidu.com/s'
# params= keyword used explicitly for consistency with the 360 snippet
# (the original passed kv positionally, which is the same argument)
r = requests.get(url, params=kv, headers=headers)
r.status_code      # HTTP status of the response
r.request.url      # final URL with the query string appended
r.request.headers  # headers actually sent
360代码
import requests

# 360 search uses "q" as its keyword parameter (Baidu uses "wd").
search_params = {'q': 'python'}
browser_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36'}
# "?" separates the path from the query string; /s is the search endpoint
url = 'https://www.so.com/s'
r = requests.get(url, params=search_params, headers=browser_headers)
r.status_code      # HTTP status of the response
r.request.url      # final URL with the query string appended
r.request.headers  # headers actually sent
4.网络图片的爬取与存储
import requests

path = "D:/abc.jpg"
url = "https://img-blog.csdn.net/20170613110251879?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvdzQxMDU4OTUwMg==/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast"
r = requests.get(url)
r.status_code  # REPL-style: inspect the HTTP status code
# r.content is the raw bytes of the response body, so the file is opened
# in binary ("wb") mode.  The with statement closes the file on exit --
# the original's extra f.close() inside the with block was redundant.
with open(path, 'wb') as f:
    f.write(r.content)
os 库:新建目录
正式代码
import requests
import os

url = "https://img-blog.csdn.net/20170613110251879?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvdzQxMDU4OTUwMg==/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast"
root = "D://pics//"
# split on "/" and keep the last segment as the local file name
path = root + url.split('/')[-1]
try:
    if not os.path.exists(root):  # create the target directory if missing
        os.mkdir(root)
    if not os.path.exists(path):  # skip the download if the file exists
        r = requests.get(url)
        # r.content is the response body as bytes; "wb" writes it verbatim.
        # Original bugs fixed here: "f.close" without parentheses was a
        # no-op, and closing is unnecessary inside a with block anyway.
        with open(path, 'wb') as f:
            f.write(r.content)
        print("保存成功:"+url)
    else:
        print("保存失败,文件已存在")
except (requests.RequestException, OSError):
    # narrowed from a bare "except:", which would even swallow
    # KeyboardInterrupt and hide programming errors
    print("爬取失败")
5.IP地址归属地的自动查询
import requests

# Query endpoint: the IP address to look up is appended to the URL.
url = "http://m.ip138.com/ip.asp?ip="
# The site blocks the default python-requests User-Agent (per the note
# in the original), so send a browser-like one.
headers = {"user-agent": "Mozilla/5.0"}
target_ip = '202.204.80.112'
r = requests.get(url + target_ip, headers=headers)
r.status_code  # REPL-style: inspect the HTTP status code
r.text[-500:]  # the lookup result appears near the end of the page
我们在浏览器中的很多交互操作(比如点击)都是通过 URL 链接实现的,所以值得挖掘一下这类接口(API)的参数规律
summary
1.京东:get框架
2.亚马逊:headers修改
3.百度/360:关键字提交,url拼接
4.网络图片爬取 :文件保存
5.IP查询:接口使用,header修改高级版