# Aimed at students with no web-scraping background: first the basics of
# crawling, then the BeautifulSoup parser, then a few crawler projects, and
# possibly a little on the Scrapy framework. The main goal is to organize
# the material, primarily through code.
#encoding:utf-8
# Basic GET request: fetch a page and inspect the common Response attributes.
import requests

# NOTE: renamed from `re` — that name shadows the stdlib `re` module.
response = requests.get("http://www.*****.com")
print(response)  # <Response [200]>
print(type(response))  # <class 'requests.models.Response'>
print("status_code", response.status_code)  # 200
print("encoding:%s" % response.encoding)  # declared body encoding  # encoding:gbk
print(response.cookies)  # cookies sent back by the server  # <RequestsCookieJar[<Cookie ECS[visit_times]=1 for www.****.com/>,  # avoid advertising
print("text", response.text)  # body decoded to str -- used very often
print("content", response.content.decode("gbk"))  # raw bytes, decoded manually -- used very often
# --- requests: GET ---
# GET with query parameters and a browser-like User-Agent header.
payload = {"key1": "value1", "key2": "value2"}
# Pass query parameters via params=
# re=requests.get("http://httpbin.org/get",params=payload)
# print(re.url) # http://httpbin.org/get?key1=value1&key2=value2
# print(re.text)
# Add a request header to masquerade as a browser
header = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36',
}
response = requests.get("http://httpbin.org/get", params=payload, headers=header)
print(response.text)
# NOTE: renamed from `json` — that name shadows the stdlib `json` module.
body = response.json()
print(type(body))  # <class 'dict'> -- the response body parsed into a dict
print("json", body)  # {'args': {'key1': 'value1', 'key2': 'value2'}, 'headers': {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate', 'Connection': 'close', 'Host': 'httpbin.org', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'}, 'origin': '116.232.80.230', 'url': 'http://httpbin.org/get?key1=value1&key2=value2'}
# Raw (undecoded) response content: request with stream=True and read .raw.
resp = requests.get("https://github.com/timeline.json", stream=True)
print(resp.raw.read(50))  # first 50 bytes of the raw stream
# Download music: stream the response and write it to disk in chunks.
# FIX: the with/for bodies were missing their indentation (SyntaxError as written).
response = requests.get('http://zhangmenshiting.qianqian.com/data2/music/32f1890582a08e81c51519f9d6764801/540295726/540295726.mp3?xcode=a2d048430dfe291026bdb4a8ac0cec0f', stream=True)
# print(response.text)
# print(response.raw.read(50))
with open('a1.mp3', 'wb') as file:
    for chunk in response.iter_content(1024 * 10):  # 10 KiB chunks
        file.write(chunk)
#
# Download an image: stream the body straight to a file.
# FIX 1: the with/for bodies were missing their indentation (SyntaxError as written).
# FIX 2: removed print(response.text) / print(response.raw.read(50)) — accessing
# .text consumes (and caches) the stream, so the subsequent raw.read() returned
# b'' and the text print dumped binary data through the wrong decoding.
response = requests.get('https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1521960449488&di=717ebe22e5b254c0751f22ca92076370&imgtype=0&src=http%3A%2F%2Fp3.gexing.com%2FG1%2FM00%2F66%2F03%2FrBACE1PQWyXCVidOAAaY9QAD3h0020.png', stream=True)
with open('meizi.png', 'wb') as file:
    for chunk in response.iter_content(1024 * 10):  # 10 KiB chunks
        file.write(chunk)
# Receiving and sending cookies.
resp = requests.get('http://www.ibeifeng.com')
print(resp.cookies)  # <RequestsCookieJar[<Cookie ECS[visit_times]=1 for www.ibeifeng.com/>,
# <Cookie ECS_ID=305f5e3348a13e953461281c87952ec3328df044 for www.ibeifeng.com/>,
# <Cookie real_ipd=116.232.80.230 for www.ibeifeng.com/>,
# <Cookie route=f8c75d468de16d5fae6f24965c2aa4a7 for www.ibeifeng.com/>]>
# Send a cookie along with the request via cookies=
resp = requests.get("http://httpbin.org/cookies", cookies={'name': "joe"})
print(resp.text)
# {
#   "cookies": {
#     "name": "joe"
#   }
# }