安装requests
pip install requests
import requests

# Send an HTTP GET request.
# NOTE: the original bound the response to a variable named `re`, which
# shadows the standard-library `re` module — renamed to `response`.
response = requests.get("https://www.baidu.com")

# Inspect the response.
print(response.status_code)  # HTTP status code of the response
# print(response.text)      # body decoded as text (str)
# print(response.content)   # raw body bytes
# print(response.encoding)  # encoding requests used to decode .text
# Expected output: 200
# 200 is the status code meaning the request succeeded.
用爬虫下载孔乙己的文章
import requests
import os

# Work inside the target folder (Windows path from the original tutorial).
os.chdir('F:\\Datawhale\\POA\\爬虫')

# `response` instead of `re`, which shadows the stdlib `re` module.
response = requests.get('https://apiv3.shanbay.com/codetime/articles/mnvdu')
print('网页的状态码为%s' % response.status_code)

# Explicit UTF-8: without it, open() uses the platform default encoding
# (GBK on Chinese Windows), which mangles or rejects the article text.
with open('鲁迅文章.txt', 'w', encoding='utf-8') as file:
    print('正在爬取小说')
    file.write(response.text)
# Output:
# 网页的状态码为200
# 正在爬取小说
re.text用于文本内容的获取、下载
re.content用于图片、视频、音频等内容的获取、下载
import requests

# Download an image.
res = requests.get('https://img-blog.csdnimg.cn/20210424184053989.PNG')
# Open a file named datawhale.png in binary-write ('wb') mode.
# (The original comment said "datawhale.jpg", which did not match the code.)
with open('datawhale.png', 'wb') as pic:
    # res.content is the raw bytes of the response body — use it (not
    # res.text) for binary payloads such as images, video, and audio.
    pic.write(res.content)
HTML解析和提取
import requests

res = requests.get('http://www.baidu.com')
# requests falls back to ISO-8859-1 when the Content-Type header carries no
# charset, which garbles Chinese pages — let it guess from the body instead
# (same technique the Ziroom crawler below already uses).
res.encoding = res.apparent_encoding
print(res.text)
安装BS4 BeautifulSoup(PyPI 上的官方包名是 beautifulsoup4;代码中还用到了 lxml 解析器,需一并安装)
pip install beautifulsoup4 lxml
解析豆瓣读书 Top250
import io
import sys
import requests
from bs4 import BeautifulSoup

### If the printed output comes out garbled, switch stdout's encoding:
# sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030')
###

# A browser-like User-Agent keeps the site from rejecting the request.
request_headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',
}
page = requests.get('http://book.douban.com/top250', headers=request_headers)
# Parse the raw HTML into a navigable tree and dump it.
soup = BeautifulSoup(page.text, 'lxml')
print(soup)
find()方法和find_all()方法:
find()返回符合条件的首个数据 find_all()返回符合条件的所有数据
import io
import sys
import requests
from bs4 import BeautifulSoup

### Change stdout's encoding here if printed text comes out garbled:
# sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030')
###

# Pretend to be a normal browser so douban serves the page.
ua_headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
resp = requests.get('http://book.douban.com/top250', headers=ua_headers)
soup = BeautifulSoup(resp.text, 'lxml')

# find()     -> the first tag that matches
# find_all() -> every tag that matches
print(soup.find('a'))
print(soup.find_all('a'))
除了传入 HTML 标签名称外,BeautifulSoup 还支持按属性(如 id、class)来定位标签
# Locate the <div> tag whose id attribute is 'doubanapp-tip'.
print(soup.find('div', id='doubanapp-tip'))
# Locate every <span> tag whose class is 'rating_nums'.
# (The original comment said <a>, but the code searches 'span'.)
soup.find_all('span', class_='rating_nums')
# 'class' is a Python keyword, so BeautifulSoup uses 'class_' for the HTML class attribute.
自如公寓数据抓取
import requests
from bs4 import BeautifulSoup
import random
import time
import csv
#这里增加了很多user_agent
#能在一定程度上保护爬虫(随机轮换,降低被识别为爬虫的概率)
# Pool of real browser User-Agent strings; each request picks one at random
# so traffic looks less uniform (see random.choice(user_agent) below).
user_agent = [
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)"]
def get_info():
    """Crawl all Ziroom Wuhan listing pages and append one CSV row per flat
    to wuhan_ziru.csv.

    Fixes over the original:
      * range(1, 51) actually covers all 50 pages — range(1, 50) stopped
        at page 49 although the comment promised 50;
      * the CSV is written as UTF-8 so the Chinese header survives on
        platforms whose default encoding is not UTF-8;
      * the per-listing handler catches Exception instead of a bare
        ``except:`` (which would also swallow KeyboardInterrupt).
    """
    csvheader = ['名称', '面积', '朝向', '户型', '位置', '楼层', '是否有电梯', '建成时间', ' 门锁', '绿化']
    with open('wuhan_ziru.csv', 'a+', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(csvheader)
        for i in range(1, 51):  # 50 listing pages in total
            print('正在爬取自如第%s页' % i)
            # Sleep 1-3 seconds between pages so we don't overload the server.
            timelist = [1, 2, 3]
            print('有点累了,需要休息一下啦(¬㉨¬)')
            time.sleep(random.choice(timelist))
            url = 'https://wh.ziroom.com/z/p%s/' % i
            headers = {'User-Agent': random.choice(user_agent)}
            r = requests.get(url, headers=headers)
            # Guess the real charset from the body to avoid mojibake.
            r.encoding = r.apparent_encoding
            soup = BeautifulSoup(r.text, 'lxml')
            all_info = soup.find_all('div', class_='info-box')
            print('开始干活咯(๑><๑)')
            for info in all_info:
                href = info.find('a')
                if href is not None:
                    href = 'https:' + href['href']
                    try:
                        print('正在爬取%s' % href)
                        house_info = get_house_info(href)
                        writer.writerow(house_info)
                    except Exception:
                        # One broken listing must not abort the whole crawl.
                        print('出错啦,%s进不去啦( •̥́ ˍ •̀ू )' % href)
def get_house_info(href):
    """Fetch one Ziroom listing page and return its fields as a list.

    The returned order matches the CSV header written by get_info():
    [名称, 面积, 朝向, 户型, 位置, 楼层, 是否有电梯, 建成时间, 门锁, 绿化]
    """
    time.sleep(1)  # be gentle with the server
    headers = {'User-Agent': random.choice(user_agent)}
    response = requests.get(url=href, headers=headers)
    # Decode ourselves, ignoring undecodable bytes so odd pages don't crash.
    # (The original rebound `response` from a Response object to a str —
    # use a separate name instead.)
    html = response.content.decode('utf-8', 'ignore')
    soup = BeautifulSoup(html, 'lxml')

    name = soup.find('h1', class_='Z_name').text
    # Basic stats block: area, orientation, layout.
    sinfo = soup.find('div', class_='Z_home_b clearfix').find_all('dd')
    area = sinfo[0].text
    orien = sinfo[1].text
    area_type = sinfo[2].text
    # Detail list: location, floor, elevator, build year, lock, greening.
    dinfo = soup.find('ul', class_='Z_home_o').find_all('li')
    location = dinfo[0].find('span', class_='va').text
    loucen = dinfo[1].find('span', class_='va').text
    dianti = dinfo[2].find('span', class_='va').text
    niandai = dinfo[3].find('span', class_='va').text
    mensuo = dinfo[4].find('span', class_='va').text
    lvhua = dinfo[5].find('span', class_='va').text
    # (The original contained a bare list literal of the header names here —
    # a no-op expression statement — removed.)
    room_info = [name, area, orien, area_type, location, loucen, dianti, niandai, mensuo, lvhua]
    return room_info
# Script entry point: run the crawler only when this file is executed
# directly, not when it is imported as a module.
if __name__ == '__main__':
    get_info()