XPath practice 1: scraping 58.com (58同城) second-hand housing data
# Scrape 58.com second-hand housing listings
# Note: with some lxml builds, "from lxml import etree" fails to import directly;
# in that case you can go through the html module and use html.etree instead
# (see the sketch after this example).
import requests
from lxml import etree

if __name__ == "__main__":
    url = 'https://www.58.com/ershoufang/'
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'
    }
    # Fetch the page source
    response = requests.get(url=url, headers=header).text
    # Parse the page
    tree = etree.HTML(response)
    # Don't rush into writing XPath; inspect the page first. Each listing sits in a
    # tr of the table with class "tbimg", so the first step is to grab all tr elements.
    tr_list = tree.xpath('//table[@class="tbimg"]//tr')
    fp = open('./58.txt', 'w', encoding='utf-8')
    # Parse each tr element in the list individually
    for tr in tr_list:
        # Local parsing: a leading ./ means "start from the current element".
        # xpath() returns a list, so [0] takes the first matched text node.
        title = tr.xpath('./td[@class="t"]/a/text()')[0]
        print(title)
        fp.write(title + '\n')
    fp.close()
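If "from lxml import etree" raises an ImportError in your environment (the situation the note at the top of the script refers to), a commonly used workaround is to go through lxml's html module, which re-exports etree. A minimal sketch under that assumption; the rest of the script stays unchanged:

# Workaround sketch: lxml.html re-exports etree, so bind it to the familiar name
from lxml import html
etree = html.etree  # etree.HTML(...) then works exactly as in the example above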
XPath practice 2: downloading image data from https://pic.netbian.com/4kmeinv/
# Task: download the image data on the page https://pic.netbian.com/4kmeinv/
import requests
from lxml import etree
import os

if __name__ == "__main__":
    url = 'https://pic.netbian.com/4kmeinv/'
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'
    }
    # Fetch the page source (the response body)
    page_text = requests.get(url=url, headers=header).text
    # Parse the page
    tree = etree.HTML(page_text)
    # Create the output folder if it does not exist yet
    if not os.path.exists('./t1'):
        os.mkdir('./t1')
    pic_list = tree.xpath('//div[@class="slist"]/ul/li')
    for pic in pic_list:
        pic_name = pic.xpath('./a/img/@alt')[0] + '.jpg'
        # Generic fix for garbled Chinese text: .encode('ISO-8859-1').decode('gbk')
        # (an alternative is sketched after this example)
        pic_name = pic_name.encode('ISO-8859-1').decode('gbk')
        # @src is a relative path, so prepend the site root to get the full image URL
        picture = pic.xpath('./a/img/@src')[0]
        pictures = 'https://pic.netbian.com' + picture
        # Request the image itself; .content returns the raw bytes for persistence
        response_data = requests.get(url=pictures, headers=header).content
        # Save the image into the folder
        img_path = 't1/' + pic_name
        with open(img_path, 'wb') as fp:
            fp.write(response_data)
        print(pic_name, 'downloaded successfully!')
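The .encode('ISO-8859-1').decode('gbk') trick above re-decodes a string that requests decoded with the wrong charset. An alternative, sketched here under the assumption that the site really serves GBK, is to set the response encoding before reading .text, so the whole page is decoded correctly up front:

# Alternative sketch: tell requests which charset to use before touching .text
resp = requests.get(url=url, headers=header)
resp.encoding = 'gbk'   # assumption: the page is GBK-encoded
page_text = resp.text   # alt attributes now come out as readable Chinese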
XPath practice 3: parsing all city names from https://www.aqistudy.cn/historydata/
# Task: parse all city names from https://www.aqistudy.cn/historydata/
import requests
from lxml import etree

if __name__ == "__main__":
    url = 'https://www.aqistudy.cn/historydata/'
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'
    }
    # Fetch the page source (the response body)
    page_text = requests.get(url=url, headers=header).text
    tree = etree.HTML(page_text)
    # Grab every li element of the all-cities list
    # (a variant that also covers the hot-city list is sketched after this example)
    li_list = tree.xpath('//div[@class="bottom"]/ul/div[2]/li')
    all_city_names = []
    for li in li_list:
        city = li.xpath('./a/text()')[0]
        all_city_names.append(city)
    print(all_city_names, len(all_city_names))
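The XPath above only walks the all-cities list (the div[2] branch). If the hot-city list at the top of the same div with class "bottom" is wanted as well, the XPath union operator | lets a single query cover both. A sketch, assuming the hot-city entries sit directly under //div[@class="bottom"]/ul/li:

# Union sketch: one XPath matching both the hot-city list and the all-cities list
a_list = tree.xpath('//div[@class="bottom"]/ul/li/a | //div[@class="bottom"]/ul/div[2]/li/a')
all_city_names = [a.xpath('./text()')[0] for a in a_list]
print(all_city_names, len(all_city_names))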