Python crawler: XPath parsing examples (ZHOU125disorder)

Scraping 58.com (58同城) second-hand housing listings with XPath

# Import the requests module and the etree module
import requests
from lxml import etree

if __name__ == '__main__':
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }
    # Fetch the page source
    # Specify the URL
    url = 'https://jingzhou.58.com/ershoufang/?utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d100000-00d9-7ff4-c206-49eb36b0df99&ClickID=4'
    page_text = requests.get(url=url, headers=headers).text
    # Parse the data
    tree = etree.HTML(page_text)
    place_list = tree.xpath('//div[@class="property"]')
    print(place_list)
    # Use a context manager so the file is closed automatically
    with open('58同城.txt', 'w', encoding='utf-8') as fp:
        for place in place_list:
            # The leading . makes the XPath relative to the current element
            title = place.xpath('.//div[@class="property-content-title"]/h3/text()')[0]
            fp.write(title + '\n')
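
A note on the leading dot: `place.xpath('.//...')` searches only the current element's subtree, while a path starting with `//` always searches from the document root, even when called on an element. A minimal, self-contained sketch with synthetic HTML (not real 58.com markup) showing the difference:

# Sketch: relative vs. absolute XPath on an element (synthetic HTML)
from lxml import etree

html = '''
<html><body>
<div class="property"><h3>House A</h3></div>
<div class="property"><h3>House B</h3></div>
</body></html>
'''
tree = etree.HTML(html)
first = tree.xpath('//div[@class="property"]')[0]
print(first.xpath('.//h3/text()'))   # ['House A']            -- relative: only this div's subtree
print(first.xpath('//h3/text()'))    # ['House A', 'House B'] -- absolute: the whole document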

Scraping wallpaper images from pic.netbian.com (4K anime section)

# Import the requests module, os, and the etree module
import requests
import os
from lxml import etree

if __name__ == '__main__':
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }
    # Fetch the page source
    # Specify the URL (4K anime section)
    url = 'http://pic.netbian.com/4kdongman/'
    page_text = requests.get(url=url, headers=headers).text

    # img_name came out garbled, so the response data needs re-encoding.
    # A first attempt that did NOT work here:
    # response = requests.get(url=url, headers=headers)
    # response.encoding = 'utf-8'   # manually set the response encoding
    # page_text = response.text     # print(img_name, img_src) still showed garbled names
    # (a response-level fix is sketched after this code block)

    # Parse the data
    tree = etree.HTML(page_text)
    img_list = tree.xpath('//div[@class="slist"]/ul/li')

    if not os.path.exists('./小姐姐'):
        os.mkdir('./小姐姐')
    for img in img_list:
        img_src = 'http://pic.netbian.com' + img.xpath('./a/img/@src')[0]
        img_name = img.xpath('./a/img/@alt')[0] + '.jpg'
        # requests decoded the page as ISO-8859-1, but it is actually GBK-encoded,
        # so recover the raw bytes and decode them as GBK
        img_name = img_name.encode('iso-8859-1').decode('gbk')
        print(img_name, img_src)
        # Persist the image to disk
        img_data = requests.get(url=img_src, headers=headers).content
        img_path = '小姐姐/' + img_name
        with open(img_path, 'wb') as fp:
            fp.write(img_data)
            print(img_name + ' scraped')
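
Instead of re-encoding every extracted string, the charset can be fixed once on the response itself. This is a sketch, assuming the site serves GBK-encoded pages without declaring a charset in its HTTP headers (which is why requests falls back to ISO-8859-1); `response.apparent_encoding` lets requests guess the charset from the body:

# Sketch: fix the encoding once at the response level (assumes the page is GBK/GB2312)
import requests

headers = {'User-Agent': 'Mozilla/5.0'}
response = requests.get('http://pic.netbian.com/4kdongman/', headers=headers)
response.encoding = response.apparent_encoding   # detect the real charset from the body
page_text = response.text                        # strings decode correctly from here on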

- Project requirement: parse all city names from https://www.aqistudy.cn/historydata/

import requests
from lxml import etree

url = 'https://www.aqistudy.cn/historydata/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
response = requests.get(url=url, headers=headers)
# Check the encoding requests detected for the page
print(response.encoding)
page_text = response.text
tree = etree.HTML(page_text)
# The | operator unions two node-sets; XPath deduplicates the result, so even
# though /ul//li already covers everything /ul/li matches, each <li> appears once
li_list = tree.xpath('//div[@class="bottom"]/ul/li | //div[@class="bottom"]/ul//li')
for li in li_list:
    city_name = li.xpath('./a/text()')[0]
    city_url = 'https://www.aqistudy.cn/historydata/' + li.xpath('./a/@href')[0]
    print(city_name, city_url)
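
Because XPath node-sets are sets, the `|` union above cannot yield a city twice even though its second branch subsumes the first. A self-contained sketch with synthetic HTML (hypothetical markup, not the actual aqistudy page) demonstrating the deduplication:

# Sketch: XPath's | operator deduplicates overlapping matches (synthetic HTML)
from lxml import etree

html = '<div class="bottom"><ul><li><a href="beijing.html">Beijing</a></li><li><a href="shanghai.html">Shanghai</a></li></ul></div>'
tree = etree.HTML(html)
# Both branches of the union match the same two <li> nodes
li_list = tree.xpath('//div[@class="bottom"]/ul/li | //div[@class="bottom"]/ul//li')
print(len(li_list))   # 2, not 4: the union is deduplicated and in document order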