【千锋Python2205班10.21笔记-day05-xpath语法和指令系统(一阶段)】

01-岗位数据爬虫

import requests
from re import findall
from json import loads

# 01 - Job-listing scraper (51job): fetch one page of search results,
# pull the JSON payload embedded in the HTML, and print basic info for
# every job posting. Uses file-level imports: requests, findall, loads.
headers = {
    # Session cookie captured from a logged-in browser; the site rejects
    # cookieless requests. (Originally split across two lines by a bad
    # paste — it must be a single string literal.)
    'cookie': '_uab_collina=166373032503108913870333; guid=f07d710a6c75cd3514a5280294353a54; nsearch=jobarea%3D%26%7C%26ord_field%3D%26%7C%26recentSearch0%3D%26%7C%26recentSearch1%3D%26%7C%26recentSearch2%3D%26%7C%26recentSearch3%3D%26%7C%26recentSearch4%3D%26%7C%26collapse_expansion%3D; search=jobarea%7E%60090200%7C%21ord_field%7E%600%7C%21recentSearch0%7E%60090200%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FA%CA%FD%BE%DD%B7%D6%CE%F6%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21recentSearch1%7E%60090200%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FAhr%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21recentSearch2%7E%60000000%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FAhr%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21recentSearch3%7E%60000000%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FA%CA%FD%BE%DD%B7%D6%CE%F6%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21; slife=lowbrowser%3Dnot%26%7C%26; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22f07d710a6c75cd3514a5280294353a54%22%2C%22first_id%22%3A%221835e0e15461639-0a46ef7ec55af38-1a525635-1296000-1835e0e15471a98%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22identities%22%3A%22eyIkaWRlbnRpdHlfY29va2llX2lkIjoiMTgzNWUwZTE1NDYxNjM5LTBhNDZlZjdlYzU1YWYzOC0xYTUyNTYzNS0xMjk2MDAwLTE4MzVlMGUxNTQ3MWE5OCIsIiRpZGVudGl0eV9sb2dpbl9pZCI6ImYwN2Q3MTBhNmM3NWNkMzUxNGE1MjgwMjk0MzUzYTU0In0%3D%22%2C%22history_login_id%22%3A%7B%22name%22%3A%22%24identity_login_id%22%2C%22value%22%3A%22f07d710a6c75cd3514a5280294353a54%22%7D%2C%22%24device_id%22%3A%221835e0e15461639-0a46ef7ec55af38-1a525635-1296000-1835e0e15471a98%22%7D; privacy=1666315707; partner=51jobhtml5; acw_tc=ac11000116663157236133228e00e1446dda57d4b6ce3992bd018af60342f0; acw_sc__v2=6351f5cb9bec157b8349acb257adf72ca035506c; acw_sc__v3=6351f694c99a8e4e83cf324d4f4f1cfc197bc956; ssxmod_itna=YqUxBDuDgD973eBPGKiQ23TwfG8WYq1eD0e=QGx0vHeGzDAxn40iDtPoNO7ti3bRDxa1YhDumomWEGnR3TKGGara+W2BWeD=xYQDwxYoDUxGtDpxG6YK+DYYkDt4DTD34DYDi2=DBBv/QD7gU//MjM9f+xxBo/tkQSUk4DCE4DbrpBDB==xBQDXwjXIWX4xnRYi7oawAv9SGPtzioe77+YYGGtjpmmtRqPf0D=WAwkX0mmxDfxiKqxD=; ssxmod_itna2=YqUxBDuDgD973eBPGKiQ23TwfG8WYq1eD0e=QDnF8KqDs1YeDL7ibtyS4nbcicjoBK70i5HBKSfRmOM/PCo6vDaBRGTjKmSj++OFLQpgQ4UI9xlRx12OU401nj/azOmuKb4x/1YUwwscnysFPBsXgnskPH8hgeqq97=ED07mB4DLxG7mYD==',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}

# Search url: area 090200, url-encoded keyword "数据分析", page 1.
url = 'https://search.51job.com/list/090200,000000,0000,00,9,99,%25E6%2595%25B0%25E6%258D%25AE%25E5%2588%2586%25E6%259E%2590,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
response = requests.get(url, headers=headers)

# print(response.text)

# 1. Extract the JSON payload embedded in the page's script tag.
#    (dots escaped so the pattern matches the literal variable name)
result = findall(r'window\.__SEARCH_RESULT__ = (.+?)</script>', response.text)
if result:
    json_data = result[0]

    # print(json_data)

else:
    print('获取数据失败')
    raise ValueError('window.__SEARCH_RESULT__ not found in page')

# 2. Parse the JSON and print one line per job posting.
data = loads(json_data)
for x in data['engine_jds']:
    name = x['job_name']
    company = x['company_name']
    companytype = x['companytype_text']
    providesalary = x['providesalary_text']
    print(name, providesalary, company, companytype)

02-爬京东评论数据

import requests
from bs4 import BeautifulSoup
from selenium.webdriver import Chrome,ChromeOptions
from time import sleep


# https://search.jd.com/Search?keyword=电脑&wq=电脑&pvid=f7face95232a45289adad6b223e458bd&page=1

# https://search.jd.com/Search?keyword=电脑&wq=电脑&pvid=f7face95232a45289adad6b223e458bd&page=2

# https://search.jd.com/Search?keyword=电脑&wq=电脑&pvid=f7face95232a45289adad6b223e458bd&page=3

def get_one_page_data(page, goods='电脑'):
    """Fetch one JD search-result page and scrape every listed product.

    :param page: 1-based page number of the search results.
    :param goods: search keyword (default: '电脑').
    """
    # 1. Request the given result page for the given keyword.
    url = f'https://search.jd.com/Search?keyword={goods}&wq={goods}&pvid=f7face95232a45289adad6b223e458bd&page={page}'
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
    }
    response = requests.get(url, headers=headers)

    # 2. Parse the page and collect every product's detail-page url
    #    (hrefs are protocol-relative, hence the 'https:' prefix).
    soup = BeautifulSoup(response.text, 'lxml')
    all_a = soup.select('#J_goodsList>ul>li .p-img>a')
    all_url = ['https:'+x.attrs['href'] for x in all_a]

    # 3. Visit each detail page and scrape its data.
    for x in all_url:
        get_one_goods_info(x)


def get_one_goods_info(url):
    """Open one JD product page in the shared browser, switch to the
    review tab and print the product name and good-review count.

    :param url: absolute url of the product detail page.

    Relies on the module-level selenium browser ``b`` created in
    ``__main__``.
    """
    # Load the page in the shared browser.
    b.get(url)

    # Scroll down in two steps so lazily-loaded sections render.
    b.execute_script('window.scrollBy(0, 500)')
    sleep(1)
    b.execute_script('window.scrollBy(0, 500)')
    sleep(1)

    # Find the "商品评价" (product reviews) tab and click it.
    # NOTE(review): find_elements_by_css_selector is the selenium 3 API;
    # selenium 4 replaces it with b.find_elements(By.CSS_SELECTOR, ...).
    all_li = b.find_elements_by_css_selector('#detail>div>ul>li')
    for x in all_li:
        if x.text.startswith('商品评价'):
            x.click()
            break
    else:
        # for/else: loop ended without break -> no review tab on this page.
        print('该商品无评价信息!')
        return

    # Parse the updated page source: product name from the spec list,
    # good-review count from the review filter bar.
    sleep(1)
    soup = BeautifulSoup(b.page_source, 'lxml')
    all_li = soup.select('.parameter2>li')
    name = all_li[0].text.split(':')[-1]
    good_comment = soup.select_one('.filter-list>li:nth-child(5)>a').text
    print(name, good_comment)


if __name__ == '__main__':
    # Disable image loading to speed up page loads.
    options = ChromeOptions()
    options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})
    # `b` is used as a module-level global by get_one_goods_info above.
    b = Chrome(options=options)

    get_one_page_data(1)

03-认识xpath和xml数据

lxml是Python基于xpath做数据解析的工具

from lxml import etree

1.xpath数据解析 -

通过提供标签路径来获取标签(xpath指的就是标签的路径)

1)xpath基本概念

树: 整个html内容或者整个xml内容
节点:树结构中的每个标签(元素)就是一个节点
根节点:树结构中的第一个节点就是根节点(网页对应树的根节点是html标签)
节点内容:双标签的标签内容
节点属性:标签的标签属性

2)路径 -

目标节点在整个树结构中的位置信息

2.xml数据格式

xml和json都是通用的数据格式,可以用于不同编程语言的程序之间进行数据交流。

json更小更快;xml更安全

用json和xml两种数据格式来传输一个班级的信息:

1)json

{
    "name": "Python2205",
    "teacher": {
        "name": "余婷",
        "tel": "110",
        "age": 18
    },
    "students":[
        {"name": "小明", "age": 18, "tel": "120", "gender": "男"},
        {"name": "张三", "age": 22, "tel": "119", "gender": "女"},
        {"name": "老王", "age": 30, "tel": "140", "gender": "男"}
    ]
}

2)xml

<class  name="Python2205">
    <teacher>
        <name>余婷</name>
        <tel isHome="">110</tel>
        <age>18</age>
    </teacher>
    <students>
        <stu></stu>
        <stu></stu>
        <stu></stu>
    </students>
</class>

04-xpath语法

from lxml import etree

# 04 - xpath syntax demo. Works against the data.xml file listed at the
# end of these notes.

# 1. Build the tree and get its root node:
#    etree.XML(xml_string)  - parse xml data
#    etree.HTML(html_string) - parse html data
# (with-block added so the file handle is closed deterministically)
with open('data.xml', encoding='utf-8') as f:
    root = etree.XML(f.read())

# 2. Get tags via an xpath path: node.xpath(path) returns ALL tags that
#    match the path.
#
# xpath syntax (how paths are written):

# 1) Absolute path: always starts with '/' and is written from the root
#    node downward, no matter which node .xpath() is called on.
staff_names = root.xpath('/supermarket/staffs/staff/name')
print(staff_names)

# Appending '/text()' to the path yields the matched tags' text content.
result = root.xpath('/supermarket/staffs/staff/name/text()')
print(result)       # e.g. ['张三', '小明', '小花', ...] for the data.xml below

# Note: an absolute path is resolved from the root even when called on a
# non-root node.
goodsList = root.xpath('/supermarket/goodsList')[0]
result = goodsList.xpath('/supermarket/goodsList/goods/price/text()')
print(result)

# 2) Relative path: '.' is the current node (whichever node .xpath() was
#    called on) and '..' is its parent node.
result = root.xpath('./staffs/staff/name/text()')
print(result)

goodsList = root.xpath('/supermarket/goodsList')[0]
result = goodsList.xpath('./goods/price/text()')
print(result)

# A leading './' may be omitted.
result = goodsList.xpath('goods/price/text()')
print(result)

# 3) Full path: starts with '//'; matches every tag anywhere in the tree
#    that fits the path structure.
result = root.xpath('//name/text()')
print(result)

result = root.xpath('//goods/name/text()')
print(result)

# 3. Predicates (conditions): append '[predicate]' to the step that needs
#    a condition.
#
# 1) Position-based predicates:
#    [N]        - the N-th node
#    [last()]   - the last node
#    [last()-N] - e.g. [last()-1] is the second to last
#    [position()>N], [position()<N], [position()>=N], [position()<=N]
result = root.xpath('//staffs/staff[2]/name/text()')
print(result)

result = root.xpath('//staffs/staff[last()]/name/text()')
print(result)

result = root.xpath('//staffs/staff[last()-1]/name/text()')
print(result)

result = root.xpath('//staffs/staff[position()<=2]/name/text()')
print(result)

# 2) Attribute-based predicates:
#    [@attr=value] - tags whose attribute equals the given value
#    [@attr]       - tags that have the attribute at all
result = root.xpath('//goodsList/goods[@tag]/name/text()')
print(result)

result = root.xpath('//goods[@tag="hot"]/name/text()')
print(result)

# 3) Child-content predicates:
#    [child=value] - tags whose named child has the given text content
#    [child>value] - tags whose named child's content compares greater
result = root.xpath('//goods[price=3.5]/name/text()')
print(result)

result = root.xpath('//goods[count>=50]/name/text()')
print(result)

# 4. Getting tag content and tag attributes:
#    path/text() - text content of every tag the path selects
#    path/@attr  - value of the given attribute of every selected tag
result = root.xpath('//goods[2]/@tag')
print(result)

goods_names = root.xpath('//goods/name')
for x in goods_names:
    print(x.xpath('./text()')[0])

# 5. Wildcard '*': stands in for any tag name or any attribute name.
result = root.xpath('//goods[1]/*/text()')
print(result)

result = root.xpath('//*[@class="c1"]/text()')
print(result)

result = root.xpath('//goodsList/goods[3]/@*')
print(result)

# 6. Union '|': path1|path2 collects the results of each listed path.
result = root.xpath('//goods/name/text()|//staff/name/text()')
print(result)

05-xpath解析豆瓣电影

import requests
from lxml import etree

# 05 - Scrape douban movie Top250 (page 1) with xpath.

# 1. Fetch the page (a browser-like user-agent is required).
headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}
response = requests.get('https://movie.douban.com/top250', headers=headers)

# 2. Parse the html into a tree.
root = etree.HTML(response.text)

# Method 1: two parallel xpath queries over the whole document.
# all_name = root.xpath('//ol[@class="grid_view"]/li/div/div[@class="info"]/div[1]/a/span[1]/text()')
# scores = root.xpath('//ol[@class="grid_view"]/li/div/div[@class="info"]/div[2]/div/span[@class="rating_num"]/text()')
# print(all_name)
# print(scores)

# Method 2: select each movie's container div, then use relative paths
# inside it — keeps name and score of the same movie together.
all_div = root.xpath('//ol[@class="grid_view"]/li/div')

for x in all_div:
    name = x.xpath('./div[@class="info"]/div[1]/a/span[1]/text()')[0]
    score = x.xpath('./div[@class="info"]/div[2]/div/span[2]/text()')[0]
    print(name, score)

06-常见的指令操作

常见的指令操作

执行指令的工具: Windows - 命令提示符(cmd) 、Mac - 终端

1.运行python程序: -

运算程序的计算机必须先安装python环境

win: python py文件路径
mac: python3 py文件路径

注意:windows使用python指令执行Python程序的时候必须保证写指令的位置和py文件在同一个盘下面;
如果不在同一个盘,执行指令前先切盘

2.进入文件夹: cd

cd 文件夹相对路径、文件夹绝对路径

注意:如果是windows操作系统,cd操作如果要跨盘需要先切盘,然后再cd
切盘方法:C:、E:、D:

3.查看当前文件夹的内容

​ win: dir
​ Mac:ls

4.用指令创建虚拟环境

第一步:找到一个用来放虚拟环境的文件夹
第二步:通过cd指令进入到存放虚拟环境的文件夹中

第三步:创建虚拟环境
python -m venv 虚拟环境名
python3 -m venv 虚拟环境名

第四步:激活虚拟环境
(mac) source 虚拟环境目录/bin/activate
(windows) 虚拟环境目录\Scripts\activate.bat

​ 第五步:退出虚拟环境(回到系统环境)
​ deactivate

5.常用pip指令(pip - Python包管理工具)

​ pip list - 查看当前环境已经安装过的所有的第三方库

pip install 第三方库名称 - 下载并且安装指定的第三方库
​ pip install 第三方库名称 -i 镜像地址 - 在指定的镜像地址中下载安装
​ pip install 第三方库名称==版本号 -i 镜像地址

​ pip install 第三方库名称1 第三方库名称2

​ pip freeze > 依赖文件名 - 生成依赖文件
​ pip install -r 依赖文件路径 - 批量安装

​ pip uninstall 第三方库名称 - 卸载指定的第三方库

data

<supermarket>
    <name>永辉超市</name>
    <staffs>
        <staff>
            <name class="c1">张三</name>
            <position>收营员</position>
            <salary>3500</salary>
        </staff>
        <staff>
            <name>小明</name>
            <position class="c1">收营员</position>
            <salary>3800</salary>
        </staff>
        <staff>
            <name class="c1">小花</name>
            <position>导购</position>
            <salary>4500</salary>
        </staff>
        <staff>
            <name>李华</name>
            <position>导购</position>
            <salary>5500</salary>
        </staff>
    </staffs>

    <goodsList>
        <goods>
            <name class="c1">面包</name>
            <price>5.5</price>
            <count>12</count>
        </goods>
        <goods tag="hot">
            <name>泡面</name>
            <price class="c1">3.5</price>
            <count>59</count>
        </goods>
        <goods tag="discount" discount="0.8">
            <name>火腿肠</name>
            <price>1.5</price>
            <count>30</count>
        </goods>
        <goods tag="hot">
            <name>矿泉水</name>
            <price>2</price>
            <count>210</count>
        </goods>
    </goodsList>

</supermarket>
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值