1. Basic usage of XPath
from lxml import etree

# Parse a local HTML file; pass an HTMLParser because plain etree.parse
# expects well-formed XML and will choke on typical HTML
tree = etree.parse('70_xpath的使用.html', etree.HTMLParser())
print(tree)

# Attribute access: the class attribute of the li whose id is "l1"
li = tree.xpath('//ul/li[@id="l1"]/@class')
# contains(): every li whose id contains the letter "l"
li_list = tree.xpath('//ul/li[contains(@id,"l")]/text()')
# starts-with(): every li whose id starts with "c"
li_list = tree.xpath('//ul/li[starts-with(@id,"c")]/text()')
# Logical or: li elements whose id is "l1" or "l2"
li_list = tree.xpath('//ul/li[@id="l1" or @id="l2"]/text()')
print(li_list)
print(len(li_list))
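The local HTML file itself is not shown here; a self-contained sketch of the same predicates against inline markup (the ul/li ids and city names are my own stand-ins, not the contents of the lesson file):

from lxml import etree

html = '''
<ul>
  <li id="l1" class="class1">Beijing</li>
  <li id="l2">Shanghai</li>
  <li id="c3">Shenzhen</li>
</ul>
'''
tree = etree.HTML(html)
print(tree.xpath('//ul/li[@id="l1"]/@class'))              # ['class1']
print(tree.xpath('//ul/li[contains(@id,"l")]/text()'))     # ['Beijing', 'Shanghai']
print(tree.xpath('//ul/li[starts-with(@id,"c")]/text()'))  # ['Shenzhen']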
2. Grabbing the "百度一下" button text from Baidu
import urllib.request

url = 'https://www.baidu.com/'
# A browser User-Agent so Baidu serves the full page rather than a stripped-down one
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.67 Safari/537.36'
}
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')

from lxml import etree

# etree.HTML parses a string returned by the server; etree.parse is for local files
tree = etree.HTML(content)
# The value attribute of the search button: <input id="su" value="百度一下">
result = tree.xpath('//input[@id="su"]/@value')
print(result)
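xpath() always returns a list, even when only one node matches, so index into it for the bare value (the guard is just defensive, in case Baidu changes its markup):

if result:
    print(result[0])  # 百度一下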
3. Downloading images from 站长素材 (sc.chinaz.com)
import os
import urllib.request
from lxml import etree


def create_request(page):
    # Page 1 has no suffix; later pages are fengjingtupian_2.html, fengjingtupian_3.html, ...
    if page == 1:
        url = 'https://sc.chinaz.com/tupian/fengjingtupian.html'
    else:
        url = 'https://sc.chinaz.com/tupian/fengjingtupian_' + str(page) + '.html'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.67 Safari/537.36'
    }
    request = urllib.request.Request(url=url, headers=headers)
    return request


def get_content(request):
    response = urllib.request.urlopen(request)
    content = response.read().decode('utf-8')
    return content


def down_load(content):
    tree = etree.HTML(content)
    name_list = tree.xpath('//div[@id="container"]//a/img/@alt')
    # The site lazy-loads images: the real URL sits in src2, not src
    src_list = tree.xpath('//div[@id="container"]//a/img/@src2')
    for i in range(len(name_list)):
        name = name_list[i]
        src = src_list[i]
        url = 'https:' + src  # src2 is protocol-relative (//...)
        urllib.request.urlretrieve(url=url, filename='./fengjing/' + name + '.jpg')


if __name__ == '__main__':
    os.makedirs('./fengjing', exist_ok=True)  # urlretrieve will not create the folder
    start_page = int(input('Enter the start page: '))
    end_page = int(input('Enter the end page: '))
    for page in range(start_page, end_page + 1):
        request = create_request(page)
        content = get_content(request)
        down_load(content)
    print(len(os.listdir('./fengjing/')))
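One caveat: urlretrieve sends urllib's default User-Agent rather than the browser headers built in create_request, so the image host may start refusing requests. A minimal header-aware replacement for the urlretrieve call, assuming the same flow as above (download_image is a hypothetical helper, not part of the original script):

import urllib.request

def download_image(url, filename, headers):
    # Fetch with the browser headers, then write the raw bytes ourselves
    request = urllib.request.Request(url=url, headers=headers)
    response = urllib.request.urlopen(request)
    with open(filename, 'wb') as fp:
        fp.write(response.read())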
4. Basic usage of jsonpath
import json
import jsonpath

# jsonpath works on an already-parsed Python object, so load the file with json first
obj = json.load(open('073_jsonpath.json', 'r', encoding='utf-8'))
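The notes stop after loading the file; a self-contained sketch of typical queries against an inline object (the bookstore data and paths are my own illustration, not the contents of 073_jsonpath.json):

import jsonpath

store = {'store': {'book': [{'author': 'Alice', 'price': 10},
                            {'author': 'Bob', 'price': 20}]}}

# $ is the root, .. searches recursively, [*] matches every element
print(jsonpath.jsonpath(store, '$.store.book[*].author'))  # ['Alice', 'Bob']
print(jsonpath.jsonpath(store, '$..price'))                # [10, 20]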
5. Parsing 淘票票 city data with jsonpath
import urllib.request

url = 'https://dianying.taobao.com/cityAction.json?activityId&_ksTS=1653230282698_97&jsoncallback=jsonp98&action=cityAction&n_s=new&event_submit_doGetAllRegion=true'

# Headers captured from the browser's developer tools
headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cookie': 'cna=4O+LGOddyxgCAWp2iaSwYabo; t=9988905f6e39406f85d1ff46ab7e47ef; cookie2=14ed630db5c6825c1b6fae715eaa2e35; v=0; _tb_token_=78e0a5316ee5b; xlly_s=1; isg=BMTEtqdgej0fxM5xy0_6TS5flUK23ehHbBVWCN5nLQ9SCWDTBuzN14brSaHRESCf; l=eBPK3OElLmkvtDusBO5whurza77OBQAfGsPzaNbMiInca1yl1FG88NCh_Bb9RdtjgtfeXetPSMVCeRhw-Ozd0dlxdgF-1NKDnYvp-; tfstk=cDDhBQi5FXPCf3PiGpwBFNR3Ml8Aal6zoYksQHj0xIdUygDaYs4F_syp6h4Hle55.',
    'referer': 'https://dianying.taobao.com/',
    'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="101", "Google Chrome";v="101"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.67 Safari/537.36'
}
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')

# The body is JSONP, i.e. jsonp98({...}); strip the callback wrapper to leave plain JSON
content = content.split('(')[1].split(')')[0]

# Save the stripped JSON so the next step can load it
with open('074_jsonpath解析淘票票.json', 'w', encoding='utf-8') as fp:
    fp.write(content)
import json
import jsonpath

obj = json.load(open('074_jsonpath解析淘票票.json', 'r', encoding='utf-8'))
# Every regionName anywhere in the document: the full city list
city_list = jsonpath.jsonpath(obj, '$..regionName')
print(city_list)
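The split-based unwrapping above truncates the payload if the JSON itself ever contains '(' or ')'. A more robust sketch using a regular expression (the sample string is illustrative):

import re

raw = 'jsonp98({"regionName": "北京(Beijing)"});'
# Greedy .* spans from the first '(' to the last ')', so inner parentheses survive
payload = re.search(r'\((.*)\)', raw, flags=re.S).group(1)
print(payload)  # {"regionName": "北京(Beijing)"}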
6. Basic usage of bs4 (BeautifulSoup)
from bs4 import BeautifulSoup

soup = BeautifulSoup(open('075_bs4的基本使用.html', encoding='utf-8'), 'lxml')

"""
If a tag contains only text, both .string and get_text() return it.
If the tag contains other tags besides text, .string returns nothing,
while get_text() still returns all the text.
In general, prefer get_text().
"""

# select() takes a CSS selector and returns a list; #p1 matches id="p1"
obj = soup.select('#p1')[0]

# Three equivalent ways to read a tag's attribute
print(obj.attrs.get('class'))
print(obj.get('class'))
print(obj['class'])
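A self-contained illustration of the .string vs get_text() rule quoted above (inline markup, my own example):

from bs4 import BeautifulSoup

demo = BeautifulSoup('<div>hello<span>world</span></div>', 'lxml')
div = demo.find('div')
print(div.string)      # None: the div holds a nested tag as well as text
print(div.get_text())  # helloworld: all text, nested tags included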
7. Scraping Starbucks (星巴克) menu data with bs4
import urllib.request

url = 'https://www.starbucks.com.cn/menu/'
# This page responds without custom headers
response = urllib.request.urlopen(url)
content = response.read().decode('utf-8')

from bs4 import BeautifulSoup

soup = BeautifulSoup(content, 'lxml')
# CSS selector: every <strong> (product name) inside the product grid <ul>
name_list = soup.select('ul[class="grid padded-3 product"] strong')
for name in name_list:
    print(name.get_text())
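To keep the result rather than just print it, a small sketch that writes the names to a local text file (the filename is my choice):

with open('starbucks_menu.txt', 'w', encoding='utf-8') as fp:
    for name in name_list:
        fp.write(name.get_text() + '\n')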