### Imports (deduplicated: `requests` was imported twice, and `json` shared a line)
import json

import requests
from lxml import etree

### Target page: Baidu hot-search ranking
url = "http://top.baidu.com/buzz?b=1&fr=20811"
### Impersonate a desktop Chrome browser so the site serves the normal page
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}

# NOTE(review): this fetch + snapshot runs at *import time* and writes to a
# hard-coded Windows path; it looks like debugging residue (main() downloads
# the page again itself). Kept as-is for backward compatibility.
r = requests.get(url, headers=header)
with open('D:/1.html', 'wb') as f:
    f.write(r.content)
### Entry point: scrape the hot-search list and push it to DingTalk
def main():
    """Scrape the Baidu hot-search page and send it to DingTalk as markdown.

    Reads the module-level ``url`` and ``header`` globals, extracts every
    ``<a class="list-title">`` entry, formats the first entry as a "top"
    headline and the remaining entries as a quoted bullet list, then hands
    the markdown text to ``sendinfo_ding``.
    """
    # Fetch and parse the ranking page.
    html = etree.HTML(requests.get(url, headers=header).content)
    # Hot-search entry titles, in page order.
    title = html.xpath('//a[@class="list-title"]/text()')
    if not title:
        # Page layout changed or the request was blocked: nothing to send
        # (the original would have crashed with IndexError here).
        return
    top, affair = title[0], title[1:]

    # Each title links to a Baidu search for itself; the query-string tail is
    # copied verbatim from a real browser search (kept byte-identical).
    search_prefix = ("https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&rsv_idx=1"
                     "&tn=baidu&wd=")
    search_suffix = ("&rsv_pq=cc0b2ae500104a8f"
                     "&rsv_t=8594gkWHOJpP8vhGnFVfsZhcRYTJV9ElJQ5Nk3qShVzXGX2bDtb6O2Q4F%2BY"
                     "&rqlang=cn&rsv_enter=1&rsv_dl=tb&rsv_sug3=5&rsv_sug1=4"
                     "&rsv_sug7=101&rsv_sug2=0&inputT=860&rsv_sug4=1019&rsv_sug=1")

    # Build the markdown body with a list + join instead of quadratic +=.
    parts = ["### top:[" + top + "](" + search_prefix + top + search_suffix + ") \n"]
    for item in affair:
        parts.append(">- [" + item + "](" + search_prefix + item + search_suffix + ") \n")
    sendinfo_ding("".join(parts))
def sendinfo_ding(data):
    """POST *data* to a DingTalk group robot as a markdown message.

    Args:
        data: Markdown-formatted message body (str).
    """
    # NOTE(review): placeholder — must be replaced with the robot's real
    # webhook address, otherwise requests raises MissingSchema.
    url = 'url'
    program = {
        "msgtype": "markdown",
        "markdown": {
            "title": "百度热搜",
            # Original wrote ""+data+"" — the empty-string concatenation
            # is a no-op for str input, so pass data directly.
            "text": data,
        },
    }
    headers = {'Content-Type': 'application/json'}
    response = requests.post(url, data=json.dumps(program), headers=headers)
    print(response)  # e.g. <Response [200]> on success
# Guard the entry point so importing this module does not trigger the
# scrape-and-push pipeline from this line.
if __name__ == "__main__":
    main()