import requests
from lxml import etree
import pymysql
class qinghua():
    """Scraper for short love-phrase pages on www.ainicr.cn.

    Fetches a listing page, follows each article link, extracts the
    phrase paragraphs and inserts them one-by-one into the MySQL table
    ``asas`` (database ``klkl``).
    """

    def __init__(self):
        # Headers copied from a real browser session; the Cookie keeps the
        # site from rejecting the crawler.  NOTE(review): the Cookie is
        # session-bound and will expire — confirm the site still responds
        # without a fresh one.
        self.headers = {
            'Referer': 'http://www.ainicr.cn/',
            'Cookie': 'UM_distinctid=17c96621588508-0006f186e395d8-513c1f42-154ac4-17c96621589136; Hm_lvt_eaa57ca47dacb4ad4f5a257001a3457c=1634662821,1634814827,1634814835,1634814842; BAIDU_SSP_lcr=https://www.baidu.com/link?url=f0jkJpuxWbNMN5o-TVZpgtW51twJHd2gfcZBZml6eF_&wd=&eqid=c67318b700011ae500000003617269fa; PHPSESSID=u2p24cmnb3ai3oa3qpughkqqo5; CNZZDATA1272896529=1159959131-1634602421-https%253A%252F%252Fwww.baidu.com%252F%7C1634888309',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36 Edg/94.0.992.50'
        }
        # NOTE(review): credentials are hard-coded — move to config/env vars.
        self.kuli = pymysql.connect(user='root', password='kobe123456',
                                    database='klkl', charset='utf8')
        self.cursor = self.kuli.cursor()

    def get_data(self, url):
        """Fetch *url* with the session headers and return the body text."""
        response = requests.get(url, headers=self.headers)
        return response.text

    # Parse the listing page
    def parse_url(self, data):
        """Extract the detail-page links from a listing page's HTML.

        Returns a list of href strings matched by the XPath expression.
        """
        lebron = etree.HTML(data)
        kobe = lebron.xpath('//div[@class="item"]//div/a/@href')
        return kobe

    # Parse the content pages
    def parse_data(self, url):
        """Fetch one detail page, extract every phrase and persist each one."""
        qinghua_data = self.get_data(url)
        pllp = etree.HTML(qinghua_data)
        # Both "stbody first" (first entry) and "stbody " (the rest) hold
        # phrase paragraphs, hence the XPath union.
        kobe1 = pllp.xpath('//div[@class="stbody first"]//p/text()|//div[@class="stbody "]//p/text()')
        for p in kobe1:
            print(p)
            self.save_mysql(p)

    # Save the data
    def save_mysql(self, qinghua):
        """Insert one phrase into table ``asas`` using a parameterized query."""
        sql = 'insert into asas(text) value(%s)'
        # Parameterized execute — pymysql escapes the value, so the phrase
        # text cannot break the statement.
        self.cursor.execute(sql, [qinghua])
        self.kuli.commit()

    def close(self):
        """Release the cursor and the database connection.

        New optional helper: the original class never closed its MySQL
        connection; callers may now do so explicitly.
        """
        self.cursor.close()
        self.kuli.close()

    def main(self, start_url=None):
        """Crawl one listing page and store every phrase from its articles.

        start_url: listing-page URL.  Defaults to the module-level global
        ``url`` for backward compatibility — the original implementation
        read that global implicitly, which existing callers rely on.
        """
        if start_url is None:
            start_url = url  # fall back to the module-level global (original behavior)
        data = self.get_data(start_url)
        kobe = self.parse_url(data)
        for link in kobe:
            self.parse_data(link)
if __name__ == '__main__':
    url_list = [
        'http://www.ainicr.cn/qh/5987.html',
        'http://www.ainicr.cn/qh/t4.html',
        'http://www.ainicr.cn/qh/t13.html',
        'http://www.ainicr.cn/qh/t8.html',
    ]
    # Fix: reuse ONE scraper (and therefore one MySQL connection) for every
    # listing page — the original built a fresh qinghua() instance, opening
    # a new, never-closed DB connection, on each loop iteration.
    abc = qinghua()
    # The loop variable must stay named `url`: qinghua.main() reads the
    # module-level global `url` when called without an argument.
    for url in url_list:
        abc.main()