Yugong Moves the Mountains Diary · 12
I didn't manage to update yesterday, thanks to some non-natural force majeure, haha.
Learning progress
Today I scraped the same website with several different methods.
XPath method
import requests
import time
from lxml import etree
from fake_useragent import UserAgent

def get_html(url):
    # Fetch the page with a random User-Agent, retrying up to 3 times on a non-200 response.
    count = 0
    while True:
        headers = {'User-Agent': UserAgent().random}
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            response.encoding = 'utf-8'
            return response
        else:
            count += 1
            if count == 3:
                return
            else:
                continue

def get_infos(response):
    # Extract the fortune text with an XPath expression.
    html = etree.HTML(response.text)
    luck = html.xpath('//p[@class="txt"]/text()')[0]
    return luck

def write_txt(i, info):
    # Append the sign name and its fortune to luck.txt, tagged with the method used.
    with open('luck.txt', 'a+', encoding='utf-8') as f:
        info = info.strip()
        f.write(i + '\n')
        f.write(info + '\n\n' + 'xpath')

if __name__ == '__main__':
    names = ['Aries', 'Taurus', 'Gemini', 'Cancer', 'Leo',
             'Virgo', 'Libra', 'Scorpio', 'Sagittarius',
             'Capricorn', 'Aquarius', 'Pisces']
    for i in names:
        url = 'http://www.d1xz.net/astro/{}/'.format(i)
        response = get_html(url)
        if response is None:
            continue
        info = get_infos(response)
        write_txt(i, info)
        time.sleep(1)
CSS selectors
Every method uses the requests library to fetch the data, so I won't retype that repeated code below; I'll only write down the parts that differ between the methods.
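To make that concrete, the shared part boils down to a loop like this minimal sketch (run_all is my own name for it, not something in the original script; it assumes get_html, names and the time import from the XPath version above). Only get_infos and write_txt change per method, as in the CSS-selector version below.
def run_all(get_infos, write_txt):
    # Same requests-based driver as in the XPath script; only the parser
    # and writer functions are swapped for each method.
    for i in names:
        url = 'http://www.d1xz.net/astro/{}/'.format(i)
        response = get_html(url)
        if response is None:
            continue
        write_txt(i, get_infos(response))
        time.sleep(1)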
def get_infos(response):
    # lxml's cssselect() needs the separate cssselect package installed.
    html = etree.HTML(response.text)
    luck = html.cssselect('.xz_det.fr > p.txt')[0].text
    return luck

def write_txt(_, info):
    with open('luck.txt', 'a+', encoding='utf-8') as f:
        info = info.strip()
        f.write(_ + '\n')
        f.write(info + '\n\n')
bs4
from bs4 import BeautifulSoup

def get_infos(response):
    # Parse with BeautifulSoup and take the second <p class="txt"> on the page.
    html = BeautifulSoup(response.text, 'lxml')
    luck = html.find_all('p', class_="txt")[1].text.strip()
    return luck

def write_txt(i, info):
    with open('luck.txt', 'a+', encoding='utf-8') as f:
        info = info.strip()
        f.write(i + '\n')
        f.write(info + 'bs4 method' + '\n\n')
Regular expressions
import re

def get_infos(response):
    # re.S lets '.' match newlines so the pattern can span multiple lines of HTML.
    luck = re.findall('</p><p class="txt">(.*?)</p><ul',
                      response.text,
                      re.S)[0]
    return luck

def write_txt(_, info):
    with open('luck.txt', 'a+', encoding='utf-8') as f:
        info = info.strip()
        f.write(_ + '\n')
        f.write(info + '\n\n')
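As a quick sanity check, a sketch like the following can compare the four approaches on a single page. The names get_infos_xpath, get_infos_css, get_infos_bs4 and get_infos_re are hypothetical renamings of the four parsers above (they all share the name get_infos in the snippets), so they can coexist in one script.
# Hypothetical check: all four parsers should return the same fortune text
# from one page, provided their selectors hit the same <p class="txt"> element.
response = get_html('http://www.d1xz.net/astro/Aries/')
parsers = [get_infos_xpath, get_infos_css, get_infos_bs4, get_infos_re]
results = [p(response).strip() for p in parsers]
print(all(r == results[0] for r in results))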
Alright, that's it for today.