# -*- coding: UTF-8 -*-
import requests,re
from urllib.parse import urljoin
def download(url, timeout=10):
    """Fetch *url* pretending to be a desktop Chrome browser and return its HTML.

    Args:
        url: Absolute URL to request.
        timeout: Seconds to wait for connect/read before ``requests``
            raises a timeout error. The original call had no timeout and
            could block forever on a stalled server; 10s is the default
            so existing callers are unaffected.

    Returns:
        The response body decoded as UTF-8 text.

    Raises:
        requests.RequestException: On connection failure or timeout.
    """
    headers = {
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        # NOTE(review): this is a captured session cookie and will expire;
        # confirm whether the site actually requires it for these pages.
        'Cookie': 'UM_distinctid=17595e9e062487-0c69096726151c-303464-1fa400-17595e9e063254; CNZZDATA1276802308=885000662-1604537933-%7C1604537933; Hm_lvt_bd95c97aa671342f1089573bea384965=1604537935; Hm_lpvt_bd95c97aa671342f1089573bea384965=1604537990'
    }
    # requests.get is the idiomatic form of requests.request("GET", ...).
    response = requests.get(url, headers=headers, timeout=timeout)
    # Force UTF-8 rather than trusting the server's declared charset.
    response.encoding = 'utf-8'
    return response.text
base = 'http://www.tvix.cn/caoben/'  # Listing page the crawl starts from; also the base for resolving relative links.
def url_join(base,html):
urls = re.findall(r'href="(/\w*/\w*\.html)"', html, re.I)
return {urljoin(base,u) for u in urls} # 通过集合推导式去重
def get_title(article):
    """Return the text of every ``<h3>`` element in *article* whose content
    contains no lowercase ASCII letter.

    The ``+?`` quantifier is lazy, so each match stops at the first closing
    ``</h3>``; any ``a``-``z`` character inside the element rejects that
    candidate. ``re.S`` lets the title span multiple lines.
    """
    pattern = re.compile('<h3>([^a-z]+?)</h3>', re.S)
    return pattern.findall(article)
def _main():
    """Crawl the listing page, then print the titles found on each linked page."""
    listing_html = download(base)
    page_urls = url_join(base, listing_html)
    for page_url in page_urls:
        page_html = download(page_url)
        print(get_title(page_html))


# Guard so importing this module no longer triggers network requests.
if __name__ == '__main__':
    _main()