import requests
from bs4 import BeautifulSoup

def get_url():
    urls = []  # list that collects (chapter-page urls, chapter title) tuples
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0',
        'Referer': 'https://m.biqukk.cc/10/10643_2/'
    }
    # the url of every index page, collected in a list
    page = ['https://m.biqukk.cc/10/10643/'] + [f'https://m.biqukk.cc/10/10643_{i}/' for i in range(1, 21)]
    for i, url in enumerate(page):
        try:
            r = requests.get(url, headers=headers, timeout=3)  # timeout=3: give a page three seconds to answer, then skip it
        except requests.exceptions.RequestException:
            continue
        if r.status_code != 200:
            print("error")
            continue
        soup = BeautifulSoup(r.text, features='html.parser')
        links = soup.find_all('div', class_='book_last')[1].find_all('a')
        for link in links:
            # each chapter spans up to three pages: xxx.html, xxx_2.html, xxx_3.html
            l = ['https://m.biqukk.cc' + link.get('href')] + ['https://m.biqukk.cc' + link.get('href')[:-5] + f'_{n}.html' for n in range(2, 4)]
            urls.append((l, link.get_text()))  # one entry per chapter: the page urls plus the title
        print(i)
    return urls
def get_text():
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0'
    }
    i = 1
    with open("./龙王小说.txt", 'a', encoding='utf-8') as f:
        for chapter_urls, title in get_url():
            f.write(title)  # write the chapter title
            f.write('\n')
            for url in chapter_urls:
                r = requests.get(url, headers=headers)
                soup = BeautifulSoup(r.text, features='html.parser')
                content = soup.find('div', id="chaptercontent", class_='Readarea ReadAjax_content')
                # take the text of the content div (one line per <br>) and strip the site's watermark line
                f.write(content.get_text('\n').replace("笔趣阁阅读网址:m.biqukk.cc", ''))
                f.write('\n')
                print(i)
                i += 1

get_text()
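One note on the timeout handling above: requests.get raises an exception on a timeout rather than silently skipping, which is why the loop wraps it in try/except. If the site drops connections often, retries with backoff can be layered onto the same calls; a minimal sketch using requests' own HTTPAdapter together with urllib3's Retry (the retry count, backoff factor, and status codes are illustrative choices, not anything the scraper requires):

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Build a session that retries failed GETs a few times with exponential backoff.
session = requests.Session()
retry = Retry(total=3, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
session.mount('https://', HTTPAdapter(max_retries=retry))

r = session.get('https://m.biqukk.cc/10/10643/', timeout=3)
print(r.status_code)

Mounting the adapter on 'https://' applies the policy to every request the session makes, so get_url could call session.get in place of requests.get with no other changes.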
Scraping images
import requests
from bs4 import BeautifulSoup
import os

def get_url():
    url = 'https://pic.yesky.com/c/6_25152.shtml'
    r = requests.get(url)
    r.encoding = 'gb2312'  # the page's html is encoded as gb2312
    soup = BeautifulSoup(r.text, 'html.parser')
    links = soup.find('ul', class_='classification_listContent').find_all('img')  # grab every image tag on the index page
    urls = []
    for link in links:
        urls.append(link['src'])
    return urls
def craw_url():
    os.makedirs("美女图片", exist_ok=True)  # make sure the output directory exists before writing into it
    for url in get_url():
        filename = os.path.basename(url)  # basename keeps whatever follows the last '/', extension included; use it as the image's file name
        r = requests.get(url)
        with open(f"美女图片/{filename}", 'wb') as f:  # open the file for binary writing
            f.write(r.content)  # .text is the decoded, human-readable body; .content is the raw bytes

craw_url()
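r.content loads the whole image into memory before writing, which is fine for thumbnails but wasteful for large files. A minimal sketch of a streamed variant, assuming the same output folder; stream=True plus iter_content writes the body in chunks (the 8 KB chunk size and the helper name download_streamed are arbitrary choices for illustration):

import os
import requests

def download_streamed(url, folder="美女图片"):
    # Stream the response instead of loading the whole body into memory at once.
    os.makedirs(folder, exist_ok=True)
    filename = os.path.basename(url)
    with requests.get(url, stream=True, timeout=10) as r:
        r.raise_for_status()
        with open(os.path.join(folder, filename), 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):  # write 8 KB at a time
                f.write(chunk)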