Python爬虫爬取网络小说
1.访问小说目录所在网址,爬取每一章网址。
2.利用爬虫依次访问每一章网址,匹配目的标签内容,下载该内容。
3.C语言处理爬取的文字,替换、删除标签等不需要的内容。
import requests
from bs4 import BeautifulSoup
# Crawl every chapter URL listed in c.txt and append each chapter's raw
# <div id="content"> block to the output novel file (a later pass strips tags).
HEADERS = {"user-agent": "Mozilla/5.0"}  # minimal UA so the site serves the page
CHAPTER_COUNT = 2062  # number of chapter paths expected in c.txt

# Context managers guarantee both files are closed (the original leaked
# the c.txt handle); a for-loop replaces the hand-rolled while counter.
with open('c.txt', 'r') as web_file, \
        open('《太古神王》.txt', 'a', encoding='utf-8', errors='ignore') as f:
    for i in range(1, CHAPTER_COUNT + 1):
        # Each line of c.txt holds one chapter's path relative to the site root.
        single_web = web_file.readline().replace('\n', '')
        url = "https://www.kquanben.com" + single_web
        print(url)
        data = requests.get(url=url, headers=HEADERS)
        data.encoding = "gb18030"  # site serves GBK-family encoded text
        soup = BeautifulSoup(data.text, "lxml")
        # NOTE(review): prints the tag object itself (markup included);
        # the downstream C pass depends on seeing the </div> markers.
        lis = soup.find(name="div", attrs={"id": "content"})
        print(i)
        print(f"第{i}章\n", file=f)
        print(lis, file=f)
        print("\n\n\n", file=f)
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
int Chinesestrfind(char *A,char*B);/* count occurrences of A in B and delete them in place (B shrinks) */
int replacestr(char *A,char*B);/* count occurrences of A in B and blank each match with spaces (length kept) */
/*
 * Read the crawled novel text token by token, count/delete the "</div>"
 * markers via Chinesestrfind, and append the cleaned tokens to b.txt.
 * Returns 0 on success, 1 if either file cannot be opened.
 */
int main(void)
{
    FILE *fp1 = fopen("《太古神王》.txt", "r");
    FILE *fp2 = fopen("b.txt", "a");
    char content_line[10000];
    char to_del[] = "</div>";

    if (fp1 == NULL || fp2 == NULL) {   /* original never checked fopen */
        fprintf(stderr, "cannot open input or output file\n");
        if (fp1) fclose(fp1);
        if (fp2) fclose(fp2);
        return 1;
    }

    /* %9999s bounds the read (the bare %s could overflow content_line);
     * testing the fscanf return value replaces the broken while(!feof())
     * pattern, which processed the final token twice. */
    while (fscanf(fp1, "%9999s", content_line) == 1) {
        printf("%d", Chinesestrfind(to_del, content_line));
        fprintf(fp2, "%s\n", content_line);
    }

    fclose(fp1);
    fclose(fp2);
    return 0;
}
/*
 * replacestr: find every occurrence of pattern A inside B and overwrite
 * each match with spaces (' '), keeping B's length unchanged.
 *
 * A: NUL-terminated pattern to search for.
 * B: NUL-terminated buffer, modified in place.
 * Returns the number of matches blanked out.
 */
int replacestr(char *A, char *B)
{
    int lA = strlen(A), lB = strlen(B), count = 0, i;

    if (lA == 0)        /* guard: empty pattern has no meaningful matches */
        return 0;

    for (i = 0; i + lA <= lB; i++) {
        /* strncmp compares in place — fixes the original's temp[] copy,
         * which was never NUL-terminated, so strcmp saw trailing garbage
         * and matching was undefined. */
        if (strncmp(A, B + i, lA) == 0) {
            count++;
            memset(B + i, ' ', (size_t)lA);
        }
    }
    return count;
}
/*
 * Chinesestrfind: delete every occurrence of pattern A from B in place.
 * B is shortened; the NUL terminator moves with the shifted tail.
 *
 * A: NUL-terminated pattern to search for.
 * B: NUL-terminated buffer, modified in place.
 * Returns the number of occurrences removed.
 */
int Chinesestrfind(char *A, char *B)
{
    int lA = strlen(A), lB = strlen(B), count = 0, i = 0;

    if (lA == 0)        /* guard: empty pattern has no meaningful matches */
        return 0;

    while (i + lA <= lB) {
        /* strncmp fixes the unterminated temp[] comparison of the original. */
        if (strncmp(A, B + i, lA) == 0) {
            count++;
            /* Shift the tail (including the NUL) over the match, shrink the
             * tracked length (the original left lB stale and scanned past
             * the new terminator), and re-test the same offset so adjacent
             * matches such as "XXXX" with pattern "XX" are all removed. */
            memmove(B + i, B + i + lA, (size_t)(lB - i - lA) + 1);
            lB -= lA;
        } else {
            i++;
        }
    }
    return count;
}