# ---- Python version ----
import re
import time
import requests
# Browser-like User-Agent so the site serves the pages instead of rejecting
# the scraper as a bot.
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
}
# Output file for the scraped novel text, opened in append mode and closed in
# the __main__ block below.
# NOTE(review): 'D://Spyder/WD/novel.txt' is an absolute Windows path — confirm
# the directory exists before running.
f = open('D://Spyder/WD/novel.txt', 'a+')
def get_links(url):
    """Scrape every chapter of the novel listed on a table-of-contents page.

    Fetches `url`, extracts each chapter's relative link from the
    ``<li><a href="..." title`` anchors, and hands the absolute chapter
    URL to ``get_info`` for downloading.

    Parameters
    ----------
    url : str
        URL of the novel's table-of-contents page.
    """
    destination = requests.get(url, headers=headers)
    # Relative chapter paths, e.g. /doupocangqiong/12345.html
    links = re.findall('<li><a href="(.*?)" title', destination.text)
    chapter_urls = ['http://www.doupoxs.com{}'.format(link) for link in links]
    # Renamed loop variable: the original reused `url`, shadowing the parameter.
    for chapter_url in chapter_urls:
        get_info(chapter_url)
        print(chapter_url + '...Done')
def get_info(url, out=None):
    """Download one chapter page and append its paragraph text to a file.

    Parameters
    ----------
    url : str
        URL of a single chapter page.
    out : file-like, optional
        Writable text file to append to. Defaults to the module-level
        handle ``f`` (backward-compatible with the original global usage).
    """
    if out is None:
        out = f
    destination = requests.get(url, headers=headers)
    # Decode the raw bytes explicitly as UTF-8 rather than trusting
    # requests' charset detection; re.S lets paragraphs span newlines.
    contents = re.findall('<p>(.*?)</p>', destination.content.decode('utf-8'), re.S)
    for content in contents:
        out.write(content + '\n')
# Fixed: the original used `__name__ in '__main__'`, a substring test that
# happens to work but would also accept e.g. '__main' — use equality.
if __name__ == '__main__':
    url = 'http://www.doupoxs.com/doupocangqiong/'
    get_links(url)
    time.sleep(1)
    # Close the module-level output file opened at the top of the script.
    f.close()
# ---- R version ----
# 加载包
library(stringr)
# GetlinkFunc: collect the absolute URL of every chapter listed on the
# novel's table-of-contents page.
#
# Fixes vs. original:
#  * the dot in '[\\d]+.html' was unescaped (matched any character);
#  * the function relied on the invisible value of `data <- ...` as its
#    return — now returns explicitly;
#  * dropped `%>%`: library(stringr) alone does not attach magrittr's pipe,
#    so the original would fail unless another package supplied it.
GetlinkFunc <- function(url) {
  destination <- readLines(url, encoding = 'UTF-8')
  # Chapter anchors look like: <li><a href="/doupocangqiong/12345.html
  hits <- unlist(str_extract_all(destination, '<li><a href="/doupocangqiong/[\\d]+\\.html'))
  links <- gsub('<li><a href="', '', hits)
  # Return value: absolute chapter URLs (character vector).
  paste0('http://www.doupoxs.com', links)
}
# GetinfoFunc: download each chapter page, extract the body text from the
# <p>...</p> paragraphs (str_extract_all keeps the tags; gsub strips them),
# and append it to novel.txt.
#
# Fixes vs. original:
#  * print(..., sep = '\n') silently ignored `sep`; message() is the proper
#    progress channel;
#  * write.table's default quote = TRUE wrapped every line of the novel in
#    double quotes — now quote = FALSE;
#  * the output path is passed as the named `file` argument, not positionally.
GetinfoFunc <- function(url) {
  for (i in seq_along(url)) {
    destination <- readLines(url[i], encoding = 'UTF-8')
    paragraphs <- gsub('<p>|</p>', '', unlist(str_extract_all(destination, '<p>(.*?)</p>')))
    message(sprintf('第%d条链接%s抓取成功', i, url[i]))
    write.table(paragraphs, file = 'novel.txt', row.names = FALSE,
                col.names = FALSE, quote = FALSE, sep = '\n', append = TRUE)
  }
}
# Run the pipeline: scrape the chapter links, then export the text to novel.txt.
url <- 'http://www.doupoxs.com/doupocangqiong/'
link <- GetlinkFunc(url)
# NOTE(review): GetinfoFunc writes novel.txt as a side effect; a for loop
# returns invisible NULL, so `novel` does not hold the scraped text.
novel <- GetinfoFunc(link)