Crawler Project Example
Requirements:
1. Pick 3-5 representative news sites (e.g. Sina News or NetEase News, or an authoritative vertical site such as Xueqiu or East Money in finance, Tencent Sports or Hupu in sports, etc.), build crawlers for them, analyze each site's news pages, extract the structured fields (encoding, title, author, time, keywords, abstract, content, source), and store them in a database.
2. Build a website offering per-field full-text search over the crawled content, plus a time-heat analysis of the queried keyword.
3. The crawler must be implemented in Node.js.
4. The query site's backend must be implemented in Node.js and the frontend in HTML+JS (avoid frontend/backend frameworks as far as possible).
While implementing this example crawler project, I ran into the following main difficulties:
1. Getting the base packages Node.js needs to run the crawler
First, npm. My network connection is slow, so npm installs would often fail or crawl along, so I switched to the Taobao registry mirror:
$ npm install -g cnpm --registry=https://registry.npm.taobao.org
With the Taobao mirror the required packages install smoothly; just use cnpm wherever you would use npm:
$ cnpm install express
$ cnpm install cheerio
$ cnpm install request
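As an aside, if you would rather keep using plain npm, the registry itself can be pointed at the mirror (an alternative I did not use here):
$ npm config set registry https://registry.npm.taobao.org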
2. The code
The basic crawler code:
var source_name = "中国新闻网";
var domain = 'http://www.chinanews.com/';
var myEncoding = "utf-8";
var seedURL = 'http://www.chinanews.com/';
var seedURL_format = "$('a')";
var keywords_format = " $('meta[name=\"keywords\"]').eq(0).attr(\"content\")";
var title_format = "$('title').text()";
var date_format = "$('#pubtime_baidu').text()";
var author_format = "$('#editor_baidu').text()";
var content_format = "$('.left_zw').text()";
var desc_format = " $('meta[name=\"description\"]').eq(0).attr(\"content\")";
var source_format = "$('#source_baidu').text()";
var url_reg = /\/(\d{4})\/(\d{2})-(\d{2})\/(\d{7})\.shtml/;
var regExp = /((\d{4}|\d{2})(\-|\/|\.)\d{1,2}\3\d{1,2})|(\d{4}年\d{1,2}月\d{1,2}日)/;
var fs = require('fs');
var myRequest = require('request')
var myCheerio = require('cheerio')
var myIconv = require('iconv-lite')
require('date-utils');
//keep the target site from blocking our crawler
var headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.65 Safari/537.36'
}
//fetch a url asynchronously via the request module
function request(url, callback) {
var options = {
url: url,
encoding: null,
//proxy: 'http://x.x.x.x:8080',
headers: headers,
timeout: 10000 // 10-second timeout
}
myRequest(options, callback)
}
request(seedURL, function(err, res, body) {
var html = myIconv.decode(body, myEncoding);
var $ = myCheerio.load(html, { decodeEntities: true });
var seedurl_news;
try {
seedurl_news = eval(seedURL_format);
//console.log(seedurl_news);
} catch (e) { console.log('failed to identify the html block holding the url list: ' + e) };
seedurl_news.each(function(i, e) {
var myURL = "";
try {
var href = "";
href = $(e).attr("href");
if (href.toLowerCase().indexOf('http://') >= 0) myURL = href;
else if (href.startsWith('//')) myURL = 'http:' + href;
else myURL = seedURL.substr(0, seedURL.lastIndexOf('/') + 1) + href;
} catch (e) { console.log('failed to extract a news link from the seed page: ' + e) }
if (!url_reg.test(myURL)) return;
//console.log(myURL);
newsGet(myURL);
});
});
function newsGet(myURL) {
request(myURL, function(err, res, body) {
var html_news = myIconv.decode(body, myEncoding);
var $ = myCheerio.load(html_news, { decodeEntities: true });
var myhtml = html_news; // keep a copy of the decoded page
console.log("转码读取成功:" + myURL);
var fetch = {};
fetch.title = "";
fetch.content = "";
fetch.publish_date = (new Date()).toFormat("YYYY-MM-DD");
fetch.url = myURL;
fetch.source_name = source_name;
fetch.source_encoding = myEncoding;
fetch.crawltime = new Date();
if (keywords_format == "") fetch.keywords = source_name; // no keywords selector: fall back to the source name
else fetch.keywords = eval(keywords_format);
if (title_format == "") fetch.title = "";
else fetch.title = eval(title_format);
if (date_format != "") fetch.publish_date = eval(date_format);
console.log('date: ' + fetch.publish_date);
fetch.publish_date = regExp.exec(fetch.publish_date)[0];
fetch.publish_date = fetch.publish_date.replace('年', '-')
fetch.publish_date = fetch.publish_date.replace('月', '-')
fetch.publish_date = fetch.publish_date.replace('日', '')
fetch.publish_date = new Date(fetch.publish_date).toFormat("YYYY-MM-DD");
if (author_format == "") fetch.author = source_name;
else fetch.author = eval(author_format);
if (content_format == "") fetch.content = "";
else fetch.content = eval(content_format).replace("\r\n" + fetch.author, "");
if (source_format == "") fetch.source = fetch.source_name;
else fetch.source = eval(source_format).replace("\r\n", "");
if (desc_format == "") fetch.desc = fetch.title;
else fetch.desc = eval(desc_format).replace("\r\n", "");
var filename = source_name + "_" + (new Date()).toFormat("YYYY-MM-DD") +
"_" + myURL.substr(myURL.lastIndexOf('/') + 1) + ".json";
fs.writeFileSync(filename, JSON.stringify(fetch));
});
}
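Assuming the code above is saved as crawler.js (the filename is my own choice), it runs directly and drops one json file per matched article into the working directory:
$ node crawler.js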
Next I tried modifying the code to crawl another site, taking NetEase News as the example. First the url:
https://news.163.com/ is the NetEase News homepage url.
The article-url regex: /\/\d{2}\/\d{4}\/\d{2}\/\w{16}[.]html/
To check whether a url is well formed at all, we can use ^(http://|https://)?((?:[A-Za-z0-9]+-[A-Za-z0-9]+|[A-Za-z0-9]+)\.)+([A-Za-z]+)[/?:]?.*$ (bare-ip urls without a domain name won't pass).
Then replace source_name with "网易新闻",
replace domain and seedURL with https://news.163.com/,
and replace regExp with /\/\d{2}\/\d{4}\/\d{2}\/\w{16}[.]html/:
var source_name = "网易新闻";
var domain = 'https://news.163.com/';
var myEncoding = "GBK";
var seedURL = 'https://news.163.com/';
var seedURL_format = "$('a')";
var regExp = /\/\d{2}\/\d{4}\/\d{2}\/\w{16}[.]html/;
var title_format = "$('meta[property=\"og:title\"]').eq(0).attr(\"content\")";
var content_format = "$('#endText').text()";
var fs = require('fs');
var myRequest = require('request')
var myCheerio = require('cheerio')
var myIconv = require('iconv-lite')
require('date-utils');
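Before a full crawl it is worth sanity-checking the new regExp; the sample paths below are made up by me to match (and not match) the pattern:
var regExp = /\/\d{2}\/\d{4}\/\d{2}\/\w{16}[.]html/;
console.log(regExp.test('/21/0614/10/ABCDEFGHIJKLMNOP.html')); // true: 2-4-2 digits, then 16 word characters
console.log(regExp.test('/2021/06-14/1234567.shtml')); // false: that is the chinanews shape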
Crawl results
Writing the fetch object out with fs yields the json files:
var filename = source_name + "_" + (new Date ()).toFormat("YYYY-MM-DD") + "_"
+ myURL.substr(myURL.lastIndexOf('/') + 1) + ".json";
fs.writeFileSync(filename, JSON.stringify(fetch));
Next, store the crawled content in mysql: from the mysql client under bin, create a new database (crawl, matching the pool config below) with a table for the records, then add the code.
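A minimal sketch of the fetches table (the column names mirror the fetch object the crawler builds; the exact types and lengths are my assumptions):
CREATE TABLE fetches (
    id INT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
    url VARCHAR(200) UNIQUE,
    source_name VARCHAR(200),
    source_encoding VARCHAR(45),
    title TEXT,
    keywords TEXT,
    author VARCHAR(100),
    publish_date DATE,
    crawltime DATETIME,
    content LONGTEXT
);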
Calling mysql:
var mysql = require("mysql");
var pool = mysql.createPool({
host: '127.0.0.1',
user: 'root',
password: 'root',
database: 'crawl'
});
var query = function(sql, sqlparam, callback) {
pool.getConnection(function(err, conn) {
if (err) {
callback(err, null, null);
} else {
conn.query(sql, sqlparam, function(qerr, vals, fields) {
conn.release(); // release the connection back to the pool
callback(qerr, vals, fields); // hand results to the event-driven callback
});
}
});
};
var query_noparam = function(sql, callback) {
pool.getConnection(function(err, conn) {
if (err) {
callback(err, null, null);
} else {
conn.query(sql, function(qerr, vals, fields) {
conn.release(); // release the connection back to the pool
callback(qerr, vals, fields); // hand results to the event-driven callback
});
}
});
};
exports.query = query;
exports.query_noparam = query_noparam;
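With these helpers exported (I keep them in mysql.js), the crawler can insert each fetch object instead of only writing json files. A sketch; the column list assumes the fetches table above:
var mysql = require('./mysql');
var fetchSql = 'INSERT INTO fetches(url,source_name,source_encoding,title,' +
    'keywords,author,publish_date,crawltime,content) VALUES(?,?,?,?,?,?,?,?,?)';
var fetchSql_Params = [fetch.url, fetch.source_name, fetch.source_encoding,
    fetch.title, fetch.keywords, fetch.author, fetch.publish_date,
    fetch.crawltime.toFormat('YYYY-MM-DD HH24:MI:SS'), fetch.content
];
mysql.query(fetchSql, fetchSql_Params, function(qerr, vals, fields) {
    if (qerr) console.log(qerr); // log insert errors instead of aborting the crawl
});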
A quick check in the mysql client shows the stored records:
mysql> select url, title from fetches;
Next, build the site and query mysql to implement search. In cmd, scaffold a new folder search_site with express and place mysql.js inside it.
Then run cnpm install mysql --save so the package is recorded in package.json.
After installing, open the search_site folder in vscode and modify the index file,
then build the frontend in HTML:
<!DOCTYPE html>
<html>
<head>
<script src="https://cdn.bootcss.com/jquery/3.4.1/jquery.js"></script>
</head>
<body>
<form>
<br> Title: <input type="text" name="title_text">
<input class="form-submit" type="button" value="Search">
</form>
<div class="cardLayout" style="margin: 10px 0px">
<table width="100%" id="record2"></table>
</div>
<script>
$(document).ready(function() {
$("input:button").click(function() {
$.get('/process_get?title=' + $("input:text").val(), function(data) {
$("#record2").empty();
$("#record2").append('<tr class="cardLayout"><td>url</td><td>source_name</td>' +
'<td>title</td><td>author</td><td>publish_date</td></tr>');
for (let list of data) {
let table = '<tr class="cardLayout"><td>';
Object.values(list).forEach(element => {
table += (element + '</td><td>');
});
$("#record2").append(table + '</td></tr>');
}
});
});
});
</script>
</body>
</html>
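The frontend above GETs /process_get, which is not shown yet; a minimal sketch of that route in the generated routes/index.js, reusing the mysql.js helpers (the LIKE-based title search and the column list are my assumptions):
var express = require('express');
var router = express.Router();
var mysql = require('../mysql'); // the query helpers defined earlier
router.get('/process_get', function(request, response) {
    // note: concatenating user input into sql is only acceptable in a toy project
    var fetchSql = "select url, source_name, title, author, publish_date " +
        "from fetches where title like '%" + request.query.title + "%'";
    mysql.query_noparam(fetchSql, function(err, result, fields) {
        response.writeHead(200, { "Content-Type": "application/json" });
        response.write(JSON.stringify(result));
        response.end();
    });
});
module.exports = router;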
Because the project was scaffolded under bin, finally run node bin/www from cmd; once the frontend server is up, queries can be made.
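Concretely (port 3000 is just the express generator's default; adjust if yours differs):
$ node bin/www
Then open http://localhost:3000 in a browser and search by title.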