目录结构
- controller
- spider.js // 封装的spider方法
- index.js // 项目入口
index.js
var cheerio = require("cheerio");
var server = require("./controller/spider");
// var url = "http://v.163.com/special/opencourse/englishs1.html";
var url = 'http://zwbk.com/';
// Fetch the page, then walk every <a> element in the returned HTML.
server.fetchData(url, function (html) {
    if (!html) {
        // The spider reported a request failure.
        console.log("error");
        return;
    }
    var $ = cheerio.load(html);
    $("a").each(function (i, e) {
        console.log(1111);
    });
    console.log("done");
});
复制代码
spider.js
var http = require("http");
var https = require("https");
/**
 * Fetch the body of the page at `url` and hand it to `callback`.
 *
 * Picks the `https` or `http` client module based on the URL scheme,
 * so both kinds of pages can be fetched (the `http` module alone
 * cannot request https:// URLs — the exact problem hit in this post).
 *
 * @param {string} url - Absolute URL to fetch (http:// or https://).
 * @param {function(?string)} callback - Invoked with the full response
 *   body as a UTF-8 string on success, or `null` on a request error.
 */
var fetchData = function (url, callback) {
    // Choose the client matching the URL's protocol.
    var client = url.indexOf("https:") === 0 ? https : http;
    client.get(url, function (res) {
        var data = "";
        // Decode chunks as UTF-8 text instead of relying on implicit
        // Buffer -> string coercion during `+=`.
        res.setEncoding("utf8");
        res.on("data", function (chunk) {
            data += chunk;
        });
        res.on("end", function () {
            callback(data);
        });
    }).on("error", function () {
        // DNS/connection failure: signal the caller with null.
        callback(null);
    });
};
exports.fetchData = fetchData;
复制代码
执行node index.js
,发现只输出了一个done
,猜测可能是做了反爬虫,在网上随便找了一个域名,执行后果然有了输出。为了解决这个问题,我引入了superAgent
来发送请求,superAgent
可以很方便地模拟浏览器的一些属性,例如 referer、请求头等...还有个好处是superAgent
可以直接抓取https
的页面.
现在将地址替换成https://github.com/azoth1991
,已经可以抓到页面内容了
// Log each anchor's index and its raw href attribute
// (cheerio exposes attributes via `e.attribs`).
$("a").each(function(i, e) {
console.log(i,e.attribs.href);
});
0 '#start-of-content'
1 'https://github.com/'
2 '/features'
3 '/features/code-review/'
4 '/features/project-management/'
5 '/features/integrations'
6 '/features/actions'
7 '/features#team-management'
8 '/features#social-coding'
9 '/features#documentation'
10 '/features#code-hosting'
11 '/customer-stories'
12 '/security'
13 '/enterprise'
14 '/explore'
...
复制代码
仓库地址 欢迎star ^_^
待续...