Java Distributed Crawler Nutch Tutorial: Importing the Nutch Project and Running a Complete Crawl

import java.util.*;
import java.text.*;

// Commons Logging imports
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.nutch.parse.ParseSegment;
import org.apache.nutch.indexer.IndexingJob;
//import org.apache.nutch.indexer.solr.SolrDeleteDuplicates;
import org.apache.nutch.util.HadoopFSUtil;
import org.apache.nutch.util.NutchConfiguration;
import org.apache.nutch.util.NutchJob;

// crawl tools from the org.apache.nutch.crawl package
import org.apache.nutch.crawl.Injector;
import org.apache.nutch.crawl.Generator;
import org.apache.nutch.crawl.CrawlDb;
import org.apache.nutch.crawl.LinkDb;

import org.apache.nutch.fetcher.Fetcher;

public class Crawl extends Configured implements Tool {

    public static final Logger LOG = LoggerFactory.getLogger(Crawl.class);

    private static String getDate() {
        return new SimpleDateFormat("yyyyMMddHHmmss")
                .format(new Date(System.currentTimeMillis()));
    }

    /* Perform complete crawling and indexing (to Solr) given a set of root
       URLs and the -solr parameter respectively. More information and usage
       parameters can be found below. */
    public static void main(String args[]) throws Exception {
        Configuration conf = NutchConfiguration.create();
        int res = ToolRunner.run(conf, new Crawl(), args);
        System.exit(res);
    }

    @Override
    public int run(String[] args) throws Exception {
        /* Directory containing the seed URLs */
        Path rootUrlDir = new Path("/tmp/urls");
        /* Directory that stores the crawl data */
        Path dir = new Path("/tmp", "crawl-" + getDate());
        int threads = 50;
        /* Depth of the breadth-first crawl, i.e. the number of levels of the BFS tree */
        int depth = 2;
        long topN = 10;

        JobConf job = new NutchJob(getConf());
        FileSystem fs = FileSystem.get(job);

        if (LOG.isInfoEnabled()) {
            LOG.info("crawl started in: " + dir);
            LOG.info("rootUrlDir = " + rootUrlDir);
            LOG.info("threads = " + threads);
            LOG.info("depth = " + depth);
            if (topN != Long.MAX_VALUE)
                LOG.info("topN = " + topN);
        }

        Path crawlDb = new Path(dir + "/crawldb");
        Path linkDb = new Path(dir + "/linkdb");
        Path segments = new Path(dir + "/segments");
        Path indexes = new Path(dir + "/indexes");
        Path index = new Path(dir + "/index");
        Path tmpDir = job.getLocalPath("crawl" + Path.SEPARATOR + getDate());

        Injector injector = new Injector(getConf());
        Generator generator = new Generator(getConf());
        Fetcher fetcher = new Fetcher(getConf());
        ParseSegment parseSegment = new ParseSegment(getConf());
        CrawlDb crawlDbTool = new CrawlDb(getConf());
        LinkDb linkDbTool = new LinkDb(getConf());

        // initialize crawlDb
        injector.inject(crawlDb, rootUrlDir);

        int i;

        for (i = 0; i < depth; i++) {
            // generate a new segment
            Path[] segs = generator.generate(crawlDb, segments, -1, topN,
                    System.currentTimeMillis());
            if (segs == null) {
                LOG.info("Stopping at depth=" + i + " - no more URLs to fetch.");
                break;
            }
            fetcher.fetch(segs[0], threads);               // fetch it
            if (!Fetcher.isParsing(job)) {
                parseSegment.parse(segs[0]);               // parse it, if needed
            }
            crawlDbTool.update(crawlDb, segs, true, true); // update crawldb
        }

        /*
        if (i > 0) {
            linkDbTool.invert(linkDb, segments, true, true, false); // invert links

            if (solrUrl != null) {
                // index, dedup & merge
                FileStatus[] fstats = fs.listStatus(segments,
                        HadoopFSUtil.getPassDirectoriesFilter(fs));
                IndexingJob indexer = new IndexingJob(getConf());
                indexer.index(crawlDb, linkDb,
                        Arrays.asList(HadoopFSUtil.getPaths(fstats)));

                SolrDeleteDuplicates dedup = new SolrDeleteDuplicates();
                dedup.setConf(getConf());
                dedup.dedup(solrUrl);
            }
        } else {
            LOG.warn("No URLs to fetch - check your seed list and URL filters.");
        }
        */

        if (LOG.isInfoEnabled()) { LOG.info("crawl finished: " + dir); }
        return 0;
    }
}
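To actually run this class, the seed directory /tmp/urls has to exist and contain at least one plain-text file listing one URL per line. The sketch below shows one way to prepare such a seed file and launch the tool programmatically, assuming a local file system; the launcher class name, the seed file name seed.txt and the example URL are illustrative assumptions, not part of the tutorial above.

import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;
import org.apache.nutch.util.NutchConfiguration;

public class CrawlLauncher {

    public static void main(String[] args) throws Exception {
        // Write a minimal seed list: one URL per line.
        // The file name and URL are assumptions used only for illustration.
        java.nio.file.Path seedDir = Paths.get("/tmp/urls");
        Files.createDirectories(seedDir);
        try (PrintWriter out = new PrintWriter(Files.newBufferedWriter(
                seedDir.resolve("seed.txt"), StandardCharsets.UTF_8))) {
            out.println("http://nutch.apache.org/");
        }

        // Launch the Crawl tool the same way its own main() does: load the
        // Nutch configuration (nutch-default.xml / nutch-site.xml from the
        // classpath) and hand the tool to ToolRunner.
        Configuration conf = NutchConfiguration.create();
        int res = ToolRunner.run(conf, new Crawl(), new String[0]);
        System.exit(res);
    }
}

If the Hadoop configuration points fs.defaultFS at an HDFS cluster rather than the local file system, /tmp/urls resolves on HDFS, so the seed file would instead need to be uploaded there (for example with hadoop fs -put) before the crawl starts.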
