import java.util.*;
import java.text.*;
import org.apache.commons.lang.StringUtils;
// SLF4J logging imports
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.nutch.crawl.CrawlDb;
import org.apache.nutch.crawl.Generator;
import org.apache.nutch.crawl.Injector;
import org.apache.nutch.crawl.LinkDb;
import org.apache.nutch.parse.ParseSegment;
import org.apache.nutch.indexer.IndexingJob;
//import org.apache.nutch.indexer.solr.SolrDeleteDuplicates;
import org.apache.nutch.util.HadoopFSUtil;
import org.apache.nutch.util.NutchConfiguration;
import org.apache.nutch.util.NutchJob;
import org.apache.nutch.fetcher.Fetcher;
public class Crawl extends Configured implements Tool {

  public static final Logger LOG = LoggerFactory.getLogger(Crawl.class);
  private static String getDate() {
    return new SimpleDateFormat("yyyyMMddHHmmss")
        .format(new Date(System.currentTimeMillis()));
  }
  /* Perform a complete crawl given a set of root URLs. Unlike the stock Nutch
     Crawl tool, all parameters (seed directory, crawl directory, threads, depth,
     topN) are hardcoded below, and the Solr indexing step is disabled. */
  public static void main(String[] args) throws Exception {
    Configuration conf = NutchConfiguration.create();
    int res = ToolRunner.run(conf, new Crawl(), args);
    System.exit(res);
  }
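
  // Example invocation, assuming this class is packaged into the Nutch job jar
  // and run on a machine with Hadoop configured (the jar name below is
  // illustrative, not part of a stock Nutch distribution):
  //
  //   hadoop jar apache-nutch-1.x.job Crawl
  //
  // Generic Hadoop options (e.g. -D property overrides) are handled by ToolRunner.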
  @Override
  public int run(String[] args) throws Exception {
    /* directory containing the seed URL list */
    Path rootUrlDir = new Path("/tmp/urls");
    /* directory where the crawl data is stored */
    Path dir = new Path("/tmp", "crawl-" + getDate());
    int threads = 50;
    /* depth of the breadth-first crawl, i.e. the number of levels of the traversal tree */
    int depth = 2;
    long topN = 10;
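
    // In the stock Nutch Crawl tool these values come from command-line flags.
    // A minimal sketch of reading them from args instead, assuming the usual
    // "-threads N -depth N -topN N" flag names (left disabled here so the
    // hardcoded values above stay in effect):
    //
    //   for (int k = 0; k < args.length - 1; k++) {
    //     if ("-threads".equals(args[k])) threads = Integer.parseInt(args[++k]);
    //     else if ("-depth".equals(args[k])) depth = Integer.parseInt(args[++k]);
    //     else if ("-topN".equals(args[k])) topN = Long.parseLong(args[++k]);
    //   }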
    JobConf job = new NutchJob(getConf());
    FileSystem fs = FileSystem.get(job);
    if (LOG.isInfoEnabled()) {
      LOG.info("crawl started in: " + dir);
      LOG.info("rootUrlDir = " + rootUrlDir);
      LOG.info("threads = " + threads);
      LOG.info("depth = " + depth);
      if (topN != Long.MAX_VALUE)
        LOG.info("topN = " + topN);
    }
    Path crawlDb = new Path(dir + "/crawldb");
    Path linkDb = new Path(dir + "/linkdb");
    Path segments = new Path(dir + "/segments");
    // indexes, index and tmpDir are retained from the stock Crawl tool but unused here
    Path indexes = new Path(dir + "/indexes");
    Path index = new Path(dir + "/index");
    Path tmpDir = job.getLocalPath("crawl" + Path.SEPARATOR + getDate());
    Injector injector = new Injector(getConf());
    Generator generator = new Generator(getConf());
    Fetcher fetcher = new Fetcher(getConf());
    ParseSegment parseSegment = new ParseSegment(getConf());
    CrawlDb crawlDbTool = new CrawlDb(getConf());
    LinkDb linkDbTool = new LinkDb(getConf());
    // initialize crawlDb
    injector.inject(crawlDb, rootUrlDir);
    int i;
    for (i = 0; i < depth; i++) { // generate new segment
      Path[] segs = generator.generate(crawlDb, segments, -1, topN,
          System.currentTimeMillis());
      if (segs == null) {
        LOG.info("Stopping at depth=" + i + " - no more URLs to fetch.");
        break;
      }
      fetcher.fetch(segs[0], threads); // fetch it
      if (!Fetcher.isParsing(job)) {
        parseSegment.parse(segs[0]); // parse it, if needed
      }
      crawlDbTool.update(crawlDb, segs, true, true); // update crawldb
    }
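
    // Each loop iteration above runs one round of the standard Nutch cycle:
    //   generate -> select up to topN of the best-scoring due URLs into a new segment
    //   fetch    -> download the pages of that segment with `threads` fetcher threads
    //   parse    -> extract text and outlinks (skipped if the fetcher parses inline)
    //   update   -> fold fetch results and newly discovered links back into the crawldb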
    /*
    if (i > 0) {
      linkDbTool.invert(linkDb, segments, true, true, false); // invert links
      if (solrUrl != null) {
        // index, dedup & merge
        FileStatus[] fstats = fs.listStatus(segments, HadoopFSUtil.getPassDirectoriesFilter(fs));
        IndexingJob indexer = new IndexingJob(getConf());
        indexer.index(crawlDb, linkDb,
            Arrays.asList(HadoopFSUtil.getPaths(fstats)));
        SolrDeleteDuplicates dedup = new SolrDeleteDuplicates();
        dedup.setConf(getConf());
        dedup.dedup(solrUrl);
      }
    } else {
      LOG.warn("No URLs to fetch - check your seed list and URL filters.");
    }
    */
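
    // To re-enable the indexing block above, solrUrl must be defined; a minimal
    // sketch, assuming the Solr endpoint is supplied via the "solr.server.url"
    // property (e.g. with a -D override on the command line):
    //
    //   String solrUrl = getConf().get("solr.server.url");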
    if (LOG.isInfoEnabled()) { LOG.info("crawl finished: " + dir); }
    return 0;
  }
}