JobConf mergeJob = CrawlDb.createJob(getConf(), crawlDb);
// note: tempDir, the output of the previous MapReduce job, is the input of this one
FileInputFormat.addInputPath(mergeJob, tempDir);
mergeJob.setReducerClass(InjectReducer.class);
JobClient.runJob(mergeJob);
CrawlDb.install(mergeJob, crawlDb);
public void configure(JobConf job) {
  // default fetch interval: 2592000 s = 30 days
  interval = job.getInt("db.fetch.interval.default", 2592000);
  // initial score assigned to injected urls
  scoreInjected = job.getFloat("db.score.injected", 1.0f);
  // overwrite existing records with the injected ones?
  overwrite = job.getBoolean("db.injector.overwrite", false);
  // merge injected metadata/score into existing records?
  update = job.getBoolean("db.injector.update", false);
}
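All four knobs live in the Nutch configuration. As a minimal sketch (the property keys come from configure() above; the crawldb and seed paths are made up), they can be overridden programmatically before an inject run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.nutch.crawl.Injector;
import org.apache.nutch.util.NutchConfiguration;

public class InjectDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = NutchConfiguration.create();
    conf.setBoolean("db.injector.update", true);    // merge re-injected urls into existing records
    conf.setBoolean("db.injector.overwrite", false);
    conf.setFloat("db.score.injected", 0.5f);       // start injected urls at a lower score
    conf.setInt("db.fetch.interval.default", 7 * 24 * 3600); // refetch weekly instead of monthly
    // hypothetical paths: the crawldb to update and a directory of seed url files
    new Injector(conf).inject(new Path("crawl/crawldb"), new Path("urls"));
  }
}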
Its main job is filtering and normalizing urls:
public void map(Text key, CrawlDatum value,
    OutputCollector<Text, CrawlDatum> output, Reporter reporter)
    throws IOException {
  String url = key.toString();
  // https://issues.apache.org/jira/browse/NUTCH-1101
  // check status first, cheaper than normalizing or filtering
  if (url404Purging && CrawlDatum.STATUS_DB_GONE == value.getStatus()) {
    url = null;
  }
  if (urlNormalizers) {
    try {
      url = normalizers.normalize(url, scope); // normalize the url
    } catch (Exception e) {
      LOG.warn("Skipping " + url + ":" + e);
      url = null;
    }
  }
  if (url != null && urlFiltering) {
    try {
      url = filters.filter(url); // filter the url
    } catch (Exception e) {
      LOG.warn("Skipping " + url + ":" + e);
      url = null;
    }
  }
  if (url != null) { // if it passes
    newKey.set(url); // collect it
    output.collect(newKey, value);
  }
}
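Note the order: the url is normalized first so the filter plugins see the canonical form. A self-contained sketch of the same normalize-then-filter pipeline (URLNormalizers and URLFilters are the real Nutch classes; the input url is made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.nutch.net.URLFilters;
import org.apache.nutch.net.URLNormalizers;
import org.apache.nutch.util.NutchConfiguration;

public class NormalizeThenFilter {
  public static void main(String[] args) throws Exception {
    Configuration conf = NutchConfiguration.create();
    URLNormalizers normalizers =
        new URLNormalizers(conf, URLNormalizers.SCOPE_CRAWLDB);
    URLFilters filters = new URLFilters(conf);
    String url = "HTTP://Example.COM/a/../index.html"; // made-up input
    url = normalizers.normalize(url, URLNormalizers.SCOPE_CRAWLDB);
    url = filters.filter(url); // returns null when a filter rejects the url
    System.out.println(url == null ? "rejected" : url);
  }
}

CrawlDb.createJob wires this CrawlDbFilter mapper into the merge job: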
public static JobConf createJob(Configuration config, Path crawlDb)
    throws IOException {
  // output goes to a randomly named directory under the crawldb
  Path newCrawlDb = new Path(crawlDb,
      Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));
  JobConf job = new NutchJob(config);
  job.setJobName("crawldb " + crawlDb);
  Path current = new Path(crawlDb, CURRENT_NAME);
  if (FileSystem.get(job).exists(current)) {
    FileInputFormat.addInputPath(job, current);
  }
  job.setInputFormat(SequenceFileInputFormat.class);
  job.setMapperClass(CrawlDbFilter.class);
  job.setReducerClass(CrawlDbReducer.class);
  FileOutputFormat.setOutputPath(job, newCrawlDb);
  job.setOutputFormat(MapFileOutputFormat.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(CrawlDatum.class);
  // https://issues.apache.org/jira/browse/NUTCH-1110
  // Hadoop setting: do not write a _SUCCESS marker file on job completion
  job.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", false);
  return job;
}
public void reduce(Text key, Iterator<CrawlDatum> values,
    OutputCollector<Text, CrawlDatum> output, Reporter reporter)
    throws IOException {
  boolean oldSet = false;
  boolean injectedSet = false;
  while (values.hasNext()) {
    CrawlDatum val = values.next();
    // decide whether this url is freshly injected or already known
    if (val.getStatus() == CrawlDatum.STATUS_INJECTED) {
      injected.set(val);
      injected.setStatus(CrawlDatum.STATUS_DB_UNFETCHED);
      injectedSet = true;
    } else {
      old.set(val);
      oldSet = true;
    }
  }
  CrawlDatum res = null;
  /**
   * Whether to overwrite, ignore or update existing records
   * @see https://issues.apache.org/jira/browse/NUTCH-1405
   */
  // Injected record already exists: overwrite, but do not update
  if (injectedSet && oldSet && overwrite) {
    res = injected;
    if (update) {
      LOG.info(key.toString() + " overwritten with injected record but update was specified.");
    }
  }
  // Injected record already exists: update, but do not overwrite
  if (injectedSet && oldSet && update && !overwrite) {
    res = old;
    old.putAllMetaData(injected);
    old.setScore(injected.getScore() != scoreInjected
        ? injected.getScore() : old.getScore());
    old.setFetchInterval(injected.getFetchInterval() != interval
        ? injected.getFetchInterval() : old.getFetchInterval());
  }
  // Old default behaviour
  if (injectedSet && !oldSet) {
    res = injected;
  } else {
    res = old;
  }
  output.collect(key, res);
}
In short, the reducer merges the freshly injected data with the existing CrawlDb entries and decides whether the historical records get updated.
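The intended interplay of the two flags (see NUTCH-1405) can be condensed into a small decision helper (hypothetical code, not part of Nutch). Note that in the snapshot quoted above, the final "old default behaviour" block resets res to old whenever an old record exists, which appears to defeat the overwrite branch:

/** Hypothetical helper summarizing the NUTCH-1405 branches. */
public class InjectPolicy {
  /** Which record wins when an injected url may already exist in the CrawlDb. */
  static String resolve(boolean exists, boolean overwrite, boolean update) {
    if (!exists) return "injected";                  // brand-new url
    if (overwrite) return "injected";                // replace the old record wholesale
    if (update) return "old, merged with injected";  // metadata/score/interval merged in
    return "old";                                    // default: re-injection is ignored
  }
}

Finally, CrawlDb.install swaps the new database into place: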
public static void install(JobConf job, Path crawlDb) throws IOException {
  boolean preserveBackup = job.getBoolean("db.preserve.backup", true);
  Path newCrawlDb = FileOutputFormat.getOutputPath(job);
  FileSystem fs = new JobClient(job).getFs();
  Path old = new Path(crawlDb, "old");
  Path current = new Path(crawlDb, CURRENT_NAME);
  // rotate: the existing "current" becomes "old"
  if (fs.exists(current)) {
    if (fs.exists(old)) fs.delete(old, true);
    fs.rename(current, old);
  }
  fs.mkdirs(crawlDb);
  // move the job output into place as the new "current"
  fs.rename(newCrawlDb, current);
  if (!preserveBackup && fs.exists(old)) fs.delete(old, true);
  // release the crawldb lock taken at the start of the update
  Path lock = new Path(crawlDb, LOCK_NAME);
  LockUtil.removeLockFile(fs, lock);
}
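So install() rotates directories rather than merging in place. Assuming a crawldb at crawl/crawldb (in CrawlDb, CURRENT_NAME is "current" and LOCK_NAME is ".locked"), the layout after a successful run looks roughly like this:

crawl/crawldb/
    current/   <- the job output (newCrawlDb) renamed into place
    old/       <- the previous "current"; kept only while db.preserve.backup=true

The .locked file guarding the update is removed as the final step.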
CrawlDbFilter mainly filters and normalizes urls.
CrawlDbReducer aggregates all values (the old CrawlDatum and the newly produced ones) that share the same url. The code is rather involved, so let's walk through it:
public void reduce(Text key, Iterator<CrawlDatum> values,
OutputCollector<Text, CrawlDatum> output, Reporter reporter)
throws IOException {
CrawlDatum fetch = new CrawlDatum();
CrawlDatum old = new CrawlDatum();
boolean fetchSet = false;
boolean oldSet = false;
byte[] signature = null;
boolean multiple = false; // avoid deep copy when only single value exists
linked.clear();
org.apache.hadoop.io.MapWritable metaFromParse = null;
// This loop iterates over all CrawlDatum values for the same url,
// filling in the old and fetch variables and collecting the link
// records into a priority queue ordered by score.
while (values.hasNext()) {
CrawlDatum datum = (CrawlDatum)values.next();
// decide whether CrawlDatum values must be deep-copied
if (!multiple && values.hasNext()) multiple = true;
// does this CrawlDatum carry a db status, i.e. STATUS_DB_(UNFETCHED|FETCHED|GONE|REDIR_TEMP|REDIR_PERM|NOTMODIFIED)?
if (CrawlDatum.hasDbStatus(datum)) {
if (!oldSet) {
if (multiple) {
old.set(datum);
} else {
// no need for a deep copy - this is the only value
old = datum;
}
oldSet = true;
} else {
// always take the latest version
if (old.getFetchTime() < datum.getFetchTime()) old.set(datum);
}
continue;
}
// does this CrawlDatum carry a fetch status, i.e. STATUS_FETCH_(SUCCESS|RETRY|REDIR_TEMP|REDIR_PERM|GONE|NOTMODIFIED)?
if (CrawlDatum.hasFetchStatus(datum)) {
if (!fetchSet) {
if (multiple) {
fetch.set(datum);
} else {
fetch = datum;
}
fetchSet = true;
} else {
// always take the latest version
if (fetch.getFetchTime() < datum.getFetchTime()) fetch.set(datum);
}
continue;
}
// collect the remaining info according to the datum's status
switch (datum.getStatus()) {
// a link record pointing at this url: queue it, ordered by score (highest first)
case CrawlDatum.STATUS_LINKED:
CrawlDatum link;
if (multiple) {
link = new CrawlDatum();
link.set(datum);
} else {
link = datum;
}
linked.insert(link);
break;
case CrawlDatum.STATUS_SIGNATURE:
// grab its signature (content digest)
signature = datum.getSignature();
break;
case CrawlDatum.STATUS_PARSE_META:
// grab the metadata produced at parse time
metaFromParse = datum.getMetaData();
break;
default:
LOG.warn("Unknown status, key: " + key + ", datum: " + datum);
}
}
// copy the content of the queue into a List
// in reversed order
int numLinks = linked.size();
List<CrawlDatum> linkList = new ArrayList<CrawlDatum>(numLinks);
for (int i = numLinks - 1; i >= 0; i--) {
linkList.add(linked.pop());
}
// if no db status was seen (the url doesn't already exist in the db)
// and new additions are not allowed, skip it
if (!oldSet && !additionsAllowed) return;
// if there is no fetched datum, perhaps there is a link
// no fetch status was seen, but there is at least one link record
if (!fetchSet && linkList.size() > 0) {
fetch = linkList.get(0); // take the highest-scoring link
fetchSet = true;
}
// still no new data - record only unchanged old data, if exists, and return
// neither a fetch status nor any link record was seen: nothing new for this url
if (!fetchSet) {
// emit the old db entry unchanged if there is one, otherwise just return
if (oldSet) {// at this point at least "old" should be present
output.collect(key, old);
} else {
LOG.warn("Missing fetch and old value, signature=" + signature);
}
return;
}
// below, result is initialized with the latest CrawlDatum version
if (signature == null) signature = fetch.getSignature();
long prevModifiedTime = oldSet ? old.getModifiedTime() : 0L;
long prevFetchTime = oldSet ? old.getFetchTime() : 0L;
// initialize with the latest version, be it fetch or link
result.set(fetch);
if (oldSet) {
// copy metadata from old, if exists
if (old.getMetaData().size() > 0) {
result.putAllMetaData(old);
// overlay with new, if any
if (fetch.getMetaData().size() > 0)
result.putAllMetaData(fetch);
}
// set the most recent valid value of modifiedTime
if (old.getModifiedTime() > 0 && fetch.getModifiedTime() == 0) {
result.setModifiedTime(old.getModifiedTime());
}
}
// determine the page's new status
switch (fetch.getStatus()) {
case CrawlDatum.STATUS_LINKED: // it was link
if (oldSet) { // if old exists
result.set(old); // use it
} else {
result = schedule.initializeSchedule((Text)key, result);
result.setStatus(CrawlDatum.STATUS_DB_UNFETCHED);
try {
scfilters.initialScore((Text)key, result);
} catch (ScoringFilterException e) {
if (LOG.isWarnEnabled()) {
LOG.warn("Cannot filter init score for url " + key +
", using default: " + e.getMessage());
}
result.setScore(0.0f);
}
}
break;
case CrawlDatum.STATUS_FETCH_SUCCESS: // successful fetch
case CrawlDatum.STATUS_FETCH_REDIR_TEMP: // successful fetch, redirected
case CrawlDatum.STATUS_FETCH_REDIR_PERM:
case CrawlDatum.STATUS_FETCH_NOTMODIFIED: // successful fetch, notmodified
// determine the modification status
int modified = FetchSchedule.STATUS_UNKNOWN;
if (fetch.getStatus() == CrawlDatum.STATUS_FETCH_NOTMODIFIED) {
modified = FetchSchedule.STATUS_NOTMODIFIED;
} else {
if (oldSet && old.getSignature() != null && signature != null) {
if (SignatureComparator._compare(old.getSignature(), signature) != 0) {
modified = FetchSchedule.STATUS_MODIFIED;
} else {
modified = FetchSchedule.STATUS_NOTMODIFIED;
}
}
}
// set the schedule
result = schedule.setFetchSchedule((Text)key, result, prevFetchTime,
prevModifiedTime, fetch.getFetchTime(), fetch.getModifiedTime(), modified);
// set the result status and signature
if (modified == FetchSchedule.STATUS_NOTMODIFIED) {
result.setStatus(CrawlDatum.STATUS_DB_NOTMODIFIED);
if (oldSet) result.setSignature(old.getSignature());
} else {
switch (fetch.getStatus()) {
case CrawlDatum.STATUS_FETCH_SUCCESS:
result.setStatus(CrawlDatum.STATUS_DB_FETCHED);
break;
case CrawlDatum.STATUS_FETCH_REDIR_PERM:
result.setStatus(CrawlDatum.STATUS_DB_REDIR_PERM);
break;
case CrawlDatum.STATUS_FETCH_REDIR_TEMP:
result.setStatus(CrawlDatum.STATUS_DB_REDIR_TEMP);
break;
default:
LOG.warn("Unexpected status: " + fetch.getStatus() + " resetting to old status.");
if (oldSet) result.setStatus(old.getStatus());
else result.setStatus(CrawlDatum.STATUS_DB_UNFETCHED);
}
result.setSignature(signature);
if (metaFromParse != null) {
for (Entry<Writable, Writable> e : metaFromParse.entrySet()) {
result.getMetaData().put(e.getKey(), e.getValue());
}
}
}
// if fetchInterval is larger than the system-wide maximum, trigger
// an unconditional recrawl. This prevents the page to be stuck at
// NOTMODIFIED state, when the old fetched copy was already removed with
// old segments.
if (maxInterval < result.getFetchInterval())
result = schedule.forceRefetch((Text)key, result, false);
break;
case CrawlDatum.STATUS_SIGNATURE:
if (LOG.isWarnEnabled()) {
LOG.warn("Lone CrawlDatum.STATUS_SIGNATURE: " + key);
}
return;
case CrawlDatum.STATUS_FETCH_RETRY: // temporary failure
if (oldSet) {
result.setSignature(old.getSignature()); // use old signature
}
result = schedule.setPageRetrySchedule((Text)key, result, prevFetchTime,
prevModifiedTime, fetch.getFetchTime());
if (result.getRetriesSinceFetch() < retryMax) {
result.setStatus(CrawlDatum.STATUS_DB_UNFETCHED);
} else {
result.setStatus(CrawlDatum.STATUS_DB_GONE);
}
break;
case CrawlDatum.STATUS_FETCH_GONE: // permanent failure
if (oldSet)
result.setSignature(old.getSignature()); // use old signature
result.setStatus(CrawlDatum.STATUS_DB_GONE);
result = schedule.setPageGoneSchedule((Text)key, result, prevFetchTime,
prevModifiedTime, fetch.getFetchTime());
break;
default:
throw new RuntimeException("Unknown status: " + fetch.getStatus() + " " + key);
}
// update the result's score via the scoring filters
try {
scfilters.updateDbScore((Text)key, oldSet ? old : null, result, linkList);
} catch (Exception e) {
if (LOG.isWarnEnabled()) {
LOG.warn("Couldn't update score, key=" + key + ": " + e);
}
}
// remove generation time, if any
result.getMetaData().remove(Nutch.WRITABLE_GENERATE_TIME_KEY);
output.collect(key, result); // emit the merged result
}
}
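To recap the big switch, the reducer maps the winning fetch status onto a new db status roughly as follows (signature comparison may downgrade a successful fetch to NOTMODIFIED):

STATUS_LINKED            -> keep the old record, or STATUS_DB_UNFETCHED for a new url
STATUS_FETCH_SUCCESS     -> STATUS_DB_FETCHED
STATUS_FETCH_REDIR_TEMP  -> STATUS_DB_REDIR_TEMP
STATUS_FETCH_REDIR_PERM  -> STATUS_DB_REDIR_PERM
STATUS_FETCH_NOTMODIFIED -> STATUS_DB_NOTMODIFIED
STATUS_FETCH_RETRY       -> STATUS_DB_UNFETCHED while retries < retryMax, else STATUS_DB_GONE
STATUS_FETCH_GONE        -> STATUS_DB_GONE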
The overall flow, then, is to merge the three input directories, aggregate the CrawlDatum values that share the same url into a new CrawlDatum, and write the result back into the original database.
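For reference, this merge is exactly what the updatedb command drives; a typical invocation (the segment name below is just an example) would be:

bin/nutch updatedb crawl/crawldb crawl/segments/20230101120000

Its -noAdditions flag maps onto the additionsAllowed check in the reducer (property db.update.additions.allowed).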