Recent progress learning Nutch

It has been a little over three days since I started looking at Nutch. Writing this down as a summary.

Almost all the code I wrote before has been thrown away. I'm used to it; that's just how learning goes. My friends think I'm improving quickly, but I'm still anxious: there is so much I don't know, and I often feel like I'm worthless. Lately I keep losing sleep, getting up in the middle of the night to debug programs and read code. Your own effort decides your own future. Once this busy stretch is over, I want to take the CFA exam; I can't just play with programs forever. I'm 22 now, and I should plan ahead and be ready for the opportunities and challenges to come. On the programming side, the plan is to get the Nutch code fully sorted out, then move on to data mining. A~!za~za~! Fighting! Don't worry about whether what you're doing is right or wrong; just keep at it.

The main work is done by the bin/nutch script, which dispatches each command to the class that implements it (excerpt):

if [ "$COMMAND" = "crawl" ] ; then
  CLASS=org.apache.nutch.crawl.Crawl
elif [ "$COMMAND" = "inject" ] ; then
  CLASS=org.apache.nutch.crawl.Injector
elif [ "$COMMAND" = "generate" ] ; then
  CLASS=org.apache.nutch.crawl.Generator
elif [ "$COMMAND" = "fetch" ] ; then
  CLASS=org.apache.nutch.fetcher.Fetcher
elif [ "$COMMAND" = "parse" ] ; then
  CLASS=org.apache.nutch.parse.ParseSegment
elif [ "$COMMAND" = "readdb" ] ; then
  CLASS=org.apache.nutch.crawl.CrawlDbReader
elif [ "$COMMAND" = "mergedb" ] ; then
  CLASS=org.apache.nutch.crawl.CrawlDbMerger
elif [ "$COMMAND" = "readlinkdb" ] ; then
  CLASS=org.apache.nutch.crawl.LinkDbReader
elif [ "$COMMAND" = "segread" ] ; then
  CLASS=org.apache.nutch.segment.SegmentReader
elif [ "$COMMAND" = "mergesegs" ] ; then
  CLASS=org.apache.nutch.segment.SegmentMerger
elif [ "$COMMAND" = "updatedb" ] ; then
  CLASS=org.apache.nutch.crawl.CrawlDb
elif [ "$COMMAND" = "invertlinks" ] ; then
  CLASS=org.apache.nutch.crawl.LinkDb
elif [ "$COMMAND" = "mergelinkdb" ] ; then
  CLASS=org.apache.nutch.crawl.LinkDbMerger
elif [ "$COMMAND" = "index" ] ; then
  CLASS=org.apache.nutch.indexer.Indexer
elif [ "$COMMAND" = "dedup" ] ; then
  CLASS=org.apache.nutch.indexer.DeleteDuplicates
elif [ "$COMMAND" = "merge" ] ; then
  CLASS=org.apache.nutch.indexer.IndexMerger
elif [ "$COMMAND" = "plugin" ] ; then
  CLASS=org.apache.nutch.plugin.PluginRepository
elif [ "$COMMAND" = "server" ] ; then
  CLASS='org.apache.nutch.searcher.DistributedSearch$Server' 
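
Just to make the dispatch explicit before moving to Windows, here is a rough Java equivalent. NutchLauncher is a hypothetical name, not part of Nutch; it looks the command up in a table built from the script above and hands the remaining arguments to that class's main():

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of what bin/nutch does: pick the class for a command
// and pass the remaining arguments to its main(). The class names are the
// ones listed in the script above (Nutch 0.8).
public class NutchLauncher {
  private static final Map<String, String> COMMANDS = new HashMap<String, String>();
  static {
    COMMANDS.put("crawl", "org.apache.nutch.crawl.Crawl");
    COMMANDS.put("inject", "org.apache.nutch.crawl.Injector");
    COMMANDS.put("fetch", "org.apache.nutch.fetcher.Fetcher");
    COMMANDS.put("index", "org.apache.nutch.indexer.Indexer");
    // ... the remaining commands follow the same pattern
  }

  public static void main(String[] args) throws Exception {
    String className = COMMANDS.get(args[0]);
    if (className == null) {
      System.err.println("Unknown command: " + args[0]);
      System.exit(1);
    }
    String[] rest = Arrays.copyOfRange(args, 1, args.length);
    Class.forName(className).getMethod("main", String[].class)
         .invoke(null, (Object) rest);
  }
}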

This can be turned into a bat file and run on Windows (note that ';' is the classpath separator on Windows):
java -cp ./lib/commons-cli-2.0-SNAPSHOT.jar;./lib/commons-lang-2.1.jar;./lib/commons-logging-1.0.4.jar;./lib/commons-logging-api-1.0.4.jar;./lib/concurrent-1.3.4.jar;./lib/hadoop-0.4.0.jar;./lib/jakarta-oro-2.0.7.jar;./lib/jetty-5.1.4.jar;./lib/junit-3.8.1.jar;./lib/lucene-core-1.9.1.jar;./lib/log4j-1.2.13.jar;./lib/lucene-misc-1.9.1.jar;./lib/servlet-api.jar;./lib/taglibs-i18n.jar;./lib/xerces-2_6_2.jar;./lib/xerces-2_6_2-apis.jar;./lib/jetty-ext/ant.jar;./lib/jetty-ext/commons-el.jar;./lib/jetty-ext/jasper-compiler.jar;./lib/jetty-ext/jasper-runtime.jar;./lib/jetty-ext/jsp-api.jar;./lib/pmd-ext/jakarta-oro-2.0.8.jar;./lib/pmd-ext/jaxen-1.1-beta-7.jar;./lib/pmd-ext/pmd-3.6.jar;../nutch-0.8.jar;. org.apache.nutch.crawl.Crawl urls -dir WebDb -depth 5
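
The crawl command can also be driven from Java directly: the class below reproduces, step by step, what org.apache.nutch.crawl.Crawl does.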

import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.nutch.util.NutchConfiguration;
import org.apache.nutch.util.NutchJob;
import org.apache.nutch.parse.ParseSegment;
import org.apache.nutch.crawl.CrawlDb;
import org.apache.nutch.crawl.Generator;
import org.apache.nutch.crawl.Injector;
import org.apache.nutch.crawl.LinkDb;
import org.apache.nutch.fetcher.Fetcher;
import org.apache.nutch.indexer.DeleteDuplicates;
import org.apache.nutch.indexer.IndexMerger;
import org.apache.nutch.indexer.Indexer;


public class Crawls {

  // Timestamp used to name the temporary working directory.
  private static String getDate() {
    return new SimpleDateFormat("yyyyMMddHHmmss").format(new Date(System.currentTimeMillis()));
  }
  public static void main(String[] args) throws Exception {

    // Load the default Nutch configuration plus the crawler-specific
    // overrides from crawl-tool.xml.
    Configuration config = NutchConfiguration.create();
    config.addDefaultResource("crawl-tool.xml");

    JobConf job = new NutchJob(config);
    System.out.println("################################################################");
    System.out.println(job.getJobName());
    // All crawl output goes under this directory.
    String dir = "/xtwtx";
    Path crawlDb = new Path(dir + "/crawldb");
    Path linkDb = new Path(dir + "/linkdb");
    Path segments = new Path(dir + "/segments");
    Path indexes = new Path(dir + "/indexes");
    Path index = new Path(dir + "/index");
    System.out.println("################################################################");
    System.out.println("Output directories under " + dir);
    int threads = job.getInt("fetcher.threads.fetch", 100); // number of fetcher threads
    int depth = 20;                                         // crawl depth (number of fetch rounds)
    int topN = Integer.MAX_VALUE;                           // max URLs fetched per round

    FileSystem fs = FileSystem.get(job);   // the local file system in this setup
    Path rootUrlDir = new Path("urls");    // directory holding the seed URL list

    Path tmpDir = job.getLocalPath("crawl" + Path.SEPARATOR + getDate()); // temporary directory
    System.out.println(tmpDir);
     
    new Injector(job).inject(crawlDb, rootUrlDir); // inject the seed URLs into the crawldb

    for (int i = 0; i < depth; i++) { // one fetch round per level of depth
      // generate a segment of URLs due to be fetched
      Path segment = new Generator(job).generate(crawlDb, segments, -1, topN, System.currentTimeMillis());
      new Fetcher(job).fetch(segment, threads, Fetcher.isParsing(job)); // fetch it
      if (!Fetcher.isParsing(job)) {
        new ParseSegment(job).parse(segment); // parse it, if needed
      }
      new CrawlDb(job).update(crawlDb, segment); // update the crawldb with what was fetched
    }
    // fetching ends here; now build the link database and the indexes
    new LinkDb(job).invert(linkDb, segments); // invert links
    // index, dedup & merge
    new Indexer(job).index(indexes, crawlDb, linkDb, fs.listPaths(segments));
    new DeleteDuplicates(job).dedup(new Path[] { indexes });
    new IndexMerger(fs, fs.listPaths(indexes), index, tmpDir, job).merge();
  }
}
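
For comparison, the stock org.apache.nutch.crawl.Crawl does not hardcode dir, depth and topN; it reads them from the command line (Crawl <urlDir> [-dir d] [-threads n] [-depth i] [-topN N], which is how "Crawl urls -dir WebDb -depth 5" above works). A simplified sketch of that argument handling, not the exact Nutch source:

// Simplified sketch (not the actual Nutch source) of how Crawl reads the
// same settings from its arguments instead of hardcoding them.
public class CrawlArgs {
  public static void main(String[] args) {
    String dir = "crawl";         // output directory, overridden by -dir
    int threads = 10;             // fetcher threads, overridden by -threads
    int depth = 5;                // fetch rounds, overridden by -depth
    int topN = Integer.MAX_VALUE; // URLs per round, overridden by -topN
    for (int i = 1; i < args.length; i++) { // args[0] is the seed url dir
      if ("-dir".equals(args[i])) dir = args[++i];
      else if ("-threads".equals(args[i])) threads = Integer.parseInt(args[++i]);
      else if ("-depth".equals(args[i])) depth = Integer.parseInt(args[++i]);
      else if ("-topN".equals(args[i])) topN = Integer.parseInt(args[++i]);
    }
    System.out.println("dir=" + dir + " threads=" + threads
        + " depth=" + depth + " topN=" + topN);
  }
}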

Installing Cygwin is actually optional: things work just as well if you convert the sh scripts into bat files. Below is a merge script (run later as merg.sh) that combines two existing crawls into a new one:

if [ -n "$1" ]
then
  crawl_dir=$1
  if [ -d $1 ]; then
    echo "error: crawl already exists: '$1'"
    exit 1
  fi
else
  echo "Usage: bin/mergecrawl newcrawl-path crawl1-path crawl2-path, USE ABSOLUTE PATHS"
  exit 1
fi

if [ -n "$2" ]
then
  crawl_1=$2
else
  echo "Usage: bin/mergecrawl newcrawl-path crawl1-path crawl2-path, USE ABSOLUTE PATHS"
  exit 1
fi

if [ -n "$3" ]
then
  crawl_2=$3
else
  echo "Usage: bin/mergecrawl newcrawl-path crawl1-path crawl2-path, USE ABSOLUTE PATHS"
  exit 1
fi


#Sets the path to bin
nutch_dir=`dirname $0`

echo "Creating new crawl in: " $crawl_dir
mkdir $crawl_dir
webdb_dir=$crawl_dir/crawldb
segments_dir=$crawl_dir/segments
linkdb_dir=$crawl_dir/linkdb
index_dir=$crawl_dir/index

echo Merge linkdb
$nutch_dir/nutch mergelinkdb $linkdb_dir $crawl_1/linkdb $crawl_2/linkdb

echo Merge crawldb
$nutch_dir/nutch mergedb $webdb_dir $crawl_1/crawldb $crawl_2/crawldb

echo Merge segments
segments_1=`ls -d $crawl_1/segments/*`
 #echo 1 $segments_1
segments_2=`ls -d $crawl_2/segments/*`
 #echo 2 $segments_2
$nutch_dir/nutch mergesegs $segments_dir $segments_1 $segments_2


# From there, identical to recrawl.sh

echo Update segments
$nutch_dir/nutch invertlinks $linkdb_dir -dir $segments_dir

echo Index segments
new_indexes=$crawl_dir/newindexes
segment=`ls -d $segments_dir/* | tail -1`
$nutch_dir/nutch index $new_indexes $webdb_dir $linkdb_dir $segment

echo De-duplicate indexes
$nutch_dir/nutch dedup $new_indexes

echo Merge indexes
$nutch_dir/nutch merge $index_dir $new_indexes

echo Some stats
$nutch_dir/nutch readdb $webdb_dir -stats

This merges the webdb files from the two crawls into a single new crawl.

A sample run:

$ ./merg.sh E:/index/1111 E:/index/WebDb E:/index/xtwtx
Creating new crawl in:  E:/index/1111
Merge linkdb
Merge crawldb
Merge segments
Update segments
Index segments
De-duplicate indexes
Merge indexes
Some stats
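
The same sequence can also be driven from Java by calling the main() of each class that bin/nutch dispatches to (see the table at the top); that is exactly what the script does via "java $CLASS". A minimal sketch, using the paths from the sample run above:

// Minimal sketch: drive the merge from Java by invoking the main() of the
// classes bin/nutch maps to. MergeCrawls is a hypothetical name; the paths
// are the ones from the sample run above.
public class MergeCrawls {
  public static void main(String[] args) throws Exception {
    String newCrawl = "E:/index/1111";
    String crawl1 = "E:/index/WebDb";
    String crawl2 = "E:/index/xtwtx";
    org.apache.nutch.crawl.LinkDbMerger.main(new String[] {
        newCrawl + "/linkdb", crawl1 + "/linkdb", crawl2 + "/linkdb" });
    org.apache.nutch.crawl.CrawlDbMerger.main(new String[] {
        newCrawl + "/crawldb", crawl1 + "/crawldb", crawl2 + "/crawldb" });
    // The segment merge, invertlinks, index, dedup and merge steps follow
    // the same pattern, with the arguments the script passes.
  }
}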