namenode 加内存的操作实践

1. 在 10.200.91.164 上停止服务
  hbase-daemon.sh stop master
  hadoop-daemon.sh stop namenode
  hadoop-daemon.sh stop zkfc



2. 机器重启之后,在 10.200.91.164 上修改参数并启动服务
  vim conf/hadoop-env.sh
  hbase-daemon.sh start master
  hadoop-daemon.sh start namenode
  hadoop-daemon.sh start zkfc
 

3.切换namenode
 bin/hdfs haadmin -failover nn1 nn2
   

4. 在 10.100.91.36 上执行以下操作
sh bin/slave_cmd1_nocheck.sh "hadoop-daemon.sh stop tasktracker"
sh bin/slave_cmd2_nocheck.sh "hadoop-daemon.sh stop tasktracker"
sh bin/slave_cmd3_nocheck.sh "hadoop-daemon.sh stop tasktracker"
  hbase-daemon.sh stop master
  hadoop-daemon.sh stop zkfc
  hadoop-daemon.sh stop namenode
  hadoop-daemon.sh stop jobtracker
nohup sh bin/slave_cmd1_nocheck.sh "mkdir -p /data/hadoop/data1/mapred/local/taskTracker;mkdir -p /data/hadoop/data2/mapred/local/taskTracker;mkdir -p /data/hadoop/data3/mapred/local/taskTracker;mkdir -p /data/hadoop/data4/mapred/local/taskTracker;mkdir -p /data/hadoop/data5/mapred/local/taskTracker;mkdir -p /data/hadoop/data6/mapred/local/taskTracker;mkdir -p /data/hadoop/data7/mapred/local/taskTracker;mkdir -p /data/hadoop/data8/mapred/local/taskTracker;mkdir -p /data/hadoop/data9/mapred/local/taskTracker;mkdir -p /data/hadoop/data10/mapred/local/taskTracker;mkdir -p /data/hadoop/data11/mapred/local/taskTracker;mkdir -p /data/hadoop/data12/mapred/local/taskTracker" > slave_cmd1_20141202_createDir.log 2>&1 &
nohup sh bin/slave_cmd2_nocheck.sh "mkdir -p /data/hadoop/data1/mapred/local/taskTracker;mkdir -p /data/hadoop/data2/mapred/local/taskTracker;mkdir -p /data/hadoop/data3/mapred/local/taskTracker;mkdir -p /data/hadoop/data4/mapred/local/taskTracker;mkdir -p /data/hadoop/data5/mapred/local/taskTracker;mkdir -p /data/hadoop/data6/mapred/local/taskTracker;mkdir -p /data/hadoop/data7/mapred/local/taskTracker;mkdir -p /data/hadoop/data8/mapred/local/taskTracker;mkdir -p /data/hadoop/data9/mapred/local/taskTracker;mkdir -p /data/hadoop/data10/mapred/local/taskTracker;mkdir -p /data/hadoop/data11/mapred/local/taskTracker;mkdir -p /data/hadoop/data12/mapred/local/taskTracker" > slave_cmd2_20141202_createDir.log 2>&1 &
nohup sh bin/slave_cmd3_nocheck.sh "mkdir -p /data/hadoop/data1/mapred/local/taskTracker;mkdir -p /data/hadoop/data2/mapred/local/taskTracker;mkdir -p /data/hadoop/data3/mapred/local/taskTracker;mkdir -p /data/hadoop/data4/mapred/local/taskTracker;mkdir -p /data/hadoop/data5/mapred/local/taskTracker;mkdir -p /data/hadoop/data6/mapred/local/taskTracker;mkdir -p /data/hadoop/data7/mapred/local/taskTracker;mkdir -p /data/hadoop/data8/mapred/local/taskTracker;mkdir -p /data/hadoop/data9/mapred/local/taskTracker;mkdir -p /data/hadoop/data10/mapred/local/taskTracker;mkdir -p /data/hadoop/data11/mapred/local/taskTracker;mkdir -p /data/hadoop/data12/mapred/local/taskTracker" > slave_cmd3_20141202_createDir.log 2>&1 &



#看http://10.200.91.164:60010/master-status ,确保当前结点成为master
5. 10.100.91.36 关机并重启
  sh bin/slave_cmd.sh "hadoop-daemon.sh stop tasktracker"
  vim conf/hadoop-env.sh
 
  hadoop-daemon.sh start jobtracker
  hbase-daemon.sh start master
  hadoop-daemon.sh start namenode
  hadoop-daemon.sh start zkfc
 
nohup sh bin/slave_cmd1_nocheck.sh "hadoop-daemon.sh start tasktracker" > slave_cmd1_20141202_start_tasktracker.log 2>&1 &
nohup sh bin/slave_cmd2_nocheck.sh "hadoop-daemon.sh start tasktracker" > slave_cmd2_20141202_start_tasktracker.log 2>&1 &
nohup sh bin/slave_cmd3_nocheck.sh "hadoop-daemon.sh start tasktracker" > slave_cmd3_20141202_start_tasktracker.log 2>&1 &

sh bin/remains_cmd.sh "hadoop-daemon.sh stop tasktracker"

nohup sh bin/remains_cmd.sh "mkdir -p /data/hadoop/data1/mapred/local/taskTracker;mkdir -p /data/hadoop/data2/mapred/local/taskTracker;mkdir -p /data/hadoop/data3/mapred/local/taskTracker;mkdir -p /data/hadoop/data4/mapred/local/taskTracker;mkdir -p /data/hadoop/data5/mapred/local/taskTracker;mkdir -p /data/hadoop/data6/mapred/local/taskTracker;mkdir -p /data/hadoop/data7/mapred/local/taskTracker;mkdir -p /data/hadoop/data8/mapred/local/taskTracker;mkdir -p /data/hadoop/data9/mapred/local/taskTracker;mkdir -p /data/hadoop/data10/mapred/local/taskTracker;mkdir -p /data/hadoop/data11/mapred/local/taskTracker;mkdir -p /data/hadoop/data12/mapred/local/taskTracker" > remains_cmd_20141202_createDir.log 2>&1 &
sh bin/remains_cmd.sh "hadoop-daemon.sh start tasktracker"





conf/hadoop-env.sh改之前的配置
export HADOOP_NAMENODE_OPTS="-Xmx11g -Xms5g -Xmn2g $JVM_OPTS -XX:ErrorFile=$HADOOP_LOG_DIR/nn_error_gc.log -Xloggc:$HADOOP_LOG_DIR/nn_gc.log -XX:HeapDumpPath=$HADOOP_LOG_DIR/nn_error.hprof"
export HADOOP_DATANODE_OPTS="-Xmx2g -Xms2g -Xmn1g  $JVM_OPTS -XX:ErrorFile=$HADOOP_LOG_DIR/dn_error_gc.log -Xloggc:$HADOOP_LOG_DIR/dn_gc.log -XX:HeapDumpPath=$HADOOP_LOG_DIR/dn_error.hprof "

export HADOOP_JOBTRACKER_OPTS="-Xmx5g -Xms4g -Xmn3g  -server -verbose:gc -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:-HeapDumpOnOutOfMemoryError -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=9 -XX:GCLogFileSize=20m -XX:+DisableExplicitGC -XX:+UseCompressedOops -XX:PermSize=160m -XX:MaxPermSize=160m -XX:GCTimeRatio=19 -XX:SoftRefLRUPolicyMSPerMB=0 -XX:SurvivorRatio=2 -XX:MaxTenuringThreshold=300 -XX:+UseFastAccessorMethods -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=0 -XX:+CMSClassUnloadingEnabled -XX:CMSMaxAbortablePrecleanTime=300 -XX:+CMSScavengeBeforeRemark -XX:ErrorFile=$HADOOP_LOG_DIR/jt_error_gc.log -Xloggc:$HADOOP_LOG_DIR/jt_gc.log -XX:HeapDumpPath=$HADOOP_LOG_DIR/jt_error.hprof "

conf/hadoop-env.sh改之后的配置
export HADOOP_NAMENODE_OPTS="-Xmx22g -Xms5g -Xmn4g $JVM_OPTS -XX:ErrorFile=$HADOOP_LOG_DIR/nn_error_gc.log -Xloggc:$HADOOP_LOG_DIR/nn_gc.log -XX:HeapDumpPath=$HADOOP_LOG_DIR/nn_error.hprof"
export HADOOP_DATANODE_OPTS="-Xmx2g -Xms2g -Xmn1g  $JVM_OPTS -XX:ErrorFile=$HADOOP_LOG_DIR/dn_error_gc.log -Xloggc:$HADOOP_LOG_DIR/dn_gc.log -XX:HeapDumpPath=$HADOOP_LOG_DIR/dn_error.hprof "

export HADOOP_JOBTRACKER_OPTS="-Xmx8g -Xms4g -Xmn3g  -server -verbose:gc -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:-HeapDumpOnOutOfMemoryError -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=9 -XX:GCLogFileSize=20m -XX:+DisableExplicitGC -XX:+UseCompressedOops -XX:PermSize=160m -XX:MaxPermSize=160m -XX:GCTimeRatio=19 -XX:SoftRefLRUPolicyMSPerMB=0 -XX:SurvivorRatio=2 -XX:MaxTenuringThreshold=300 -XX:+UseFastAccessorMethods -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=0 -XX:+CMSClassUnloadingEnabled -XX:CMSMaxAbortablePrecleanTime=300 -XX:+CMSScavengeBeforeRemark -XX:ErrorFile=$HADOOP_LOG_DIR/jt_error_gc.log -Xloggc:$HADOOP_LOG_DIR/jt_gc.log -XX:HeapDumpPath=$HADOOP_LOG_DIR/jt_error.hprof "







hadoop   23608  0.4  2.4 2821948 598428 ?      Sl   Sep02 541:35 /usr/local/java/bin/java -XX:OnOutOfMemoryError=kill -9 %p -Xmx1000m -Xmx2g -Xms2g -Xmn1g -server -verbose:gc -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+HeapDumpOnOutOfMemoryError -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=9 -XX:GCLogFileSize=20m -XX:+DisableExplicitGC -XX:+UseCompressedOops -XX:PermSize=160m -XX:MaxPermSize=160m -XX:GCTimeRatio=19 -XX:SoftRefLRUPolicyMSPerMB=0 -XX:SurvivorRatio=2 -XX:MaxTenuringThreshold=1 -XX:+UseFastAccessorMethods -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=0 -XX:+CMSClassUnloadingEnabled -XX:CMSMaxAbortablePrecleanTime=300 -XX:+CMSScavengeBeforeRemark -Xloggc:/usr/local/hbase/logs/thrift_gc.log -Dhbase.log.dir=/usr/local/hbase/logs -Dhbase.log.file=hbase-hadoop-thrift-hadoopNN1.com.log -Dhbase.home.dir=/usr/local/hbase -Dhbase.id.str=hadoop -Dhbase.root.logger=INFO,DRFA -Djava.library.path=/usr/local/hadoop/lib/native/Linux-amd64-64:/usr/local/hbase/lib/native/Linux-amd64-64 -Dhbase.security.logger=INFO,DRFAS -classpath 
/usr/local/hbase/conf:/usr/local/java/lib/tools.jar:/usr/local/hbase:/usr/local/hbase/hbase-0.94.14.jar:/usr/local/hbase/lib/activation-1.1.jar:/usr/local/hbase/lib/asm-3.1.jar:/usr/local/hbase/lib/avro-1.5.3.jar:/usr/local/hbase/lib/avro-ipc-1.5.3.jar:/usr/local/hbase/lib/commons-beanutils-1.7.0.jar:/usr/local/hbase/lib/commons-beanutils-core-1.8.0.jar:/usr/local/hbase/lib/commons-cli-1.2.jar:/usr/local/hbase/lib/commons-codec-1.4.jar:/usr/local/hbase/lib/commons-collections-3.2.1.jar:/usr/local/hbase/lib/commons-configuration-1.6.jar:/usr/local/hbase/lib/commons-digester-1.8.jar:/usr/local/hbase/lib/commons-el-1.0.jar:/usr/local/hbase/lib/commons-httpclient-3.1.jar:/usr/local/hbase/lib/commons-io-2.1.jar:/usr/local/hbase/lib/commons-lang-2.5.jar:/usr/local/hbase/lib/commons-logging-1.1.1.jar:/usr/local/hbase/lib/commons-math-2.1.jar:/usr/local/hbase/lib/commons-net-1.4.1.jar:/usr/local/hbase/lib/core-3.1.1.jar:/usr/local/hbase/lib/guava-11.0.2.jar:/usr/local/hbase/lib/hadoop-core-1.1.2.jar:/usr/local/hbase/lib/hadoop-hdfs-1.1.2.jar:/usr/local/hbase/lib/high-scale-lib-1.1.1.jar:/usr/local/hbase/lib/httpclient-4.1.2.jar:/usr/local/hbase/lib/httpcore-4.1.3.jar:/usr/local/hbase/lib/jackson-core-asl-1.8.8.jar:/usr/local/hbase/lib/jackson-jaxrs-1.8.8.jar:/usr/local/hbase/lib/jackson-mapper-asl-1.8.8.jar:/usr/local/hbase/lib/jackson-xc-1.8.8.jar:/usr/local/hbase/lib/jamon-runtime-2.3.1.jar:/usr/local/hbase/lib/jaxb-api-2.1.jar:/usr/local/hbase/lib/jaxb-impl-2.2.3-1.jar:/usr/local/hbase/lib/jersey-core-1.8.jar:/usr/local/hbase/lib/jersey-json-1.8.jar:/usr/local/hbase/lib/jersey-server-1.8.jar:/usr/local/hbase/lib/jettison-1.1.jar:/usr/local/hbase/lib/jetty-6.1.26.jar:/usr/local/hbase/lib/jetty-util-6.1.26.jar:/usr/local/hbase/lib/jruby-complete-1.6.5.jar:/usr/local/hbase/lib/jsp-2.1-6.1.14.jar:/usr/local/hbase/lib/jsp-api-2.1-6.1.14.jar:/usr/local/hbase/lib/jsr305-1.3.9.jar:/usr/local/hbase/lib/log4j-1.2.16.jar:/usr/local/hbase/lib/metrics-core-2.1.2.jar:/usr/local/hbase/
lib/netty-3.2.4.Final.jar:/usr/local/hbase/lib/phoenix-2.1.1-client.jar:/usr/local/hbase/lib/phoenix-2.1.1.jar:/usr/local/hbase/lib/protobuf-java-2.4.0a.jar:/usr/local/hbase/lib/servlet-api-2.5-6.1.14.jar:/usr/local/hbase/lib/slf4j-api-1.4.3.jar:/usr/local/hbase/lib/slf4j-log4j12-1.4.3.jar:/usr/local/hbase/lib/snappy-java-1.0.3.2.jar:/usr/local/hbase/lib/stax-api-1.0.1.jar:/usr/local/hbase/lib/velocity-1.7.jar:/usr/local/hbase/lib/xmlenc-0.52.jar:/usr/local/hbase/lib/zookeeper-3.4.5.jar:/usr/local/hadoop/conf:/usr/local/hadoop/:/usr/local/hadoop/lib/*:/usr/local/hadoop/* org.apache.hadoop.hbase.thrift.ThriftServer start

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值