Big Data Notes

Change the hostname

vi /etc/hosts

vi /etc/sysconfig/network
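A minimal sketch of the two files for a three-node cluster; only muzid1 appears later in these notes, so muzid2 and muzid3 are assumed names for the other two machines:

    # /etc/hosts (same entries on every node)
    192.168.64.145 muzid1
    192.168.64.146 muzid2
    192.168.64.147 muzid3

    # /etc/sysconfig/network on the 145 node (CentOS 6 style)
    NETWORKING=yes
    HOSTNAME=muzid1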

zookeeper

   wget https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper/zookeeper-3.5.9/apache-zookeeper-3.5.9-bin.tar.gz
    tar -zxvf apache-zookeeper-3.5.9-bin.tar.gz -C apps/
    mv apache-zookeeper-3.5.9-bin zookeeper

 /etc/profile

export ZOOKEEPER_HOME=/home/muzi/apps/zookeeper
export PATH=$PATH:$JAVA_HOME/bin:$ZOOKEEPER_HOME/bin:$ZOOKEEPER_HOME/conf
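Reload the profile so the new variables take effect in the current shell, then confirm:

    source /etc/profile
    echo $ZOOKEEPER_HOME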

zoo.cfg

    cd zookeeper/conf
    cp zoo_sample.cfg zoo.cfg

    dataDir=/root/zkdata
    server.1=192.168.64.145:2888:3888
    server.2=192.168.64.146:2888:3888
    server.3=192.168.64.147:2888:3888
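For reference, a minimal zoo.cfg after these edits might look like this; tickTime, initLimit, syncLimit and clientPort are the defaults shipped in zoo_sample.cfg:

    tickTime=2000
    initLimit=10
    syncLimit=5
    dataDir=/root/zkdata
    clientPort=2181
    server.1=192.168.64.145:2888:3888
    server.2=192.168.64.146:2888:3888
    server.3=192.168.64.147:2888:3888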

myid

        cd /root
        mkdir zkdata
        echo 1 > zkdata/myid
        cd /home/muzi/apps
        scp -r zookeeper/ root@192.168.64.146:/home/muzi/apps/
        scp -r zookeeper/ root@192.168.64.147:/home/muzi/apps/

## On 192.168.64.146
	mkdir /root/zkdata
	echo 2 > /root/zkdata/myid
## On 192.168.64.147
	mkdir /root/zkdata
	echo 3 > /root/zkdata/myid
#!/bin/bash
# Run the same zkServer.sh sub-command (start/stop/status) on all three nodes
echo "$1 zkServer"
for i in 192.168.64.145 192.168.64.146 192.168.64.147
do
	ssh $i "source /etc/profile;/home/muzi/apps/zookeeper/bin/zkServer.sh $1"
done
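Usage sketch, assuming the script above is saved as zk.sh (the notes do not give it a name):

    chmod +x zk.sh
    ./zk.sh start      # start all three nodes
    ./zk.sh status     # one node should report leader, the other two follower
    ./zk.sh stop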

bin/zkCli.sh -server 192.168.64.145:2181,192.168.64.146:2181,192.168.64.147:2181
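Once connected, a few basic zkCli commands to verify the ensemble works (the /test znode and its data are just examples):

    ls /
    create /test "hello"
    get /test
    delete /test
    quit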

 redis

Install

yum install gcc
yum install gcc-c++
tar zxvf redis-3.2.5.tar.gz 
cd redis-3.2.5
make
make install
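To confirm the build and install succeeded, the binaries should report version 3.2.5:

    redis-server --version
    redis-cli --version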

 

Commands

# Start

Back up redis.conf to /root/Desktop/redis

vi /root/Desktop/redis/redis.conf

        daemonize yes    # run redis-server in the background

redis-server /root/Desktop/redis/redis.conf

# 1) From inside the client

redis-cli # enter the client

ping

shutdown # stop the Redis server (from inside the client)

quit # exit the client

# 2) From the shell

redis-cli shutdown # shut down the server without entering the client

 redis-3.2.5/src

 ./redis-trib.rb create --replicas 1 192.168.64.145:6379 192.168.64.145:6380 192.168.64.145:6381 192.168.64.145:6382 192.168.64.145:6383 192.168.64.145:6384
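Note: redis-trib.rb needs Ruby with the redis gem installed, and the six instances above must already be running, each from its own config with cluster mode on. A per-instance sketch (one copy per port 6379-6384; the file locations are assumptions):

    # e.g. /root/Desktop/redis-cluster/6379/redis.conf
    port 6379
    daemonize yes
    cluster-enabled yes
    cluster-config-file nodes-6379.conf
    cluster-node-timeout 15000
    appendonly yes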

redis-cli -c -p 6379

redis-cli -p 6379 
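The -c flag makes the client follow MOVED redirections between cluster nodes; without it, a key that hashes to a slot owned by another node only returns a MOVED error. A quick check (the key name is arbitrary):

    redis-cli -c -p 6379
    set k1 v1      # may print "-> Redirected to slot [...] located at ..." before OK
    get k1         # follows the redirection and returns "v1"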

hadoop

Install

tar -zxvf hadoop-2.10.1.tar.gz -C /usr/local

cd /usr/local

mv hadoop-2.10.1/ hadoop/
cd hadoop/etc/hadoop

vi core-site.xml

<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/home/muzi/hadoop/tmp</value>
        <description>A base for other temporary directories.</description>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://192.168.64.145:9000</value>
    </property>
</configuration>

vi hdfs-site.xml 

<configuration>
        <property>
                <name>dfs.namenode.secondary.http-address</name>
                <value>muzid1:50090</value>
        </property>
        <property>
                <name>dfs.replication</name>
                <value>2</value>
        </property>
        <property>
                <name>dfs.namenode.name.dir</name>
                <value>file:/usr/local/hadoop/tmp/dfs/name</value>
        </property>
        <property>
                <name>dfs.datanode.data.dir</name>
                <value>file:/usr/local/hadoop/tmp/dfs/data</value>
        </property>
</configuration>

 cp mapred-site.xml.template mapred-site.xml

vi mapred-site.xml

<configuration>
         <property>
                <name>mapreduce.framework.name</name>
                <value>yarn</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.address</name>
                <value>muzid1:10020</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.webapp.address</name>
                <value>muzid1:19888</value>
        </property>
</configuration>

vi yarn-site.xml

<configuration>

<!-- Site specific YARN configuration properties -->
        <property>
                <name>yarn.resourcemanager.hostname</name>
                <value>muzid1</value>
        </property>
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
</configuration>

The two properties below also go inside the <configuration> element of yarn-site.xml:

<!-- Enable log aggregation -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>

<!-- Keep aggregated logs for 7 days (604800 seconds) -->
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>604800</value>
</property>
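With log aggregation enabled, the container logs of finished applications can be pulled with the yarn CLI (the application id below is a placeholder):

    yarn logs -applicationId application_1234567890123_0001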

 

scp -r /usr/local/hadoop/etc/hadoop root@192.168.64.147:/usr/local/hadoop/etc

 scp -r /usr/local/hadoop/etc/hadoop root@192.168.64.146:/usr/local/hadoop/etc

/usr/local/hadoop/bin/hdfs namenode -format

sbin/hadoop-daemon.sh start namenode

sbin/yarn-daemon.sh start resourcemanager

sbin/yarn-daemon.sh start nodemanager

sbin/hadoop-daemon.sh start datanode
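After the daemons are up, jps on this node should list roughly the following processes (pids will differ):

    jps
    # NameNode
    # ResourceManager
    # NodeManager
    # DataNode
    # Jps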

sbin/start-all.sh fails with: JAVA_HOME is not set and could not be found.

vim hadoop/etc/hadoop/hadoop-env.sh

 export JAVA_HOME=/home/muzi/apps/jdk1.8.0_221

/etc/profile

export HADOOP=/usr/local/hadoop

export PATH=$PATH:$HADOOP/sbin:$HADOOP/bin
 

source /etc/profile

scp /etc/profile root@192.168.64.146:/etc

export HADOOP_ROOT_LOGGER=DEBUG,console   # print DEBUG-level logs to the console when troubleshooting

export HADOOP_ROOT_LOGGER=INFO            # switch back to the default log level

/usr/local/hadoop/bin/hadoop fs -ls /

hdfs dfs -mkdir /input

hdfs dfs -put /home/muzi/Desktop/hadooptest/d1.txt /input

hadoop fs -cat /input/d1.txt

 

 

Commands

hdfs dfs -mkdir -p /user/hadoop

hdfs dfs -mkdir /input

hdfs dfs -put etc/hadoop/*.xml /input

hdfs dfs -cat output/*

hadoop fs -get /input/d1.txt    # download to the current local directory

hadoop fs -get /input/d1.txt hadooptest/d2.txt    # download and rename locally

hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.10.1.jar wordcount input output

hadoop fs -setrep 3 /a.txt    # set the replication factor of /a.txt to 3

hadoop fs -mkdir -p /user

hadoop fs -test -e text.txt    # check whether the file exists

echo $?    # 0 means it exists
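The -test -e / $? pair is handy in scripts, since the exit code is 0 only when the path exists. A minimal sketch:

    if hadoop fs -test -e text.txt; then
        echo "text.txt exists in HDFS"
    else
        echo "text.txt not found"
    fi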

hadoop fs -put text.txt    # upload to the current user's HDFS home directory

hadoop fs -appendToFile local.txt text.txt    # append local.txt to the end of text.txt

hadoop fs -cat text.txt

hadoop fs -copyFromLocal -f local.txt text.txt    # -f overwrites text.txt if it already exists

hadoop fs -ls -R -h /user    # recursive listing with human-readable sizes
