2021-03-26

ROOT:Hadoop123
atguigu:atguigu/123456
Memory 2 GB, disk 50 GB
1. Install Linux
    /boot 200 MB
    swap  2 GB
    /     remaining space
2. *Install VMware Tools
3. Disable the firewall
   sudo systemctl stop firewalld
   sudo systemctl disable firewalld
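   A quick sanity check (assuming a systemd-based CentOS 7 with firewalld):
   sudo systemctl status firewalld   # should show "inactive (dead)" and "disabled"
   sudo firewall-cmd --state         # should print "not running"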
4. Set a static IP and change the hostname
    Edit with vim /etc/sysconfig/network-scripts/ifcfg-eth0 (do not modify this on Alibaba Cloud)
    Change it to:
=================================
DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=static
NAME="eth0"
IPADDR=192.168.5.101
PREFIX=24
GATEWAY=192.168.5.2
DNS1=192.168.5.2
=================================
 Edit with vim /etc/sysconfig/network
 Change the HOSTNAME= line
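 To apply and verify the changes (a sketch; systemctl restart network works on CentOS 7 with the classic network-scripts, and hostnamectl is the CentOS 7 way to set the hostname):
 sudo systemctl restart network   # reload ifcfg-eth0
 ip addr show eth0                # confirm IPADDR took effect
 hostname                         # confirm the new hostname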
5. Add the private IPs to /etc/hosts; on Windows, add the public IPs to the hosts file under C:\Windows\System32\drivers\etc (for Alibaba Cloud servers)
    vim /etc/hosts
=============================
39.101.171.194 172.26.89.117
39.101.177.65  172.26.89.119
39.99.145.17   172.26.89.118
=============================
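Note that /etc/hosts entries are normally IP-hostname pairs; the lines above pair each public IP with its private IP. A hedged illustration of what each machine's private-IP mapping might look like (the hadoop102-104 hostnames and their assignment to these IPs are assumptions based on the rest of this guide):
=============================
172.26.89.117 hadoop102
172.26.89.119 hadoop103
172.26.89.118 hadoop104
=============================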
6. Create a regular user atguigu and set a password for it
    useradd atguigu
    passwd atguigu   
7. Grant this user sudo rights in /etc/sudoers:          sudo vi /etc/sudoers
    Below the line root    ALL=(ALL)       ALL
    add the line atguigu    ALL=(ALL)       NOPASSWD:ALL
    Save with :wq! (the file is read-only, so the ! is required)
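    To verify (sudo -l lists the caller's sudo privileges):
    su - atguigu
    sudo -l    # should include (ALL) NOPASSWD: ALL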
8. Create two directories under /opt, module and software, and give atguigu ownership of both
    mkdir /opt/module /opt/software
    chown atguigu:atguigu /opt/module /opt/software
From this point on, log in as the regular user
9. Create a distribution script
    cd ~
    vim xsync
    with the following content:
=================================================================
#!/bin/bash
#1. Check the number of arguments
if [ $# -lt 1 ]
then
    echo Not Enough Arguments!
    exit
fi
#2. Iterate over every machine in the cluster
for host in hadoop102 hadoop103 hadoop104
do
    echo ====================    $host    ====================
    #3. Iterate over every file/directory and send each one
    for file in "$@"
    do
        #4. Check that the file exists
        if [ -e "$file" ]
        then
            #5. Get the absolute parent directory
            pdir=$(cd -P "$(dirname "$file")"; pwd)

            #6. Get the file name itself
            fname=$(basename "$file")

            #7. Recreate the parent directory on the remote host, then sync
            ssh "$host" "mkdir -p $pdir"
            rsync -av "$pdir/$fname" "$host:$pdir"
        else
            echo "$file does not exist!"
        fi
    done
done
==============================================================
 chmod +x xsync
 sudo cp xsync /bin        # /bin already exists, so no mkdir is needed
 sudo xsync /bin/xsync
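 After this, distributing any file or directory is a single command; for example (test.txt is a hypothetical file, and hadoop103/104 must be resolvable with rsync installed on every host):
 xsync /home/atguigu/test.txt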
10. Configure passwordless SSH login (also needed on Alibaba Cloud)
If a .ssh directory already exists, delete it first: rm -rf ~/.ssh
   1. Generate a key pair:       ssh-keygen -t rsa, then press Enter three times
   2. Send the public key to the local machine:  ssh-copy-id hadoop102, entering the password once (if no authorized_keys file gets created, run cat id_rsa.pub and copy the public key, then vim authorized_keys and paste it in)
   3. SSH into the local machine once
      ssh hadoop102
      exit
  4. Repeat steps 1-3 on hadoop103 and hadoop104
  5. On hadoop102, copy the AAA........atguigu@hadoop102 entry from authorized_keys
  6. Paste that entry into authorized_keys on hadoop103 and hadoop104 and save with :wq
  7. From hadoop102, SSH into every machine once
        ssh hadoop103
        exit
        ssh hadoop104
        exit
  8. Repeat steps 1-7 for the root user as well as atguigu, and starting from hadoop103 and hadoop104 (if SSH is refused, fix the permissions:
        chmod 700 ~/.ssh
        chmod 644 ~/.ssh/authorized_keys)
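A quick loop to confirm passwordless login works from the current machine (run it as each user on each host; no password prompt should appear):
=================================
for host in hadoop102 hadoop103 hadoop104
do
    ssh $host hostname
done
=================================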
11.  Extract Hadoop and the JDK
        tar -zxvf jdk-8u212-linux-x64.tar.gz -C /opt/module/
        tar -zxvf hadoop-3.1.3.tar.gz -C /opt/module/
 Distribute Hadoop and the JDK (run from /opt/module):  xsync hadoop-3.1.3/ jdk1.8.0_212/
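 To confirm the extraction and distribution landed, on each machine:
 ls /opt/module    # should list hadoop-3.1.3 and jdk1.8.0_212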
12.  Configure environment variables
*Create a new .sh file under /etc/profile.d with the following content: sudo vim /etc/profile.d/my_env.sh
=======================================================
#JAVA_HOME
JAVA_HOME=/opt/module/jdk1.8.0_212
#HADOOP_HOME
HADOOP_HOME=/opt/module/hadoop-3.1.3
PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export PATH JAVA_HOME HADOOP_HOME
========================================================
source /etc/profile, or reopen the Xshell window, to make the environment variables take effect
sudo xsync /etc/profile.d/my_env.sh
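Verify on each machine (after re-logging in so the new profile script is sourced):
    java -version      # should report java version "1.8.0_212"
    hadoop version     # should report Hadoop 3.1.3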
13. cd /opt/module/hadoop-3.1.3/etc/hadoop/
 vim hadoop-env.sh
==================================================================
export JAVA_HOME=/opt/module/jdk1.8.0_212
==================================================================
14.  cd /opt/module/hadoop-3.1.3/etc/hadoop/
   vim core-site.xml (all of the property blocks below go inside the file's <configuration> element)
================================================================================
<!-- NameNode address -->
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://hadoop102:9820</value>
</property>
<!-- Hadoop data storage directory -->
<property>
    <name>hadoop.data.dir</name>
    <value>/opt/module/hadoop-3.1.3/data</value>
</property>
<!-- Hosts from which the atguigu (superuser) proxy may be used -->
<property>
    <name>hadoop.proxyuser.atguigu.hosts</name>
    <value>*</value>
</property>
<!-- Groups of users that atguigu (superuser) may impersonate -->
<property>
    <name>hadoop.proxyuser.atguigu.groups</name>
    <value>*</value>
</property>
<!-- Users that atguigu (superuser) may impersonate -->
<property>
    <name>hadoop.proxyuser.atguigu.users</name>
    <value>*</value>
</property>
===================================================================================
vim hdfs-site.xml
===================================================================================
<!-- NameNode data storage directory -->
<property>
    <name>dfs.namenode.name.dir</name>
    <value>file://${hadoop.data.dir}/name</value>
</property>
<!-- DataNode data storage directory -->
<property>
    <name>dfs.datanode.data.dir</name>
    <value>file://${hadoop.data.dir}/data</value>
</property>
<!-- SecondaryNameNode data storage directory -->
<property>
    <name>dfs.namenode.checkpoint.dir</name>
    <value>file://${hadoop.data.dir}/namesecondary</value>
</property>
<!-- Compatibility setting; skip it for now -->
<property>
    <name>dfs.client.datanode-restart.timeout</name>
    <value>30s</value>
</property>
<!-- NameNode web UI address -->
<property>
    <name>dfs.namenode.http-address</name>
    <value>hadoop102:9870</value>
</property>
<!-- SecondaryNameNode web UI address -->
<property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>hadoop104:9868</value>
</property>
<!-- Disable permission checks so the HDFS web UI can manipulate files -->
<property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
</property>
===========================================================================
vim yarn-site.xml
===============================================================================
<!-- Use mapreduce_shuffle as the auxiliary service so MR can shuffle on YARN -->
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<!-- ResourceManager address -->
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop103</value>
</property>
<!-- Environment variables to inherit -->
<property>
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
</property>
<!-- Disable virtual-memory checks so MR tasks are not killed for exceeding virtual memory -->
<property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
</property>
==============================================================================
vim mapred-site.xml
===================================================================================
<!-- Run MapReduce programs on YARN -->
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
<!-- JobHistory server address -->
<property>
    <name>mapreduce.jobhistory.address</name>
    <value>hadoop102:10020</value>
</property>
<!-- JobHistory server web UI address -->
<property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hadoop102:19888</value>
</property>
===================================================================================
 15. cd /opt/module/hadoop-3.1.3/etc
  xsync hadoop/
 16. Format the NameNode on hadoop102 (do this only once; reformatting requires wiping data/ and logs/ first, see the recovery steps after step 21)
    hdfs namenode -format
17. Start HDFS daemons individually:
hdfs --daemon start namenode            (on hadoop102)
hdfs --daemon start datanode            (on hadoop102, hadoop103, hadoop104)
hdfs --daemon start secondarynamenode   (on hadoop104)
Start YARN daemons individually:
yarn --daemon start resourcemanager     (on hadoop103)
yarn --daemon start nodemanager         (on hadoop102, hadoop103, hadoop104)
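Run jps (shipped with the JDK) on each node to confirm the daemons are up; with the layout above you should see:
================================================================================================
jps
# hadoop102: NameNode, DataNode, NodeManager
# hadoop103: ResourceManager, DataNode, NodeManager
# hadoop104: SecondaryNameNode, DataNode, NodeManager
================================================================================================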
18.sudo vim /opt/module/hadoop-3.1.3/etc/hadoop/workers
================================================================================================
hadoop102
hadoop103
hadoop104
===================================================================================================
xsync /opt/module/hadoop-3.1.3/etc/hadoop/workers
19. Start HDFS as a cluster
    start-dfs.sh   (on hadoop102, where the NameNode runs)
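The NameNode web UI configured in hdfs-site.xml should now respond:
    curl http://hadoop102:9870     # or open it in a browser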

20. On the machine where the ResourceManager is configured
    i.e. run start-yarn.sh on hadoop103

21. To stop everything: stop-dfs.sh and stop-yarn.sh

If the cluster is broken:
    stop-dfs.sh
    stop-yarn.sh
    # run on all three machines; data/ and logs/ live at the root of the Hadoop install
    cd /opt/module/hadoop-3.1.3
    rm -rf data logs
    Then go back to step 16
22. Start the history server: mapred --daemon start historyserver (on hadoop102)
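The JobHistory web UI should then answer at the address set in mapred-site.xml:
    curl http://hadoop102:19888    # or open it in a browser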
