Installing Kerberos on a domestic (Chinese-made) server and integrating Hadoop with Kerberos

Kerberos Installation

1. Check whether the libcrypto.so library is present

[root@hadoop01 software]# ln /software/openssl-1.1.1k/libcrypto.so.1.1 /usr/lib64/libcrypto.so.1.1
ln: failed to create hard link '/usr/lib64/libcrypto.so.1.1': File exists
[root@hadoop01 software]# find / -name "libcrypto.so.1.1"
/usr/lib64/libcrypto.so.1.1
/usr/local/openssl/lib/libcrypto.so.1.1
/usr/lib/vmware-tools/lib64/libcrypto.so.1.1
/usr/lib/vmware-tools/lib64/libcrypto.so.1.1/libcrypto.so.1.1
/usr/lib/vmware-tools/lib32/libcrypto.so.1.1
/usr/lib/vmware-tools/lib32/libcrypto.so.1.1/libcrypto.so.1.1
/opt/context/software/bigdata/mysql8018/bin/libcrypto.so.1.1
/opt/context/software/bigdata/mysql8018/lib/libcrypto.so.1.1
/opt/context/software/bigdata/mysql8018/lib/plugin/libcrypto.so.1.1
/opt/context/software/bigdata/mysql8018/lib/plugin/debug/libcrypto.so.1.1
/software/openssl-1.1.1k/libcrypto.so.1.1

If the library is not present, it can be obtained by building OpenSSL from source:

#Download the OpenSSL source package; it is also available in the course materials as openssl-1.1.1k.tar.gz
[root@hadoop01 ~]# cd /software && wget https://www.openssl.org/source/openssl-1.1.1k.tar.gz --no-check-certificate 
#Install the dependencies needed to build the source
[root@hadoop01 openssl-1.1.1k]# yum -y install gcc gcc-c++ zlib-devel
#Extract and build
[root@hadoop01 software]# tar -zxvf openssl-1.1.1k.tar.gz 
[root@hadoop01 software]# cd openssl-1.1.1k 
[root@hadoop01 openssl-1.1.1k]# ./config --prefix=/usr/local/openssl --openssldir=/usr/local/openssl shared zlib 
[root@hadoop01 openssl-1.1.1k]# make 
[root@hadoop01 openssl-1.1.1k]# make install
#Send the built directory to the other nodes
scp -r -P2406 /software/openssl-1.1.1k pukka@hadoop02:/software/
#Link the library into /usr/lib64 (note: ln without -s creates a hard link)
[root@hadoop01 openssl-1.1.1k]# ln /software/openssl-1.1.1k/libcrypto.so.1.1 /usr/lib64/libcrypto.so.1.1

2. Install the Kerberos packages

#Install the server components on hadoop01
yum install -y krb5-server krb5-libs krb5-workstation
#Install the client components on hadoop01/02/03
yum install -y krb5-workstation krb5-libs
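A quick check that the expected packages landed on each node (an optional verification, not part of the original steps; krb5-server is only expected on hadoop01):

rpm -qa | grep -E "krb5-(server|libs|workstation)"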

3. Configure and start

3.1. Configure kdc.conf
[root@hadoop01 ~]# vim /var/kerberos/krb5kdc/kdc.conf
[kdcdefaults]
 kdc_ports = 88
 kdc_tcp_ports = 88
[realms]
 PUKKA.COM = {
  #master_key_type = aes256-cts
  acl_file = /var/kerberos/krb5kdc/kadm5.acl
  dict_file = /usr/share/dict/words
  admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
  max_life = 1d
  max_renewable_life = 7d
  supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal camellia256-cts:normal camellia128-cts:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
 }

Notes:

(1) Change the realm under [realms] to PUKKA.COM.

(2) Remove aes256-cts: Java needs additional JCE policy jars to use aes256-cts, so it is not used here.

3.2. Configure kadm5.acl
[root@hadoop01 ~]# vim /var/kerberos/krb5kdc/kadm5.acl
#Change the contents to:
*/admin@PUKKA.COM      *

Note: change admin@HADOOP.COM to admin@PUKKA.COM, so that the ACL entry reads */admin@PUKKA.COM.

3.3. Configure krb5.conf
[root@hadoop01 ~]# vim /etc/krb5.conf
#includedir /etc/krb5.conf.d/

[logging]
 default = FILE:/var/log/krb5libs.log
 kdc = FILE:/var/log/krb5kdc.log
 admin_server = FILE:/var/log/kadmind.log

[libdefaults]
 dns_lookup_realm = false
 ticket_lifetime = 24h
 renew_lifetime = 7d
 forwardable = true
 rdns = false
 pkinit_anchors = /etc/pki/tls/certs/ca-bundle.crt
 default_realm = PUKKA.COM
 #default_ccache_name = KEYRING:persistent:%{uid}
 udp_preference_limit = 1
[realms]
 PUKKA.COM = {
  kdc = hadoop01
  admin_server = hadoop01
}

[domain_realm]
# .example.com = EXAMPLE.COM
# example.com = EXAMPLE.COM

Notes:

(1) Uncomment default_realm and set it to PUKKA.COM, matching the configuration in kdc.conf.

(2) Under [realms], change the realm to PUKKA.COM (again matching kdc.conf) and point kdc and admin_server at this host's hostname.

(3) udp_preference_limit = 1 disables UDP, which avoids a known Hadoop problem.

(4) Comment out includedir /etc/krb5.conf.d/: a file in that directory sets default_ccache_name to KCM, which breaks Hadoop shell commands.

(5) Comment out default_ccache_name = KEYRING:persistent:%{uid}; leaving it enabled also breaks Hadoop shell commands.

3.4. Sync krb5.conf to the other nodes
scp /etc/krb5.conf root@pukka102:/etc/
scp /etc/krb5.conf root@pukka103:/etc/
3.5. Create the Kerberos database on hadoop01
[root@hadoop01 ~]# kdb5_util create -s
Loading random data
Initializing database '/var/kerberos/krb5kdc/principal' for realm 'PUKKA.COM',
master key name 'K/M@PUKKA.COM'
You will be prompted for the database Master Password.
It is important that you NOT FORGET this password.
Enter KDC database master key: (enter the password)
Re-enter KDC database master key to verify: (confirm the password)

The master password is set to Pukka@159357.
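As an optional check (not part of the original steps), the default principals created by kdb5_util can be listed on the KDC node; entries such as K/M@PUKKA.COM and kadmin/admin@PUKKA.COM should appear:

#List the principals in the new database
kadmin.local -q "listprincs"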

3.6. Start the Kerberos services
#Start krb5kdc
systemctl start krb5kdc
#Start kadmin
systemctl start kadmin
#Enable both services at boot
systemctl enable krb5kdc
systemctl enable kadmin
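A quick status check (optional, assuming systemd is used as above) confirms both daemons are running:

#Both services should report active (running)
systemctl status krb5kdc kadmin --no-pager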
3.7. Grant the pukka user permission to run the Kerberos services
chown -R pukka. /var/log/kadmind.log
chown -R pukka. /var/kerberos
[pukka@hadoop01 kerberos]$ kadmin.local
Authenticating as principal pukka/admin@PUKKA.COM with password.
kadmin.local:
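If remote kadmin access is ever needed, the kadm5.acl entry above (*/admin@PUKKA.COM) grants full rights to any */admin principal, so one could be created like this (an illustrative example; creating this principal is an assumption, not part of the original setup):

kadmin.local -q "addprinc -pw Pukka@159357 pukka/admin@PUKKA.COM"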

Integrating Hadoop with Kerberos

1. Create the required users and passwordless SSH

#Run on all nodes
#Create the required users and assign their groups
groupadd hadoop
useradd hdfs -g hadoop 
useradd yarn -g hadoop 
useradd mapred -g hadoop
usermod -a -G pukka hdfs
usermod -a -G pukka yarn
usermod -a -G pukka mapred
usermod -a -G root hdfs
usermod -a -G root yarn
usermod -a -G root mapred
usermod -a -G hadoop pukka
#Set passwords for the users above on every node; here the password is merlewang123456789@Q
passwd hdfs 
passwd yarn 
passwd mapred
#Generate an SSH key; run as each of the hdfs, yarn and mapred users
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
#Distribute the key to the other servers; run as each of the hdfs, yarn and mapred users
ssh-copy-id -p 2406 hadoop01
ssh-copy-id -p 2406 hadoop02
ssh-copy-id -p 2406 hadoop03
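To confirm passwordless login works for each of the three users, a quick loop like the following can be run (a sanity check only; it assumes SSH listens on port 2406 as in the commands above):

#Should print each hostname without prompting for a password
for h in hadoop01 hadoop02 hadoop03; do ssh -p 2406 -o BatchMode=yes "$h" hostname; done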

Problem encountered while configuring passwordless SSH

Problem description:

The pukka user could set up passwordless SSH, but other users such as root and hdfs could not; ssh-copy-id failed with /usr/bin/ssh-copy-id: ERROR: Bad owner or permissions on /etc/ssh/ssh_config.d/05-redhat.conf. How can passwordless SSH be configured for every user? Changing the permissions of that file was attempted but did not work.

Solution:

Only the pukka user has access to /etc/ssh/ssh_config.d/05-redhat.conf, and changing its permissions breaks SSH for pukka, so the file's permissions cannot be changed.
The file is only read because /etc/ssh/ssh_config contains Include /etc/ssh/ssh_config.d/*.conf. Since the included file holds no meaningful settings, comment out that Include line and restart SSH:
systemctl restart sshd

2. Adjust local directory permissions

Different Hadoop users need different permissions on different directories. The table below lists the permissions given in the official Hadoop documentation for each directory, together with the paths configured in this cluster; the permissions are set accordingly.

| Filesystem | Path | User:Group | Permissions | Configured path | Mode |
| --- | --- | --- | --- | --- | --- |
| local | dfs.namenode.name.dir | hdfs:hadoop | drwx------ | /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/name | 700 |
| local | dfs.datanode.data.dir | hdfs:hadoop | drwx------ | /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/data | 700 |
| local | $HADOOP_LOG_DIR | hdfs:hadoop | drwxrwxr-x | /opt/context/software/bigdata/hadoop-3.2.3/logs | 775 |
| local | $YARN_LOG_DIR | yarn:hadoop | drwxrwxr-x | /opt/context/software/bigdata/hadoop-3.2.3/logs | 775 |
| local | yarn.nodemanager.local-dirs | yarn:hadoop | drwxrwxr-x | /opt/context/software/bigdata/hadoop-3.2.3/tmp/nm-local-dir | 775 |
| local | yarn.nodemanager.log-dirs | yarn:hadoop | drwxrwxr-x | /opt/context/software/bigdata/hadoop-3.2.3/logs/userlogs | 775 |
| local | dfs.journalnode.edits.dir | hdfs:hadoop | drwx------ | /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/ha/jn | 700 |

The directories above are explained as follows:

  • dfs.namenode.name.dir

This property specifies the NameNode's data storage directory; the NameNode keeps all file, directory and block metadata there. It is set in hdfs-site.xml and defaults to file://${hadoop.tmp.dir}/dfs/name, where hadoop.tmp.dir is configured in core-site.xml (here /opt/data/hadoop). Change the owner and group of /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/name to hdfs:hadoop and the permissions to 700.

  • dfs.datanode.data.dir

This property specifies the DataNode's data storage directory, where the DataNode stores block data. It is set in hdfs-site.xml and defaults to file://${hadoop.tmp.dir}/dfs/data. Change the owner and group of /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/data to hdfs:hadoop and the permissions to 700.

  • HADOOP_LOG_DIR与YARN_LOG_DIR

HADOOP_LOG_DIR is the log directory for the Hadoop daemons and YARN_LOG_DIR is the log directory for the YARN daemons; both default to $HADOOP_HOME/logs. Change the owner and group of /opt/context/software/bigdata/hadoop-3.2.3/logs to hdfs:hadoop and the permissions to 775.

  • yarn.nodemanager.local-dirs

This property specifies the NodeManager's local working directories. It is set in yarn-site.xml and defaults to file://${hadoop.tmp.dir}/nm-local-dir. Change the owner and group of /opt/context/software/bigdata/hadoop-3.2.3/tmp/nm-local-dir to yarn:hadoop and the permissions to 755.

  • yarn.nodemanager.log-dirs

This property specifies the NodeManager's log directories. It is set in yarn-site.xml and defaults to $HADOOP_LOG_DIR/userlogs. Change the owner and group of /opt/context/software/bigdata/hadoop-3.2.3/logs/userlogs to yarn:hadoop and the permissions to 755.

  • dfs.journalnode.edits.dir

This property specifies where the JournalNode stores its edits. It is set in hdfs-site.xml and defaults to /tmp/hadoop/dfs/journalnode/. The path configured here is /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/ha/jn, so change its owner and group to hdfs:hadoop and the permissions to 700.

#The pukka user lacked permission to make these changes, so the commands were run as root
#Run on the NameNode nodes hadoop01 and hadoop02
chown -R hdfs:hadoop /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/name
chmod 700 /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/name

#Run on the DataNode nodes hadoop01-03
chown -R hdfs:hadoop /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/data
chmod 700 /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/data

#Run on all nodes hadoop01-03
chown hdfs:hadoop /opt/context/software/bigdata/hadoop-3.2.3/logs
chmod 775 /opt/context/software/bigdata/hadoop-3.2.3/logs

#Run on the JournalNode nodes hadoop01 and hadoop02
chown -R hdfs:hadoop /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/ha/jn
chmod 700 /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/ha/jn

#Run on the NodeManager nodes hadoop01-03
chown -R yarn:hadoop /opt/context/software/bigdata/hadoop-3.2.3/tmp/nm-local-dir
chmod -R 755 /opt/context/software/bigdata/hadoop-3.2.3/tmp/nm-local-dir
chown -R yarn:hadoop /opt/context/software/bigdata/hadoop-3.2.3/logs/userlogs
chmod -R 755 /opt/context/software/bigdata/hadoop-3.2.3/logs/userlogs
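The result can be spot-checked with ls (an optional step; it only confirms the owners and modes set above):

#Expect hdfs:hadoop with 700 on the dfs directories and hdfs:hadoop with 775 on logs
ls -ld /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/name /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/data /opt/context/software/bigdata/hadoop-3.2.3/logs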

3. Create a Kerberos principal for each service

Once Kerberos security is enabled, every Hadoop service instance needs its own Kerberos principal so that the services can authenticate to each other, exchange data securely, and be authorized to access the cluster's data and resources. Hadoop service principals take the form ServiceName/HostName@REALM.

Based on how the services are distributed across the cluster nodes, the following Kerberos service principals are created:

#Run the following on the Kerberos server node hadoop01
kadmin.local -q "addprinc -pw Pukka@159357 nn/hadoop01"
kadmin.local -q "addprinc -pw Pukka@159357 nn/hadoop02"
kadmin.local -q "addprinc -pw Pukka@159357 dn/hadoop01"
kadmin.local -q "addprinc -pw Pukka@159357 dn/hadoop02"
kadmin.local -q "addprinc -pw Pukka@159357 dn/hadoop03"
kadmin.local -q "addprinc -pw Pukka@159357 jn/hadoop01"
kadmin.local -q "addprinc -pw Pukka@159357 jn/hadoop02"
kadmin.local -q "addprinc -pw Pukka@159357 rm/hadoop01"
kadmin.local -q "addprinc -pw Pukka@159357 rm/hadoop02"
kadmin.local -q "addprinc -pw Pukka@159357 nm/hadoop01"
kadmin.local -q "addprinc -pw Pukka@159357 nm/hadoop02"
kadmin.local -q "addprinc -pw Pukka@159357 nm/hadoop03"
kadmin.local -q "addprinc -pw Pukka@159357 HTTP/hadoop01"
kadmin.local -q "addprinc -pw Pukka@159357 HTTP/hadoop02"
kadmin.local -q "addprinc -pw Pukka@159357 HTTP/hadoop03"

4. Write the Hadoop service principals to keytab files

#Create the keytab directory on hadoop01-03
mkdir /home/pukka/keytabs
#On the Kerberos server node hadoop01, run the following to write each Hadoop service principal into a keytab file.
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/nn.service.keytab nn/hadoop01@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/nn.service.keytab nn/hadoop02@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/dn.service.keytab dn/hadoop01@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/dn.service.keytab dn/hadoop02@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/dn.service.keytab dn/hadoop03@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/jn.service.keytab jn/hadoop01@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/jn.service.keytab jn/hadoop02@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/rm.service.keytab rm/hadoop01@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/rm.service.keytab rm/hadoop02@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/nm.service.keytab nm/hadoop01@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/nm.service.keytab nm/hadoop02@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/nm.service.keytab nm/hadoop03@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/spnego.service.keytab HTTP/hadoop01@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/spnego.service.keytab HTTP/hadoop02@PUKKA.COM"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/spnego.service.keytab HTTP/hadoop03@PUKKA.COM"
#Send the keytabs to the other servers
[pukka@hadoop01 keytabs]$ scp -r -P 2406 /home/pukka/keytabs/* pukka@hadoop02:/home/pukka/keytabs/
[pukka@hadoop01 keytabs]$ scp -r -P 2406 /home/pukka/keytabs/* pukka@hadoop03:/home/pukka/keytabs/
#Adjust the keytab file permissions
chown -R pukka:hadoop /home/pukka/keytabs
chmod 770 /home/pukka/keytabs/* 
chmod 770 /home/pukka/keytabs/
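Before moving on, each keytab can be inspected and test-authenticated (an optional check; kdestroy discards the test ticket afterwards):

#List the entries in a keytab
klist -kt /home/pukka/keytabs/nn.service.keytab
#Obtain and then discard a ticket using the keytab
kinit -kt /home/pukka/keytabs/nn.service.keytab nn/hadoop01@PUKKA.COM && klist && kdestroy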

5. Modify the configuration files

Kerberos security must be configured in core-site.xml, hdfs-site.xml and yarn-site.xml on every Hadoop node.

#Edit core-site.xml, hdfs-site.xml and yarn-site.xml on hadoop01, then sync them to the other nodes
cd /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop
5.1. Configure core-site.xml

Append the following configuration to the file.

Note:

(1) PUKKA.COM is the Kerberos realm configured earlier; change it to match your environment.

<!-- Enable Kerberos authentication -->
    <property>
      <name>hadoop.security.authentication</name>
      <value>kerberos</value>
    </property>
  
    <!-- Enable Hadoop cluster authorization -->
    <property>
      <name>hadoop.security.authorization</name>
      <value>true</value>
    </property>

    <!-- Mechanism for mapping external user identities to Hadoop users -->
    <property>
      <name>hadoop.security.auth_to_local.mechanism</name>
      <value>MIT</value>
    </property>

    <!-- Rules mapping Kerberos principals to Hadoop users -->
    <property>
      <name>hadoop.security.auth_to_local</name>
      <value>
        RULE:[2:$1/$2@$0]([ndj]n\/.*@PUKKA\.COM)s/.*/hdfs/
        RULE:[2:$1/$2@$0]([rn]m\/.*@PUKKA\.COM)s/.*/yarn/
        RULE:[2:$1/$2@$0](jhs\/.*@PUKKA\.COM)s/.*/mapred/
        DEFAULT
      </value>
    </property>

Explanation:

In the hadoop.security.auth_to_local value above, a rule such as RULE:[2:$1/$2@$0]([ndj]n\/.*@PUKKA\.COM)s/.*/hdfs/ matches two-component principals in the PUKKA.COM realm whose first component is nn, dn or jn and maps them to the Hadoop user hdfs. The second rule maps rm/ and nm/ principals to yarn, the third maps jhs/ principals to mapred, and DEFAULT handles everything else.
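The mapping can be checked from the command line with Hadoop's built-in resolver, which prints the short name a principal resolves to (an optional check; it reads the auth_to_local rules from the local core-site.xml):

#Should report a mapping to hdfs
hadoop org.apache.hadoop.security.HadoopKerberosName nn/hadoop01@PUKKA.COM
#Should report a mapping to yarn
hadoop org.apache.hadoop.security.HadoopKerberosName rm/hadoop01@PUKKA.COM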

5.2. Configure hdfs-site.xml

Append the following configuration.

Note:

(1) _HOST is replaced at runtime by the actual hostname of the node running the service; it does not need to be changed.

(2) PUKKA.COM is the Kerberos realm; change it to match your environment.

(3) Adjust the keytab paths to match your environment.

<!-- Require Kerberos block access tokens for access to DataNode data -->
    <property>
      <name>dfs.block.access.token.enable</name>
      <value>true</value>
    </property>
  
    <!-- Kerberos principal for the NameNode service -->
    <property>
      <name>dfs.namenode.kerberos.principal</name>
      <value>nn/_HOST@PUKKA.COM</value>
    </property>
  
    <!-- Keytab file for the NameNode service -->
    <property>
      <name>dfs.namenode.keytab.file</name>
      <value>/home/pukka/keytabs/nn.service.keytab</value>
    </property>
  
    <!-- Kerberos principal for the DataNode service -->
    <property>
      <name>dfs.datanode.kerberos.principal</name>
      <value>dn/_HOST@PUKKA.COM</value>
    </property>
  
    <!-- Keytab file for the DataNode service -->
    <property>
      <name>dfs.datanode.keytab.file</name>
      <value>/home/pukka/keytabs/dn.service.keytab</value>
    </property>
  
    <!-- Kerberos principal for the JournalNode service -->
    <property>
      <name>dfs.journalnode.kerberos.principal</name>
      <value>jn/_HOST@PUKKA.COM</value>
    </property>
  
    <!-- Keytab file for the JournalNode service -->
    <property>
      <name>dfs.journalnode.keytab.file</name>
      <value>/home/pukka/keytabs/jn.service.keytab</value>
    </property>
  
    <!-- Serve the HDFS web endpoints over HTTPS only -->
    <property>
      <name>dfs.http.policy</name>
      <value>HTTPS_ONLY</value>
    </property>
  
    <!-- DataNode data transfer protection: authentication only -->
    <property>
      <name>dfs.data.transfer.protection</name>
      <value>authentication</value>
    </property>
  
    <!-- Authentication principal for the HDFS web UI -->
    <property>
      <name>dfs.web.authentication.kerberos.principal</name>
      <value>HTTP/_HOST@PUKKA.COM</value>
    </property>
  
    <!-- Keytab file for the HDFS web UI -->
    <property>
      <name>dfs.web.authentication.kerberos.keytab</name>
      <value>/home/pukka/keytabs/spnego.service.keytab</value>
    </property>
  
    <!-- SPNEGO principal for the NameNode web UI -->
    <property>
      <name>dfs.namenode.kerberos.internal.spnego.principal</name>
      <value>HTTP/_HOST@PUKKA.COM</value>
    </property>
  
    <!-- SPNEGO principal for the JournalNode web UI -->
    <property>
      <name>dfs.journalnode.kerberos.internal.spnego.principal</name>
      <value>HTTP/_HOST@PUKKA.COM</value>
    </property>

Note:

In addition, the following hdfs-site.xml property must point to the hdfs user's RSA private key; otherwise HA failover between NameNodes will not work.

	<property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/home/hdfs/.ssh/id_rsa</value>
    </property>
5.3. Configure yarn-site.xml

Append the following configuration.

Note:

(1) _HOST is replaced at runtime by the actual hostname of the node running the service; it does not need to be changed.

(2) PUKKA.COM is the Kerberos realm; change it to match your environment.

(3) Adjust the keytab paths to match your environment.

<!-- Kerberos principal for the ResourceManager service -->
    <property>
      <name>yarn.resourcemanager.principal</name>
      <value>rm/_HOST@PUKKA.COM</value>
    </property>
  
    <!-- Keytab file for the ResourceManager service -->
    <property>
      <name>yarn.resourcemanager.keytab</name>
      <value>/home/pukka/keytabs/rm.service.keytab</value>
    </property>
  
    <!-- Kerberos principal for the NodeManager service -->
    <property>
      <name>yarn.nodemanager.principal</name>
      <value>nm/_HOST@PUKKA.COM</value>
    </property>
  
    <!-- Keytab file for the NodeManager service -->
    <property>
      <name>yarn.nodemanager.keytab</name>
      <value>/home/pukka/keytabs/nm.service.keytab</value>
    </property>

6. Configure HTTPS access for Hadoop

Hadoop officially recommends serving its web endpoints over HTTPS to keep data transfers secure and to prevent eavesdropping, tampering and forgery, improving confidentiality, integrity and reliability. In practice it is not optional here (some "recommendation").

The commands below are run as root; at the end, ownership of the resulting files is handed over to the pukka user.

6.1. Generate the keystore and truststore files
#1. On hadoop01, run the following to generate the CA private key and certificate; the password is set to Pukka@159357
[root@hadoop01 root]# openssl req -new -x509 -keyout /root/hdfs_ca_key -out /root/hdfs_ca_cert -days 36500 -subj '/C=CN/ST=beijing/L=haidian/O=devA/OU=devB/CN=devC'
#2. Send the CA key and certificate to the other nodes
[root@hadoop01 root]# cd /root
[root@hadoop01 root]# scp -r -P 2406 ./hdfs_ca_cert ./hdfs_ca_key hadoop02:/root/
[root@hadoop01 root]# scp -r -P 2406 ./hdfs_ca_cert ./hdfs_ca_key hadoop03:/root/
#3. Generate a keystore on every node; the password is set to Pukka@159357
[root@hadoop01 root]# keytool -keystore /root/keystore -alias hadoop01 -genkey -keyalg RSA -dname "CN=hadoop01, OU=dev, O=dev, L=dev, ST=dev, C=CN"

[root@hadoop02 root]# keytool -keystore /root/keystore -alias hadoop02 -genkey -keyalg RSA -dname "CN=hadoop02, OU=dev, O=dev, L=dev, ST=dev, C=CN"

[root@hadoop03 root]# keytool -keystore /root/keystore -alias hadoop03 -genkey -keyalg RSA -dname "CN=hadoop03, OU=dev, O=dev, L=dev, ST=dev, C=CN"
#4. Generate a truststore on every node; the password is set to Pukka@159357, and answer y when asked whether to trust the certificate
[root@hadoop01 root]# keytool -keystore /root/truststore -alias CARoot -import -file /root/hdfs_ca_cert
[root@hadoop02 root]# keytool -keystore /root/truststore -alias CARoot -import -file /root/hdfs_ca_cert
[root@hadoop03 root]# keytool -keystore /root/truststore -alias CARoot -import -file /root/hdfs_ca_cert

#5. Export a certificate signing request from the keystore on every node; password Pukka@159357
[root@hadoop01 root]# keytool -certreq -alias hadoop01 -keystore /root/keystore -file /root/cert
[root@hadoop02 root]# keytool -certreq -alias hadoop02 -keystore /root/keystore -file /root/cert
[root@hadoop03 root]# keytool -certreq -alias hadoop03 -keystore /root/keystore -file /root/cert

#6. Sign the request with the CA to produce a certificate on every node; password Pukka@159357
[root@hadoop01 root]# openssl x509 -req -CA /root/hdfs_ca_cert -CAkey /root/hdfs_ca_key -in /root/cert -out /root/cert_signed -days 36500 -CAcreateserial

[root@hadoop02 root]# openssl x509 -req -CA /root/hdfs_ca_cert -CAkey /root/hdfs_ca_key -in /root/cert -out /root/cert_signed -days 36500 -CAcreateserial

[root@hadoop03 root]# openssl x509 -req -CA /root/hdfs_ca_cert -CAkey /root/hdfs_ca_key -in /root/cert -out /root/cert_signed -days 36500 -CAcreateserial

#7. Import the CA certificate into the keystore on every node; password Pukka@159357, answer y to trust the certificate
[root@hadoop01 root]# keytool -keystore /root/keystore -alias CARoot -import -file /root/hdfs_ca_cert
[root@hadoop02 root]# keytool -keystore /root/keystore -alias CARoot -import -file /root/hdfs_ca_cert
[root@hadoop03 root]# keytool -keystore /root/keystore -alias CARoot -import -file /root/hdfs_ca_cert

#8. Import the signed certificate into the keystore on every node; password Pukka@159357
[root@hadoop01 root]# keytool -keystore /root/keystore -alias hadoop01 -import -file /root/cert_signed
[root@hadoop02 root]# keytool -keystore /root/keystore -alias hadoop02 -import -file /root/cert_signed
[root@hadoop03 root]# keytool -keystore /root/keystore -alias hadoop03 -import -file /root/cert_signed

#9. Copy the keystore and truststore to the target directory /home/pukka
#Run on hadoop01-03
cp keystore truststore /home/pukka
chown -R pukka:hadoop /home/pukka/keystore 
chown -R pukka:hadoop /home/pukka/truststore 
chmod 770 /home/pukka/keystore 
chmod 770 /home/pukka/truststore
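The contents of both stores can be listed as a final check (optional; this assumes the paths and password above). Each keystore should contain two entries, CARoot and the node's own alias, and each truststore should contain CARoot:

keytool -list -keystore /home/pukka/keystore -storepass Pukka@159357
keytool -list -keystore /home/pukka/truststore -storepass Pukka@159357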
6.2. Configure the ssl-server.xml file

ssl-server.xml lives in /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop.

cd /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop
mv ssl-server.xml.example ssl-server.xml
vim ssl-server.xml
#The configured ssl-server.xml is as follows:
<configuration>

<property>
  <name>ssl.server.truststore.location</name>
  <value>/home/pukka/truststore</value>
  <description>Truststore to be used by NN and DN. Must be specified.
  </description>
</property>

<property>
  <name>ssl.server.truststore.password</name>
  <value>Pukka@159357</value>
  <description>Optional. Default value is "".
  </description>
</property>

<property>
  <name>ssl.server.truststore.type</name>
  <value>jks</value>
  <description>Optional. The keystore file format, default value is "jks".
  </description>
</property>

<property>
  <name>ssl.server.truststore.reload.interval</name>
  <value>10000</value>
  <description>Truststore reload check interval, in milliseconds.
  Default value is 10000 (10 seconds).
  </description>
</property>

<property>
  <name>ssl.server.keystore.location</name>
  <value>/home/pukka/keystore</value>
  <description>Keystore to be used by NN and DN. Must be specified.
  </description>
</property>

<property>
  <name>ssl.server.keystore.password</name>
  <value>Pukka@159357</value>
  <description>Must be specified.
  </description>
</property>

<property>
  <name>ssl.server.keystore.keypassword</name>
  <value>Pukka@159357</value>
  <description>Must be specified.
  </description>
</property>

<property>
  <name>ssl.server.keystore.type</name>
  <value>jks</value>
  <description>Optional. The keystore file format, default value is "jks".
  </description>
</property>

<property>
  <name>ssl.server.exclude.cipher.list</name>
  <value>TLS_ECDHE_RSA_WITH_RC4_128_SHA,SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
  SSL_RSA_WITH_DES_CBC_SHA,SSL_DHE_RSA_WITH_DES_CBC_SHA,
  SSL_RSA_EXPORT_WITH_RC4_40_MD5,SSL_RSA_EXPORT_WITH_DES40_CBC_SHA,
  SSL_RSA_WITH_RC4_128_MD5</value>
  <description>Optional. The weak security cipher suites that you want excluded
  from SSL communication.</description>
</property>

</configuration>
#Distribute the file to the other nodes
scp -r -P2406 ./ssl-server.xml pukka@hadoop02:$PWD
scp -r -P2406 ./ssl-server.xml pukka@hadoop03:$PWD
6.3. Configure the ssl-client.xml file

ssl-client.xml lives in /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop.

cd /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop
mv ssl-client.xml.example ssl-client.xml
vim ssl-client.xml
#The configured ssl-client.xml is as follows:
<configuration>

<property>
  <name>ssl.client.truststore.location</name>
  <value>/home/pukka/truststore</value>
  <description>Truststore to be used by clients like distcp. Must be
  specified.
  </description>
</property>

<property>
  <name>ssl.client.truststore.password</name>
  <value>Pukka@159357</value>
  <description>Optional. Default value is "".
  </description>
</property>

<property>
  <name>ssl.client.truststore.type</name>
  <value>jks</value>
  <description>Optional. The keystore file format, default value is "jks".
  </description>
</property>

<property>
  <name>ssl.client.truststore.reload.interval</name>
  <value>10000</value>
  <description>Truststore reload check interval, in milliseconds.
  Default value is 10000 (10 seconds).
  </description>
</property>

<property>
  <name>ssl.client.keystore.location</name>
  <value>/home/pukka/keystore</value>
  <description>Keystore to be used by clients like distcp. Must be
  specified.
  </description>
</property>

<property>
  <name>ssl.client.keystore.password</name>
  <value>Pukka@159357</value>
  <description>Optional. Default value is "".
  </description>
</property>

<property>
  <name>ssl.client.keystore.keypassword</name>
  <value>Pukka@159357</value>
  <description>Optional. Default value is "".
  </description>
</property>

<property>
  <name>ssl.client.keystore.type</name>
  <value>jks</value>
  <description>Optional. The keystore file format, default value is "jks".
  </description>
</property>

</configuration>
#Distribute the file to the other nodes
scp -r -P2406 ./ssl-client.xml pukka@hadoop02:$PWD
scp -r -P2406 ./ssl-client.xml pukka@hadoop03:$PWD

7. Configure the LinuxContainerExecutor (LCE) for YARN

When Kerberos authentication and secure communication are used, YARN must run containers through the LCE. The LinuxContainerExecutor (LCE) is the Hadoop executor that creates, starts and stops application containers and can isolate and limit the resources a container uses, such as memory, CPU, network and disk.

7.1. Change the owner and permissions of container-executor
#Add the pukka user to the hadoop group
usermod -a -G hadoop pukka
#Change ownership of the Hadoop installation directory
chown -R pukka:hadoop /opt/context/software/bigdata/hadoop-3.2.3
#Run on all nodes hadoop01-03
chown root:hadoop /opt/context/software/bigdata/hadoop-3.2.3/bin/container-executor
chmod 6050 /opt/context/software/bigdata/hadoop-3.2.3/bin/container-executor
7.2. Configure container-executor.cfg
#Edit on hadoop01, then send to the other nodes
vim /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop/container-executor.cfg
#The contents are as follows
yarn.nodemanager.linux-container-executor.group=hadoop
banned.users=yarn,mapred
min.user.id=0
allowed.system.users=foo,bar,pukka,root,hdfs
feature.tc.enabled=false

#Send to the other nodes
cd /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop/
scp -r -P 2406 container-executor.cfg pukka@hadoop02:$PWD
scp -r -P 2406 container-executor.cfg pukka@hadoop03:$PWD
7.3. Change the owner and permissions of container-executor.cfg
#Run on all nodes
chown root:hadoop /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop/container-executor.cfg
chown root:hadoop /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop
chown root:hadoop /opt/context/software/bigdata/hadoop-3.2.3/etc
chown root:hadoop /opt/context/software/bigdata/hadoop-3.2.3
chown root:hadoop /opt/context/software/bigdata
chown root:hadoop /opt/context/software
chown root:hadoop /opt/context
chown root:hadoop /opt
chmod 400 /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop/container-executor.cfg
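The binary's setup can be validated before restarting YARN. To the best of my knowledge, container-executor accepts a --checksetup option that verifies its own permissions and the container-executor.cfg settings; treat the exact flag as an assumption and check your Hadoop version if it is rejected:

#Should exit quietly, or report what is still misconfigured
/opt/context/software/bigdata/hadoop-3.2.3/bin/container-executor --checksetup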
7.4. Configure yarn-site.xml
#Configure on hadoop01, then send to the other nodes
vim /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop/yarn-site.xml 
#Append the following
    <!-- Use LinuxContainerExecutor to manage NodeManager containers -->
    <property>
      <name>yarn.nodemanager.container-executor.class</name>
      <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
    </property>
  
    <!-- Group of the user that launches the NodeManager -->
    <property>
      <name>yarn.nodemanager.linux-container-executor.group</name>
      <value>hadoop</value>
    </property>
  
    <!-- Path to the container-executor binary -->
    <property>
      <name>yarn.nodemanager.linux-container-executor.path</name>
      <value>/opt/context/software/bigdata/hadoop-3.2.3/bin/container-executor</value>
    </property>

#Send to the other nodes
cd /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop
scp -r -P 2406 yarn-site.xml pukka@hadoop02:$PWD
scp -r -P 2406 yarn-site.xml pukka@hadoop03:$PWD

8. Start the cluster with Kerberos enabled

8.1. Modify the start scripts

Edit start-dfs.sh and stop-dfs.sh under /opt/context/software/bigdata/hadoop-3.2.3/sbin.

# Add the following to both files
HDFS_DATANODE_USER=hdfs
HDFS_DATANODE_SECURE_USER=hdfs
HDFS_NAMENODE_USER=hdfs
HDFS_JOURNALNODE_USER=hdfs
HDFS_ZKFC_USER=hdfs

Edit start-yarn.sh and stop-yarn.sh, also under /opt/context/software/bigdata/hadoop-3.2.3/sbin.

#Add the following to both files
YARN_RESOURCEMANAGER_USER=yarn
YARN_NODEMANAGER_USER=yarn

Send the scripts to the other nodes.

cd /opt/context/software/bigdata/hadoop-3.2.3/sbin
scp -r -P 2406 start-dfs.sh stop-dfs.sh start-yarn.sh stop-yarn.sh pukka@hadoop02:$PWD
scp -r -P 2406 start-dfs.sh stop-dfs.sh start-yarn.sh stop-yarn.sh pukka@hadoop03:$PWD
8.2. Start the cluster
cd /opt/context/software/bigdata/hadoop-3.2.3/sbin
./start-dfs.sh
./start-yarn.sh

After running the scripts, the NameNode and DataNode processes did not start. The error log shows:

2023-09-07 14:12:20,713 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: registered UNIX signal handlers for [TERM, HUP, INT]
2023-09-07 14:12:21,586 INFO org.apache.hadoop.hdfs.server.datanode.checker.ThrottledAsyncChecker: Scheduling a check for [DISK]file:/opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/data
2023-09-07 14:12:21,691 WARN org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker: Exception checking StorageLocation [DISK]file:/opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/data
org.apache.hadoop.util.DiskChecker$DiskErrorException: Directory is not readable: /opt/context/software/bigdata/hadoop-3.2.3/tmp/dfs/data
        at org.apache.hadoop.util.DiskChecker.checkAccessByFileMethods(DiskChecker.java:162)
        at org.apache.hadoop.util.DiskChecker.checkDirInternal(DiskChecker.java:142)
        at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:116)
        at org.apache.hadoop.hdfs.server.datanode.StorageLocation.check(StorageLocation.java:239)
        at org.apache.hadoop.hdfs.server.datanode.StorageLocation.check(StorageLocation.java:52)
        at org.apache.hadoop.hdfs.server.datanode.checker.ThrottledAsyncChecker$1.call(ThrottledAsyncChecker.java:142)
        at com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
        at com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
        at com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
2023-09-07 14:12:21,696 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: Exception in secureMain
org.apache.hadoop.util.DiskChecker$DiskErrorException: Too many failed volumes - current valid volumes: 0, volumes configured: 1, volumes failed: 1, volume failures tolerated: 0
        at org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker.check(StorageLocationChecker.java:231)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:2806)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:2721)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.createDataNode(DataNode.java:2763)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.secureMain(DataNode.java:2907)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.main(DataNode.java:2931)

Root cause: the scripts were started as the pukka user, which cannot read files owned by hdfs:hadoop.

Fix:

#Add the pukka user to the hadoop group
usermod -a -G hadoop pukka
#Adjust the Hadoop directory ownership
chown -R pukka:hadoop /opt/context/software/bigdata/hadoop-3.2.3
chown root:hadoop /opt/context/software/bigdata/hadoop-3.2.3/bin/container-executor
chmod 6050 /opt/context/software/bigdata/hadoop-3.2.3/bin/container-executor
chown root:hadoop /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop/container-executor.cfg
chown root:hadoop /opt/context/software/bigdata/hadoop-3.2.3/etc/hadoop
chown root:hadoop /opt/context/software/bigdata/hadoop-3.2.3/etc
chown root:hadoop /opt/context/software/bigdata/hadoop-3.2.3
chown root:hadoop /opt/context/software/bigdata
chown root:hadoop /opt/context/software
chown root:hadoop /opt/context
chown root:hadoop /opt

After rerunning start-dfs.sh and start-yarn.sh, the cluster started successfully.

8.3. Check the Hadoop web UIs

Because SSL is enabled, the default HDFS web UI port is now 9871.

HDFS web UI: https://hadoop01:9871

YARN web UI: http://hadoop01:8088
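The endpoints can also be probed from the command line (an optional check; -k skips certificate verification because the certificate is self-signed, and the exact response codes depend on the SPNEGO settings):

curl -k -I https://hadoop01:9871
curl -I http://hadoop01:8088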

The page at https://hadoop01:9871 loads normally,

but the HDFS file data cannot be browsed.

http://hadoop01:8088 also loads normally,

but the job logs cannot be viewed.

8.4. Query HDFS with shell commands
#Create the hdfs/hadoop principal in Kerberos; run on hadoop01
kadmin.local -q "addprinc -pw Pukka@159357 hdfs/hadoop"
kadmin.local -q "ktadd -norandkey -kt /home/pukka/keytabs/hdfs.hadoop.keytab hdfs/hadoop@PUKKA.COM"
#Initialise a ticket with kinit as hdfs/hadoop; it must be the hdfs/hadoop principal, otherwise HDFS cannot be operated on
[root@hadoop01 keytabs]# kinit -kt /home/pukka/keytabs/hdfs.hadoop.keytab hdfs/hadoop@PUKKA.COM
[root@hadoop01 keytabs]# klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: hdfs/hadoop@PUKKA.COM

Valid starting       Expires              Service principal
2023-09-08T11:29:55  2023-09-09T11:29:55  krbtgt/PUKKA.COM@PUKKA.COM
        renew until 2023-09-15T11:29:55
[root@hadoop01 keytabs]# hdfs dfs -ls /
Found 3 items
drwxr-xr-x   - pukka supergroup          0 2023-08-30 13:33 /data
drwx-wx-wx   - pukka supergroup          0 2023-08-29 16:41 /tmp
drwxr-xr-x   - root  supergroup          0 2023-08-30 14:06 /user

Problem encountered: hdfs dfs -ls / could not list HDFS.

Error message:

2023-09-08 11:24:36,233 WARN ipc.Client: Exception encountered while connecting to the server
org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]
        at org.apache.hadoop.security.SaslRpcClient.selectSaslClient(SaslRpcClient.java:173)
        at org.apache.hadoop.security.SaslRpcClient.saslConnect(SaslRpcClient.java:390)
        at org.apache.hadoop.ipc.Client$Connection.setupSaslConnection(Client.java:623)
        at org.apache.hadoop.ipc.Client$Connection.access$2300(Client.java:414)
        at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:832)
        at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:828)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
        at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:828)
        at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:414)
        at org.apache.hadoop.ipc.Client.getConnection(Client.java:1653)
        at org.apache.hadoop.ipc.Client.call(Client.java:1469)
        at org.apache.hadoop.ipc.Client.call(Client.java:1422)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:231)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:118)
        at com.sun.proxy.$Proxy9.getFileInfo(Unknown Source)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getFileInfo(ClientNamenodeProtocolTranslatorPB.java:910)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422)
        at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165)
        at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157)
        at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359)
        at com.sun.proxy.$Proxy10.getFileInfo(Unknown Source)
        at org.apache.hadoop.hdfs.DFSClient.getFileInfo(DFSClient.java:1679)
        at org.apache.hadoop.hdfs.DistributedFileSystem$29.doCall(DistributedFileSystem.java:1602)
        at org.apache.hadoop.hdfs.DistributedFileSystem$29.doCall(DistributedFileSystem.java:1599)
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
        at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:1614)
        at org.apache.hadoop.fs.Globber.getFileStatus(Globber.java:65)
        at org.apache.hadoop.fs.Globber.doGlob(Globber.java:294)
        at org.apache.hadoop.fs.Globber.glob(Globber.java:149)
        at org.apache.hadoop.fs.FileSystem.globStatus(FileSystem.java:2050)
        at org.apache.hadoop.fs.shell.PathData.expandAsGlob(PathData.java:353)
        at org.apache.hadoop.fs.shell.Command.expandArgument(Command.java:250)
        at org.apache.hadoop.fs.shell.Command.expandArguments(Command.java:233)
        at org.apache.hadoop.fs.shell.FsCommand.processRawArguments(FsCommand.java:104)
        at org.apache.hadoop.fs.shell.Command.run(Command.java:177)
        at org.apache.hadoop.fs.FsShell.run(FsShell.java:327)
        at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:76)
        at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:90)
        at org.apache.hadoop.fs.FsShell.main(FsShell.java:390)
ls: DestHost:destPort hadoop01:9000 , LocalHost:localPort hadoop01/10.0.16.159:0. Failed on local exception: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]

Check the ticket cache:

#On this node the ticket cache is KCM:0
[root@hadoop01 keytabs]# kinit -kt ./hdfs.keytab hdfs/admin@PUKKA.COM
[root@hadoop01 keytabs]# klist
Ticket cache: KCM:0
Default principal: hdfs/admin@PUKKA.COM

Valid starting       Expires              Service principal
2023-09-08T11:05:47  2023-09-09T11:05:47  krbtgt/PUKKA.COM@PUKKA.COM
        renew until 2023-09-15T11:05:47
#On a working node the ticket cache is FILE:/tmp/krb5cc_0
[root@hadoop01 keytabs]# klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: hdfs/hadoop@PUKKA.COM

Valid starting       Expires              Service principal
2023-09-08T11:29:55  2023-09-09T11:29:55  krbtgt/PUKKA.COM@PUKKA.COM
        renew until 2023-09-15T11:29:55

Troubleshooting:

Investigation showed that /etc/krb5.conf contains the line
includedir /etc/krb5.conf.d/
Looking at the configuration files under /etc/krb5.conf.d/:
[root@hadoop01 keytabs]# cd /etc/krb5.conf.d/
[root@hadoop01 krb5.conf.d]# ll
total 4
lrwxrwxrwx. 1 root  root   42 Aug 22 19:04 crypto-policies -> /etc/crypto-policies/back-ends/krb5.config
-rw-r--r--  1 pukka pukka 493 Sep  8 11:23 kcm_default_ccache
[root@hadoop01 krb5.conf.d]# cat kcm_default_ccache
# This file should normally be installed by your distribution into a
# directory that is included from the Kerberos configuration file (/etc/krb5.conf)
# On Fedora/RHEL/CentOS, this is /etc/krb5.conf.d/
#
# To enable the KCM credential cache enable the KCM socket and the service:
#   systemctl enable sssd-kcm.socket
#   systemctl start sssd-kcm.socket
#
# To disable the KCM credential cache, comment out the following lines.

[libdefaults]
    default_ccache_name = KCM:
#Edit /etc/krb5.conf, disable includedir /etc/krb5.conf.d/ and default_ccache_name = KEYRING:persistent:%{uid}, then restart Kerberos

#includedir /etc/krb5.conf.d/

[logging]
 default = FILE:/var/log/krb5libs.log
 kdc = FILE:/var/log/krb5kdc.log
 admin_server = FILE:/var/log/kadmind.log

[libdefaults]
 dns_lookup_realm = false
 ticket_lifetime = 24h
 renew_lifetime = 7d
 forwardable = true
 rdns = false
 pkinit_anchors = /etc/pki/tls/certs/ca-bundle.crt
 default_realm = PUKKA.COM
 #default_ccache_name = KEYRING:persistent:%{uid}
 udp_preference_limit = 1
[realms]
 PUKKA.COM = {
  kdc = hadoop01
  admin_server = hadoop01
}

[domain_realm]
# .example.com = EXAMPLE.COM
# example.com = EXAMPLE.COM

Note: this is also why, right after Kerberos was first started, connecting with kadmin.local failed with a KCM connection-refused error and sssd-kcm had to be started; after this change, starting sssd-kcm is no longer needed.
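An alternative that was not used here (an assumption based on the comments in kcm_default_ccache above, which mention the sssd-kcm units) would be to disable the KCM socket itself instead of editing krb5.conf:

#Stop and disable the KCM credential cache service
systemctl disable --now sssd-kcm.socket sssd-kcm.service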

8.5. Adjust HDFS directory permissions
#Initialise a ticket
kinit -kt /home/pukka/keytabs/hdfs.hadoop.keytab hdfs/hadoop@PUKKA.COM
#Create the required HDFS directories
hdfs dfs -mkdir /tmp
hdfs dfs -mkdir /user
hdfs dfs -mkdir /tmp/logs
hdfs dfs -mkdir -p /tmp/hadoop-yarn/staging/history/done_intermediate
hdfs dfs -mkdir -p /tmp/hadoop-yarn/staging/history/done
#Set ownership and permissions on the corresponding HDFS directories
hadoop fs -chown hdfs:hadoop / /tmp /user
hadoop fs -chmod 755 /
hadoop fs -chmod 777 /tmp
hadoop fs -chmod 755 /user

hadoop fs -chown yarn:hadoop /tmp/logs
hadoop fs -chmod 777 /tmp/logs

hadoop fs -chown mapred:hadoop /tmp/hadoop-yarn/staging/history/done_intermediate
hadoop fs -chmod 777 /tmp/hadoop-yarn/staging/history/done_intermediate

hadoop fs -chown mapred:hadoop /tmp/hadoop-yarn/staging/history/
hadoop fs -chown mapred:hadoop /tmp/hadoop-yarn/staging/
hadoop fs -chown mapred:hadoop /tmp/hadoop-yarn/

hadoop fs -chmod 770 /tmp/hadoop-yarn/staging/history/
hadoop fs -chmod 770 /tmp/hadoop-yarn/staging/
hadoop fs -chmod 770 /tmp/hadoop-yarn/

hadoop fs -chown mapred:hadoop /tmp/hadoop-yarn/staging/history/done
hadoop fs -chmod 777 /tmp/hadoop-yarn/staging/history/done
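A final optional check confirms the ownership that was just applied (not part of the original steps; it simply lists the directories):

hadoop fs -ls -d / /tmp /user /tmp/logs /tmp/hadoop-yarn/staging/history/done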
