## kerberos客户端使用: kadmin (需要登录验证)
## kerberos服务端使用: kadmin.local (无需登录验证)
[root@cdh-node1 ~]# kadmin
Authenticating as principal scmroot/admin@CDH.COM with password.
Password for scmroot/admin@CDH.COM:
kadmin: ?
Available kadmin requests:
add_principal, addprinc, ank
Add principal
delete_principal, delprinc
Delete principal
modify_principal, modprinc
Modify principal
rename_principal, renprinc
Rename principal
change_password, cpw Change password
get_principal, getprinc Get principal
list_principals, listprincs, get_principals, getprincs
List principals
add_policy, addpol Add policy
modify_policy, modpol Modify policy
delete_policy, delpol Delete policy
get_policy, getpol Get policy
list_policies, listpols, get_policies, getpols
List policies
get_privs, getprivs Get privileges
ktadd, xst Add entry(s) to a keytab
ktremove, ktrem Remove entry(s) from a keytab
lock Lock database exclusively (use with extreme caution!)
unlock Release exclusive database lock
purgekeys Purge previously retained old keys from a principal
get_strings, getstrs Show string attributes on a principal
set_string, setstr Set a string attribute on a principal
del_string, delstr Delete a string attribute on a principal
list_requests, lr, ? List available requests.
quit, exit, q Exit program.
kadmin: change_password
usage: change_password [-randkey][-keepold][-e keysaltlist][-pw password] principal
kadmin: change_password
usage: change_password [-randkey][-keepold][-e keysaltlist][-pw password] principal
kadmin: change_password scmroot/admin@CDH.COM
Enter password for principal "scmroot/admin@CDH.COM":
Re-enter password for principal "scmroot/admin@CDH.COM":
Password for "scmroot/admin@CDH.COM" changed.
kadmin: listprincs
HTTP/cdh-node1@CDH.COM
hbase/cdh-node1@CDH.COM
kadmin: ktadd
Usage: ktadd [-k[eytab] keytab][-q][-e keysaltlist][principal | -glob princ-exp][...]
kadmin: ktadd -k /root/hbae.keytab hbase/cdh-node1@CDH.COM
Entry for principal hbase/cdh-node1@CDH.COM with kvno 3, encryption type aes256-cts-hmac-sha1-96 added to keytab WRFILE:/root/hbae.keytab.
Entry for principal hbase/cdh-node1@CDH.COM with kvno 3, encryption type aes128-cts-hmac-sha1-96 added to keytab WRFILE:/root/hbae.keytab.
Entry for principal hbase/cdh-node1@CDH.COM with kvno 3, encryption type des3-cbc-sha1 added to keytab WRFILE:/root/hbae.keytab.
Entry for principal hbase/cdh-node1@CDH.COM with kvno 3, encryption type arcfour-hmac added to keytab WRFILE:/root/hbae.keytab.
Entry for principal hbase/cdh-node1@CDH.COM with kvno 3, encryption type des-hmac-sha1 added to keytab WRFILE:/root/hbae.keytab.
Entry for principal hbase/cdh-node1@CDH.COM with kvno 3, encryption type des-cbc-md5 added to keytab WRFILE:/root/hbae.keytab.
kadmin: quit
[root@cdh-node1 ~]# ls /root/hbae.keytab
/root/hbae.keytab
#################### 1. Install the KDC service ####################
yum -y install krb5-libs krb5-workstation krb5-auth-dialog krb5-server

## a) Client-side config (/etc/krb5.conf): rename the realm from EXAMPLE to CDH
##    and point the kdc/admin_server entries at this host.
sed -i 's/EXAMPLE/CDH/' /etc/krb5.conf
sed -i "s/example.com/$(hostname)/" /etc/krb5.conf

## b) Server-side ACL (/var/kerberos/krb5kdc/kadm5.acl): same realm rename,
##    so */admin@CDH.COM gets full admin privileges.
sed -i 's/EXAMPLE/CDH/' /var/kerberos/krb5kdc/kadm5.acl

## c) Server-side KDC config (/var/kerberos/krb5kdc/kdc.conf): rename the realm
##    and insert max_renewable_life into the realm stanza (line 8) so that
##    tickets can be renewed for up to 7 days.
sed -i -e '8i\\tmax_renewable_life = 7d 0h 0m 0s' -e 's/EXAMPLE/CDH/' /var/kerberos/krb5kdc/kdc.conf
#################### 2. Create the Kerberos database and KDC admin account ####################
## a) Create the KDC database for realm CDH.COM.
##      -P kdc : master key password
##      -s     : also write a stash file holding the master key.
##    If you choose not to install a stash file, the KDC will prompt you for
##    the master key each time it starts up. This means the KDC cannot start
##    automatically, e.g. after a system reboot.
kdb5_util create -s -r CDH.COM -P kdc

## Files created under /var/kerberos/krb5kdc/ (check with: ls -a /var/kerberos/krb5kdc/):
##   principal, principal.ok  - the two Kerberos database files
##   principal.kadm5          - the Kerberos administrative database file
##   principal.kadm5.lock     - the administrative database lock file
##   .k5.CDH.COM              - the stash file
## (kadm5.acl and kdc.conf were already present from the package install.)

## b) Create the KDC administrator principal, password "admin".
##    NOTE: the realm must match the database realm CDH.COM — the original
##    notes used CDH.KDC.COM here, which does not exist in this database.
echo -e "addprinc admin/admin@CDH.COM \nadmin\nadmin" | kadmin.local
## Interactive equivalent => kadmin.local: addprinc admin/admin@CDH.COM

## Enable and start the KDC and kadmin services, then the account can be tested.
chkconfig krb5kdc on
chkconfig kadmin on
service krb5kdc start
service kadmin start
#################### 3. Create the CDH administrator principal in Kerberos ####################
## Interactive equivalent => kadmin.local: addprinc scmroot/admin@CDH.COM (password: scmroot)
echo -e "addprinc scmroot/admin@CDH.COM \nscmroot\nscmroot" | kadmin.local

## Verify the account by obtaining a TGT (password fed via stdin).
## Note the space after `echo` — the original notes had `echo"scmroot"`,
## which is a shell syntax error (command `echo"scmroot"` not found).
echo "scmroot" | kinit scmroot/admin@CDH.COM
klist
3, CDH集群启用kerberos
a, Administrator: security
b, Enable Kerberos
c, 确认并勾选
d, 填写kdc信息
e, 略过CDH管理kerberos, 填写CDH管理员账号
4, 使用kerberos认证,调用hive,hbase等服务
a, 获取CDH各服务的kerberos账号
[root@cdh-node1 ~]# kdestroy
[root@cdh-node1 ~]# klist
klist: No credentials cache found (ticket cache FILE:/tmp/krb5cc_0)
[root@cdh-node1 ~]# su hdfs
[hdfs@cdh-node1 root]$ hive
##登录报错,Caused by: GSSException: No valid credentials provided
Logging initialized using configuration in jar:file:/opt/cloudera/parcels/CDH-5.12.0-1.cdh5.12.0.p0.29/jars/hive-common-1.1.0-cdh5.12.0.jar!/hive-log4j.properties
Exception in thread "main" java.lang.RuntimeException: java.io.IOException: Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt)]; Host Details : local host is: "cdh-node1/192.168.56.161"; destination host is: "cdh-node1":8020; ..........
#2, 查询cdh中的hive,hbase等服务注册的kerberos账号
[root@cdh-node1 ~]# klist
klist: No credentials cache found (ticket cache FILE:/tmp/krb5cc_0)
[root@cdh-node1 ~]# echo -e "scmroot" |kinit scmroot/admin@CDH.COM
Password for scmroot/admin@CDH.COM:
[root@cdh-node1 ~]# klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: scmroot/admin@CDH.COM
Valid starting Expires Service principal
01/20/20 02:43:24 01/21/20 02:43:24 krbtgt/CDH.COM@CDH.COM
renew until 01/27/20 02:43:24
[root@cdh-node1 ~]# kadmin
Authenticating as principal scmroot/admin@CDH.COM with password.
Password for scmroot/admin@CDH.COM:
kadmin: listprincs
HTTP/cdh-node1@CDH.COM
K/M@CDH.COM
admin/admin@CDH.COM
hbase/cdh-node1@CDH.COM
hdfs/cdh-node1@CDH.COM
hive/cdh-node1@CDH.COM
hue/cdh-node1@CDH.COM
kadmin/admin@CDH.COM
kadmin/cdh-node2@CDH.COM
kadmin/changepw@CDH.COM
krbtgt/CDH.COM@CDH.COM
mapred/cdh-node1@CDH.COM
oozie/cdh-node1@CDH.COM
scm/admin@CDH.COM
scmroot/admin@CDH.COM
test/cdh-node2@CDH.COM
yarn/cdh-node1@CDH.COM
zookeeper/cdh-node1@CDH.COM
b, 使用keytab登录hive
# 找到hive的keytab文件,验证kerberos账号
[root@cdh-node1 ~]# find /opt/ |grep keytab |grep hive
/opt/cm-5.12.2/run/cloudera-scm-agent/process/91-hive-HIVESERVER2/hive.keytab
/opt/cm-5.12.2/run/cloudera-scm-agent/process/90-hive-HIVEMETASTORE/hive.keytab
[root@cdh-node1 ~]# kinit -kt /opt/cm-5.12.2/run/cloudera-scm-agent/process/91-hive-HIVESERVER2/hive.keytab hive/cdh-node1@CDH.COM
[root@cdh-node1 ~]# klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: hive/cdh-node1@CDH.COM
Valid starting Expires Service principal
01/20/20 02:45:44 01/21/20 02:45:44 krbtgt/CDH.COM@CDH.COM
renew until 01/25/20 02:45:44
#登录Hive
[root@cdh-node1 ~]# hive
Logging initialized using configuration in jar:file:/opt/cloudera/parcels/CDH-5.12.0-1.cdh5.12.0.p0.29/jars/hive-common-1.1.0-cdh5.12.0.jar!/hive-log4j.properties
WARNING: Hive CLI is deprecated and migration to Beeline is recommended.
hive> show tables;
OK
t1
Time taken: 1.948 seconds, Fetched: 1 row(s)
hive> select * from t1;
OK
1
2
Time taken: 0.398 seconds, Fetched: 2 row(s)
hive> insert into t1 values(3);
Query ID = root_20200120024646_9b765d52-d72e-432d-8bd9-2c73405b856e
Total jobs = 3
Launching Job 1 out of 3
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1579483243323_0001, Tracking URL = http://cdh-node1:8088/proxy/application_1579483243323_0001/
Kill Command = /opt/cloudera/parcels/CDH-5.12.0-1.cdh5.12.0.p0.29/lib/hadoop/bin/hadoop job -kill job_1579483243323_0001
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
2020-01-20 02:46:57,558 Stage-1 map = 0%, reduce = 0%
2020-01-20 02:47:04,998 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 1.49 sec
MapReduce Total cumulative CPU time: 1 seconds 490 msec
Ended Job = job_1579483243323_0001
Stage-4 is selected by condition resolver.
Stage-3 is filtered out by condition resolver.
Stage-5 is filtered out by condition resolver.
Moving data to: hdfs://cdh-node1:8020/user/hive/warehouse/t1/.hive-staging_hive_2020-01-20_02-46-41_281_126491748162214148-1/-ext-10000
Loading data to table default.t1
Table default.t1 stats: [numFiles=3, numRows=3, totalSize=6, rawDataSize=3]
MapReduce Jobs Launched:
Stage-Stage-1: Map: 1 Cumulative CPU: 1.49 sec HDFS Read: 3214 HDFS Write: 68 SUCCESS
Total MapReduce CPU Time Spent: 1 seconds 490 msec
OK
Time taken: 25.386 seconds
hive>
c, 使用keytab登录hbase
#################### 登录hbase ####################
[root@cdh-node1 ~]# kinit -kt /opt/cm-5.12.2/run/cloudera-scm-agent/process/82-hbase-REGIONSERVER/hbase.keytab hbase/cdh-node1@CDH.COM
[root@cdh-node1 ~]# klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: hbase/cdh-node1@CDH.COM
Valid starting Expires Service principal
01/20/20 02:50:37 01/21/20 02:50:37 krbtgt/CDH.COM@CDH.COM
renew until 01/25/20 02:50:37
[root@cdh-node1 ~]# hbase shell
Version 1.2.0-cdh5.12.0, rUnknown, Thu Jun 29 04:42:07 PDT 2017
hbase(main):001:0> list
TABLE
t1
1 row(s) in 0.1630 seconds
=> ["t1"]
hbase(main):002:0> scan 't1'
ROW COLUMN+CELL
r1 column=f:name, timestamp=1579423658722, value=a
r2 column=f:name, timestamp=1579430009816, value=b
2 row(s) in 0.1070 seconds
hbase(main):003:0> put 't1','r3','f:name','cc'
0 row(s) in 0.0600 seconds