acl作为pacemaker集群的一个可选特性,默认是处于disable状态
如果acl设置为disable状态,root用户和haclient组成员对cluster配置有全部的read/write 访问权限
如果acl设置为enable状态并且进行了acl配置,仅有root和hacluster用户对cluster配置仍保有全部的read/write访问权限,haclient组其他成员的访问权限则由acl配置决定
访问权限分为read、write、deny三种kind,权限作用的对象可以是type、ID reference或者XPath表达式
acl创建role,然后可以针对role添加、删除Permission,然后将role赋予操作系统中属于haclient用户组的系统用户
一 开启集群acl属性
pcs property --defaults | grep acl
pcs property set enable-acl=true --force 或者 pcs acl enable
pcs property | grep acl
二 创建角色
在集群节点hatest1上执行命令操作:
添加针对/cib的只读角色:
pcs acl role create read-only description="Read access to cluster" read xpath /cib
添加针对/cib的读写角色:
pcs acl role create write-access description="Full access" write xpath /cib
为write_config角色添加针对/cib/configuration的写权限:
pcs acl permission add write_config write xpath /cib/configuration
查看acl:
pcs acl
三 创建用户
在集群每个使用acl用户的节点上执行命令操作:
创建系统用户,规划一个只读用户,一个读写用户
id rouser 2>/dev/null || useradd -G haclient rouser
id rwuser 2>/dev/null || useradd -G haclient rwuser
passwd rouser
passwd rwuser
在集群节点hatest1上执行命令操作:
为rouser用户设置只读角色
pcs acl user create rouser read-only
为rwuser用户设置读写角色
pcs acl user create rwuser write-access
pcs acl role assign write_config to rwuser
查看acl用户
pcs acl
四 用户权限验证
在集群每个预期使用acl用户的节点上执行命令操作:
创建一个普通系统用户
id test 2>/dev/null || useradd test
在集群节点hatest1上执行命令操作:
验证普通用户(test不属于haclient组,预期对cluster配置没有任何访问权限):
su - test
pcs acl
exit
验证rouser用户(预期可以读取集群状态,但设置属性的写操作会失败):
su - rouser
pcs acl
pcs client local-auth
pcs status
pcs property set stonith-enabled=true
exit
验证rwuser用户
su - rwuser
pcs acl
pcs client local-auth
pcs status
pcs property | grep stonith-enabled
pcs property set stonith-enabled=true
pcs property | grep stonith-enabled
exit
五 cib文件内容
对/cib存在权限就是对整个
<cib>....</cib>
内容存在权限
对/cib/configuration存在权限就是对<configuration>与</configuration>标签之间的内容存在权限
<cib>
<configuration>
....
</configuration>
</cib>
# pcs cluster cib
<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.4" epoch="27" num_updates="8" admin_epoch="0" cib-last-written="Wed Jan 27 15:27:17 2021" update-origin="hatest1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="2">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-6.oe1-2deceaa3ae"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="hacluster"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="hatest1"/>
<node id="2" uname="hatest2"/>
</nodes>
<resources>
<primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
<operations>
<op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
<op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
<op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
<op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
<op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
<op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
</operations>
</primitive>
</resources>
<constraints/>
<acls>
<acl_role description="Read access to cluster" id="read-only">
<acl_permission id="read-only-read" kind="read" xpath="/cib"/>
</acl_role>
<acl_role description="Full access" id="write-access">
<acl_permission id="write-access-write" kind="write" xpath="/cib"/>
</acl_role>
<acl_target id="rouser">
<role id="read-only"/>
</acl_target>
<acl_target id="rwuser">
<role id="write-access"/>
<role id="write_config"/>
</acl_target>
<acl_role id="write_config">
<acl_permission id="write_config-write" kind="write" xpath="/cib/configuration"/>
</acl_role>
</acls>
</configuration>
<status>
<node_state id="2" uname="hatest2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="2">
<lrm_resources>
<lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:123:7:d7de0b72-3f6a-40f6-9326-6a11e81ceed0" transition-magic="0:7;2:123:7:d7de0b72-3f6a-40f6-9326-6a11e81ceed0" exit-reason="" on_node="hatest2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1611732437" last-run="1611732437" exec-time="15" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="1" uname="hatest1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="1">
<lrm_resources>
<lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="3:123:0:d7de0b72-3f6a-40f6-9326-6a11e81ceed0" transition-magic="0:0;3:123:0:d7de0b72-3f6a-40f6-9326-6a11e81ceed0" exit-reason="" on_node="hatest1" call-id="6" rc-code="0" op-status="0" interval="0" last-rc-change="1611732437" last-run="1611732437" exec-time="17" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="4:123:0:d7de0b72-3f6a-40f6-9326-6a11e81ceed0" transition-magic="0:0;4:123:0:d7de0b72-3f6a-40f6-9326-6a11e81ceed0" exit-reason="" on_node="hatest1" call-id="7" rc-code="0" op-status="0" interval="10000" last-rc-change="1611732437" exec-time="13" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>