Hadoop notes (kept for now)

[root@nn01 hadoop]# history
1 ls
2 cd nsd1808/
3 ls
4 cd
5 ls
6 tar -zxf hadoop-2.7.6.tar.gz
7 ls
8 mv hadoop-2.7.6 /usr/local/hadoop
9 cd /usr/local/hadoop/etc/hadoop/
10 vim hadoop-env.sh
11 cd /usr/local/hadoop/
12 ./bin/hadoop version
13 cp /usr/local/hadoop/etc/hadoop/hadoop-env.sh /usr/local/hadoop/aa/a.txt
14 pwd
15 mkdir aa
16 cp /usr/local/hadoop/etc/hadoop/hadoop-env.sh /usr/local/hadoop/aa/a.txt
17 ./bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.6.jar wordcount aa bb
18 cd bb
19 ls
20 cat part-r-00000
21 cd /root/.ssh/
22 ssh-keygen -b 2018 -t rsa -N ''
23 for i in 9{1..5};do ssh-copy-id 192.168.3.$i ;done
24 ls
25 ssh node1
26 ls
27 for i in 9{1..5};do ssh-copy-id 192.168.3.$i ;done
28 ssh node1
29 ssh-copy-id 192.168.3.91
30 ls
31 rm rf *
32 rm -rf *
33 ssh-keygen -b 2018 -t rsa -N ''
34 ls
35 for i in 9{1..5};do ssh-copy-id 192.168.3.$i ;done
36 ssh node1
37 ssh-copy-id 192.168.3.91
38 ssh node1
39 ssh node2
40 vim /etc/ssh/ssh_config
41 ssh node3
42 cd /usr/local/hadoop/etc/hadoop/
43 vim core-site.xml
44 vim hdfs-site.xml
45 vim slaves
46 for i in node{1..4};do rsync -aSH /usr/local/hadoop/ $i:/usr/local/hadoop/ ;done
47 rsync -aSH /usr/local/hadoop/ 192.168.3.95:/usr/local/hadoop/
48 for i in nn01 node{1..5}; do ssh ${i} mkdir /var/hadoop; done
49 ssh-copy-id 192.168.3.90
50 ssh nn01
51 ls /var/hadoop/
52 cd
53 cd /usr/local/hadoop/
54 ./bin/hdfs namenode -format
55 ./sbin/start-dfs.sh
56 jps
57 ./bin/hdfs dfsadmin -report
58 cd
59 cd /usr/local/hadoop/etc/hadoop/
60 ls
61 mv mapred-site.xml.template mapred-site.xml
62 vim mapred-site.xml
63 vim yarn-site.xml
64 for i in node{1..5};do rsync -aSH /usr/local/hadoop/etc $i:/usr/local/hadoop/ ;done
65 for i in node{1..4};do rsync -aSH /usr/local/hadoop/etc $i:/usr/local/hadoop/ ;done
66 cd /usr/local/hadoop/
67 ./sbin/start-yarn.sh
68 jps
69 ./bin/yarn node -list
70 ./bin/hadoop fs -mkdir /abc
71 ./bin/hadoop fs -ls /
72 cd /dev/shm/
73 /usr/local/hadoop/bin/hadoop fs -get /abc/*.txt
74 cd /usr/local/hadoop/
75 ls
76 ./bin/hadoop fs -put *.txt /abc
77 cd /dev/shm/
78 /usr/local/hadoop/bin/hadoop fs -get /abc/*.txt
79 ls
80 cd /usr/local/hadoop/
81 ./bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.6.jar wordcount /abc /output
82 /usr/local/hadoop/bin/hadoop fs -ls /output
83 ./sbin/stop-all.sh
84 cd logs/
85 rm -rf *
86 jps
87 id 700
88 groupadd -g 700 nsd1808
89 useradd -u 700 -g 700 -r nsd1808
90 vim core-site.xml
91 cd ..
92 cd etc/
93 cd hadoop/
94 vim core-site.xml
95 for i in 9{1..5} nfsgw ;do rsync -aSH --delete /usr/local/hadoop/ 192.168.3.$i:/usr/local/ ;done
96 for i in 9{1..5};do rsync -aSH --delete /usr/local/hadoop/ 192.168.3.$i:/usr/local/ ;done
97 /usr/local/hadoop/sbin/start-dfs.sh
98 for i in 9{1..5};do rsync -aSH /usr/local/hadoop/ 192.168.3.$i:/usr/local/ ;done
99 for i in 9{1..5};do rsync -av /usr/local/hadoop/ 192.168.3.$i:/usr/local/ ;done
100 for i in 9{1..5};do rsync -av /usr/local/hadoop/ 192.168.3.$i:/usr/local/hadoop ;done
101 /usr/local/hadoop/sbin/start-dfs.sh
102 /usr/local/hadoop/bin/hdfs dfsadmin -report
103 /usr/local/hadoop/sbin/start-dfs.sh
104 vim /usr/local/hadoop/etc/hadoop/core-site.xml
105 /usr/local/hadoop/sbin/start-dfs.sh
106 vim /usr/local/hadoop/etc/hadoop/core-site.xml
107 /usr/local/hadoop/sbin/start-dfs.sh
108 vim /usr/local/hadoop/etc/hadoop/core-site.xml
109 for i in 9{1..5};do rsync -aSH --delete /usr/local/hadoop/ 192.168.3.$i:/usr/local/ ;done
110 for i in 9{1..5};do rsync -aSH /usr/local/hadoop/ 192.168.3.$i:/usr/local/ ;done
111 for i in 9{1..5};do rsync -aSH --delete /usr/local/hadoop/ 192.168.3.$i:/usr/local/ ;done
112 for i in 9{1..5};do scp -r /usr/local/hadoop/ 192.168.3.$i:/usr/local/hadoop/ ;done
113 ./sbin/stop-all.sh
114 cd ..
115 ./sbin/stop-all.sh
116 ./sbin/start-dfs.sh
117 vim /usr/local/hadoop/etc/hadoop/core-site.xml
118 for i in 9{1..5};do scp -r /usr/local/hadoop/ 192.168.3.$i:/usr/local/hadoop/ ;done
119 cd ..
120 ./sbin/stop-all.sh
121 cd local/
122 cd hadoop/
123 ./sbin/stop-all.sh
124 ./sbin/start-dfs.sh
125 for i in 9{1..5};do rsync -aSH --delete /usr/local/hadoop/ 192.168.3.$i:/usr/local/ ;done
126 for i in 9{1..5};do rsync -aSH /usr/local/hadoop/ 192.168.3.$i:/usr/local/ ;done
127 for i in 9{1..5};do rsync -av /usr/local/hadoop/ 192.168.3.$i:/usr/local/ ;done
128 for i in 9{1..5};do scp -r /usr/local/hadoop/ 192.168.3.$i:/usr/local/hadoop/ ;done
129 ./sbin/stop-all.sh
130 ./sbin/start-dfs.sh
131 /usr/local/hadoop/bin/hdfs dfsadmin -report
132 history
133 ls
134 cd
135 lsa
136 ls
137 tar -tf python-kazoo-2.2.1-1.el7ost.noarch.rpm
138 rpm -ivh python-kazoo-2.2.1-1.el7ost.noarch.rpm
139 ls
140 rpm -e python-kazoo-2.2.1-1.el7ost.noarch.rpm
141 rpm -e python-kazoo
142 ls
143 rm -rf python-kazoo-2.2.1-1.el7ost.noarch.rpm
144 ls
145 tar -xf zookeeper-3.4.10.tar.gz
146 ls
147 mv zookeeper-3.4.10 /usr/local/zookeeper
148 cd /usr/local/zookeeper/conf/
149 ls
150 mv zoo_sample.cfg zoo.cfg
151 chown root.root zoo.cfg
152 vim zoo.cfg
153 for i in {91..95}; do rsync -aSH --delete /usr/local/zookeeper/ 192.168.3.$i:/usr/local/zookeeper -e 'ssh' & done
154 for i in {81..83}; do rsync -aSH --delete /usr/local/zookeeper/ 192.168.3.$i:/usr/local/zookeeper -e 'ssh' & done
155 for i in {81..83}; do rsync -av /usr/local/zookeeper/ 192.168.3.$i:/usr/local/zookeeper ;done
156 vim zoo.cfg
157 for i in {81..83}; do rsync -av /usr/local/zookeeper/ 192.168.3.$i:/usr/local/zookeeper ;done
158 vim zoo.cfg
159 for i in {81..83}; do rsync -av /usr/local/zookeeper/ 192.168.3.$i:/usr/local/zookeeper ;done
160 rsync -av /usr/local/zookeeper/ 192.168.3.95:/usr/local/zookeeper
161 rm -rf /usr/local/zookeeper/
162 cd
163 cd /usr/local/hadoop/etc/hadoop/
164 vim core-site.xml
165 ls
166 vim slaves
167 scp slaves 192.168.3.91:/usr/local/hadoop/etc/hadoop/slaves
168 scp slaves 192.168.3.92:/usr/local/hadoop/etc/hadoop/slaves
169 scp slaves 192.168.3.93:/usr/local/hadoop/etc/hadoop/slaves
170 scp slaves 192.168.3.94:/usr/local/hadoop/etc/hadoop/slaves
171 cd ..
172 ./sbin/stop-all.sh
173 scp slaves 192.168.3.95:/usr/local/hadoop/etc/hadoop/slaves
174 /usr/local/hadoop/sbin/start-dfs.sh
175 /usr/local/hadoop/bin/hdfs dfsadmin -report
176 cd /var/hadoop/
177 ls
178 cd dfs/
179 ls
180 cd
181 cd /usr/local/hadoop/
182 ls
183 ./sbin/stop-all.sh
184 jps
185 rm -rf /var/hadoop/*
186 ssh node1 rm -rf /var/hadoop/*
187 ssh node2 rm -rf /var/hadoop/*
188 ssh node3 rm -rf /var/hadoop/*
189 history
190 for i in 8{1..3};do ssh-copy-id 192.168.3.$i ;done
191 ssh zk1
192 vim /etc/hosts
193 for i in 9{1..5};do scp /etc/hosts 192.168.3.$i:/etc/hosts;done
194 for i in 8{1..3};do scp /etc/hosts 192.168.3.$i:/etc/hosts;done
195 ls
196 cd etc/hadoop/
197 vim core-site.xml
198 vim hdfs-site.xml
199 vim yarn-site.xml
200 ls
201 for i in 9{1..4};do rsync -aSH --delete /usr/local/hadoop/ 192.168.3.$i:/usr/local/hadoop ;done
202 jps
203 cd
204 for i in nn02 nfsgw node{1..3} zk{1..3};do rsync -aSH --delete /usr/local/hadoop/ $i:/usr/local/ & ;done
205 for i in nn02 nfsgw node{1..3} zk{1..3};do rsync -aSH --delete /usr/local/hadoop/ $i:/usr/local/ ;done
206 /usr/local/hadoop/bin/hdfs zkfc -formatZK
207 for i in nn02 nfsgw node{1..3} zk{1..3};do rsync -aSH /usr/local/hadoop/ $i:/usr/local/ ;done
208 for i in nn02 nfsgw node{1..3} zk{1..3};do rsync -av /usr/local/hadoop/ $i:/usr/local/ ;done
209 for i in nn02 nfsgw node{1..3} zk{1..3};do rsync -aSH /usr/local/hadoop/ $i:/usr/local/ ;done
210 for i in nn02 nfsgw node{1..3} zk{1..3};do rsync -aSH --delete /usr/local/hadoop/ $i:/usr/local/ ;done
211 for i in nn02 nfsgw node{1..3} zk{1..3};do rsync -aSH --delete /usr/local/hadoop/ $i:/usr/local/hadoop/ ;done
212 /usr/local/hadoop//bin/hdfs namenode -format
213 cd /var/hadoop/dfs/
214 ls
215 /usr/local/hadoop/bin/hdfs namenode -initializeSharedEdits
216 /usr/local/hadoop/sbin/start-all.sh
217 /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn1
218 /usr/local/hadoop/bin/yarn rmadmin -getServiceState rm1
219 /usr/local/hadoop/bin/hdfs dfsadmin -report
220 /usr/local/hadoop/bin/yarn node -list
221 /usr/local/hadoop/bin/hadoop fs -ls /
222 /usr/local/hadoop/bin/hadoop fs -ls hdfs://nsd1808ha/
223 /usr/local/hadoop/bin/hadoop fs -mkdir hdfs://nsd1808ha/aa
224 /usr/local/hadoop/bin/hadoop fs -ls hdfs://nsd1808ha/
225 /usr/local/hadoop/bin/hadoop fs -put *.txt /aa
226 cd /usr/local/hadoop/
227 /usr/local/hadoop/bin/hadoop fs -put *.txt /aa
228 /usr/local/hadoop/bin/hadoop fs -ls hdfs://nsd1808ha/
229 ls
230 /usr/local/hadoop/bin/hadoop fs -ls hdfs://nsd1808ha/aa
231 jps
232 /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn1
233 /usr/local/hadoop/sbin/hadoop-daemon.sh stop namenode
234 /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn1
235 /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn2
236 /usr/local/hadoop/bin/yarn rmadmin -getServiceState rm1
237 /usr/local/hadoop/sbin/yarn-daemon.sh stop resourcemanager
238 /usr/local/hadoop/bin/yarn rmadmin -getServiceState rm2
239 /usr/local/hadoop/sbin/hadoop-daemon.sh start namenode
240 /usr/local/hadoop/sbin/yarn-daemon.sh start resourcemanager
241 /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn1
242 /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn2
243 /usr/local/hadoop/bin/yarn rmadmin -getServiceState rm1
244 /usr/local/hadoop/bin/yarn rmadmin -getServiceState rm2
245 cat /etc/hosts
246 history
247 cat /etc/hosts
248 history | grep hosts
249*
250 history | grep hosts
251 history
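
The nn01 history above shows several repeated rsync and start attempts; condensed below is one plausible bring-up order consistent with that log. This is a hedged reconstruction, not a verbatim excerpt, using the hostnames and paths the log itself uses (nn01/nn02, node1-3, zk1-3, /usr/local/hadoop, /var/hadoop):

# on zk1-zk3: start ZooKeeper, then the JournalNodes (see the zk* histories further down)
/usr/local/zookeeper/bin/zkServer.sh start
/usr/local/hadoop/sbin/hadoop-daemon.sh start journalnode

# on nn01: register the HA znode in ZooKeeper and format the NameNode
/usr/local/hadoop/bin/hdfs zkfc -formatZK
/usr/local/hadoop/bin/hdfs namenode -format

# push the installation to every other host
for i in nn02 node{1..3} zk{1..3}; do rsync -aSH --delete /usr/local/hadoop/ $i:/usr/local/hadoop/ ; done

# on nn02: copy the freshly formatted metadata before the first start (see the nn02 history below)
rsync -aSH nn01:/var/hadoop/ /var/hadoop/

# on nn01: start HDFS + YARN, then start the second ResourceManager on nn02 by hand
/usr/local/hadoop/sbin/start-all.sh
ssh nn02 /usr/local/hadoop/sbin/yarn-daemon.sh start resourcemanager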
#########################################################################################
[root@nn01 hadoop]# cat core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://nsd1808ha</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/var/hadoop</value>
    </property>
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>zk1:2181,zk2:2181,zk3:2181</value>
    </property>
    <property>
        <name>hadoop.proxyuser.nsd1808.groups</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.nsd1808.hosts</name>
        <value>*</value>
    </property>
</configuration>
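
A quick check that clients actually resolve this nameservice, assuming the conf directory above is the active one. getconf only reads the configuration, so it works even before the daemons are up:

/usr/local/hadoop/bin/hdfs getconf -confKey fs.defaultFS    # expect hdfs://nsd1808ha
/usr/local/hadoop/bin/hdfs getconf -namenodes               # expect nn01 nn02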
#########################################################################################
[root@nn01 hadoop]# cat hadoop-env.sh
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Set Hadoop-specific environment variables here.

# The only required environment variable is JAVA_HOME. All others are
# optional. When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.

# The java implementation to use.
export JAVA_HOME="/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.131-11.b12.el7.x86_64/jre"

# The jsvc implementation to use. Jsvc is required to run secure datanodes
# that bind to privileged ports to provide authentication of data transfer
# protocol. Jsvc is not required if SASL is configured for authentication of
# data transfer protocol using non-privileged ports.
#export JSVC_HOME=${JSVC_HOME}

export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/usr/local/hadoop/etc/hadoop"}

# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
  if [ "$HADOOP_CLASSPATH" ]; then
    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
  else
    export HADOOP_CLASSPATH=$f
  fi
done

# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=
#export HADOOP_NAMENODE_INIT_HEAPSIZE=""

# Extra Java runtime options. Empty by default.
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"

# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"

export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"

export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"

# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"

# On secure datanodes, user to run the datanode as after dropping privileges.
# This **MUST** be uncommented to enable secure HDFS if using privileged ports
# to provide authentication of data transfer protocol. This **MUST NOT** be
# defined if SASL is configured for authentication of data transfer protocol
# using non-privileged ports.
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}

# Where log files are stored. $HADOOP_HOME/logs by default.
#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER

# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}

###
# HDFS Mover specific parameters
###
# Specify the JVM options to be used when starting the HDFS Mover.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_MOVER_OPTS=""

###
# Advanced Users Only!
###

# The directory where pid files are stored. /tmp by default.
# NOTE: this should be set to a directory that can only be written to by
# the user that will run the hadoop daemons. Otherwise there is the
# potential for a symlink attack.
export HADOOP_PID_DIR=${HADOOP_PID_DIR}
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}

# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER
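
The only values edited by hand in this file are JAVA_HOME and HADOOP_CONF_DIR; a minimal sanity check, assuming the same OpenJDK path exists on every node:

ls /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.131-11.b12.el7.x86_64/jre/bin/java
/usr/local/hadoop/bin/hadoop version    # also run early in the nn01 history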

#########################################################################################
[root@nn01 hadoop]# cat hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>dfs.nameservices</name>
        <value>nsd1808ha</value>
    </property>
    <property>
        <name>dfs.ha.namenodes.nsd1808ha</name>
        <value>nn1,nn2</value>
    </property>
    <property>
        <name>dfs.namenode.rpc-address.nsd1808ha.nn1</name>
        <value>nn01:8020</value>
    </property>
    <property>
        <name>dfs.namenode.rpc-address.nsd1808ha.nn2</name>
        <value>nn02:8020</value>
    </property>
    <property>
        <name>dfs.namenode.http-address.nsd1808ha.nn1</name>
        <value>nn01:50070</value>
    </property>
    <property>
        <name>dfs.namenode.http-address.nsd1808ha.nn2</name>
        <value>nn02:50070</value>
    </property>
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://zk1:8485;zk2:8485;zk3:8485/nsd1808ha</value>
    </property>
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/var/hadoop/journal</value>
    </property>
    <property>
        <name>dfs.client.failover.proxy.provider.nsd1808ha</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <property>
        <name>dfs.ha.fencing.methods</name>
        <value>sshfence</value>
    </property>
    <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/root/.ssh/id_rsa</value>
    </property>
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
</configuration>
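
With automatic failover enabled above, the usual smoke test is to stop the active NameNode and watch the standby take over; these are the commands from the nn01 history, collected in order and assuming nn1 starts out active:

/usr/local/hadoop/bin/hdfs haadmin -getServiceState nn1    # expect: active
/usr/local/hadoop/bin/hdfs haadmin -getServiceState nn2    # expect: standby
/usr/local/hadoop/sbin/hadoop-daemon.sh stop namenode      # on nn01, simulate a failure
/usr/local/hadoop/bin/hdfs haadmin -getServiceState nn2    # expect: active after failover
/usr/local/hadoop/sbin/hadoop-daemon.sh start namenode     # bring nn01 back as the standby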

#########################################################################################
[root@nn01 hadoop]# cat mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
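
With the framework set to yarn, the wordcount example jar used in the history doubles as an end-to-end HDFS + YARN test; a minimal run, assuming /output does not already exist:

cd /usr/local/hadoop
./bin/hadoop fs -mkdir /abc
./bin/hadoop fs -put *.txt /abc
./bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.6.jar wordcount /abc /output
./bin/hadoop fs -ls /output            # expect _SUCCESS and a part-r-00000 result file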

#########################################################################################
[root@nn01 hadoop]# cat slaves
node1
node2
node3
#########################################################################################
[root@nn01 hadoop]# cat yarn-site.xml
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>

<!-- Site specific YARN configuration properties -->
    <property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.resourcemanager.ha.rm-ids</name>
        <value>rm1,rm2</value>
    </property>
    <property>
        <name>yarn.resourcemanager.recovery.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.resourcemanager.store.class</name>
        <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
    </property>
    <property>
        <name>yarn.resourcemanager.zk-address</name>
        <value>zk1:2181,zk2:2181,zk3:2181</value>
    </property>
    <property>
        <name>yarn.resourcemanager.cluster-id</name>
        <value>yarn-ha</value>
    </property>
    <property>
        <name>yarn.resourcemanager.hostname.rm1</name>
        <value>nn01</value>
    </property>
    <property>
        <name>yarn.resourcemanager.hostname.rm2</name>
        <value>nn02</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
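
The ResourceManager pair mirrors the NameNode pair; the checks below are from the nn01 history, assuming rm1 (nn01) starts out active and the second ResourceManager has been started on nn02:

/usr/local/hadoop/bin/yarn rmadmin -getServiceState rm1      # expect: active
/usr/local/hadoop/bin/yarn rmadmin -getServiceState rm2      # expect: standby
/usr/local/hadoop/bin/yarn node -list                        # node1-node3 should be RUNNING
/usr/local/hadoop/sbin/yarn-daemon.sh stop resourcemanager   # on nn01, forces rm2 to take over
/usr/local/hadoop/bin/yarn rmadmin -getServiceState rm2      # expect: active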

#########################################################################################
[root@nn02 ~]# history
1 jps
2 cd
3 rm -rf /usr/local/hadoop/
4 rm -rf /usr/local/zookeeper/
5 ls
6 rm -rf /usr/local/hadoop/
7 vim /etc/sysconfig/network-scripts/ifcfg-eth0
8 hostname set-hostname nn02
9 hostnamectl set-hostname nn02
10 hostname nn02
11 exit
12 vim /etc/ssh/sshd_config
13 vim /etc/ssh/ssh_config
14 rsync -av 192.168.3.90:/root/.ssh/ /root/
15 cd /root/.ssh/
16 ls
17 ssh node1
18 ls
19 rm -rf *
20 rsync -av 192.168.3.90:/root/.ssh/ /root/
21 ls
22 ssh node1
23 scp -r 192.168.3.90:/root/.ssh/ /root/.ssh/
24 ls
25 rm -rf *
26 cd
27 scp -r 192.168.3.90:/root/.ssh/ /root/.ssh/
28 ls
29 rm -rf authorized_keys id_rsa.pub known_hosts
30 cd /root/.ssh/
31 scp -r 192.168.3.90:/root/.ssh/ /root/.ssh/
32 ls
33 cd
34 ls
35 rm -rf id_rsa
36 ls
37 scp 192.168.3.90:/root/.ssh/* /root/.ssh/
38 ls
39 cd /root/.ssh/
40 ls
41 ssh node1
42 ssh nn02
43 ssh nfsgw
44 yum -y install java-1.8.0-openjdk-devel
45 cd
46 ls
47 ./zkstats.sh nfsgw zk{1..3}
48 ls /usr/local/
49 ls /var/hadoop/
50 ls
51 rsync -av /var/hadoop/dfs/ /var/hadoop/
52 rsync -av 192.168.3.90:/var/hadoop/dfs/ /var/hadoop/
53 ls
54 ls /var/hadoop/
55 rsync -aSH nn01:/var/hadoop/dfs/ /var/hadoop/
56 ls /var/hadoop/
57 ls
58 ls /var/hadoop/
59 rm -rf /var/hadoop/*
60 rsync -aSH nn01:/var/hadoop/dfs/ /var/hadoop/
61 ls /var/hadoop/
62 rm -rf /var/hadoop/*
63 rsync -aSH nn01:/var/hadoop/ /var/hadoop/
64 ls /var/hadoop/
65 /usr/local/hadoop/sbin/yarn-daemon.sh start resourcemanager
66 /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn2
67 /usr/local/hadoop/bin/yarn rmadmin -getServiceState rm2
68 jps
69 history
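
The step that matters in this history is getting the standby NameNode's metadata in place before its first start: the log rsyncs /var/hadoop from nn01 and then starts only the ResourceManager by hand (the NameNode itself is launched from nn01 by start-all.sh). A condensed sketch of those steps follows; hdfs namenode -bootstrapStandby is the stock alternative to the rsync, not something this log actually uses:

rsync -aSH nn01:/var/hadoop/ /var/hadoop/                     # copy the formatted NameNode metadata
# alternative: /usr/local/hadoop/bin/hdfs namenode -bootstrapStandby
/usr/local/hadoop/sbin/yarn-daemon.sh start resourcemanager   # the second RM is not started by start-all.sh
/usr/local/hadoop/bin/hdfs haadmin -getServiceState nn2
/usr/local/hadoop/bin/yarn rmadmin -getServiceState rm2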
#########################################################################################
[root@node1 hadoop]# history
1 exit
2 cd /root/.ssh/
3 ls
4 rm -rf *
5 cd
6 ls /usr/local/hadoop/
7 jps
8 ls /usr/local/hadoop/
9 rm -rf /usr/local/hadoop/
10 ls /usr/local/hadoop/
11 cat /usr/local/hadoop/etc/hadoop/core-site.xml
12 rm -rf /usr/local/zookeeper/
13 vim slaves
14 ls
15 tar -xf kafka_2.10-0.10.2.1.tgz
16 ls
17 mv kafka_2.10-0.10.2.1 /usr/local/kafka
18 cd /usr/local/kafka/config
19 ls
20 vim server.properties
21 cd
22 ls
23 scp kafka_2.10-0.10.2.1.tgz 192.168.3.81:/root/
24 history
25 rm -rf /usr/local/kafka
26 ls
27 rm -rf kafka_2.10-0.10.2.1.tgz
28 ls
29 cd /var/hadoop/
30 ls
31 jps
32 rm -rf /var/hadoop/*
33 jps
#########################################################################################
[root@node2 ~]# history
1 exit
2 jps
3 cd /usr/local/hadoop/etc/hadoop/
4 vim core-site.xml
5 cd
6 rm -rf /usr/local/hadoop/
7 rm -rf /usr/local/zookeeper/
8 jps
9 rm -rf /var/hadoop/*
10 jps
11 /usr/local/hadoop/bin/hadoop --help
12 /usr/local/hadoop/bin/hadoop fs
13 history
#########################################################################################
[root@node3 ~]# history
1 exit
2 jps
3 cd /usr/local/hadoop/
4 cd etc/hadoop/
5 vim core-site.xml
6 cd
7 rm -rf /usr/local/hadoop/
8 rm -rf /usr/local/zookeeper/
9 jps
10 rm -rf /var/hadoop/*
11 jps
12 history
#########################################################################################
[root@nfsgw ~]# history
1 jps
2 groupadd -g 700 nsd1808
3 useradd -u 700 -g 700 -r nsd1808
4 rm -rf /usr/local/hadoop/
5 mkdir /var/hadoop
6 mkdir /var/nfstmp
7 chown nsd1808.nsd1808 /var/nfstmp
8 setfacl -m u:nsd1808:rwx /usr/local/hadoop/logs
9 vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml
10 su - nsd1808
11 /usr/local/hadoop/sbin/hadoop-daemon.sh --script ./bin/hdfs start portmap
12 jps
13 su -nsd1808
14 su - nsd1808
15 jps
16 mkdir /tmp/zookeeper
17 echo 4 >/tmp/zookeeper/myid
18 cd /usr/local/zookeeper
19 ls
20 /usr/local/zookeeper/bin/zkServer.sh start
21 /usr/local/zookeeper/bin/zkServer.sh status
22 /usr/local/zookeeper/bin/zkServer.sh stop
23 /usr/local/zookeeper/bin/zkServer.sh status
24 /usr/local/zookeeper/bin/zkServer.sh start
25 /usr/local/zookeeper/bin/zkServer.sh status
26 /usr/local/zookeeper/bin/zkServer.sh stop
27 /usr/local/zookeeper/bin/zkServer.sh status
28 yum -y install telnet
29 telnet zk3 2181
30 /usr/local/zookeeper/bin/zkServer.sh start
31 /usr/local/zookeeper/bin/zkServer.sh status
32 vim api.sh
33 cp /root/zkstats.sh .
34 ls
35 vim zkstats.sh
36 ls
37 ll zkstats.sh
38 ./zkstats.sh
39 ./zkstats.sh zk1 zk2 zk3 nfsgw
40 ./zkstats.sh zk1 zk2 zk3 nfsgw node1
41 ls
42 cd conf
43 ls
44 vim configuration.xsl
45 vim log4j.properties
46 vim zoo.cfg
47 ls
48 jps
49 reboot
50 hostnamectl set-hostname nfsgw
51 hostname nfsgw
52 vim /etc/sysconfig/network-scripts/ifcfg-eth0
53 systemctl restart network
54 LANG=en_US.UTF-8
55 growpart /dev/vda 1
56 xfs_growfs /
57 yum -y install java-1.8.0-openjdk-devel
58 jps
59 /usr/local/zookeeper/bin/zkServer.sh start
60 jps
61 /usr/local/zookeeper/bin/zkServer.sh start
62 jps
63 ls
64 ./zkstats.sh nfsgw zk{1..3}
65 ./zkstats.sh nfsgw
66 /usr/local/zookeeper/bin/zkServer.sh start
67 ls
68 jps
69 ./zkstats.sh nfsgw
70 ./zkstats.sh nfsgw zk{1..3}
71 scp zkstats.sh 192.168.3.94:/root/
72 ls /usr/local/hadoop/
73 cd
74 jps
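
The zkstats.sh invoked above is not included in this dump; below is a hypothetical reconstruction, assuming each argument is a ZooKeeper host answering the four-letter "stat" command on port 2181 (the history installs telnet for the same kind of check) and that nc is available on this machine:

#!/bin/bash
# zkstats.sh (hypothetical reconstruction) - print the role of each ZooKeeper server given as an argument
for host in "$@"; do
    echo -n "${host}: "
    echo stat | nc -w 2 "$host" 2181 2>/dev/null | awk '/^Mode:/{print $2}'
done

Run as ./zkstats.sh nfsgw zk{1..3}: one host should report leader and the rest follower (or observer, if configured as one).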
#########################################################################################
[root@zk1 ~]# history
1 hostnamectl set-hostname zk1
2 hostname zk1
3 vim /etc/sysconfig/network-scripts/ifcfg-eth0
4 systemctl restart network
5 LANG=en_US.UTF-8
6 growpart /dev/vda 1
7 xfs_growfs /
8 yum -y install java-1.8.0-openjdk-devel
9 ls
10 ls /usr/local/zookeeper/
11 cd co
12 cd /usr/local/zookeeper/conf/
13 vim zoo.cfg
14 mkdir /tmp/zookeeper
15 echo 1 >/tmp/zookeeper/myid
16 /usr/local/zookeeper/bin/zkServer.sh start
17 /usr/local/zookeeper/bin/zkServer.sh status
18 /usr/local/zookeeper/bin/zkServer.sh start
19 cd
20 tar -xf kafka_2.10-0.10.2.1.tgz
21 ls
22 mv kafka_2.10-0.10.2.1 /usr/local/kafka
23 cd /usr/local/kafka/config
24 ls
25 vim server.properties
26 scp /usr/local/kafka 192.168.3.82:/usr/local/kafka
27 rsync -av /usr/local/kafka 192.168.3.82:/usr/local/kafka
28 rsync -av /usr/local/kafka 192.168.3.83:/usr/local/kafka
29 rsync -av /usr/local/kafka 192.168.3.83:/usr/local
30 rsync -av /usr/local/kafka 192.168.3.82:/usr/local
31 /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
32 jps
33 /usr/local/kafka/bin/kafka-topics.sh --create --partitions 1 --replication-factor 1 --zookeeper node3:2181 --topic aa
34 /usr/local/kafka/bin/kafka-topics.sh --create --partitions 1 --replication-factor 1 --zookeeper zk1:2181 --topic aa
35 reboot
36 /usr/local/zookeeper/bin/zkServer.sh start
37 jps
38 cat /etc/hosts
39 /usr/local/hadoop/sbin/hadoop-daemon.sh start journalnode
40 /usr/local/hadoop/sbin/hadoop-daemon.sh stop journalnode
41 jps
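
The zoo.cfg edited in this history is not shown anywhere in the dump; the sketch below is a hypothetical reconstruction, inferred only from the myid values written on each host (1-3 on zk1-zk3, 4 on nfsgw) and from /tmp/zookeeper being used as the data directory. The real file may differ:

# /usr/local/zookeeper/conf/zoo.cfg (hypothetical reconstruction)
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/tmp/zookeeper
clientPort=2181
server.1=zk1:2888:3888
server.2=zk2:2888:3888
server.3=zk3:2888:3888
server.4=nfsgw:2888:3888:observer    # nfsgw's role is a guess; the log only shows myid 4 there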
#########################################################################################
[root@zk2 ~]# history
1 hostnamectl set-hostname zk2
2 hostname zk2
3 vim /etc/sysconfig/network-scripts/ifcfg-eth0
4 systemctl restart network
5 LANG=en_US.UTF-8
6 growpart /dev/vda 1
7 xfs_growfs /
8 yum -y install java-1.8.0-openjdk-devel
9 mkdir /tmp/zookeeper
10 echo 2 >/tmp/zookeeper/myid
11 /usr/local/zookeeper/bin/zkServer.sh start
12 /usr/local/zookeeper/bin/zkServer.sh status
13 ls /usr/local/kafka
14 rm -rf /usr/local/kafka
15 vim /usr/local/kafka/config/server.properties
16 jps
17 /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
18 jps
19 /usr/local/kafka/bin/kafka-console-producer.sh --broker-list zk2:9092 --topic aa
20 reboot
21 /usr/local/zookeeper/bin/zkServer.sh start
22 jps
23 ss -tunlp | grep 8485
24 ss -tunlp | grep :8485
25 ss -tunlp | grep 8485
26 /usr/local/hadoop/sbin/hadoop-daemon.sh start journalnode
27 /usr/local/hadoop/sbin/hadoop-daemon.sh stop journalnode
28 jps
29 history
#########################################################################################
[root@zk3 ~]# history
1 exit
2 hostnamectl set-hostname zk3
3 hostname zk3
4 vim /etc/sysconfig/network-scripts/ifcfg-eth0
5 systemctl restart network
6 LANG=en_US.UTF-8
7 growpart /dev/vda 1
8 xfs_growfs /
9 yum -y install java-1.8.0-openjdk-devel
10 mkdir /tmp/zookeeper
11 echo 3 >/tmp/zookeeper/myid
12 /usr/local/zookeeper/bin/zkServer.sh start
13 /usr/local/zookeeper/bin/zkServer.sh status
14 rm -rf /usr/local/kafka
15 vim /usr/local/kafka/config/server.properties
16 /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
17 jps
18 /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server zk3:9092 --topic aa
19 reboot
20 /usr/local/zookeeper/bin/zkServer.sh start
21 jps
22 /usr/local/hadoop/sbin/hadoop-daemon.sh start journalnode
23 /usr/local/hadoop/sbin/hadoop-daemon.sh stop journalnode
24 jps
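
Put together, the Kafka commands scattered across the zk1, zk2 and zk3 histories form one produce/consume round trip; collected here in order, with the topic name and addresses exactly as they appear in the log (the broker must already be running on each host via kafka-server-start.sh):

# zk1: create the topic against the ZooKeeper ensemble
/usr/local/kafka/bin/kafka-topics.sh --create --partitions 1 --replication-factor 1 --zookeeper zk1:2181 --topic aa
# zk2: type messages into the topic
/usr/local/kafka/bin/kafka-console-producer.sh --broker-list zk2:9092 --topic aa
# zk3: read them back (Ctrl+C to stop)
/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server zk3:9092 --topic aa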
#########################################################################################
[root@nn02 ~]# cd /root/.ssh/
[root@nn02 .ssh]# ls
authorized_keys id_rsa id_rsa.pub known_hosts

[root@nn02 .ssh]# cat /etc/ssh/sshd_config
# $OpenBSD: sshd_config,v 1.100 2016/08/15 12:32:04 naddy Exp $

# This is the sshd server system-wide configuration file. See
# sshd_config(5) for more information.

# This sshd was compiled with PATH=/usr/local/bin:/usr/bin

# The strategy used for options in the default sshd_config shipped with
# OpenSSH is to specify options with their default value where
# possible, but leave them commented. Uncommented options override the
# default value.

# If you want to change the port on a SELinux system, you have to tell
# SELinux about this change.
# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
#
#Port 22
#AddressFamily any
#ListenAddress 0.0.0.0
#ListenAddress ::

HostKey /etc/ssh/ssh_host_rsa_key
#HostKey /etc/ssh/ssh_host_dsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
HostKey /etc/ssh/ssh_host_ed25519_key

# Ciphers and keying
#RekeyLimit default none

# Logging
#SyslogFacility AUTH
SyslogFacility AUTHPRIV
#LogLevel INFO

# Authentication:

#LoginGraceTime 2m
#PermitRootLogin yes
#StrictModes yes
#MaxAuthTries 6
#MaxSessions 10

#PubkeyAuthentication yes

# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2
# but this is overridden so installations will only check .ssh/authorized_keys
AuthorizedKeysFile .ssh/authorized_keys

#AuthorizedPrincipalsFile none

#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody

# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
#IgnoreRhosts yes

# To disable tunneled clear text passwords, change to no here!
#PasswordAuthentication yes
#PermitEmptyPasswords no
PasswordAuthentication yes

# Change to no to disable s/key passwords
#ChallengeResponseAuthentication yes
ChallengeResponseAuthentication no

# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
#KerberosUseKuserok yes

# GSSAPI options
GSSAPIAuthentication yes
GSSAPICleanupCredentials no
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
#GSSAPIEnablek5users no

# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
# WARNING: 'UsePAM no' is not supported in Red Hat Enterprise Linux and may cause several
# problems.
UsePAM yes

#AllowAgentForwarding yes
#AllowTcpForwarding yes
#GatewayPorts no
X11Forwarding yes
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
#PrintMotd yes
#PrintLastLog yes
#TCPKeepAlive yes
#UseLogin no
#UsePrivilegeSeparation sandbox
#PermitUserEnvironment no
#Compression delayed
#ClientAliveInterval 0
#ClientAliveCountMax 3
#ShowPatchLevel no
#UseDNS no
#PidFile /var/run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none

# no default banner path
#Banner none

# Accept locale-related environment variables
AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
AcceptEnv XMODIFIERS

# override default of no subsystems
Subsystem sftp /usr/libexec/openssh/sftp-server

# Example of overriding settings on a per-user basis
#Match User anoncvs
# X11Forwarding no
# AllowTcpForwarding no
# PermitTTY no
# ForceCommand cvs server

 

Reposted from: https://www.cnblogs.com/summer2/p/10798518.html
