Install JDK
tar -xf jdk-8u211-linux-x64.tar.gz
mv jdk1.8.0_211 /usr/local/java
vi /etc/profile.d/java.sh
export JAVA_HOME=/usr/local/java
export PATH=$PATH:$JAVA_HOME/bin
export JAVA_HOME PATH
# 刷新环境变量
source /etc/profile.d/java.sh
java -version
Install Zookeeper
mkdir -p /data/zookeeper
tar -xf zookeeper-3.4.10.tar.gz
mv zookeeper-3.4.10 /opt/zookeeper
# 修改配置文件 zoo.cfg
cd /opt/zookeeper/conf
cp zoo_sample.cfg zoo.cfg
# 然后通过 vim zoo.cfg 命令对该文件进行修改:
# 修改 存储内存中数据库快照的位置 dataDir
修改: server.A=B:C:D
A:其中 A 是一个数字,表示这个是服务器的编号;
B:是这个服务器的 ip 地址;
C:Zookeeper服务器之间的通信端口;
D:Leader选举的端口。
修改后内容
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/data/zookeeper
# the port at which the clients will connect
clientPort=2181
server.1=192.168.1.100:2888:3888
server.2=192.168.1.101:2888:3888
server.3=192.168.1.102:2888:3888
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
创建 myid 文件
分别在每个节点的 dataDir=/data/zookeeper 下创建一个 myid 文件,内容为该节点在 zoo.cfg 中的 server 编号(node1 写 1,node2 写 2,node3 写 3)
mkdir -p /data/zookeeper
echo 3 > /data/zookeeper/myid   # 本机对应 server.3;注意用 > 覆盖写入,重复执行 >> 会追加出错误内容
配置环境变量
vim /etc/profile
#添加以下内容
#set zookeeper environment
export ZK_HOME=/opt/zookeeper
export PATH=$PATH:$ZK_HOME/bin
#配置文件改动要刷新
source /etc/profile
启动zookeeper服务
启动命令:
zkServer.sh start
停止命令:
zkServer.sh stop
重启命令:
zkServer.sh restart
查看集群节点状态:
zkServer.sh status
查看端口状态:
ss -tunlp|grep 2181
启动客户端
zkCli.sh
Install kafka
### MD5 校验
$ md5sum kafka_2.13-2.8.1.tgz
04ed506da2d68fb118332875639020e7 *kafka_2.13-2.8.1.tgz
mkdir /data/kafka
mkdir /apps/kafka
#解压并移动
tar -xf kafka_2.13-2.8.1.tgz
mv kafka_2.13-2.8.1 /apps/kafka
备份
cd /apps/kafka/config/
cp server.properties server.properties.bak
修改配置文件(3台同时操作,需要更改的地方不一样)
[root@hi-ims-36-74 config]# diff server.properties server.properties.bak
21c21
< broker.id=1
---
> broker.id=0
31c31
< listeners=PLAINTEXT://172.16.36.74:9092
---
> #listeners=PLAINTEXT://:9092
60c60
< log.dirs=/data/kafka
---
> log.dirs=/tmp/kafka-logs
65c65
< num.partitions=6
---
> num.partitions=1
74c74
< offsets.topic.replication.factor=2
---
> offsets.topic.replication.factor=1
123c123
< zookeeper.connect=172.16.36.74:2181,172.16.36.75:2181,172.16.36.76:2181
---
> zookeeper.connect=localhost:2181
启动kafka
/apps/kafka/bin/kafka-server-start.sh -daemon /apps/kafka/config/server.properties
# 若 kafka 安装在 /usr/local/kafka 而非 /apps/kafka,则改用:/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
Install hadoop
tar -zxvf hadoop-3.2.2.tar.gz
mv hadoop-3.2.2 hadoop
##编辑 hadoop-env.sh
vim hadoop/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/local/java
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_ZKFC_USER=root
export HDFS_JOURNALNODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
##编辑 core-site.xml
vim hadoop/etc/hadoop/core-site.xml
<property>
<name>fs.defaultFS</name>
<value>hdfs://bdp</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/var/bdp/hadoop/ha</value>
</property>
<property>
<name>hadoop.http.staticuser.user</name>
<value>root</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
##编辑 hdfs-site.xml
vim hadoop/etc/hadoop/hdfs-site.xml
<property>
<name>dfs.nameservices</name>
<value>bdp</value>
</property>
<property>
<name>dfs.ha.namenodes.bdp</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.bdp.nn1</name>
<value>node1:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.bdp.nn2</name>
<value>node2:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.bdp.nn1</name>
<value>node1:9870</value>
</property>
<property>
<name>dfs.namenode.http-address.bdp.nn2</name>
<value>node2:9870</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://node1:8485;node2:8485;node3:8485/bdp</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/var/bdp/hadoop/ha/qjm</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.bdp</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<!-- 一个 property 只能有一个 value;多个 fencing 方法需写在同一个 value 中,用换行分隔 -->
<value>sshfence
shell(true)</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
#set hadoop environment
vim /etc/profile
export HADOOP_HOME=/opt/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
source /etc/profile
#编辑 workers
vim hadoop/etc/hadoop/workers
##替换localhost
node1
node2
node3
##ssh免密
##在root目录下
ssh-keygen -t rsa
ls /root/.ssh/
## 在node1的/root/.ssh/目录中生成一个名为authorized_keys的文件
touch /root/.ssh/authorized_keys
vim /root/.ssh/authorized_keys
##在node1上执行
ssh node2
##将node1的hadoop传到node2和node3
scp -r hadoop node2:/opt/
启动journalnode(注意顺序:必须先在三台节点上启动 journalnode,再格式化 namenode,否则 format 会因连不上 JournalNode 而失败)
hdfs --daemon start journalnode
# 仅在 node1 上执行:格式化并启动 namenode
hdfs namenode -format
hdfs --daemon start namenode
# 仅在 node2 上执行:从 active namenode 同步元数据
hdfs namenode -bootstrapStandby
# 在 node1 上格式化 ZKFC,然后启动整个 HDFS 集群
hdfs zkfc -formatZK
start-dfs.sh
install mysql
1、下载mysql8
mysql-8.0.28-linux-glibc2.12-x86_64.tar.xz
2、解压mysql8,可能慢一点
tar -xvf mysql-8.0.28-linux-glibc2.12-x86_64.tar.xz
3、解压后移动到安装目录
mv mysql-8.0.28-linux-glibc2.12-x86_64/ /usr/local/mysql
cd /usr/local/mysql
4、创建用户组及用户和密码
groupadd mysql
useradd -g mysql mysql
5、授权用户
chown -R mysql.mysql /usr/local/mysql
6、编辑my.cnf文件
复制代码
vim /etc/my.cnf
[mysqld]
user=mysql
datadir=/usr/local/mysql/data
basedir=/usr/local/mysql
port=3306
max_connections=200
max_connect_errors=10
character-set-server=utf8
default-storage-engine=INNODB
default_authentication_plugin=mysql_native_password
lower_case_table_names=1
group_concat_max_len=102400
[mysql]
default-character-set=utf8
[client]
port=3306
default-character-set=utf8
复制代码
7、初始化基础信息,最后一行后面会有个随机的初始密码保存下来一会登录要用(如果忘记了就删掉data重新初始化)
cd /usr/local/mysql/bin
./mysqld --initialize --user=mysql
密码:cK%)Gu!XC9:-
8、添加mysqld服务到系统
cd /usr/local/mysql/
cp -a ./support-files/mysql.server /etc/init.d/mysql
9、授权以及添加服务
chmod +x /etc/init.d/mysql
chkconfig --add mysql
10、启动mysql
service mysql start
11、将mysql添加到命令服务
ln -s /usr/local/mysql/bin/mysql /usr/bin
12、登录mysql
mysql -uroot -p
注意:此步如果报错mysql: error while loading shared libraries: libtinfo.so.5: cannot open shared object file: No such file or directory,执行下面命令即可
ln -s /usr/lib64/libtinfo.so.6.1 /usr/lib64/libtinfo.so.5
13、输入刚刚初始化时生成的密码
14、更改root用户密码, 注意语句后的; 执行语句忘记写了 可以补个空的;回车也可以将语句执行
ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY 'root';
flush privileges;
15、更改root连接权限
use mysql;
update user set host='%' where user = 'root';
flush privileges;
install Redis
1.下载Redis安装包命令:
cd /usr/local/
wget http://download.redis.io/releases/redis-5.0.5.tar.gz
2.解压压缩包得到名为redis-5.0.5的文件
tar xf redis-5.0.5.tar.gz
mv redis-5.0.5 redis
3.进去redis目录下,执行make命令对redis文件进行编译
cd /usr/local/redis
make
4.编译成功后,进入src目录,执行命令make install 安装redis
cd /usr/local/redis/src
make install
5.为了方便管理,将Redis文件中的conf配置文件和常用命令移动到统一文件中,创建etc和bin文件夹
mkdir etc
mkdir bin
6.把redis.conf文件移到etc文件夹中
mv redis.conf /usr/local/redis/etc/
7.进入src目录,移动mkreleasehdr.sh redis-benchmark redis-check-aof redis-check-rdb redis-cli redis-server到/usr/local/redis/bin/
执行命令:mv mkreleasehdr.sh redis-benchmark redis-check-aof redis-check-rdb redis-cli redis-server /usr/local/redis/bin/
8.进入etc目录,执行
cd /usr/local/redis/etc
vim redis.conf
9.再次启动redis服务,并指定启动服务配置文件
cd /usr/local/redis/bin
./redis-server /usr/local/redis/etc/redis.conf&
install Nginx
1.安装四个依赖
安装gcc环境
yum install gcc-c++
安装pcre
yum install -y pcre pcre-devel
安装zlib
yum install -y zlib zlib-devel
安装openssl
yum install -y openssl openssl-devel
2.下载完成后执行如下命令,解压并安装nginx
tar -zxvf nginx-1.16.1.tar.gz
cd nginx-1.16.1
./configure
make
make install
3.启动nginx
cd /usr/local/nginx/sbin/
./nginx -c /usr/local/nginx/conf/nginx.conf
install elasticsearch
1.先新建一个用户(出于安全考虑,elasticsearch默认不允许以root账号运行。)
创建用户:useradd es
设置密码:passwd es
2.官网下载,选择linux版本
elasticsearch-7.13.2-linux-x86_64.tar.gz
cd /usr/local/
3.解压es
tar -zxvf elasticsearch-7.13.2-linux-x86_64.tar.gz
4.目录重命名:
mv elasticsearch-7.13.2 elasticsearch
5.修改配置文件
cd config
vi jvm.options
-Xms512m
-Xmx512m
vi elasticsearch.yml
node.name: node-1 #配置当前es节点名称(默认是被注释的,并且默认有一个节点名)
cluster.name: my-application #默认是被注释的,并且默认有一个集群名
path.data: /usr/local/elasticsearch/data # 数据目录位置
path.logs: /usr/local/elasticsearch/logs # 日志目录位置
network.host: 0.0.0.0 #绑定的ip:默认只允许本机访问,修改为0.0.0.0后则可以远程访问
cluster.initial_master_nodes: ["node-1", "node-2"] #默认是被注释的 设置master节点列表 用逗号分隔
6.进入es的根目录,然后创建logs data
cd /usr/local/elasticsearch/
mkdir data
mkdir logs
7.修改/etc/security/limits.conf文件 增加配置
vim /etc/security/limits.conf
在文件最后,增加如下配置:
* soft nofile 65536
* hard nofile 65536
8.在/etc/sysctl.conf文件最后添加一行 vm.max_map_count=655360
添加完毕之后,执行命令: sysctl -p
9.先将es文件夹下的所有目录的所有权限迭代授予 es 用户(即第1步创建的用户)
chgrp -R es /usr/local/elasticsearch
chown -R es /usr/local/elasticsearch
chmod -R 755 /usr/local/elasticsearch  # 注意:777 权限过于宽松,755 已足够
10.先切换到es用户启动
cd /usr/local/elasticsearch/bin
su es
./elasticsearch -d
11.启动成功后,浏览器中输入 http://192.168.1.110:9200 进行访问
配置环境常用linux命令
#Linux的网卡参数详解
vim /etc/sysconfig/network-scripts/ifcfg-ens33
#传输文件到其它服务器
scp -r apache-zookeeper-3.5.5 192.168.1.101:/opt/
#刷新配置文件
source /etc/profile
#查看状态
ps -ef | grep zookeeper
#查看防火墙状态
systemctl status firewalld
#临时关闭防火墙
systemctl stop firewalld
#永久关闭防火墙
systemctl disable firewalld
#xshell连接不上虚拟机
systemctl stop NetworkManager 临时关闭
systemctl disable NetworkManager 永久关闭网络管理命令
systemctl restart network.service 开启网络服务
#启动jar
nohup java -jar huanhuan-blog-1.0-SNAPSHOT.jar >huanhuan-blog.log 2>&1 &
nohup java -jar gym-server-0.0.1-SNAPSHOT.jar >logs 2>&1 &
192.168.40.100 node1
192.168.40.101 node2
192.168.40.102 node3
linux分区挂载
lsblk
pvcreate /dev/vdb
pvs
vgcreate vg1 /dev/vdb
vgs
lvcreate -l +100%FREE -n lv1 vg1
lvs
mkfs.ext4 /dev/vg1/lv1
mount /dev/vg1/lv1 /mnt
vi /etc/fstab
/dev/vg1/lv1 /mnt ext4 defaults 0 0
nodejs安装
修改全局模块下载路径
则在我安装的文件夹【D:\nodejs】下创建两个文件夹【node_global】及【node_cache】如下图:
npm config set prefix "D:\nodejs\node_global"
npm config set cache "D:\nodejs\node_cache"
修改系统环境变量
将【用户变量】下的【Path】修改为【D:\nodejs\node_global】,之后点击确定。
在【系统变量】下新建【NODE_PATH】【D:\nodejs\node_global\node_modules】
在【系统变量】下的【Path】新建添加node全局文件夹【D:\nodejs\node_global】,之后点击确定。
更换npm源为淘宝镜像(原 registry.npm.taobao.org 已停止服务,使用新域名 npmmirror)
npm config set registry https://registry.npmmirror.com/
配置是否成功
npm config get registry
全局安装基于淘宝镜像的cnpm
npm install -g cnpm --registry=https://registry.npmmirror.com