1 mysql数据 远程连接权限
1) 在linux端登录: mysql -uroot -proot
// 这个两个设置以后 密码很简单不会报错
2) set global validate_password_policy=0;
3) set global validate_password_length=1;
(密码不是root修改成root : set password=PASSWORD('root'))
//开启远程连接权限,打开后Navicat就可以连接到了
4) grant all privileges on *.* to 'root'@'%' identified by 'root' with grant option;
//刷新一下
5) flush privileges;
2 HDFS start-all.sh
3 上传hive压缩包到/opt/apps/,解压 :
rz 压缩包
tar -zxvf 压缩包
上传mysql的驱动包放入lib 中:/opt/apps/apache-hive-3.1.2/lib/
rz 压缩包
4 修改文件名(/opt/apps/apache-hive-3.1.2/conf/):
改名: mv hive-env.sh.template hive-env.sh
然后修改配置(配置HADOOP_HOME 和hive配置文件的位置):
vi conf/hive-env.sh
export HADOOP_HOME=/opt/apps/hadoop-3.1.1/
export HIVE_CONF_DIR=/opt/apps/apache-hive-3.1.2/conf
5 新建并修改(/opt/apps/apache-hive-3.1.2/conf/)
vi hive-site.xml
内容:
<configuration>
<!-- 记录HIve中的元数据信息 记录在mysql中 -->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://linux01:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<!-- mysql的用户名和密码 -->
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>root</value>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive/warehouse</value>
</property>
<property>
<name>hive.exec.scratchdir</name>
<value>/user/hive/tmp</value>
</property>
<property>
<name>hive.querylog.location</name>
<value>/user/hive/log</value>
</property>
<!-- 客户端远程连接的端口 -->
<property>
<name>hive.server2.thrift.port</name>
<value>10000</value>
</property>
<property>
<name>hive.server2.thrift.bind.host</name>
<value>0.0.0.0</value>
</property>
<property>
<name>hive.server2.webui.host</name>
<value>0.0.0.0</value>
</property>
<!-- hive服务的页面的端口 -->
<property>
<name>hive.server2.webui.port</name>
<value>10002</value>
</property>
<property>
<name>hive.server2.long.polling.timeout</name>
<value>5000</value>
</property>
<property>
<name>hive.server2.enable.doAs</name>
<value>true</value>
</property>
<property>
<name>datanucleus.autoCreateSchema</name>
<value>false</value>
</property>
<property>
<name>datanucleus.fixedDatastore</name>
<value>true</value>
</property>
<property>
<name>hive.execution.engine</name>
<value>mr</value>
</property>
<!-- 添加元数据服务配置 -->
<property>
<name>hive.metastore.local</name>
<value>false</value>
<description>controls whether to connect to remote metastore server or open a new metastore server in Hive Client JVM</description>
</property>
<property>
<name>hive.metastore.uris</name>
<value>thrift://linux01:9083</value>
</property>
</configuration>
6. 修改hadoop的配置文件
vi /opt/apps/hadoop-3.1.1/etc/hadoop/core-site.xml
<!-- 注意: dfs.permissions.enabled 是 HDFS 属性, 规范做法是写入 hdfs-site.xml 而非 core-site.xml -->
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
7重启hadoop
stop-all.sh
start-all.sh
8 配置HIVE环境变量
export HIVE_HOME=/opt/apps/apache-hive-3.1.2
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HBASE_HOME/bin:$HIVE_HOME/bin
配好后
source /etc/profile
9 初始化(配环境变量后任何位置都可以)
schematool -initSchema -dbType mysql
10 开启元数据服务
hive --service metastore 前台启动
nohup hive --service metastore & 后台启动(加 nohup 防止关闭终端后进程退出)
[root@linux01 ~]# netstat -nltp | grep 9083
tcp6 0 0 :::9083
11 启动测试(HDFS正常)
hive (HDFS 客户端会出现hive文件夹)