# Create the hive user and database in MySQL
mysql -h 192.168.172.134 -u root -p
create user 'hive' identified by 'hive';
create database hive;
grant all on hive.* to hive@'%' identified by 'hive';
grant all on hive.* to hive@'localhost' identified by 'hive';
flush privileges;
exit;
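# Optional sanity check: confirm the new hive user can log in and sees the hive database (assumes the grants above succeeded)
mysql -h 192.168.172.134 -u hive -phive -e "show databases;"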
tar -zxvf apache-hive-1.2.2-bin.tar.gz
ln -s apache-hive-1.2.2-bin hive
# Copy the MySQL JDBC driver jar
tar -zxvf mysql-connector-java-5.1.49.tar.gz
cp mysql-connector-java-5.1.49/mysql-connector-java-5.1.49-bin.jar hive/lib
cd hive
cp conf/hive-env.sh.template conf/hive-env.sh
vi conf/hive-env.sh
# Edit hive-env.sh and add the following
############################################
export HADOOP_HOME=/usr/local/module/hadoop
export HIVE_CONF_DIR=/usr/local/module/hive/conf
############################################
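# Quick check that the two paths referenced above actually exist on this host (adjust if your layout differs)
ls -d /usr/local/module/hadoop /usr/local/module/hive/conf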
vi conf/hive-site.xml
# Edit hive-site.xml as follows
############################################
<configuration>
<property>
<name>hive.users.in.admin.role</name>
<value>root</value>
</property>
<property>
<name>hive.security.authorization.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.security.authorization.createtable.owner.grants</name>
<value>ALL</value>
</property>
<property>
<name>hive.security.authorization.task.factory</name>
<value>org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl</value>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://127.0.0.1:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
<description>JDBC connect string for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>hive</value>
<description>username to use against metastore database</description>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>hive</value>
<description>password to use against metastore database</description>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive/warehouse</value>
<description>location of default database for the warehouse</description>
</property>
<property>
<name>hive.metastore.schema.verification</name>
<value>false</value>
</property>
<property>
<name>hive.cli.print.header</name>
<value>true</value>
</property>
<property>
<name>hive.cli.print.current.db</name>
<value>true</value>
</property>
<property>
<name>hive.metastore.uris</name>
<value>thrift://127.0.0.1:9083</value>
</property>
</configuration>
############################################
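# Optional: validate that hive-site.xml is well-formed XML (the "&" in the JDBC URL must be escaped as "&amp;"); this assumes xmllint from libxml2 is installed
xmllint --noout conf/hive-site.xml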
# Create Hive's directories in HDFS
hadoop fs -mkdir -p /tmp
hadoop fs -mkdir -p /user/hive/warehouse
hadoop fs -chmod g+w /tmp
hadoop fs -chmod g+w /user/hive/warehouse
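# Verify the directories exist and carry group-write permission
hadoop fs -ls -d /tmp /user/hive/warehouse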
# Initialize the metastore database
schematool -initSchema -dbType mysql
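# Verify the schema was created in MySQL (expect metastore tables such as DBS, TBLS and VERSION)
mysql -h 192.168.172.134 -u hive -phive hive -e "show tables;"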
# Start the metastore and HiveServer2
nohup hive --service metastore > metastore.log 2>&1 &
nohup hive --service hiveserver2 > hiveserver2.log 2>&1 &
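# Confirm both services are listening: the metastore on 9083 (configured above) and HiveServer2 on its default port 10000
ss -ntlp | grep -E '9083|10000'
# Optionally connect through beeline once HiveServer2 is up
beeline -u jdbc:hive2://127.0.0.1:10000 -n root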
# Configure permissions
hive
#set role admin;
grant all to user root;
# Grant the databases or tables you create to the root user (Qualitis can only discover databases/tables that have been granted). Pay close attention to this step!
GRANT ALL ON DATABASE xxxx TO USER root;
GRANT ALL ON TABLE xxxx TO USER root;
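# Optional: verify the grants took effect (xxxx is the same placeholder database name as above)
hive -e "show grant user root on database xxxx;"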
# Shut down
ps -ef | grep hive
kill -9 <pid>
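# Or find and stop both Hive services in one line (a convenience sketch; review the matched processes before killing)
ps -ef | grep -E 'HiveMetaStore|HiveServer2' | grep -v grep | awk '{print $2}' | xargs -r kill -9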
tar -zxvf wedatasphere-linkis-1.0.2-combined-package-dist.tar.gz
cd wedatasphere-linkis-1.0.2-combined-package-dist
# Edit the configuration file
vi config/linkis-env.sh
########################## config/linkis-env.sh start ####################################
#!/bin/bash
#
# Copyright 2019 WeBank
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# description: Starts and stops Server
#
# @name: linkis-env
#
# Modified for Linkis 1.0.0
# SSH_PORT=22
### deploy user
deployUser=root
##Linkis_SERVER_VERSION
LINKIS_SERVER_VERSION=v1
### Specifies the user workspace, which is used to store the user's script files and log files.
### Generally local directory
WORKSPACE_USER_ROOT_PATH=file:///tmp/linkis/ ##file:// required
### User's root hdfs path
HDFS_USER_ROOT_PATH=hdfs:///tmp/linkis ##hdfs:// required
### Path to store started engines and engine logs, must be local
ENGINECONN_ROOT_PATH=/usr/local/module/linkis
ENTRANCE_CONFIG_LOG_PATH=hdfs:///tmp/linkis/
### Path to store job ResultSet:file or hdfs path
RESULT_SET_ROOT_PATH=hdfs:///tmp/linkis ##hdfs:// required
### Provide the DB information of Hive metadata database.
### Attention! If there are special characters like "&", they need to be enclosed in quotation marks.
HIVE_META_URL="jdbc:mysql://192.168.172.134:3306/hive?createDatabaseIfNotExist=true"
HIVE_META_USER="hive"
HIVE_META_PASSWORD="hive"
##YARN REST URL spark engine required
YARN_RESTFUL_URL=http://127.0.0.1:8088
###HADOOP CONF DIR
HADOOP_CONF_DIR=/usr/local/module/hadoop/etc/hadoop
###HIVE CONF DIR
HIVE_CONF_DIR=/usr/local/module/hive/conf
###SPARK CONF DIR
SPARK_CONF_DIR=/usr/local/module/spark/conf
## Engine version conf
#SPARK_VERSION
SPARK_VERSION=2.4.8
##HIVE_VERSION
HIVE_VERSION=1.2.2
#PYTHON_VERSION=python2
################### The install Configuration of all Micro-Services #####################
#
# NOTICE:
# 1. If you just wanna try, the following micro-service configuration can be set without any settings.
# These services will be installed by default on this machine.
# 2. In order to get the most complete enterprise-level features, we strongly recommend that you install
# Linkis in a distributed manner and set the following microservice parameters
#
### EUREKA install information
### You can access it in your browser at the address below:http://${EUREKA_INSTALL_IP}:${EUREKA_PORT}
#EUREKA_INSTALL_IP=127.0.0.1 # Microservices Service Registration Discovery Center
EUREKA_PORT=20303
export EUREKA_PREFER_IP=false
### Gateway install information
#GATEWAY_INSTALL_IP=127.0.0.1
GATEWAY_PORT=9001
### ApplicationManager
#MANAGER_INSTALL_IP=127.0.0.1
MANAGER_PORT=9101
### EngineManager
#ENGINECONNMANAGER_INSTALL_IP=127.0.0.1
ENGINECONNMANAGER_PORT=9102
### EnginePluginServer
#ENGINECONN_PLUGIN_SERVER_INSTALL_IP=127.0.0.1
ENGINECONN_PLUGIN_SERVER_PORT=9103
### LinkisEntrance
#ENTRANCE_INSTALL_IP=127.0.0.1
ENTRANCE_PORT=9104
### publicservice
#PUBLICSERVICE_INSTALL_IP=127.0.0.1
PUBLICSERVICE_PORT=9105
### cs
#CS_INSTALL_IP=127.0.0.1
CS_PORT=9108
########################################################################################
## LDAP is for enterprise authorization, if you just want to have a try, ignore it.
#LDAP_URL=ldap://localhost:1389/
#LDAP_BASEDN=dc=webank,dc=com
#LDAP_USER_NAME_FORMAT=cn=%s@xxx.com,OU=xxx,DC=xxx,DC=com
## java application default jvm memory
export SERVER_HEAP_SIZE="128M"
##The decompression directory and the installation directory must not be the same directory
LINKIS_HOME=/usr/local/module/linkis
LINKIS_VERSION=1.0.2
# for install
LINKIS_PUBLIC_MODULE=lib/linkis-commons/public-module
################################ config/linkis-env.sh end ##############################
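# SPARK_VERSION and HIVE_VERSION above must match what is actually installed; a quick check (paths assume the layout used earlier)
hadoop version | head -1
hive --version | head -1
/usr/local/module/spark/bin/spark-submit --version 2>&1 | grep -m1 version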
# Configure db.sh
vi config/db.sh
##########################################################
MYSQL_HOST=192.168.172.134
MYSQL_PORT=3306
MYSQL_DB=linkis
MYSQL_USER=root
MYSQL_PASSWORD=root123
##########################################################
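# The linkis database referenced above should exist before install.sh initializes its tables; a minimal sketch using the credentials configured here
mysql -h 192.168.172.134 -P 3306 -u root -proot123 -e "create database if not exists linkis default character set utf8mb4;"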
# Install required command-line tools
yum install telnet -y
yum install dos2unix -y
# Run the installer (it may report missing commands that you then need to install)
sh bin/install.sh
# Start Linkis
cd ../linkis
sh sbin/linkis-start-all.sh
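# After startup, confirm every LINKIS-* microservice shows UP in Eureka (dashboard at http://192.168.172.134:20303); a curl-based sketch, assuming the standard Eureka REST path
curl -s http://127.0.0.1:20303/eureka/apps | grep -o '<name>[^<]*</name>'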