1. Introduction
Hive provides high-level, SQL-like data processing on top of Hadoop MapReduce or Spark.
Before installing, make sure the HDFS root directory is readable and writable by the root user:
hadoop fs -chown root:root /
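You can verify the ownership change with:
hadoop fs -ls /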
2. Building the Images
2.1 Download the dependencies
mkdir -p hive/image-build
cd hive/image-build
wget http://download.oracle.com/otn-pub/java/jdk/8u181-b13/96a7b8442fe848ef90c96a2fad6ed6d1/jdk-8u181-linux-x64.tar.gz
wget http://mirror.bit.edu.cn/apache/hive/hive-3.0.0/apache-hive-3.0.0-bin.tar.gz
Download mysql-connector-java-5.1.47.tar.gz (MySQL Connector/J) from the MySQL site, then unpack it:
tar -zxvf mysql-connector-java-5.1.47.tar.gz
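The Dockerfile in the next step expects the bare driver jar in the build context, so copy it out of the extracted directory (the jar's exact name inside the tarball may differ slightly between Connector/J releases):
cp mysql-connector-java-5.1.47/mysql-connector-java-5.1.47.jar ./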
wget http://mirror.bit.edu.cn/apache/hadoop/common/hadoop-2.9.1/hadoop-2.9.1.tar.gz
2.2 Build the Hive image
Dockerfile
FROM centos:7.5.1804
ADD jdk-8u181-linux-x64.tar.gz /opt
ADD hadoop-2.9.1.tar.gz /opt
ADD apache-hive-3.0.0-bin.tar.gz /opt
RUN yum install -y which && mv /opt/apache-hive-3.0.0-bin /opt/apache-hive-3.0.0
ADD mysql-connector-java-5.1.47.jar /opt/apache-hive-3.0.0/lib
ENV JAVA_HOME /opt/jdk1.8.0_181
ENV HADOOP_HOME /opt/hadoop-2.9.1
ENV HADOOP_CONF_DIR /opt/hadoop-2.9.1/etc/hadoop
ENV HIVE_HOME /opt/apache-hive-3.0.0
ENV PATH $JAVA_HOME/bin:$PATH
Build, tag, and push the image:
docker build -t hive .
docker tag hive 172.2.2.11:5000/hive
docker push 172.2.2.11:5000/hive
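As a quick sanity check, the image can print its bundled Hive version (JAVA_HOME and HADOOP_HOME are already set by the Dockerfile's ENV entries):
docker run --rm hive /opt/apache-hive-3.0.0/bin/hive --version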
2.3 Build the DB tool image
Install Gradle and build the DB tool:
git clone https://github.com/chenlein/database-tools.git
cd database-tools/
wget https://services.gradle.org/distributions/gradle-4.10.2-bin.zip
unzip gradle-4.10.2-bin.zip
mkdir -p /opt/gradle
mv gradle-4.10.2 /opt/gradle/
Edit build.gradle and remove the DM (Dameng) JDBC driver dependency:
"compile group: 'dm', name: 'Dm7JdbcDriver', version: '7.1', classifier: 'jdk17-20170808'"
Add the Gradle bin directory to PATH in /etc/profile, then reload the profile:
export PATH=.:/opt/gradle/gradle-4.10.2/bin:$PATH
source /etc/profile
gradle --version
gradle build
ls build/distributions/database-tools-1.0-SNAPSHOT.tar
cp build/distributions/database-tools-1.0-SNAPSHOT.tar ./
Dockerfile
FROM java:8
CMD ["mkdir", "-p", "/root/db_tools"]
WORKDIR /root/db_tools
ADD database-tools-1.0-SNAPSHOT.tar .
RUN ["chmod", "+x", "./database-tools-1.0-SNAPSHOT/bin/database-tools"]
CMD ["./database-tools-1.0-SNAPSHOT/bin/database-tools"]
Build, tag, and push the image:
docker build -t database-tools:1.0-SNAPSHOT .
docker tag database-tools:1.0-SNAPSHOT 172.2.2.11:5000/database-tools:1.0-SNAPSHOT
docker push 172.2.2.11:5000/database-tools:1.0-SNAPSHOT
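To confirm the push landed in the private registry, the registry's v2 API can be queried for the image's tags:
curl http://172.2.2.11:5000/v2/database-tools/tags/list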
3. Deploying Hive
3.1 Create a local volume to back the Hive deployment's PVC
local-volumes.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: hive-data-1
  labels:
    type: local
    app: hive
spec:
  capacity:
    storage: 100Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /home/hive/data1
  persistentVolumeReclaimPolicy: Recycle
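Apply it and confirm the PV is registered (assuming kubectl is already pointed at this cluster):
kubectl apply -f local-volumes.yaml
kubectl get pv hive-data-1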
3.2 Deploy the MySQL service
mysql.yaml
apiVersion: v1
kind: Secret
metadata:
  name: hive-metadata-mysql-secret
  labels:
    app: hive-metadata-mysql
type: Opaque
data:
  mysql-root-password: RGFtZW5nQDc3Nw==
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: hive-metadata-mysql
  name: hive-metadata-mysql
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: hive-metadata-mysql
  template:
    metadata:
      labels:
        app: hive-metadata-mysql
    spec:
      initContainers:
        - name: remove-lost-found
          image: busybox:1.29.2
          imagePullPolicy: IfNotPresent
          command: ["rm", "-rf", "/var/lib/mysql/lost+found"]
          volumeMounts:
            - name: data
              mountPath: /var/lib/mysql
      containers:
        - name: mysql
          image: mysql:5.7
          volumeMounts:
            - name: data
              mountPath: /var/lib/mysql
          ports:
            - containerPort: 3306
              protocol: TCP
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: hive-metadata-mysql-secret
                  key: mysql-root-password
      volumes:
        - name: data
          emptyDir: {}
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: hive-metadata-mysql
  name: hive-metadata-mysql-service
spec:
  ports:
    - name: tcp
      port: 3306
      targetPort: 3306
  selector:
    app: hive-metadata-mysql
  type: NodePort
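The base64 Secret values used throughout this walkthrough are generated along these lines (and can be checked with base64 -d):
echo -n 'your-password' | base64
Apply the manifest and wait for the MySQL pod to come up:
kubectl apply -f mysql.yaml
kubectl get pods -l app=hive-metadata-mysql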
3.3 Deploy the Hive configuration
hive-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: hive-custom-config-cm
  labels:
    app: hive
data:
  bootstrap.sh: |-
    #!/bin/bash
    set -x
    cd /root/bootstrap
    # Copy every custom config file (everything except this script) into $HIVE_HOME/conf
    for cfg in ./*; do
      if [[ ! "$cfg" =~ bootstrap.sh ]]; then
        echo $cfg
        cat $cfg
        cat $cfg > $HIVE_HOME/conf/${cfg##*/}
      fi
    done
    # Substitute the real metastore password for the ${HIVE_METADATA_PASSWORD} placeholder
    sed -i 's/${HIVE_METADATA_PASSWORD}/'$HIVE_METADATA_PASSWORD'/g' `grep '${HIVE_METADATA_PASSWORD}' -rl $HIVE_HOME/conf`
    # initSchema
    echo "step 1"
    #if [[ ! -e $HADOOP_CONF_DIR/hive-metastore-initialization.out ]]; then
    echo "step 2"
    $HADOOP_HOME/bin/hadoop fs -mkdir -p hdfs://172.2.2.11:9000/tmp
    $HADOOP_HOME/bin/hadoop fs -mkdir -p hdfs://172.2.2.11:9000/user/hive/warehouse
    $HADOOP_HOME/bin/hadoop fs -chmod g+w hdfs://172.2.2.11:9000/tmp
    $HADOOP_HOME/bin/hadoop fs -chmod g+w hdfs://172.2.2.11:9000/user/hive/warehouse
    $HIVE_HOME/bin/schematool -dbType mysql -initSchema --verbose &> $HADOOP_CONF_DIR/hive-metastore-initialization.out
    #fi
    echo "step 3"
    $HIVE_HOME/bin/hiveserver2 &
    $HIVE_HOME/bin/hive --service metastore &
    cp $HIVE_HOME/conf/hive-env.sh.template $HIVE_HOME/conf/hive-env.sh && echo "export HADOOP_CLIENT_OPTS=\"-Xmx512m -XX:MaxPermSize=1024m \$HADOOP_CLIENT_OPTS\"" >> $HIVE_HOME/conf/hive-env.sh
    # keep the container running
    sleep infinity
  hive-site.xml: |-
    <?xml version="1.0" encoding="UTF-8" standalone="no"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <configuration>
      <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>hive</value>
      </property>
      <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>${HIVE_METADATA_PASSWORD}</value>
      </property>
      <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://hive-metadata-mysql-service:3306/metastore?createDatabaseIfNotExist=true&amp;useSSL=false</value>
      </property>
      <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.jdbc.Driver</value>
      </property>
      <property>
        <name>system:java.io.tmpdir</name>
        <value>/tmp</value>
      </property>
      <property>
        <name>system:user.name</name>
        <value>hive</value>
      </property>
      <property>
        <name>hive.server2.authentication</name>
        <value>NOSASL</value>
      </property>
      <property>
        <name>hive.metastore.schema.verification</name>
        <value>false</value>
      </property>
      <property>
        <name>datanucleus.fixedDatastore</name>
        <value>false</value>
      </property>
      <property>
        <name>datanucleus.autoCreateSchema</name>
        <value>true</value>
      </property>
      <property>
        <name>datanucleus.autoCreateTables</name>
        <value>true</value>
      </property>
      <property>
        <name>datanucleus.autoCreateColumns</name>
        <value>true</value>
      </property>
      <property>
        <name>datanucleus.schema.autoCreateAll</name>
        <value>true</value>
        <description>Creates the necessary schema on startup if it doesn't exist; set this to false after the first run.</description>
      </property>
    </configuration>
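Apply the ConfigMap so that bootstrap.sh and hive-site.xml can be mounted into the Hive pod at /root/bootstrap in the next step:
kubectl apply -f hive-config.yaml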
3.4 Deploy Hive
hive-deploy.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: hive-metastore-database
  labels:
    app: hive
data:
  execute.sql: |-
    -- create database
    CREATE DATABASE metastore DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
    -- create user and grant authorization
    GRANT ALL ON metastore.* TO 'hive'@'%' IDENTIFIED BY '${IDENTIFIED}';
---
apiVersion: v1
kind: Secret
metadata:
  name: hive-metastore-secret
  labels:
    app: hive
type: Opaque
data:
  database-dba-password: RGFtZW5nQDc3Nw==
  database-user-password: RGFtZW5nQDc3Nw==
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hive
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: hive
  template:
    metadata:
      labels:
        app: hive
    spec:
      nodeName: k8s-node-01
      initContainers:
        - name: init-database
          image: 172.2.2.11:5000/database-tools:1.0-SNAPSHOT
          env:
            - name: DRIVER_NAME
              value: "com.mysql.jdbc.Driver"
            - name: URL
              value: "jdbc:mysql://hive-metadata-mysql-service:3306/mysql?useUnicode=true&characterEncoding=utf8&useSSL=false"
            - name: USERNAME
              value: "root"
            - name: PASSWORD
              valueFrom:
                secretKeyRef:
                  name: hive-metastore-secret
                  key: database-dba-password
            - name: IDENTIFIED
              valueFrom:
                secretKeyRef:
                  name: hive-metastore-secret
                  key: database-user-password
          volumeMounts:
            - name: init-database-volume
              mountPath: /root/db_tools/script
      containers:
        - name: hive
          image: 172.2.2.11:5000/hive
          command: ["bash", "-c", "cp /root/bootstrap/bootstrap.sh /tmp/ && chmod +x /tmp/bootstrap.sh && /tmp/bootstrap.sh"]
          ports:
            - containerPort: 10000
            - containerPort: 10002
            - containerPort: 9083
          env:
            - name: HADOOP_CONF_DIR
              value: /opt/hadoop-2.9.1/etc/hadoop
            - name: HIVE_METADATA_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: hive-metastore-secret
                  key: database-user-password
          volumeMounts:
            - name: hadoop-config-volume
              mountPath: /etc/hadoop
            - name: hive-custom-config-volume
              mountPath: /root/bootstrap
          readinessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            tcpSocket:
              port: 10000
      volumes:
        - name: hadoop-config-volume
          persistentVolumeClaim:
            claimName: hive-data
        - name: hive-custom-config-volume
          configMap:
            name: hive-custom-config-cm
        - name: init-database-volume
          configMap:
            name: hive-metastore-database
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: hive-data
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: hive
  name: hive-service
spec:
  ports:
    - port: 10000
      targetPort: 10000
      name: thrift
    - port: 10002
      targetPort: 10002
      name: webui
    - port: 9083
      targetPort: 9083
      name: metastore
  selector:
    app: hive
  type: NodePort
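Apply the manifest and watch the rollout; the readiness probe only passes once HiveServer2 listens on port 10000, so the pod can take a minute or two to become Ready:
kubectl apply -f hive-deploy.yaml
kubectl get pods -l app=hive -w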
4. Verifying the Deployment
Pods:
[root@k8s-master-01 hive]# kubectl get pods | grep hive
hive-6b8b7f894d-lxmhr 1/1 Running 0 48m
hive-metadata-mysql-7d5784ffd-blng4 1/1 Running 0 49m
Service:
kubectl get svc | grep hive-service
hive-service NodePort 10.96.111.20 <none> 10000:32285/TCP,10002:32704/TCP,9083:31567/TCP 46m
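With the NodePort mappings above, HiveServer2 is reachable from outside the cluster; for example, a beeline client (if installed on the node) could connect through port 32285, using auth=noSasl to match the hive.server2.authentication setting in hive-site.xml:
beeline -u "jdbc:hive2://172.2.2.11:32285/default;auth=noSasl" -n root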
HDFS:
[hadoop@k8s-node-01 hive]$ hadoop fs -ls -R /tmp
[hadoop@k8s-node-01 hive]$ hadoop fs -ls -R /user
drwxr-xr-x - root root 0 2018-10-04 06:57 /user/hive
drwxrwxr-x - root root 0 2018-10-04 06:57 /user/hive/warehouse
GUI: the HiveServer2 web UI is served on container port 10002 (NodePort 32704 above).
5. Running Hive
kubectl exec -it hive-6b8b7f894d-lxmhr bash
[root@hive-6b8b7f894d-lxmhr /]# cd /opt/apache-hive-3.0.0/bin
[root@hive-6b8b7f894d-lxmhr bin]# ./hive shell
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=1024m; support was removed in 8.0
which: no hbase in (/opt/jdk1.8.0_181/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin)
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=1024m; support was removed in 8.0
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/opt/apache-hive-3.0.0/lib/log4j-slf4j-impl-2.10.0.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/opt/hadoop-2.9.1/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
Hive Session ID = e42fe4b6-8d29-49ab-87bb-9060b0d17679
Logging initialized using configuration in jar:file:/opt/apache-hive-3.0.0/lib/hive-common-3.0.0.jar!/hive-log4j2.properties Async: true
Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
hive> show tables;
OK
Time taken: 0.641 seconds
hive> create table abc (a int);
OK
Time taken: 0.633 seconds
hive> insert into abc values (1);
Query ID = root_20181003225917_19cdc221-c910-4704-8271-46363aa7e8d2
Total jobs = 3
Launching Job 1 out of 3
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapreduce.job.reduces=<number>
Job running in-process (local Hadoop)
2018-10-03 22:59:21,240 Stage-1 map = 100%, reduce = 100%
Ended Job = job_local324096192_0001
Stage-4 is selected by condition resolver.
Stage-3 is filtered out by condition resolver.
Stage-5 is filtered out by condition resolver.
Moving data to directory file:/user/hive/warehouse/abc/.hive-staging_hive_2018-10-03_22-59-17_274_1077030636808234790-1/-ext-10000
Loading data to table default.abc
MapReduce Jobs Launched:
Stage-Stage-1: HDFS Read: 0 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 0 msec
OK
Time taken: 4.403 seconds
hive> select * from abc;
OK
1
Time taken: 0.176 seconds, Fetched: 1 row(s)
hive> show tables;
OK
abc
Time taken: 0.03 seconds, Fetched: 1 row(s)
hive>