Hive Setup

Environment Configuration

Extract the installation packages

tar -zxvf *hadoop*.gz -C /usr/local/src/
tar -zxvf *spark*.tgz -C /usr/local/src/
tar -zxvf *hive* -C /usr/local/src/
tar -zxvf *jdk* -C /usr/local/src/

Rename the extracted directories

mv apache-hive-2.0.0-src/ hive
mv hadoop-2.7.1/ hadoop
mv jdk1.8.0_152/ java
mv spark-3.0.2-bin-hadoop2.7/ spark

Configure hosts

vi /etc/hosts
192.168.52.12  master
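
An optional check that the hostname now resolves to the address above:

ping -c 1 master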

Set up passwordless SSH

ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:FDfzqp1rkSFfB9K8krv86WdV3emYcHSup/nwpMHzHVs root@master
The key's randomart image is:
+---[RSA 2048]----+
|        . +o     |
|         o.++. . |
|        .  oooo +|
|       .. +oo..o+|
|        So.*o.= .|
|         o=..+ o.|
|        ..oo =++E|
|          +. +@ =|
|         ..o++.=.|
+----[SHA256]-----+

ssh-copy-id master
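
Logging in to the node should now work without a password prompt (an optional check):

ssh master hostname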

Configure environment variables

vi /etc/profile
export JAVA_HOME=/usr/local/src/java
export PATH=$PATH:$JAVA_HOME/bin
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export SPARK_HOME=/usr/local/src/spark
export PATH=$PATH:$SPARK_HOME/bin
export HIVE_HOME=/usr/local/src/hive
export PATH=$PATH:$HIVE_HOME/bin
source /etc/profile
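
A few version checks confirm that the variables took effect (optional):

java -version
hadoop version
spark-submit --version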

Hadoop Setup

Configure the Hadoop files
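
These configuration files all live under $HADOOP_HOME/etc/hadoop, so switch there first (path follows the layout above):

cd /usr/local/src/hadoop/etc/hadoop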

core-site.xml

vi core-site.xml
<configuration>
        <property>
                <name>fs.defaultFS</name>
                <value>hdfs://master:9000</value>
        </property>
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/usr/local/src/tmps/hadoop</value>
        </property>
</configuration>

hdfs-site.xml

vi hdfs-site.xml
<configuration>
        <property>
                <name>dfs.namenode.name.dir</name>
                <value>/usr/local/src/tmps/hadoop/name</value>
                <description>To keep the metadata safe, multiple different directories are usually configured</description>
        </property>
        <property>
                <name>dfs.datanode.data.dir</name>
                <value>/usr/local/src/tmps/hadoop/data</value>
                <description>Data storage directory for the DataNode</description>
        </property>
        <property>
                <name>dfs.replication</name>
                <value>2</value>
                <description>Number of replicas for each HDFS block; the default is 3</description>
        </property>
</configuration> 

mapred-site.xml

cp mapred-site.xml.template mapred-site.xml
vi mapred-site.xml

<configuration>
        <property>
                <name>mapreduce.framework.name</name>
                <value>yarn</value>
        </property>
</configuration>

yarn-site.xml

vi yarn-site.xml
<configuration>
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
                <description>Shuffle service that the YARN cluster provides for MapReduce programs</description>
        </property>
</configuration>       

hadoop-env.sh
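
This file sits in the same directory; set JAVA_HOME explicitly so the daemons do not depend on the login shell's environment:

vi hadoop-env.sh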

export JAVA_HOME=/usr/local/src/java

Initialize Hadoop

hdfs namenode -format

Start Hadoop

start-all.sh
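
If everything started, jps should list NameNode, SecondaryNameNode, DataNode, ResourceManager and NodeManager (an optional check):

jps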

Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

Hive Setup

Configure MySQL

Install MySQL

rpm -ivh https://repo.mysql.com//mysql57-community-release-el7-11.noarch.rpm
yum install mysql-community-server

Enable passwordless login

vi /etc/my.cnf
# Skip password authentication when logging in to MySQL
skip-grant-tables 

default-storage-engine=innodb
innodb_file_per_table
collation-server=utf8_general_ci
init-connect='SET NAMES utf8'
character-set-server=utf8

Start MySQL

systemctl start mysqld.service

Check the service status

systemctl status mysqld

Log in to MySQL

mysql
use mysql
# Reload the grant tables
flush privileges;

Set the root password

set password for root@'localhost' = password('root');

Permission settings

# Allow the root user in the mysql.user table to connect from any IP
update mysql.user set host='%' where user='root';
# Reload the privileges
flush privileges;
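
An optional check, still inside the mysql client, that the host value was updated:

select user, host from mysql.user;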

Disable passwordless login

Comment out skip-grant-tables in /etc/my.cnf.
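
The change only takes effect after a restart; logging in should then require the password set above:

systemctl restart mysqld.service
mysql -uroot -proot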

Configure Hive

Configure hive-site.xml
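
hive-site.xml does not ship with Hive by default; it is usually created under $HIVE_HOME/conf (/usr/local/src/hive/conf in this layout):

vi /usr/local/src/hive/conf/hive-site.xml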

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
        <!-- JDBC connection URL -->
        <property>
                <name>javax.jdo.option.ConnectionURL</name>
                <value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8&amp;useSSL=false</value>
        </property>
        <!-- JDBC connection driver -->
        <property>
                <name>javax.jdo.option.ConnectionDriverName</name>
                <value>com.mysql.jdbc.Driver</value>
        </property>
        <!-- JDBC connection username -->
        <property>
                <name>javax.jdo.option.ConnectionUserName</name>
                <value>root</value>
        </property>
        <!-- JDBC connection password -->
        <property>
                <name>javax.jdo.option.ConnectionPassword</name>
                <value>root</value>
        </property>
        <!-- Skip verification of the metastore schema version -->
        <property>
                <name>hive.metastore.schema.verification</name>
                <value>false</value>
        </property>
        <!-- Metastore event DB notification API authorization -->
        <property>
                <name>hive.metastore.event.db.notification.api.auth</name>
                <value>false</value>
        </property>
        <!-- Hive's default warehouse directory on HDFS -->
        <property>
                <name>hive.metastore.warehouse.dir</name>
                <value>/user/hive/warehouse</value>
        </property>
</configuration>

Import the JDBC driver JAR

Copy mysql-connector-java-5.1.38.jar into $HIVE_HOME/lib.
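
For example, assuming the connector JAR sits in the current directory (adjust the source path as needed):

cp mysql-connector-java-5.1.38.jar /usr/local/src/hive/lib/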

Initialize the metastore schema

schematool -initSchema -dbType mysql
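
If the schema initialized without errors, the Hive CLI should start and list its built-in default database (a quick sanity check, assuming HDFS is still running):

hive
show databases;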
