安装hive2.3.4 配置全过程,成功了!

服务器环境是 CentOS 7

1.安装mysql

  
 
 rpm -qa|grep mariadb 
 
 
rpm -e --nodeps mariadb-libs-5.5.56-2.el7.x86_64
rpm -e --nodeps mariadb-server-5.5.56-2.el7.x86_64
rpm -e --nodeps mariadb-5.5.56-2.el7.x86_64
 
 
tar -xvf mysql-5.7.24-1.el7.x86_64.rpm-bundle.tar
 
 
rpm -ivh mysql-community-common-5.7.24-1.el7.x86_64.rpm
rpm -ivh mysql-community-libs-5.7.24-1.el7.x86_64.rpm
rpm -ivh mysql-community-client-5.7.24-1.el7.x86_64.rpm
rpm -ivh mysql-community-server-5.7.24-1.el7.x86_64.rpm
service mysqld start
chkconfig mysqld on
 
service mysqld start
	  
chkconfig mysqld on
  
# Stop and disable the firewall so remote clients can reach the
# MySQL/Hadoop/Hive ports.  Dev/test setup only — on a production
# host open the specific ports instead.
systemctl stop firewalld.service 
 
systemctl disable firewalld.service
 
  
# MySQL 5.7 writes a temporary root password to the log on first
# start; fish it out, then log in with it.
grep password /var/log/mysqld.log
mysql -p
 
 
  重置密码:
 
  -- Replace the temporary root password.  MySQL 5.7's default
  -- password policy requires mixed case, a digit and a symbol.
  set password = password("3edcVFR$");
 
  use mysql;
 
  -- Allow root to log in from any host, so clients on other
  -- machines can reach the metastore DB.  Dev/test only.
  GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '3edcVFR$' WITH GRANT OPTION;
 
  flush privileges;  
 
 
  

 

2.安装hadoop和hive

 

# Unpack Hive into /usr/local and rename to a version-neutral path.
tar -zxvf ./apache-hive-2.3.4-bin.tar.gz -C /usr/local/
cd /usr/local/
mv apache-hive-2.3.4-bin hive


vi /etc/profile

# /usr/local/hadoop is the Hadoop install directory.
# (The original line listed hadoop/sbin twice; the second entry must
# be hadoop/bin, otherwise the hadoop/hdfs commands are not on PATH
# from this line.  hive/bin is added once, via HIVE_HOME below.)
export PATH=$PATH:/usr/local/hadoop/sbin:/usr/local/hadoop/bin
export HIVE_HOME=/usr/local/hive
export PATH=$PATH:$HIVE_HOME/bin

source /etc/profile

# Keep the shipped template as a reference copy and create a minimal
# hive-site.xml containing only the metastore settings.
cd /usr/local/hive/conf

mv hive-default.xml.template hive-default.xml

vim hive-site.xml


<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Minimal hive-site.xml: point the Hive metastore at local MySQL. -->
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <!-- createDatabaseIfNotExist lets schematool create the "hive"
         database on first run; &amp; is the XML escape for "&". -->
    <value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
    <description>JDBC connect string for a JDBC metastore</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>Driver class name for a JDBC metastore</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
    <description>username to use against metastore database</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <!-- NOTE(review): plaintext DB password in config — acceptable
         on a throwaway test box only. -->
    <value>3edcVFR$</value>
    <description>password to use against metastore database</description>
  </property>
</configuration>



mv hive-env.sh.template hive-env.sh

vim hive-env.sh



# Hadoop installation directory:
export HADOOP_HOME=/usr/local/hadoop
# Hive conf directory:
export HIVE_CONF_DIR=/usr/local/hive/conf
# Hive lib directory (auxiliary jars):
export HIVE_AUX_JARS_PATH=/usr/local/hive/lib



安装java

# Unpack the JDK and move it to a fixed, version-neutral path
# (matches JAVA_HOME=/usr/local/java set in /etc/profile below).
tar -zxvf jdk-8u191-linux-x64.tar.gz
mv jdk1.8.0_191/ /usr/local/java






安装hadoop 

# Unpack Hadoop and move it to the path referenced everywhere else
# in this guide (/usr/local/hadoop).
tar -xzvf hadoop-2.9.1.tar.gz

mv hadoop-2.9.1 /usr/local/hadoop






ssh免密登录


# Passwordless SSH to localhost is required by start-dfs.sh /
# start-yarn.sh.  The first ssh creates ~/.ssh; then generate a key
# pair (accept the defaults) and authorize the public key.
ssh localhost
exit 

cd ~/.ssh/
ssh-keygen -t rsa
cat ./id_rsa.pub >> ./authorized_keys







vim /etc/profile
配置文件:
 
# Java/Hadoop environment (append to /etc/profile).
export JAVA_HOME=/usr/local/java
export HADOOP_HOME=/usr/local/hadoop
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
# Point java.library.path at the native libraries themselves
# (lib/native, consistent with hadoop-env.sh), not the parent lib/.
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib/native"
# NOTE(review): leading "." in PATH is a convenience here; drop it
# on any multi-user machine.
export PATH=.:${JAVA_HOME}/bin:${HADOOP_HOME}/bin:$PATH




配置hadoop


cd /usr/local/hadoop/etc/hadoop/

vim core-site.xml

 

<configuration>
<property>
        <name>hadoop.tmp.dir</name>
        <value>file:/usr/local/hadoop/tmp</value>
        <description>Abase for other temporary directories.</description>
   </property>
   <!-- NOTE(review): fs.default.name is the deprecated alias of
        fs.defaultFS; it still works in Hadoop 2.x.  "master" must
        resolve to this machine (e.g. via /etc/hosts) — confirm. -->
   <property>
        <name>fs.default.name</name>
        <value>hdfs://master:9000</value>
   </property>

   <!-- Let user "root" impersonate any user from any host; needed
        by HiveServer2, which proxies queries for its clients. -->
   <property>
     <name>hadoop.proxyuser.root.hosts</name>
     <value>*</value>
   </property>
   <property>
     <name>hadoop.proxyuser.root.groups</name>
     <value>*</value>
   </property>
</configuration>










vi hdfs-site.xml

<configuration>
        <!-- Single-node cluster: keep only one replica per block. -->
        <property>
             <name>dfs.replication</name>
             <value>1</value>
        </property>
        <!-- Persist NameNode metadata and DataNode blocks under the
             tmp tree created later with mkdir -p. -->
        <property>
             <name>dfs.namenode.name.dir</name>
             <value>file:/usr/local/hadoop/tmp/dfs/name</value>
        </property>
        <property>
             <name>dfs.datanode.data.dir</name>
             <value>file:/usr/local/hadoop/tmp/dfs/data</value>
        </property>
</configuration> 



cp mapred-site.xml.template mapred-site.xml
vim mapred-site.xml

 
<!-- The original snippet lacked the mandatory <configuration> root
     element; pasted verbatim, mapred-site.xml would be malformed. -->
<configuration>
  <property>
    <name>mapred.local.dir</name>
    <value>/usr/local/hadoop/tmp/mapred</value>
  </property>
  <!-- Run MapReduce jobs on YARN rather than the local runner. -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>




vim yarn-site.xml

 
<configuration>
        <!-- Auxiliary shuffle service the NodeManager must run so it
             can serve MapReduce intermediate data. -->
        <property>
             <name>yarn.nodemanager.aux-services</name>
             <value>mapreduce_shuffle</value>
            </property>
</configuration> 


cd /usr/local/hadoop/etc/hadoop/
vim hadoop-env.sh


# hadoop-env.sh needs JAVA_HOME set explicitly — the daemons started
# via the sbin scripts do not reliably inherit it from /etc/profile.
export JAVA_HOME=/usr/local/java
export HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_HOME}/lib/native
export HADOOP_OPTS="-Djava.library.path=${HADOOP_HOME}/lib/native/"



# Create the local directories referenced by hdfs-site.xml and
# mapred-site.xml before formatting.
mkdir -p /usr/local/hadoop/tmp/mapred

mkdir -p /usr/local/hadoop/tmp/dfs/data

mkdir -p /usr/local/hadoop/tmp/dfs/name
 

# Format the NameNode once before first start.  The original ran
# "./hadoop namenode -format", which fails here (the binary lives in
# /usr/local/hadoop/bin, not the current directory) and uses the
# form deprecated in Hadoop 2.x; "hdfs namenode -format" replaces it.
/usr/local/hadoop/bin/hdfs namenode -format


# Bring up HDFS and YARN.
cd /usr/local/hadoop/

./sbin/start-dfs.sh
./sbin/start-yarn.sh



# Optional: job history server (start/stop as needed).
#./sbin/mr-jobhistory-daemon.sh start historyserver 



# For reference — to shut the cluster down again:
./sbin/stop-dfs.sh
./sbin/stop-yarn.sh


#./sbin/mr-jobhistory-daemon.sh stop historyserver



# Create the HDFS directories Hive uses for its warehouse, scratch
# space and logs, and open them up (777 is fine on a test cluster;
# HDFS must be running for these to succeed).
hadoop fs -mkdir -p /user/hive/warehouse
hadoop fs -mkdir -p /user/hive/tmp
hadoop fs -mkdir -p /user/hive/log
hadoop fs -chmod -R 777 /user/hive/warehouse
hadoop fs -chmod -R 777 /user/hive/tmp
hadoop fs -chmod -R 777 /user/hive/log



将 MySQL JDBC 驱动(mysql-connector-java-*.jar)复制到
/usr/local/hive/lib/

# Initialize the Hive metastore schema in MySQL.  Requires the MySQL
# JDBC driver jar in /usr/local/hive/lib and the hive-site.xml
# credentials above; run exactly once.
cd /usr/local/hive/bin
schematool -initSchema -dbType mysql




请先确保 hadoop 已正常启动!
 
-- Start the Hive CLI (HDFS/YARN must already be running), then run
-- a quick smoke test: the insert launches a real MapReduce job, so
-- it exercises the whole stack.
hive 

show databases;

create database hds;

use hds;

create table test(id int ,name varchar(100));

insert into test values(1,'zz');



启动远程访问:







# Start the metastore and HiverServer2's metastore companion as
# background services.  nohup + log redirection keeps them running
# after logout (a bare "&" job may be killed by SIGHUP when the
# login shell exits) and captures their output for debugging.
nohup hive --service metastore > /tmp/hive-metastore.log 2>&1 &

nohup hive --service hiveserver2 > /tmp/hiveserver2.log 2>&1 &


# HiveServer2 listens on port 10000 by default; verify it is up.
netstat -antpl | grep 10000












 

 

3.远程访问URL:jdbc:hive2://192.168.31.27:10000/hds

 

 

4.web访问地址:http://192.168.31.27:8088

 

 

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值