Setting Up a Hadoop 3.2.1 Single-Node Environment on a Cloud Server

1. Add a user

adduser xx

2. Set the hostname

hostnamectl set-hostname hadoop001
ping hadoop001; a successful ping means the hostname resolves
vim /etc/hosts and add the hostname mapping (private IP  hadoop001)

3. Set up passwordless SSH

ssh-keygen -t rsa, then press Enter through all the prompts;
cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys to add the key to authorized_keys;
ssh localhost to test;

4. Firewall settings

Check firewall status: firewall-cmd --state / systemctl status firewalld
Start/stop the firewall: systemctl start firewalld / systemctl stop firewalld
List open ports (with the firewall running): firewall-cmd --list-ports

5. Install the JDK

  • Install lrzsz (for rz/sz file transfers)

yum -y install lrzsz

  • Create a directory, then upload and extract the JDK archive

cd /opt
mkdir java
tar -zxvf jdk-8u271-linux-x64.tar.gz -C /opt/java

  • Set the JDK environment variables in /etc/profile
export JAVA_HOME=/opt/java/jdk1.8.0_271
export PATH=$JAVA_HOME/bin:$PATH
  • Apply the environment variables

source /etc/profile
java -version

6. Download and install Hadoop 3.2.1
Download: https://hadoop.apache.org/release/3.2.1.html

tar -zxvf hadoop-3.2.1.tar.gz -C /opt
mv /opt/hadoop-3.2.1 /opt/hadoop

  • Check the version

cd /opt/hadoop
./bin/hadoop version

7. Hadoop configuration

  • Create directories

mkdir /opt/hadoop/tmp
mkdir /opt/hadoop/hdfs
mkdir /opt/hadoop/hdfs/data
mkdir /opt/hadoop/hdfs/name

  • Set the Hadoop environment variables
vi /etc/profile
export HADOOP_HOME=/opt/hadoop/
export PATH=$PATH:$HADOOP_HOME/bin
# Apply the environment variables
source /etc/profile
source ~/.bash_profile   (on CentOS, if you use ~/.bash_profile)

8. Edit the Hadoop configuration files (/opt/hadoop/etc/hadoop/)

  • Configure hadoop-env.sh
    Append to the end of the file: export JAVA_HOME=/opt/java/jdk1.8.0_271
  • Add the following at the top of start-dfs.sh and stop-dfs.sh
#vim /opt/hadoop/sbin/start-dfs.sh 
#vim /opt/hadoop/sbin/stop-dfs.sh
HDFS_DATANODE_USER=root
HDFS_DATANODE_SECURE_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
  • Add the following at the top of start-yarn.sh and stop-yarn.sh
#vim /opt/hadoop/sbin/start-yarn.sh  
#vim /opt/hadoop/sbin/stop-yarn.sh
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=yarn
YARN_NODEMANAGER_USER=root
  • Configure core-site.xml
<configuration>
	<property>
		<name>fs.defaultFS</name>
		<value>hdfs://hadoop001:9000</value>
		<description>HDFS URI: filesystem://namenode-host:port</description>
	</property>
	<property>
		<name>hadoop.tmp.dir</name>
		<value>/opt/hadoop/tmp</value>
		<description>Local Hadoop temporary directory on the NameNode</description>
	</property>
</configuration>
  • Configure hdfs-site.xml
<configuration>
	<property>
		<name>dfs.replication</name>
		<value>1</value>
		<description>Number of replicas; the default is 3, and it should not exceed the number of DataNodes</description>
	</property>
</configuration>
  • Configure mapred-site.xml
<configuration>
<!-- Added because historical jobs were not visible when checking job history -->
     <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
     </property>	
	<property>
		<name>yarn.app.mapreduce.am.env</name>
		<value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
	</property>
	<property>
		<name>mapreduce.map.env</name>
		<value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
	</property>
	<property>
		<name>mapreduce.reduce.env</name>
		<value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
	</property>
</configuration>
  • Configure yarn-site.xml
<configuration>
	<property>
		<name>yarn.nodemanager.aux-services</name>
		<value>mapreduce_shuffle</value>
	</property>
</configuration>

9. Format the NameNode:

cd /opt/hadoop
./bin/hdfs namenode -format

10. Start Hadoop

cd /opt/hadoop/sbin
./start-all.sh
Run jps to check the processes
Web UI: YARN on port 8088; HDFS (NameNode) on port 9870

10.1 Configure the job history server
To review how past jobs ran, configure the history server.

  1. Configure mapred-site.xml

    vi mapred-site.xml and add the following configuration.

 <!-- History server address -->
    <property>
		<name>mapreduce.jobhistory.address</name>
		<value>hadoop001:10020</value>
    </property>
    <!-- History server web UI address -->
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>hadoop001:19888</value>
    </property>

2. Start the history server
/opt/hadoop/sbin/mr-jobhistory-daemon.sh start historyserver
3. Check that the history server is running
jps ==> JobHistoryServer
4. View the JobHistory web UI
http://hadoop001:19888/jobhistory

10.2 Configure log aggregation

Log aggregation: after an application finishes, its logs are uploaded to HDFS.
Benefit: it makes it easy to inspect job execution details, which helps with development and debugging.
Note: enabling log aggregation requires restarting the NodeManager, ResourceManager, and JobHistoryServer.

To enable log aggregation:
Configure yarn-site.xml
vi yarn-site.xml and add the following configuration.

<!-- Enable log aggregation -->
<property>
	<name>yarn.log-aggregation-enable</name>
	<value>true</value>
</property>

<!-- Retain aggregated logs for 7 days -->
<property>
	<name>yarn.log-aggregation.retain-seconds</name>
	<value>604800</value>
</property>

11. Test with WordCount

hadoop fs -mkdir /input
hadoop fs -put <some local text file> /input
hadoop fs -rm -r /output   (remove any previous output; the output directory must not already exist)
cd /opt/hadoop/share/hadoop/mapreduce/
hadoop jar hadoop-mapreduce-examples-3.2.1.jar wordcount /input /output
hadoop fs -cat /output/part-r-00000
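
To check the result from code as well, here is a minimal sketch that reads the job output with the HDFS Java client (the hadoop-client dependency from the pom.xml at the end of this post). It assumes the default single-reducer output file name part-r-00000 and the fs.defaultFS configured above; adjust the paths and user as needed.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WordCountOutput {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Connect to the NameNode configured in core-site.xml, acting as user root
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://hadoop001:9000"), conf, "root");
             FSDataInputStream in = fs.open(new Path("/output/part-r-00000"));
             BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line);   // each line is "<word>\t<count>"
            }
        }
    }
}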

12. Install MySQL

  • Install wget

yum -y install wget

  • Install the MySQL YUM repository

wget https://dev.mysql.com/get/mysql57-community-release-el7-9.noarch.rpm

rpm -ivh mysql57-community-release-el7-9.noarch.rpm
cd /etc/yum.repos.d/

yum install mysql-server -y

  • Start MySQL

systemctl start mysqld

  • Get the temporary password generated at install time (it is used for the first login)

grep 'temporary password' /var/log/mysqld.log

If no temporary password was generated, remove the data left over from a previous MySQL installation (rm -rf /var/lib/mysql), then start MySQL again: systemctl start mysqld

  • Log in to MySQL
mysql -u root -p   ==> enter the temporary password (n3xxxxO;hoq)
  • Change the root password
ALTER USER 'root'@'localhost' IDENTIFIED BY 'Sxxxx';
  • Create the hive metastore database and a remote root account (a JDBC connectivity check is sketched after this list)
create database hive;
use mysql;
CREATE USER 'root'@'%' IDENTIFIED BY 'Sxxxx!';
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'Sxxxx!' WITH GRANT OPTION;
flush privileges;
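
As a quick check that the account and grants above work, the sketch below connects over JDBC with com.mysql.cj.jdbc.Driver (the same driver Hive uses later). It assumes mysql-connector-java 8.x on the classpath; the host and password are placeholders from this guide, so substitute your own.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class MySqlCheck {
    public static void main(String[] args) throws Exception {
        // Use the hive database created above; useSSL/serverTimezone silence MySQL 5.7 driver warnings
        String url = "jdbc:mysql://127.0.0.1:3306/hive?useSSL=false&serverTimezone=UTC";
        try (Connection conn = DriverManager.getConnection(url, "root", "Sxxxx!");
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SELECT VERSION()")) {
            if (rs.next()) {
                System.out.println("Connected, MySQL version: " + rs.getString(1));
            }
        }
    }
}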

13. Install Hive

  • Extract

tar -zxvf apache-hive-3.1.2-bin.tar.gz -C /opt

  • Rename the Hive directory

mv /opt/apache-hive-3.1.2-bin /opt/hive

  • Add environment variables to /etc/profile
export HIVE_HOME=/opt/hive/
export PATH=$PATH:$HIVE_HOME/bin
  • Reload the environment variables

source /etc/profile

hive --version

  • Configure hive-env.sh

cd /opt/hive/conf
cp hive-env.sh.template hive-env.sh

# Hadoop installation directory
HADOOP_HOME=/opt/hadoop/
# Hive configuration directory
export HIVE_CONF_DIR=/opt/hive/conf
# Hive auxiliary jars directory
export HIVE_AUX_JARS_PATH=/opt/hive/lib
  • Configure hive-site.xml (copy hive-default.xml.template to hive-site.xml)

cd /opt/hive/conf
cp hive-default.xml.template hive-site.xml
vim hive-site.xml

<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://xxx.xxx.xx.xxx:3306/hive?createDatabaseIfNotExist=true</value>
    <description>JDBC connect string for a JDBC metastore</description>
  </property>
<property>
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>com.mysql.cj.jdbc.Driver</value>
</property>
<property>
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>root</value>
</property>

<property>
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>Sxxxxx</value>
</property>
<property>
    <name>hive.support.concurrency</name>
    <value>true</value>
    <description>
      Whether Hive supports concurrency control or not.
      A ZooKeeper instance must be up and running when using zookeeper Hive lock manager
    </description>
  </property>
<property>
        <name>hive.cli.print.header</name>
        <value>true</value>
        <description>Whether to print the names of the columns in query output.</description>
    </property>
    <property>
        <name>hive.cli.print.current.db</name>
        <value>true</value>
        <description>Whether to include the current database in the Hive prompt.</description>
    </property>

</configuration>
Set javax.jdo.option.ConnectionDriverName: older Connector/J versions used com.mysql.jdbc.Driver; with Connector/J 8.x use com.mysql.cj.jdbc.Driver
Set the database user name: javax.jdo.option.ConnectionUserName = root
Set the database password: javax.jdo.option.ConnectionPassword = Sxxxxx
Set the database URL: javax.jdo.option.ConnectionURL = jdbc:mysql://xxx.xx.xxx.xxx:3306/hive
Set hive.support.concurrency = true

Download the MySQL JDBC driver from https://dev.mysql.com/downloads/connector/j/: under MySQL Connectors choose Connector/J, select Platform Independent, and download the tar archive. Copy mysql-connector-java-8.0.19.jar into Hive's lib directory.
Remove the conflicting slf4j binding from Hive's lib directory: mv log4j-slf4j-impl-2.10.0.jar /root/log4j-slf4j-backup.jar
Initialize the metastore: cd /opt/hive ==> ./bin/schematool -dbType mysql -initSchema
14. Install ZooKeeper (port 2181)
Download version 3.6.1 from the official site, extract it, and rename the directory to /opt/zookeeper.
Create data and logs directories under /opt/zookeeper for data and logs:
cd /opt/zookeeper
mkdir data
mkdir logs
Create a zoo.cfg file in /opt/zookeeper/conf with the following content:

tickTime=2000
dataDir=/opt/zookeeper/data
dataLogDir=/opt/zookeeper/logs
clientPort=2181

Start/stop commands (run from the bin directory); a Java client check is sketched after these commands:
./zkServer.sh start
./zkServer.sh stop
./zkServer.sh restart
./zkServer.sh status
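
To confirm ZooKeeper is reachable from an application, here is a minimal sketch using the ZooKeeper Java client (it assumes the org.apache.zookeeper:zookeeper jar on the classpath; the connect string and timeout are illustrative):

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkCheck {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Connect to the standalone ZooKeeper started above on hadoop001:2181
        ZooKeeper zk = new ZooKeeper("hadoop001:2181", 5000, new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                if (event.getState() == Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            }
        });
        connected.await();                                  // wait for the session to be established
        System.out.println("children of /: " + zk.getChildren("/", false));
        zk.close();
    }
}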

15. Install HBase
Download HBase 2.2.6, extract it, and rename the directory to /opt/hbase

  • Add environment variables
export HBASE_HOME=/opt/hbase/
export PATH=$PATH:$HBASE_HOME/bin

source /etc/profile to apply the environment variables

  • Configure hbase-env.sh

cd /opt/hbase/conf
vim hbase-env.sh
Add:

export JAVA_HOME=/opt/java/jdk1.8.0_271
export HBASE_MANAGES_ZK=true
export HBASE_CLASSPATH=/opt/hbase/conf
# Notes on these settings:
# JAVA_HOME points to the Java installation;
# HBASE_MANAGES_ZK controls whether HBase manages its own bundled ZooKeeper
#   (set it to false if you want HBase to use the standalone ZooKeeper installed in step 14);
# HBASE_CLASSPATH points to the HBase configuration directory.
  • Configure hbase-site.xml
<configuration>
	<!-- HBase root directory on HDFS -->
	<property>
		<name>hbase.rootdir</name>
		<value>hdfs://hadoop001:9000/hbase</value>
	</property>
	<!-- Run HBase in distributed mode -->
	<property>
		<name>hbase.cluster.distributed</name>
		<value>true</value>
	</property>
	<!-- ZooKeeper quorum; separate multiple hosts with "," -->
	<property>
		<name>hbase.zookeeper.quorum</name>
		<value>hadoop001:2181</value>
	</property>
	<property>
		<name>hbase.master.info.port</name>
		<value>16011</value>
	</property>
	<property>
		<name>dfs.replication</name>
		<value>1</value>
	</property>
	<property>
		<name>hbase.unsafe.stream.capability.enforce</name>
		<value>false</value>
	</property>
	<property>
		<name>hbase.tmp.dir</name>
		<value>/opt/hbase/tmp</value>
	</property>
</configuration>
  • Create the ZooKeeper storage directory for HBase

sudo mkdir -p /data/tmp/zookeeper-hbase

  • Edit /opt/hbase/conf/regionservers

This file lists the nodes of the HBase cluster; with only one node, localhost is enough.

  • Remove the duplicate slf4j jar

mv client-facing-thirdparty /root

  • Start HBase (a Java client sketch follows)

cd /opt/hbase/bin
./start-hbase.sh
hbase shell
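
To verify HBase from code, here is a minimal sketch using the hbase-client API (the same artifact listed in the pom.xml at the end of this post). The table and column family names are made up for illustration; create them first in the shell with: create 'test','cf'

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class HBaseCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "hadoop001");            // matches hbase-site.xml above
        conf.set("hbase.zookeeper.property.clientPort", "2181");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("test"))) {
            Put put = new Put(Bytes.toBytes("row1"));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col"), Bytes.toBytes("hello hbase"));
            table.put(put);
            Result result = table.get(new Get(Bytes.toBytes("row1")));
            System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("col"))));
        }
    }
}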

16. Install Scala

cd /opt
wget https://downloads.lightbend.com/scala/2.11.8/scala-2.11.8.tgz
tar zxvf scala-2.11.8.tgz -C /opt
mv /opt/scala-2.11.8 /opt/scala
Edit /etc/profile:

#scala
export SCALA_HOME=/opt/scala
PATH=$PATH:$SCALA_HOME/bin

source /etc/profile
scala -version
Run scala to start the Scala REPL
17. Install Spark
Extract the Spark archive and rename the directory to /opt/spark
Edit /etc/profile:

# Hadoop, Java, and Scala must already be installed, with their environment variables configured
# spark
export SCALA_HOME=/opt/scala
export SPARK_HOME=/opt/spark
export PATH=$PATH:$SPARK_HOME/bin:$SPARK_HOME/sbin

source /etc/profile

# For a single-node setup, point the master at this machine's IP (typically in conf/spark-env.sh)
export SPARK_MASTER_IP=192.168.0.246
#export SPARK_EXECUTOR_MEMORY=1G
#export SPARK_DIST_CLASSPATH=$(hadoop classpath)

cd /opt/spark/sbin
./start-master.sh
Run jps to check that the Master process is running.
Open the inbound rules of the cloud security group: the Spark web UI uses port 8080, and 7077 is the port for submitting jobs to the standalone master.
Local-mode test:
cd /opt/spark/bin
spark-submit --class org.apache.spark.examples.SparkPi --executor-memory 1G --total-executor-cores 2 /opt/spark/examples/jars/spark-examples_2.12-3.0.0.jar 100
Connect with spark-shell:
cd /opt/spark/bin
./spark-shell --master spark://124.70.213.236:7077
Start a worker (single machine, so attach a worker to the local master):
cd /opt/spark/bin
./spark-class org.apache.spark.deploy.worker.Worker spark://hadoop001:7077

18. Install Phoenix

Download: http://phoenix.apache.org/download.html

(1) After extracting, copy phoenix-4.8.2-HBase-1.2-server.jar and phoenix-core-4.8.2-HBase-1.2.jar from the Phoenix directory into HBase's lib directory.
(2) Copy HBase's hbase-site.xml and Hadoop's core-site.xml and hdfs-site.xml (from hadoop/etc/hadoop) into phoenix/bin/, replacing Phoenix's original configuration files.
(3) Restart the HBase cluster so the Phoenix jars take effect.
(4) In phoenix/bin, run (the port can be omitted): [root@bw01 bin]# ./sqlline.py bw01:2181
(5) Run !tables to list the tables: your own tables are shown alongside the Phoenix system tables, which hold the metadata of user tables.
(6) To exit Phoenix, type !quit
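
Phoenix can also be used over JDBC. Below is a minimal sketch assuming the Phoenix client jar is on the classpath; the thick-client URL format is jdbc:phoenix:<zookeeper quorum>[:<port>], and the table name is made up for illustration.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class PhoenixCheck {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:hadoop001:2181");
             Statement st = conn.createStatement()) {
            st.executeUpdate("CREATE TABLE IF NOT EXISTS test_phx (id INTEGER PRIMARY KEY, name VARCHAR)");
            st.executeUpdate("UPSERT INTO test_phx VALUES (1, 'hello')");
            conn.commit();                       // Phoenix connections do not auto-commit by default
            try (ResultSet rs = st.executeQuery("SELECT id, name FROM test_phx")) {
                while (rs.next()) {
                    System.out.println(rs.getInt(1) + " -> " + rs.getString(2));
                }
            }
        }
    }
}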


Pitfalls encountered during installation:

  • Java client errors when downloading files from HDFS (hosts configuration)

Check name resolution on the server in /etc/hosts: the private IP should map to hadoop001
Check the hosts file on your local machine: map the cloud server's public IP to hadoop001
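
For illustration, here is a minimal download sketch using the HDFS Java client (hadoop-client from the pom.xml below). The file paths are placeholders; dfs.client.use.datanode.hostname is an optional client setting that is often needed when the client sits outside the cloud's private network, so that DataNodes are reached through the hosts mapping above rather than by their private IPs.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsDownload {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Address DataNodes by hostname so the local hosts entry (public IP -> hadoop001) is used
        conf.set("dfs.client.use.datanode.hostname", "true");
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://hadoop001:9000"), conf, "root")) {
            fs.copyToLocalFile(new Path("/input/example.txt"), new Path("/tmp/example.txt"));
            System.out.println("download finished");
        }
    }
}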
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • The NameNode service fails to start

Fix the hostname entries in /etc/hosts and core-site.xml:
127.0.0.1 localhost
192.168.0.xxx (the private IP, not the public IP) hadoop001
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • Connect to the remote server from Notepad++ with NppFTP

Plugins -> Plugin Manager -> Show Plugin Manager -> check the NppFTP plugin -> Install
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
  • stop-all.sh no longer shuts down the services
stop-all.sh stops the cluster based on the PID files of the dfs and mapred daemons on the DataNode. The system periodically cleans the directory holding those PID files (by default under /tmp), so once they are gone the matching processes cannot be found and therefore cannot be stopped.
Find the processes with jps and kill them with kill -9. To prevent this from recurring, edit hadoop-env.sh on every node: remove the # in front of export HADOOP_PID_DIR=${HADOOP_PID_DIR} and set it to a persistent directory, e.g. export HADOOP_PID_DIR=/opt/hadoop/pids. Then restart the Hadoop cluster with start-all.sh.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • Error: WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform… using builtin-java classes where applicable

cp -r /opt/hadoop/lib/native/* /usr/local/lib/hadoop-native
If running as root, also add these environment variables:
vim /etc/profile
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • Error: Permission denied (publickey,gssapi-keyex,gssapi-with-mic,password).

https://blog.csdn.net/coffeeandice/article/details/78879151
Uncomment (or add, if missing) the following two lines in /etc/ssh/sshd_config; this must be done on every server:
RSAAuthentication yes
PubkeyAuthentication yes
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • Could not find or load main class org.apache.hadoop.mapreduce.v2.app.MRAppMaster

Run the following command: hadoop classpath
vim /opt/hadoop/etc/hadoop/yarn-site.xml

<property>
    <name>yarn.application.classpath</name>
    <value>(paste the output of hadoop classpath here)</value>
</property>

Restart the Hadoop cluster and rerun the WordCount example.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • hive-site.xml configuration file not found

$HIVE_HOME/conf contains a default configuration template, hive-default.xml.template, which holds the default parameters. Copy this template and name the copy hive-site.xml.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • Hive initialization error

After initialization completes, many new tables appear in the metastore database; they hold Hive's metadata and are worth a look.
New directories also appear on HDFS, such as /user/hive/warehouse.
The error here was java.lang.NoSuchMethodError:
com.google.common.base.Preconditions.checkArgument(ZLjava/lang/String;Ljava/lang/Object;)
Delete the lower-version guava-*.jar from Hive's lib directory and copy the guava-*-jre.jar from Hadoop (hadoop/share/hadoop/common/lib) into Hive's lib directory.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • Hive startup error: Exception in thread "main" java.lang.RuntimeException:
    com.ctc.wstx.exc.WstxParsingException: Illegal character entity:
    expansion character (code 0x8 at

There is a special character in /opt/hive/conf/hive-site.xml around line 3215 (the exact line appears in the second line of the error). Open hive-site.xml, jump to that line, and delete the &#8 character entity.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • ./bin/schematool -dbType mysql -initSchema initialization errors

1. SQL Error code: 1045 ==> open the inbound port in the cloud security group.
2. SQL Error code: 0 ==> set password for root@localhost = password('Shxxxxx');
yum install net-tools
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • Hive error: Underlying cause: java.sql.SQLException : null, message from
    server: "Host 'xxx.xx.xx.xx' is not allowed to connect to this MySQL
    server" SQL Error code: 1130

The remote MySQL server does not allow your Java program to connect to its database, so configure the server to accept the connection:
Log in to MySQL ==> use mysql; ==> update user set host = '%' where user='root'; ==> restart MySQL
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
  • Integrating Hive with Spark 3.0.0 on Hadoop 3.2.1
http://www.yiyong.info/article/170
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • Spark 3.0 download

https://archive.apache.org/dist/spark/spark-3.0.0/
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • Installing and configuring Hadoop 3.2.1 on Windows 10

https://blog.csdn.net/wynlnlhhjy/article/details/107298905
https://github.com/cdarlint/winutils

1) Put winutils.exe into the bin directory of your Hadoop installation on Windows
2) Put hadoop.dll into c:/windows/system32
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

  • Connect to hiveserver2 with beeline

1. Edit hdfs-site.xml on the Hadoop cluster and add one property to enable WebHDFS:

<property> 
     <name>dfs.webhdfs.enabled</name> 
     <value>true</value> 
</property>

2. Edit core-site.xml on the Hadoop cluster and add two properties to set up the Hadoop proxy user:

<property> 
    <name>hadoop.proxyuser.root.hosts</name> 
    <value>*</value> 
</property> 
<property> 
     <name>hadoop.proxyuser.root.groups</name> 
     <value>*</value> 
</property> 

3. Copy the modified files to the other nodes (not needed on a single-node setup):

[hadoop@hadoop05 hadoop]$ scp -r hdfs-site.xml hadoop02:$PWD
[hadoop@hadoop05 hadoop]$ scp -r core-site.xml hadoop02:$PWD

Configuration notes:
Setting hadoop.proxyuser.hadoop.hosts to * means the proxy user hadoop can access the HDFS cluster from any host; hadoop.proxyuser.hadoop.groups specifies which groups the proxy user may impersonate.
(If the proxy user is root, use hadoop.proxyuser.root.hosts and hadoop.proxyuser.root.groups instead, as in the properties above.)

4. Restart the Hadoop cluster

./stop-dfs.sh
./stop-yarn.sh
./stop-all.sh
./start-all.sh

./zkServer.sh start

./start-hbase.sh


5. Start the hiveserver2 service (in the background)

hiveserver2, or:
nohup hive --service hiveserver2 > /dev/null 2>&1 &

Check with jps; hiveserver2 shows up as a RunJar process.

6. Connect with beeline

beeline
!connect jdbc:hive2://hadoop001:10000
root/root   (username / password)

or

beeline -u jdbc:hive2://hadoop001:10000 -n root -p root --color=true --showHeader=false 
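
The same hiveserver2 endpoint can also be reached from Java over JDBC. A minimal sketch, assuming the org.apache.hive:hive-jdbc driver (plus hadoop-common) is on the classpath; it uses the same URL and root/root credentials as the beeline examples above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class HiveJdbcCheck {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://hadoop001:10000", "root", "root");
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SHOW DATABASES")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}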

pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>cn.zws.bigdata</groupId>
    <artifactId>hadoop_test</artifactId>
    <packaging>pom</packaging>
    <version>1.0-SNAPSHOT</version>
    <modules>
        <module>hdfs_test</module>

    </modules>
    <repositories>
        <repository>
            <id>apache</id>
            <url>https://repo.maven.apache.org/maven2</url>
        </repository>
    </repositories>

    <dependencies>
        <!-- https://mvnrepository.com/artifact/junit/junit -->
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>

        <!-- https://mvnrepository.com/artifact/commons-logging/commons-logging -->
        <dependency>
            <groupId>commons-logging</groupId>
            <artifactId>commons-logging</artifactId>
            <version>1.2</version>
        </dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>3.2.1</version>
        </dependency>

        <!-- hadoop-core is the legacy Hadoop 1.x artifact and has no 3.2.1 release;
             hadoop-common and hadoop-client above already cover it -->

        <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>3.2.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>3.2.1</version>
        </dependency>
        <dependency>
            <groupId>ant</groupId>
            <artifactId>ant</artifactId>
            <version>1.6.5</version>
        </dependency>
        <dependency>
            <groupId>org.junit.jupiter</groupId>
            <artifactId>junit-jupiter-api</artifactId>
            <version>RELEASE</version>
            <scope>compile</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>2.2.6</version>
        </dependency>
    </dependencies>
</project>

log4j.properties

log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n