Java IDE configuration---Maven package source setup---Hadoop environment setup (followed by writing data to HBase)

 

1、Java: specify the JDK path, and set the Maven package mirror source in Maven's settings

1.1 Java: point the jdk.tools dependency at the local JDK (add to the project's pom.xml)

<dependency>
    <groupId>jdk.tools</groupId>
    <artifactId>jdk.tools</artifactId>
    <version>1.8</version>
    <scope>system</scope>
    <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
</dependency>

1.2 Maven package mirror source: add the following to Maven's settings.xml

<mirror>
    <id>alimaven</id>
    <mirrorOf>central</mirrorOf>
    <name>aliyun maven</name>
    <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
</mirror>
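
This <mirror> element belongs inside the <mirrors> section of settings.xml (typically ~/.m2/settings.xml, or conf/settings.xml under the Maven installation). A minimal skeleton for context:

<settings>
    <mirrors>
        <mirror>
            <id>alimaven</id>
            <mirrorOf>central</mirrorOf>
            <name>aliyun maven</name>
            <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
        </mirror>
    </mirrors>
</settings>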

 

 

2、Hadoop environment setup

Preparation
Install a Linux virtual machine or physical Linux box running CentOS 7.x; the machine needs at least 8 GB of RAM and 60 GB of disk.
Alternatively, rent a Tencent Cloud CVM instance (https://cloud.tencent.com/product/cvm) with the same minimum spec (8 GB RAM, 60 GB disk), which costs roughly 10-20 CNY per day.

Verify network connectivity in both directions:
Windows -> Linux reachable
Linux -> Windows reachable

Step 1: Configure environment variables
cd /opt/
tar -zxvf datax20190410.tar.gz
vi /etc/profile    # append the lines below, then run: source /etc/profile

####language
export JAVA_HOME=/opt/jdk1.8.0_121
export SCALA_HOME=/opt/scala-2.11.7

####hadoop
export HADOOP_HOME=/opt/hadoop-2.7.3
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop

####hive
export HIVE_HOME=/opt/apache-hive-2.3.4-bin
export HCAT_HOME=$HIVE_HOME/hcatalog
export HIVE_CONF=$HIVE_HOME/conf
export hive_dependency=$HIVE_HOME/conf:$HIVE_HOME/lib/*:$HIVE_HOME/hcatalog/share/hcatalog/hive-hcatalog-core-2.3.4.jar

####hbase
export HBASE_HOME=/opt/hbase-1.4.8
export HBASE_CONF_DIR=$HBASE_HOME/conf

####spark
export SPARK_HOME=/opt/spark-2.3.2-bin-hadoop2.7

export HADOOP_USER_NAME=root
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HIVE_HOME/bin:$HBASE_HOME/bin:$SCALA_HOME/bin:$SPARK_HOME/bin
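
After running source /etc/profile, a quick sanity check that every tool is on the PATH (each command should print the version matching its directory above):

java -version
scala -version
hadoop version
hive --version
hbase version
spark-submit --version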


Step 2: Set the hostname
hostnamectl set-hostname datax
Verify that the name resolves and pings:
ping datax
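
If the ping fails, the hostname is not resolving; map it to the machine's IP in /etc/hosts (the address below is a placeholder -- use your own):

echo "192.168.1.100  datax" >> /etc/hosts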


Step 3: Set up passwordless SSH login
ssh-keygen -t rsa
ssh-copy-id datax
Verify that you can log in without being asked for a password:
ssh datax

Step 4: Format the Hadoop filesystem (run only once, analogous to formatting a disk)
# start the journalnode process
/opt/hadoop-2.7.3/sbin/hadoop-daemon.sh start journalnode &
# start the zookeeper service
/opt/zookeeper-3.4.6/bin/zkServer.sh start &
# clear stale leftover data: remove the old /hbase znode from the zookeeper shell
/opt/zookeeper-3.4.6/bin/zkCli.sh
rmr /hbase
# format the HDFS namenode (wipes all HDFS metadata -- fresh cluster only)
hdfs namenode -format
# start the MapReduce job history server
/opt/hadoop-2.7.3/sbin/mr-jobhistory-daemon.sh start historyserver &
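
At this point jps should already show the helper daemons started above:

jps
# expected: JournalNode, QuorumPeerMain, JobHistoryServer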

Step 5: Start the Hadoop daemons
/opt/hadoop-2.7.3/sbin/start-all.sh &
Check http://IP:50070
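
Besides the web UI, a quick HDFS smoke test from the shell:

hadoop fs -mkdir -p /tmp/smoke
hadoop fs -ls /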

Step 6: Start the HBase daemons
/opt/hbase-1.4.8/bin/start-hbase.sh &
Check http://IP:16010
jps
HMaster
HRegionServer
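
A one-liner to confirm the cluster is serving from the HBase side:

echo "status" | hbase shell
# output should end with something like: 1 active master, 0 backup masters, 1 servers, ...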

Step 7: Start the Hive services
hive --service metastore &
hive --service hiveserver2 &
Check:
hive
show tables;
Then create a table and insert a row -- see the sketch below.
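
A minimal sketch of that check inside the hive CLI (the table name demo_check is illustrative):

CREATE TABLE demo_check (id INT, name STRING);
INSERT INTO TABLE demo_check VALUES (1, 'ricky');
SELECT * FROM demo_check;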

Step 8: Start the Spark daemons
# create a storage path for spark on HDFS
hadoop fs -mkdir /spark
# start the services
/opt/spark-2.3.2-bin-hadoop2.7/sbin/start-all.sh &
Check http://IP:8090
jps
Master
Worker

spark-shell
spark-sql
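
A quick smoke test in each shell (the expressions are illustrative):

In spark-shell:  spark.range(100).count()   // should print res0: Long = 100
In spark-sql:    show databases;            -- should list at least "default"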

Step 9: jps should now show the following 13 processes (the two RunJar entries are the Hive metastore and HiveServer2)
JournalNode
JobHistoryServer
QuorumPeerMain

NameNode
DataNode
ResourceManager
NodeManager

HMaster
HRegionServer

RunJar
RunJar

Master
Worker

3、Java program for writing data to HBase

package com.hp.hbase.tutorials.crud;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
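// ArrayList and List are used by the batchPut sketch added further down
import java.util.ArrayList;
import java.util.List;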
import java.util.Map;
import java.util.NavigableMap;

/**
 * http://hbase.apache.org/1.2/apidocs/index.html
 *
 * @author fy
 */
public class HBaseCrudDemo {
    private final Logger logger = LoggerFactory.getLogger(this.getClass());

    public static void main(String[] args) {

        new HBaseCrudDemo().testCrud();
    }

    public void testCrud() {
        Connection connection = null;
        try {
            connection = HBaseConnectionUtils.getConnection();
            TableName tableName = TableName.valueOf("demo");

            // create the HBase table with two column families
            createTable(connection, tableName, "cf1", "cf2");

            //put
            //String rowKey = "u12000";
            //put(connection, tableName, rowKey, "cf1", "name", "ricky");
            //put(connection, tableName, rowKey, "cf1", "password", "root");
            //put(connection, tableName, rowKey, "cf1", "age", "28");

            // write 30 rows; re-putting the same cell does not create a new row,
            // it only adds another version of the cell (with a newer timestamp)
            for (long iCount = 0; iCount < 30; iCount++) {
                String rowKey = String.valueOf(iCount);
                put(connection, tableName, rowKey, "cf1", "name", "ricky");
                put(connection, tableName, rowKey, "cf1", "password", "root");
                put(connection, tableName, rowKey, "cf1", "age", "28");
                // cf2:hello is written twice -> two versions; a plain get or scan
                // returns only the latest value, (iCount + 1)
                put(connection, tableName, rowKey, "cf2", "hello", iCount + "");
                put(connection, tableName, rowKey, "cf2", "hello", (iCount + 1) + "");
                put(connection, tableName, rowKey, "cf2", "hi", iCount + "");
                put(connection, tableName, rowKey, "cf2", "good", iCount + "nice");
            }

            //get
            //get(connection, tableName, rowKey);

            //scan
            //scan(connection, tableName);

            //delete
            //deleteTable(connection, tableName);

        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // guard against NPE when getConnection() itself failed
            if (connection != null) {
                try {
                    connection.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }

    public void scan(Connection connection, TableName tableName) throws IOException {
        Table table = null;
        try {
            table = connection.getTable(tableName);
            ResultScanner rs = null;
            try {
                //Scan scan = new Scan(Bytes.toBytes("u120000"), Bytes.toBytes("u200000"));
                rs = table.getScanner(new Scan());
                for(Result r:rs){
                    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> navigableMap = r.getMap();
                    for(Map.Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> entry : navigableMap.entrySet()){
                        logger.info("row:{} key:{}", Bytes.toString(r.getRow()), Bytes.toString(entry.getKey()));
                        NavigableMap<byte[], NavigableMap<Long, byte[]>> map =entry.getValue();
                        for(Map.Entry<byte[], NavigableMap<Long, byte[]>> en:map.entrySet()){
                            System.out.print(Bytes.toString(en.getKey())+"##");
                            NavigableMap<Long, byte[]> ma = en.getValue();
                            for(Map.Entry<Long, byte[]>e: ma.entrySet()){
                                System.out.print(e.getKey()+"###");
                                System.out.println(Bytes.toString(e.getValue()));
                            }
                        }
                    }
                }
            } finally {
                if(rs!=null) {
                    rs.close();
                }
            }
        } finally {
            if(table!=null) {
                table.close();
            }
        }
    }

    // fetch a single row from the table by its row key
    public void get(Connection connection,TableName tableName,String rowKey) throws IOException {
        Table table = null;
        try {
            table = connection.getTable(tableName);
            Get get = new Get(Bytes.toBytes(rowKey));
            Result result = table.get(get);
            NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> navigableMap = result.getMap();
            for(Map.Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> entry : navigableMap.entrySet()){

                logger.info("columnFamily:{}", Bytes.toString(entry.getKey()));
                NavigableMap<byte[], NavigableMap<Long, byte[]>> map =entry.getValue();
                for(Map.Entry<byte[], NavigableMap<Long, byte[]>> en:map.entrySet()){
                    System.out.print(Bytes.toString(en.getKey())+"##");
                    NavigableMap<Long, byte[]> nm = en.getValue();
                    for(Map.Entry<Long, byte[]> me : nm.entrySet()){
                        logger.info("column key:{}, value:{}", me.getKey(), me.getValue());
                    }
                }
            }
        } finally {
            if(table!=null) {
                table.close();
            }
        }
    }

    /** For batch inserts, use Table.put(List<Put>) in one call -- see the batchPut sketch below. **/
    public void put(Connection connection, TableName tableName,
                    String rowKey, String columnFamily, String column, String data) throws IOException {

        Table table = null;
        try {
            table = connection.getTable(tableName);
            Put put = new Put(Bytes.toBytes(rowKey));
            put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(column), Bytes.toBytes(data));
            table.put(put);
        } finally {
            if(table!=null) {
                table.close();
            }
        }
    }
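
    /**
     * Batch-insert sketch referenced above: Table.put(List<Put>) ships the whole
     * batch in a single client call instead of one RPC per cell, as put() does.
     * This helper is illustrative (not part of the original post); it writes one
     * rowKey -> value pair per Put under the given family and qualifier.
     */
    public void batchPut(Connection connection, TableName tableName,
                         String columnFamily, String column, Map<String, String> rows) throws IOException {
        List<Put> puts = new ArrayList<>(rows.size());
        for (Map.Entry<String, String> row : rows.entrySet()) {
            Put put = new Put(Bytes.toBytes(row.getKey()));
            put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(column), Bytes.toBytes(row.getValue()));
            puts.add(put);
        }
        Table table = null;
        try {
            table = connection.getTable(tableName);
            table.put(puts); // one batched call for all rows
        } finally {
            if (table != null) {
                table.close();
            }
        }
    }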

    public void createTable(Connection connection, TableName tableName, String... columnFamilies) throws IOException {
        Admin admin = null;
        try {
            admin = connection.getAdmin();
            if(admin.tableExists(tableName)){
                logger.warn("table:{} exists!", tableName.getName());
            }else{
                HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
                for(String columnFamily : columnFamilies) {
                    tableDescriptor.addFamily(new HColumnDescriptor(columnFamily));
                }
                admin.createTable(tableDescriptor);
                logger.info("create table:{} success!", tableName.getName());
            }
        } finally {
            if(admin!=null) {
                admin.close();
            }
        }
    }

    // drop the whole table (schema and data), not just its rows
    public void deleteTable(Connection connection, TableName tableName) throws IOException {
        Admin admin = null;
        try {
            admin = connection.getAdmin();
            if (admin.tableExists(tableName)) {
                // a table must be disabled before it can be deleted
                admin.disableTable(tableName);
                admin.deleteTable(tableName);
            }
        } finally {
            if(admin!=null) {
                admin.close();
            }
        }
    }

    public void disableTable(Connection connection, TableName tableName) throws IOException {
        Admin admin = null;
        try {
            admin = connection.getAdmin();
            if(admin.tableExists(tableName)){
                admin.disableTable(tableName);
            }
        } finally {
            if(admin!=null) {
                admin.close();
            }
        }
    }
}
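
The demo obtains its connection from HBaseConnectionUtils, which is not shown above. A minimal sketch, assuming ZooKeeper runs on the datax host at the default client port 2181 (both values are assumptions -- point them at your own quorum):

package com.hp.hbase.tutorials.crud;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

import java.io.IOException;

public class HBaseConnectionUtils {

    private static final Configuration CONF = HBaseConfiguration.create();

    static {
        // assumed values -- replace with your cluster's ZooKeeper quorum
        CONF.set("hbase.zookeeper.quorum", "datax");
        CONF.set("hbase.zookeeper.property.clientPort", "2181");
    }

    public static Connection getConnection() throws IOException {
        // a Connection is heavyweight and thread-safe; the caller closes it
        return ConnectionFactory.createConnection(CONF);
    }
}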

4、HBase statements
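
A minimal sketch of common HBase shell statements, run against the demo table created by the Java program above (the table name demo and families cf1/cf2 come from that example; the row key row100 is illustrative):

# enter the shell
hbase shell

# list tables and inspect the demo table
list
describe 'demo'

# write one cell and read the row back
put 'demo', 'row100', 'cf1:name', 'ricky'
get 'demo', 'row100'

# scan the first five rows
scan 'demo', {LIMIT => 5}

# count the rows
count 'demo'

# drop the table (it must be disabled first)
disable 'demo'
drop 'demo'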