Hive Installation and Testing (Part 3)

When developing Hive programs over JDBC, you must first start Hive's remote service interface. HiveServer itself has a number of problems (for example, security and concurrency); to address them, Hive 0.11.0 introduced a brand-new service, HiveServer2, which resolves the security and concurrency issues of HiveServer. The startup script lives in ${HIVE_HOME}/bin/hiveserver2, and you can start the HiveServer2 service like this:
nohup ./hive --service hiveserver2 >/dev/null 2>&1 &
The service listens on port 10000 by default.
Before starting it, edit hive-site.xml and configure impersonation to avoid HDFS permission problems. With impersonation enabled, HiveServer2 executes statements as the submitting user; if the property is set to false, statements are executed as the admin user that started the HiveServer2 daemon:
<property>  
  <name>hive.server2.enable.doAs</name>  
  <value>false</value>  
</property>
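
After HiveServer2 is up, you can optionally verify that it accepts connections with Beeline before writing any Java code. The host and port below match the JDBC URL used later in this article; the -n user name is only a placeholder for whichever account you run the cluster as:

./beeline -u jdbc:hive2://192.168.1.93:10000/default -n hadoop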

1. Add the jar dependencies to pom.xml

<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-jdbc</artifactId>
    <version>1.2.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>2.6.0</version>
</dependency>
<dependency>
    <groupId>jdk.tools</groupId>
    <artifactId>jdk.tools</artifactId>
    <version>1.7</version>
    <scope>system</scope>
    <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
</dependency>

2. Code example

package org.hive.test;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class HiveTest {

	private static String driverName = "org.apache.hive.jdbc.HiveDriver";
	private static String url = "jdbc:hive2://192.168.1.93:10000/default";
	private static String username = "";
	private static String password = "";
	private static Connection conn = null;
	private static Statement stmt = null;
	private static String sql = "";
	private static ResultSet res = null;
	static {
		try {
			Class.forName(driverName);
			conn = DriverManager.getConnection(url, username, password);
			stmt = conn.createStatement();
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	public static void main(String[] args) throws Exception {
		dropTable("hivetest");
		createTable("hivetest");
		showTables("hivetest");
		describeTables("hivetest");
		insert("hivetest", new String[]{"10000","tom","23"});
		insert("hivetest", new String[]{"10001","zhangshan","80"});
		insert("hivetest", new String[]{"10002","lisi","30"});
		insert("hivetest", new String[]{"10003","lucy","40"});
		selectData("hivetest");
		dropTable("hivetest");
	}

	// Query data
	public static void selectData(String tableName) throws SQLException {
		sql = "select * from " + tableName;
		res = stmt.executeQuery(sql);
		while (res.next()) {
			System.out.println(res.getInt(1) + "\t" + res.getString(2));
		}
	}

	// Insert data
	public static void insert(String tableName, String[] datas) throws SQLException {
		sql = "insert into table " + tableName + " values ('" + datas[0] + "','" + datas[1] + "'," + Integer.valueOf(datas[2]) + ")";
		stmt.execute(sql);
	}

	// Describe the table structure
	public static void describeTables(String tableName) throws SQLException {
		sql = "describe " + tableName;
		res = stmt.executeQuery(sql);
		while (res.next()) {
			System.out.println(res.getString(1) + "\t" + res.getString(2));
		}
	}

	// Show tables
	public static void showTables(String tableName) throws SQLException {
		sql = "show tables '" + tableName + "'";
		res = stmt.executeQuery(sql);
		if (res.next()) {
			System.out.println(res.getString(1));
		}
	}

	// Create a table
	public static void createTable(String tableName) throws SQLException {
		sql = "create table " + tableName + " (id string, name string,age int)  row format delimited fields terminated by '\t'";
		stmt.execute(sql);
	}

	// Drop a table
	public static String dropTable(String tableName) throws SQLException {
		// Name of the table to drop
		sql = "drop table " + tableName;
		stmt.execute(sql);
		return tableName;
	}
}
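
One thing worth noting: the example above never closes the ResultSet, Statement, or Connection. In a longer-running application you would normally release them, for instance with try-with-resources. Below is a minimal sketch reusing the same driver, URL, and table as above; the class name is made up for illustration:

package org.hive.test;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class HiveTestClosable {

	public static void main(String[] args) throws Exception {
		Class.forName("org.apache.hive.jdbc.HiveDriver");
		// try-with-resources closes the ResultSet, Statement and Connection automatically.
		try (Connection conn = DriverManager.getConnection("jdbc:hive2://192.168.1.93:10000/default", "", "");
				Statement stmt = conn.createStatement();
				ResultSet res = stmt.executeQuery("select * from hivetest")) {
			while (res.next()) {
				System.out.println(res.getString(1) + "\t" + res.getString(2) + "\t" + res.getInt(3));
			}
		}
	}
}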

3. Execution results
hivetest
id	string
name	string
age	int
10000	tom
10001	zhangshan
10002	lisi
10003	lucy

4. Execution log

OK
OK
OK
OK
Query ID = hadoop_20151010005313_d37d220a-f3c7-4b27-9a10-0f9b0c571ddf
Total jobs = 3
Launching Job 1 out of 3
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1444449359396_0001, Tracking URL = http://study-91:8088/proxy/application_1444449359396_0001/
Kill Command = /home/hadoop/hadoop-2.6.0/bin/hadoop job  -kill job_1444449359396_0001
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
2015-10-10 00:56:46,331 Stage-1 map = 0%,  reduce = 0%
2015-10-10 00:57:33,311 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 2.42 sec
MapReduce Total cumulative CPU time: 2 seconds 420 msec
Ended Job = job_1444449359396_0001
Stage-4 is selected by condition resolver.
Stage-3 is filtered out by condition resolver.
Stage-5 is filtered out by condition resolver.
Moving data to: hdfs://mycluster/user/hive/warehouse/hivetest/.hive-staging_hive_2015-10-10_00-53-13_233_9101829070390139253-1/-ext-10000
Loading data to table default.hivetest
Table default.hivetest stats: [numFiles=1, numRows=1, totalSize=13, rawDataSize=12]
MapReduce Jobs Launched: 
Stage-Stage-1: Map: 1   Cumulative CPU: 2.78 sec   HDFS Read: 3745 HDFS Write: 85 SUCCESS
Total MapReduce CPU Time Spent: 2 seconds 780 msec
OK
Query ID = hadoop_20151010005820_a530d1a5-6652-4a42-b4a5-1f5f80b36bb0
Total jobs = 3
Launching Job 1 out of 3
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1444449359396_0002, Tracking URL = http://study-91:8088/proxy/application_1444449359396_0002/
Kill Command = /home/hadoop/hadoop-2.6.0/bin/hadoop job  -kill job_1444449359396_0002
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
2015-10-10 01:00:10,230 Stage-1 map = 0%,  reduce = 0%
2015-10-10 01:00:58,962 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 1.91 sec
MapReduce Total cumulative CPU time: 1 seconds 910 msec
Ended Job = job_1444449359396_0002
Stage-4 is selected by condition resolver.
Stage-3 is filtered out by condition resolver.
Stage-5 is filtered out by condition resolver.
Moving data to: hdfs://mycluster/user/hive/warehouse/hivetest/.hive-staging_hive_2015-10-10_00-58-20_558_924119467925405795-1/-ext-10000
Loading data to table default.hivetest
Table default.hivetest stats: [numFiles=2, numRows=2, totalSize=32, rawDataSize=30]
MapReduce Jobs Launched: 
Stage-Stage-1: Map: 1   Cumulative CPU: 2.15 sec   HDFS Read: 3838 HDFS Write: 91 SUCCESS
Total MapReduce CPU Time Spent: 2 seconds 150 msec
OK
Query ID = hadoop_20151010010104_7c9148c1-4f4d-4bb4-99ab-3898e1b9b054
Total jobs = 3
Launching Job 1 out of 3
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1444449359396_0003, Tracking URL = http://study-91:8088/proxy/application_1444449359396_0003/
Kill Command = /home/hadoop/hadoop-2.6.0/bin/hadoop job  -kill job_1444449359396_0003
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
2015-10-10 01:02:24,571 Stage-1 map = 0%,  reduce = 0%
2015-10-10 01:03:07,997 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 3.13 sec
MapReduce Total cumulative CPU time: 3 seconds 130 msec
Ended Job = job_1444449359396_0003
Stage-4 is selected by condition resolver.
Stage-3 is filtered out by condition resolver.
Stage-5 is filtered out by condition resolver.
Moving data to: hdfs://mycluster/user/hive/warehouse/hivetest/.hive-staging_hive_2015-10-10_01-01-04_703_7605509708803365599-1/-ext-10000
Loading data to table default.hivetest
Table default.hivetest stats: [numFiles=3, numRows=3, totalSize=46, rawDataSize=43]
MapReduce Jobs Launched: 
Stage-Stage-1: Map: 1   Cumulative CPU: 3.13 sec   HDFS Read: 3837 HDFS Write: 86 SUCCESS
Total MapReduce CPU Time Spent: 3 seconds 130 msec
OK
Query ID = hadoop_20151010010313_72951279-4cdf-48d3-91ca-2fd007d3aa69
Total jobs = 3
Launching Job 1 out of 3
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1444449359396_0004, Tracking URL = http://study-91:8088/proxy/application_1444449359396_0004/
Kill Command = /home/hadoop/hadoop-2.6.0/bin/hadoop job  -kill job_1444449359396_0004
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
2015-10-10 01:07:31,319 Stage-1 map = 0%,  reduce = 0%
2015-10-10 01:07:44,002 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 2.21 sec
MapReduce Total cumulative CPU time: 2 seconds 210 msec
Ended Job = job_1444449359396_0004
Stage-4 is selected by condition resolver.
Stage-3 is filtered out by condition resolver.
Stage-5 is filtered out by condition resolver.
Moving data to: hdfs://mycluster/user/hive/warehouse/hivetest/.hive-staging_hive_2015-10-10_01-03-13_054_2517068962568084403-1/-ext-10000
Loading data to table default.hivetest
Table default.hivetest stats: [numFiles=4, numRows=4, totalSize=60, rawDataSize=56]
MapReduce Jobs Launched: 
Stage-Stage-1: Map: 1   Cumulative CPU: 2.55 sec   HDFS Read: 3837 HDFS Write: 86 SUCCESS
Total MapReduce CPU Time Spent: 2 seconds 550 msec
OK
OK
Hive does not support DELETE and UPDATE operations by default (they require an ACID-capable transaction manager):

hive> delete from hivetest;
FAILED: SemanticException [Error 10294]: Attempt to do update or delete using transaction manager that does not support these operations.
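
As the error says, UPDATE and DELETE only work with a transaction manager that supports them. Hive 0.14+ allows these operations on transactional ORC tables. The sketch below, run over the same JDBC connection as the earlier example, shows roughly what that setup might look like; the table name and bucket count are made up, the metastore side also needs the compactor enabled, and this was not part of the original test, so treat it only as a starting point:

package org.hive.test;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class HiveAcidSketch {

	public static void main(String[] args) throws Exception {
		Class.forName("org.apache.hive.jdbc.HiveDriver");
		try (Connection conn = DriverManager.getConnection("jdbc:hive2://192.168.1.93:10000/default", "", "");
				Statement stmt = conn.createStatement()) {
			// Session-level settings commonly required for ACID operations (assumption: not restricted on this server).
			stmt.execute("set hive.support.concurrency=true");
			stmt.execute("set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
			// Transactional tables must be bucketed, stored as ORC, and marked transactional.
			stmt.execute("create table hivetest_acid (id string, name string, age int) "
					+ "clustered by (id) into 2 buckets "
					+ "stored as orc tblproperties ('transactional'='true')");
			stmt.execute("insert into table hivetest_acid values ('10000','tom',23)");
			// With the configuration above, this DELETE should be accepted instead of failing.
			stmt.execute("delete from hivetest_acid where id = '10000'");
		}
	}
}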