HiveServer2 Java Thrift API

A Hive JDBC connection only gives you the final result of a statement.

If you want to track the execution state while a statement is running, and be able to cancel it, you need to talk to HiveServer2 directly through its Thrift API.

The execution states are defined in the TOperationState enum:
  INITIALIZED_STATE(0),
  RUNNING_STATE(1),
  FINISHED_STATE(2),
  CANCELED_STATE(3),
  CLOSED_STATE(4),
  ERROR_STATE(5),
  UKNOWN_STATE(6),
  PENDING_STATE(7);
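Of these, FINISHED_STATE, CANCELED_STATE, CLOSED_STATE and ERROR_STATE are terminal; the others mean the statement may still make progress. A minimal sketch built on that reading (the helper itself is my own, not part of the Hive API):

static boolean isTerminal(TOperationState state) {
	// treat finished / cancelled / closed / error as "no further progress possible"
	switch (state) {
	case FINISHED_STATE:
	case CANCELED_STATE:
	case CLOSED_STATE:
	case ERROR_STATE:
		return true;
	default:
		return false;
	}
}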

public class QueryInstance {
	private static String host = "127.0.0.1";
	private static int port = 10000;
	private static String username = "hive";
	private static String password = "hive";
	private static TTransport transport;
	private static TCLIService.Client client;
	private TOperationState tOperationState = null;
	private Map<String, Object> resultMap = new HashMap<String, Object>();

	static {
		try {
			transport = QueryTool.getSocketInstance(host, port, username,
					password);
			client = new TCLIService.Client(new TBinaryProtocol(transport));
			transport.open();
		} catch (TTransportException e) {
			Log.info("hive connection error!");
		}
	}

	public TOperationHandle submitQuery(String command) throws Exception {

		TOperationHandle tOperationHandle;
		TExecuteStatementResp resp = null;

		TSessionHandle sessHandle = QueryTool.openSession(client)
				.getSessionHandle();

		TExecuteStatementReq execReq = new TExecuteStatementReq(sessHandle,
				command);
		// run the statement asynchronously
		execReq.setRunAsync(true);
		// submit the SQL for execution
		resp = client.ExecuteStatement(execReq);

		tOperationHandle = resp.getOperationHandle();// handle used to track the running statement
		
		if (tOperationHandle == null) {
			// if the statement fails, the error message is carried in resp.getStatus()
			throw new Exception(resp.getStatus().getErrorMessage());
		}
		return tOperationHandle;
	}

	public String getQueryLog(TOperationHandle tOperationHandle)
			throws Exception {
		// Sketch, assuming Hive 1.1+ with hive.server2.logging.operation.enabled=true:
		// operation logs are read through FetchResults with fetchType = 1.
		TFetchResultsReq logReq = new TFetchResultsReq(tOperationHandle,
				TFetchOrientation.FETCH_NEXT, 1000);
		logReq.setFetchType((short) 1);
		TFetchResultsResp logResp = client.FetchResults(logReq);
		List<String> lines = logResp.getResults().getColumns().get(0)
				.getStringVal().getValues();
		return String.join("\n", lines);
	}
	public TOperationState getQueryHandleStatus(
			TOperationHandle tOperationHandle) throws Exception {

		if (tOperationHandle != null) {
			TGetOperationStatusReq statusReq = new TGetOperationStatusReq(
					tOperationHandle);
			TGetOperationStatusResp statusResp = client
					.GetOperationStatus(statusReq);
			
			tOperationState = statusResp.getOperationState();
			
		}
		return tOperationState;
	}

	public List<String> getColumns(TOperationHandle tOperationHandle)
			throws Throwable {
		TGetResultSetMetadataResp metadataResp;
		TGetResultSetMetadataReq metadataReq;
		TTableSchema tableSchema;
		metadataReq = new TGetResultSetMetadataReq(tOperationHandle);
		metadataResp = client.GetResultSetMetadata(metadataReq);
		List<TColumnDesc> columnDescs;
		List<String> columns = null;
		tableSchema = metadataResp.getSchema();
		if (tableSchema != null) {
			columnDescs = tableSchema.getColumns();
			columns = new ArrayList<String>();
			for (TColumnDesc tColumnDesc : columnDescs) {
				columns.add(tColumnDesc.getColumnName());
			}
		}
		return columns;
	}

	/**
	 * Fetch the result set of a SELECT-style statement.
	 * Results come back column-oriented: one TColumn per result column.
	 */

	public List<Object> getResults(TOperationHandle tOperationHandle) throws Throwable {
		TFetchResultsReq fetchReq = new TFetchResultsReq();
		fetchReq.setOperationHandle(tOperationHandle);
		fetchReq.setMaxRows(1000); // fetch at most 1000 rows
		TFetchResultsResp re = client.FetchResults(fetchReq);
		List<TColumn> list = re.getResults().getColumns();
		List<Object> list_row = new ArrayList<Object>();
		// each TColumn carries every value of one column; pick the list that matches its type
		for (TColumn field : list) {
			if (field.isSetStringVal()) {
				list_row.add(field.getStringVal().getValues());
			} else if (field.isSetDoubleVal()) {
				list_row.add(field.getDoubleVal().getValues());
			} else if (field.isSetI16Val()) {
				list_row.add(field.getI16Val().getValues());
			} else if (field.isSetI32Val()) {
				list_row.add(field.getI32Val().getValues());
			} else if (field.isSetI64Val()) {
				list_row.add(field.getI64Val().getValues());
			} else if (field.isSetBoolVal()) {
				list_row.add(field.getBoolVal().getValues());
			} else if (field.isSetByteVal()) {
				list_row.add(field.getByteVal().getValues());
			}
		}		
		for (Object obj : list_row) {
			System.out.println(obj);
		}
		return list_row;
	}

	public void cancelQuery(TOperationHandle tOperationHandle) throws Throwable {
		// only attempt to cancel if the last observed state is not FINISHED_STATE
		if (tOperationState != TOperationState.FINISHED_STATE) {
			TCancelOperationReq cancelOperationReq = new TCancelOperationReq();
			cancelOperationReq.setOperationHandle(tOperationHandle);
			client.CancelOperation(cancelOperationReq);
		}
	}
}
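QueryInstance never releases the operation or the session it opens. A minimal cleanup sketch of two methods one could add to the class (TCloseOperationReq and TCloseSessionReq belong to the same TCLIService interface; note that submitQuery would have to expose its TSessionHandle for the second method to be usable):

	public void closeOperation(TOperationHandle tOperationHandle) throws TException {
		// release server-side resources held by a finished or cancelled operation
		TCloseOperationReq closeReq = new TCloseOperationReq(tOperationHandle);
		client.CloseOperation(closeReq);
	}

	public void closeSession(TSessionHandle sessionHandle) throws TException {
		// close the session obtained from QueryTool.openSession()
		TCloseSessionReq closeReq = new TCloseSessionReq(sessionHandle);
		client.CloseSession(closeReq);
	}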

 

public class QueryTool {

	public static TTransport getSocketInstance(String host, int port,
			String username, String password) throws TTransportException {
		// third argument is the login timeout passed to the underlying socket
		TTransport transport = HiveAuthFactory.getSocketTransport(host, port, 99999);
		try {
			// wrap the raw socket in a SASL PLAIN transport (username/password auth)
			transport = PlainSaslHelper.getPlainTransport(username, password, transport);
		} catch (SaslException e) {
			e.printStackTrace();
		}
		return transport;
	}

	public static TOpenSessionResp openSession(TCLIService.Client client) throws TException {
		TOpenSessionReq openSessionReq = new TOpenSessionReq();
		return client.OpenSession(openSessionReq);
	}
}
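TOpenSessionReq also accepts an optional configuration map, which is how per-session settings can be passed at open time. A minimal sketch of an overload, with the example key and value as illustrative assumptions rather than required settings:

	public static TOpenSessionResp openSession(TCLIService.Client client,
			Map<String, String> sessionConf) throws TException {
		TOpenSessionReq openSessionReq = new TOpenSessionReq();
		// e.g. sessionConf = {"use:database" -> "default"} to pick the initial database
		openSessionReq.setConfiguration(sessionConf);
		return client.OpenSession(openSessionReq);
	}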

 

public class Test {
	public static void main(String[] args) {
		try {

			QueryInstance base = new QueryInstance();

			TOperationHandle handle = base
					.submitQuery("show partitions  nubiabase.event_base");
			base.getResults(handle);
		} catch (Throwable e) {			
			e.printStackTrace();
		}

	}
}
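Because submitQuery runs the statement asynchronously, the main method above may call getResults before the query has actually finished. A usage sketch that polls the operation state and cancels after a timeout (the 60-second limit and sleep interval are illustrative choices):

	public static void main(String[] args) throws Throwable {
		QueryInstance base = new QueryInstance();
		TOperationHandle handle = base.submitQuery("show partitions nubiabase.event_base");

		long deadline = System.currentTimeMillis() + 60000; // give up after 60s
		TOperationState state = base.getQueryHandleStatus(handle);
		while (state == TOperationState.INITIALIZED_STATE
				|| state == TOperationState.PENDING_STATE
				|| state == TOperationState.RUNNING_STATE) {
			if (System.currentTimeMillis() > deadline) {
				base.cancelQuery(handle); // cancel the still-running statement
				return;
			}
			Thread.sleep(1000);
			state = base.getQueryHandleStatus(handle);
		}

		if (state == TOperationState.FINISHED_STATE) {
			System.out.println(base.getColumns(handle));
			base.getResults(handle);
		}
	}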

 

POM file (pom.xml)

<dependencies>
		<dependency>
			<groupId>jdk.tools</groupId>
			<artifactId>jdk.tools</artifactId>
			<version>1.8</version>
			<scope>system</scope>
			<systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
		</dependency>
		<!-- lombok -->
		<dependency>
			<groupId>org.projectlombok</groupId>
			<artifactId>lombok</artifactId>
			<version>1.12.2</version>
		</dependency>

		<!-- guava -->
		<dependency>
			<groupId>com.google.guava</groupId>
			<artifactId>guava</artifactId>
			<version>18.0</version>
		</dependency>

		<dependency>
			<groupId>org.apache.poi</groupId>
			<artifactId>poi-excelant</artifactId>
			<version>3.10-FINAL</version>
		</dependency>

		<!-- spring data -->
		<dependency>
			<groupId>org.springframework.data</groupId>
			<artifactId>spring-data-hadoop</artifactId>
			<version>2.2.0.RELEASE</version>
		</dependency>

		<dependency>
			<groupId>org.apache.hive</groupId>
			<artifactId>hive-jdbc</artifactId>
			<version>1.1.1</version>
		</dependency>

		<dependency>
			<groupId>org.spark-project.hive</groupId>
			<artifactId>hive-jdbc</artifactId>
			<version>1.2.1.spark</version>
		</dependency>
		<dependency>
			<groupId>org.spark-project.hive</groupId>
			<artifactId>hive-exec</artifactId>
			<version>1.2.1.spark</version>
		</dependency>
		<dependency>
			<groupId>org.apache.thrift</groupId>
			<artifactId>libthrift</artifactId>
			<version>0.9.3</version>
		</dependency>
		 <dependency>
            <groupId>com.google.code.gson</groupId>
            <artifactId>gson</artifactId>
            <version>2.8.0</version>
        </dependency>
         <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>
	</dependencies>

 

Reposted from: https://my.oschina.net/u/2874009/blog/853633
