Operating Sqoop from Java with the Sqoop 2 Client API
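The listing below drives Sqoop 2 from Java through its client API: it creates a generic JDBC link to MySQL and an HDFS link, defines a job that transfers Hive's metastore table TBLS from MySQL into HDFS, starts the job, and prints the submission status and counters. The server URL (http://master:12000/sqoop/), host names, and credentials are specific to the demo cluster; adjust them for your environment.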

package com.db.hadoop.sqoop;

import java.util.List;

import org.apache.sqoop.client.SqoopClient;
import org.apache.sqoop.model.MDriverConfig;
import org.apache.sqoop.model.MFromConfig;
import org.apache.sqoop.model.MJob;
import org.apache.sqoop.model.MLink;
import org.apache.sqoop.model.MLinkConfig;
import org.apache.sqoop.model.MSubmission;
import org.apache.sqoop.model.MToConfig;
import org.apache.sqoop.submission.counter.Counter;
import org.apache.sqoop.submission.counter.CounterGroup;
import org.apache.sqoop.submission.counter.Counters;
import org.apache.sqoop.validation.Status;

public class SqoopApiDemo {

    // Client for the Sqoop 2 server's REST endpoint.
    private static SqoopClient client = new SqoopClient("http://master:12000/sqoop/");

    // Create and save a link to MySQL through the generic JDBC connector.
    public static MLink createJdbc() {
        MLink mLink = client.createLink("generic-jdbc-connector");
        mLink.setName("sqoop-mysql");
        MLinkConfig linkConfig = mLink.getConnectorLinkConfig();
        linkConfig.getStringInput("linkConfig.jdbcDriver").setValue("com.mysql.jdbc.Driver");
        linkConfig.getStringInput("linkConfig.connectionString").setValue("jdbc:mysql://master:3306/hive?useSSL=false");
        linkConfig.getStringInput("linkConfig.username").setValue("hive");
        linkConfig.getStringInput("linkConfig.password").setValue("a");
        Status status = client.saveLink(mLink);
        if (status.canProceed()) {
            return mLink;
        } else {
            throw new RuntimeException("Failed to create the MySQL link");
        }
    }

    // Create and save a link to HDFS.
    public static MLink createHdfs() {
        MLink mLink = client.createLink("hdfs-connector");
        mLink.setName("sqoop-hdfs");
        MLinkConfig linkConfig = mLink.getConnectorLinkConfig();
        linkConfig.getStringInput("linkConfig.uri").setValue("hdfs://master:9000");
        Status status = client.saveLink(mLink);
        if (status.canProceed()) {
            return mLink;
        } else {
            throw new RuntimeException("Failed to create the HDFS link");
        }
    }

    // Print every link registered on the server.
    public static void listLink() {
        List<MLink> mls = client.getLinks();
        for (MLink ml : mls) {
            System.out.println(ml.getName() + ", " + ml.getConnectorName() + ", " + ml.getCreationUser());
        }
    }

    // Create and save a job that transfers from the MySQL link to the HDFS link.
    public static MJob jdbcHdfsJob() {
        MJob job = client.createJob("sqoop-mysql", "sqoop-hdfs");
        job.setName("mysql-hdfs");
        // FROM side: read table TBLS from the hive schema.
        MFromConfig from = job.getFromJobConfig();
        from.getStringInput("fromJobConfig.schemaName").setValue("hive");
        from.getStringInput("fromJobConfig.tableName").setValue("TBLS");
        // TO side: write into a timestamped directory on HDFS.
        MToConfig to = job.getToJobConfig();
        to.getStringInput("toJobConfig.outputDirectory").setValue("/sqoop/" + System.currentTimeMillis());
        // Throttling: 3 extractors on the read side, 2 loaders on the write side.
        MDriverConfig driverConfig = job.getDriverConfig();
        driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(3);
        driverConfig.getIntegerInput("throttlingConfig.numLoaders").setValue(2);
        Status status = client.saveJob(job);
        if (status.canProceed()) {
            return job;
        } else {
            throw new RuntimeException("Failed to create the MySQL-to-HDFS job");
        }
    }

    // Start the job by name, then print its submission status and counters.
    public static void startJob() {
        // The job was saved as "mysql-hdfs" in jdbcHdfsJob() above.
        MSubmission msubmission = client.startJob("mysql-hdfs");
        System.out.println("Job submission status: " + msubmission.getStatus());
        if (msubmission.getStatus().isRunning() && msubmission.getProgress() != -1) {
            System.out.println("Progress: " + String.format("%.2f %%", msubmission.getProgress() * 100));
        }
        System.out.println("Hadoop job id: " + msubmission.getExternalJobId());
        System.out.println("Job persistence id: " + msubmission.getPersistenceId());
        Counters counters = msubmission.getCounters();
        if (counters != null) {
            System.out.println("Counters:");
            for (CounterGroup group : counters) {
                System.out.println("\t" + group.getName());
                for (Counter counter : group) {
                    System.out.println("\t\t" + counter.getName() + ": " + counter.getValue());
                }
            }
        }
    }
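
    // A minimal driver sketch (not in the original listing): register both
    // links, define the job, then submit it. It assumes the Sqoop 2 server at
    // master:12000 is reachable and that the link and job names above are not
    // already taken on the server.
    public static void main(String[] args) {
        createJdbc();   // register the MySQL link
        createHdfs();   // register the HDFS link
        jdbcHdfsJob();  // define the "mysql-hdfs" job
        listLink();     // optional: list the links that were just created
        startJob();     // submit "mysql-hdfs" and report status and counters
    }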

}
