Parameters to configure when submitting a MapReduce job

Submitting to YARN from Linux

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class LinuxToYarn {
	public static void main(String[] args) throws IOException,
			ClassNotFoundException, InterruptedException {
		
		Configuration conf = new Configuration();
		
		Job job = Job.getInstance(conf);
		
		// Locate the job jar via this driver class
		job.setJarByClass(LinuxToYarn.class); 
		
		// The mapper and reducer implementation classes this job will use
		job.setMapperClass(WordCount.class);
		job.setReducerClass(WordcountMapreduce.class);
		
		// Key/value types produced by the job's mapper
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);
		// Key/value types of the job's final (reducer) output
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		
		// Paths of the input dataset to process and of the final results
		FileInputFormat.setInputPaths(job, new Path("/input"));
		FileOutputFormat.setOutputPath(job, new Path("/output"));
		
		// Number of reduce tasks to launch
		job.setNumReduceTasks(2);
		
		// Submit the job and wait for it to finish; true prints progress
		boolean flg = job.waitForCompletion(true);
		
		System.exit(flg ? 0 : -1);
		
	}
}
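To run this version, package the driver and the mapper/reducer classes into a jar and launch it from a node with the Hadoop client configured. A minimal invocation, assuming the jar is named mapreduce.jar (the jar name is an assumption, not from the original):

hadoop jar mapreduce.jar LinuxToYarn

Because hadoop jar puts the cluster's configuration files on the classpath, this version does not need to set fs.defaultFS or yarn.resourcemanager.hostname in code.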

Submitting to YARN from Windows

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class submittedWindowToyarn {
	public static void main(String[] args) throws IOException, 
	ClassNotFoundException, InterruptedException, URISyntaxException {
		
		Configuration conf = new Configuration();
		
		// Set a JVM system property in code so the Job object picks up the user identity used to access HDFS
		System.setProperty("HADOOP_USER_NAME", "root");
		
		// The default file system the job will access
		conf.set("fs.defaultFS","hdfs://hadoop1:9000");
		
		// Where the job is submitted to run: YARN, with the ResourceManager on hadoop2
		conf.set("mapreduce.framework.name", "yarn");
		conf.set("yarn.resourcemanager.hostname", "hadoop2");
		
		// Required when the submission client runs on Windows: enables cross-platform submission
		conf.set("mapreduce.app-submission.cross-platform","true");
		
		Job job = Job.getInstance(conf);
		
		// Local path of the job jar to ship to the cluster
		job.setJar("H:/mapreduce.jar");
		
		// The mapper and reducer implementation classes this job will use
		job.setMapperClass(WordCount.class);
		job.setReducerClass(WordcountMapreduce.class);
		
		// Key/value types produced by the job's mapper
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);
		// Key/value types of the job's final (reducer) output
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		
		// Delete the output path first if it already exists; otherwise the job would fail
		Path path = new Path("/output");
		FileSystem fs = FileSystem.get(new URI("hdfs://hadoop1:9000"), conf, "root");
		if (fs.exists(path)) {
			fs.delete(path, true);
		}
		// Paths of the input dataset to process and of the final results
		FileInputFormat.setInputPaths(job, new Path("/input"));
		FileOutputFormat.setOutputPath(job, new Path("/output"));
		
		// Number of reduce tasks to launch
		job.setNumReduceTasks(2);
		
		// Submit the job and wait for it to finish; true prints progress
		boolean flg = job.waitForCompletion(true);
		
		System.exit(flg ? 0 : -1);
		
	}
}
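Both drivers reference a WordCount mapper and a WordcountMapreduce reducer that this post does not show. Below is a minimal sketch of what they might look like, assuming a standard word-count job; only the class names and the Text/IntWritable output types come from the drivers above, everything else (tokenization, field names) is an assumption. In practice each public class goes in its own .java file.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

// Mapper: with the default TextInputFormat the input key is the byte offset
// (LongWritable) and the value is one line of text; emit (word, 1) per word
public class WordCount extends Mapper<LongWritable, Text, Text, IntWritable> {
	private static final IntWritable ONE = new IntWritable(1);
	private final Text word = new Text();

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		for (String w : value.toString().split("\\s+")) {
			if (!w.isEmpty()) {
				word.set(w);
				context.write(word, ONE);
			}
		}
	}
}

// Reducer: sums all counts emitted for the same word
public class WordcountMapreduce extends Reducer<Text, IntWritable, Text, IntWritable> {
	private final IntWritable result = new IntWritable();

	@Override
	protected void reduce(Text key, Iterable<IntWritable> values, Context context)
			throws IOException, InterruptedException {
		int sum = 0;
		for (IntWritable v : values) {
			sum += v.get();
		}
		result.set(sum);
		context.write(key, result);
	}
}

With job.setNumReduceTasks(2) in the drivers, the output directory will contain two part files, one per reducer, with words partitioned between them by hash.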

 
