A simple Hadoop implementation for counting per-phone-number upload and download traffic

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


public class DataCount {

    public static class DCMapper extends Mapper<LongWritable, Text, Text, DataBean> {

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // key is the byte offset of the current line (a long); value is the
            // line of text; context is the job context used to emit output.
            String line = value.toString();
            // Convert the Hadoop Text object into a plain Java String.
            String[] fields = line.split("\t");
            // Split the line on tab characters.
            String tel = fields[1];
            // Take the field that holds the phone number.
            long up = Long.parseLong(fields[8]);
            // Extract the upload traffic, parsed into a long.
            long down = Long.parseLong(fields[9]);
            // Extract the download traffic, likewise parsed into a long.
            DataBean bean = new DataBean(tel, up, down);
            // Build a bean via the constructor to hold what we want to show:
            // phone number : upload traffic : download traffic.
            context.write(new Text(tel), bean);
            // Emit the phone number as the key and the bean as the value.
        }
        // End of the mapper.
    }
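
    // Illustration only (not in the original): a hypothetical tab-separated
    // input record in the layout this mapper assumes, where field 1 is the
    // phone number, field 8 the upload bytes, and field 9 the download bytes:
    //   1363157985066  13726230503  00-FD-07-A4-72-B8  120.196.100.82  i02.c.aliimg.com  video  24  27  2481  24681
    // For that line the mapper emits the pair
    //   ("13726230503", DataBean("13726230503", 2481, 24681)).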
    public static class DCReducer extends Reducer<Text, DataBean, Text, DataBean> {

        @Override
        protected void reduce(Text key, Iterable<DataBean> values, Context context)
                throws IOException, InterruptedException {
            long up_sum = 0;
            long down_sum = 0;
            // Define two accumulators.
            for (DataBean bean : values) {
                up_sum += bean.getUpPayLoad();
                down_sum += bean.getDownPayLoad();
                // Accumulate the upload and download traffic.
            }
            DataBean bean = new DataBean("", up_sum, down_sum);
            context.write(key, bean);
            // Emit the phone number together with the aggregated bean.
        }
    }
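
    // Optional tweak, not in the original code: since this reduce step is a
    // plain sum (associative and commutative), the same class could also be
    // registered as a combiner in main, e.g.
    //   job.setCombinerClass(DCReducer.class);
    // to pre-aggregate map output and shrink the shuffle.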

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Create a new configuration.
        Job job = Job.getInstance(conf);
        // Jobs must be created through Hadoop's Job factory method.
        job.setJarByClass(DataCount.class);
        // Required so Hadoop can locate the jar that contains this class.
        job.setMapperClass(DCMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(DataBean.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));

        job.setReducerClass(DCReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DataBean.class);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
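
    // Typical invocation once the job is packaged (the jar name and HDFS
    // paths below are placeholders, not from the original):
    //   hadoop jar datacount.jar DataCount /flow/input /flow/output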

    // Below we write the DataBean itself:
    // it defines the fields and methods carried between map and reduce.
    public static class DataBean implements Writable {

        private String tel;
        private long upPayLoad;
        private long downPayLoad;
        private long totalPayLoad;

        public DataBean() {
            // No-arg constructor required by Hadoop's Writable deserialization.
        }

        public DataBean(String tel, long upPayLoad, long downPayLoad) {
            this.tel = tel;
            this.upPayLoad = upPayLoad;
            this.downPayLoad = downPayLoad;
            this.totalPayLoad = upPayLoad + downPayLoad;
        }


        @Override
        public void write(DataOutput out) throws IOException {
            // Serialize the fields; the order must match readFields below.
            out.writeUTF(tel);
            out.writeLong(upPayLoad);
            out.writeLong(downPayLoad);
            out.writeLong(totalPayLoad);
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            // Deserialize the fields in exactly the order they were written.
            this.tel = in.readUTF();
            this.upPayLoad = in.readLong();
            this.downPayLoad = in.readLong();
            this.totalPayLoad = in.readLong();
        }

        @Override
        public String toString() {
            // This string is what lands in the output files after the key.
            return upPayLoad + "\t" + downPayLoad + "\t" + totalPayLoad;
        }


        public String getTel() {
            return tel;
        }

        public void setTel(String tel) {
            this.tel = tel;
        }

        public long getUpPayLoad() {
            return upPayLoad;
        }

        public void setUpPayLoad(long upPayLoad) {
            this.upPayLoad = upPayLoad;
        }

        public long getDownPayLoad() {
            return downPayLoad;
        }

        public void setDownPayLoad(long downPayLoad) {
            this.downPayLoad = downPayLoad;
        }

        public long getTotalPayLoad() {
            return totalPayLoad;
        }

        public void setTotalPayLoad(long totalPayLoad) {
            this.totalPayLoad = totalPayLoad;
        }
    }

}
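
To sanity-check the Writable round trip without a cluster, here is a minimal sketch: it serializes a DataBean into an in-memory buffer and reads it back. It assumes the DataCount.DataBean class above is on the classpath; the class name SerdeCheck and the sample values are invented for illustration.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

public class SerdeCheck {
    public static void main(String[] args) throws Exception {
        // Hypothetical sample values.
        DataCount.DataBean original = new DataCount.DataBean("13726230503", 2481, 24681);

        // Write the bean into an in-memory buffer.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buffer));

        // Read it back into a fresh bean.
        DataCount.DataBean copy = new DataCount.DataBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        // Expect "13726230503", then "2481  24681  27162" via toString().
        System.out.println(copy.getTel());
        System.out.println(copy);
    }
}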
A related example: a cloud-disk upload and download implementation on Hadoop, built mainly on the HDFS and MapReduce frameworks.

Upload side:

1. First, define an upload Mapper class that extends Hadoop's Mapper and implements the map function.

public static class UploadMapper extends Mapper<LongWritable, Text, Text, BytesWritable> {
    private Text filename = new Text();

    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Each input line is the path of a local file to upload.
        String filepath = value.toString();
        File file = new File(filepath);
        filename.set(file.getName());
        byte[] data = new byte[(int) file.length()];
        FileInputStream fis = new FileInputStream(file);
        fis.read(data);
        fis.close();
        context.write(filename, new BytesWritable(data));
    }
}

2. Then, define an upload Reducer class that extends Hadoop's Reducer and implements the reduce function.

public static class UploadReducer extends Reducer<Text, BytesWritable, Text, Text> {
    public void reduce(Text key, Iterable<BytesWritable> values, Context context)
            throws IOException, InterruptedException {
        Configuration conf = context.getConfiguration();
        String hdfsPath = conf.get("hdfsPath");
        String filepath = hdfsPath + "/" + key.toString();
        Path path = new Path(filepath);
        FileSystem fs = path.getFileSystem(conf);
        FSDataOutputStream out = fs.create(path);
        for (BytesWritable value : values) {
            out.write(value.getBytes(), 0, value.getLength());
        }
        out.close();
        context.write(key, new Text("Upload completed!"));
    }
}

3. Finally, define the upload Driver class, which extends Hadoop's Configured class, implements the Tool interface, and provides the run function.

public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    Job job = Job.getInstance(conf, "UploadFile");
    job.setJarByClass(UploadFile.class);
    job.setMapperClass(UploadMapper.class);
    job.setReducerClass(UploadReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(BytesWritable.class);
    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    return job.waitForCompletion(true) ? 0 : 1;
}

Download side:

1. First, define a download Mapper class that extends Hadoop's Mapper and implements the map function.

public static class DownloadMapper extends Mapper<Text, BytesWritable, Text, Text> {
    public void map(Text key, BytesWritable value, Context context)
            throws IOException, InterruptedException {
        Configuration conf = context.getConfiguration();
        String localPath = conf.get("localPath");
        String filepath = localPath + "/" + key.toString();
        FileOutputStream fos = new FileOutputStream(filepath);
        fos.write(value.getBytes(), 0, value.getLength());
        fos.close();
        context.write(key, new Text("Download completed!"));
    }
}

2. Then, define the download Driver class, which extends Hadoop's Configured class, implements the Tool interface, and provides the run function.

public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    Job job = Job.getInstance(conf, "DownloadFile");
    job.setJarByClass(DownloadFile.class);
    job.setMapperClass(DownloadMapper.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(BytesWritable.class);
    job.setInputFormatClass(KeyValueTextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    return job.waitForCompletion(true) ? 0 : 1;
}

That is the cloud-disk upload and download implementation on Hadoop.
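
The two run methods above follow the standard Tool pattern but never show the entry point. As a sketch (assuming the upload driver class is named UploadFile, as the code above implies), a ToolRunner-based main placed inside that class would look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

// Entry point for the upload driver; ToolRunner parses generic Hadoop
// options (-D, -conf, ...) before handing the remaining args to run().
public static void main(String[] args) throws Exception {
    int exitCode = ToolRunner.run(new Configuration(), new UploadFile(), args);
    System.exit(exitCode);
}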