1 先到 158.158.4.49 服务器上，用 nc -lk 9998 开启监听并发送消息（与代码中 socketTextStream 的主机和端口一致）
2 详细代码如下,注意:保存前先用 repartition(1),不然会有很多小文件
package cn.taobao;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.spark.api.java.StorageLevels;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import scala.Tuple2;
import java.util.Arrays;
/**
 * Spark Streaming word count that reads lines from a socket (158.158.4.49:9998),
 * counts words per 10-second batch, and saves each batch's result to HDFS
 * both as plain text and as a SequenceFile.
 *
 * Note: repartition(1) is applied before saving so each batch produces a single
 * output part-file instead of many small files.
 */
public class Save_1 {
    public static void main(String[] args) throws Exception {
        // Entry point for Spark Streaming; batch interval is 10 seconds.
        // BUG FIX: was jarOfClass(Save_1.class.getClass()), which evaluates to
        // Class.class and makes jarOfClass resolve the wrong jar. jarOfClass
        // expects the application class itself: Save_1.class.
        JavaStreamingContext ssc = new JavaStreamingContext(
                "local[*]",
                "Save_1",
                Durations.seconds(10),
                System.getenv("SPARK_HOME"),
                JavaStreamingContext.jarOfClass(Save_1.class));
        ssc.sparkContext().setLogLevel("ERROR");

        // Receiver: a JavaReceiverInputDStream fed by data arriving over a
        // socket on the given host/port; serialized storage spills to disk.
        JavaReceiverInputDStream<String> lines = ssc.socketTextStream(
                "158.158.4.49", 9998, StorageLevels.MEMORY_AND_DISK_SER);

        // Split each line on single spaces into individual words.
        // e.g. input "aa bb cc aa" -> aa, bb, cc, aa
        JavaDStream<String> words = lines.flatMap(line -> {
            String[] tokens = line.split(" ");
            return Arrays.asList(tokens).iterator();
        });

        // Pair each word with an initial count of 1.
        // e.g. (aa,1) (bb,1) (cc,1) (aa,1)
        JavaPairDStream<String, Integer> wordOnes =
                words.mapToPair(word -> new Tuple2<>(word, 1));

        // Sum counts per word within the batch.
        // e.g. (aa,2) (bb,1) (cc,1)
        JavaPairDStream<String, Integer> wordCounts =
                wordOnes.reduceByKey((v1, v2) -> v1 + v2);

        // Save each batch to HDFS as plain text. The (word,count) tuple is
        // rendered via Tuple2.toString() into the Text value; NullWritable
        // keys keep the output to value-only lines.
        wordCounts.repartition(1).mapToPair(pair -> {
            Text text = new Text();
            text.set(pair.toString());
            return new Tuple2<>(NullWritable.get(), text);
        }).saveAsHadoopFiles("hdfs://localhost:9000/dir_spark_test/", "-txt", NullWritable.class, Text.class, TextOutputFormat.class);

        // Save each batch to HDFS in SequenceFile format (same payload).
        wordCounts.repartition(1).mapToPair(pair -> {
            Text text = new Text();
            text.set(pair.toString());
            return new Tuple2<>(NullWritable.get(), text);
        }).saveAsHadoopFiles("hdfs://localhost:9000/dir_spark_test/", "-seq", NullWritable.class, Text.class, SequenceFileOutputFormat.class);

        // Explicitly start receiving data.
        ssc.start();
        try {
            // Block until the streaming computation terminates.
            ssc.awaitTermination();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            ssc.close();
        }
    }
}
效果如下