1. Spark Core terminology
Master: the master node for resource management.
Cluster Manager: an external service for acquiring resources on the cluster.
Worker Node: the slave node for resource management, i.e. the process that manages the resources of its own machine.
Application: a user program built on Spark, consisting of a driver program and executors that run on the cluster.
Driver Program: the program that connects to the worker processes.
Executor: a process launched for an Application on a node managed by a worker process.
Task: a unit of work sent to an executor.
Job: a parallel computation made up of many tasks.
Stage: a job is split into several groups of tasks; each group of tasks is called a stage.
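To make the roles concrete, a minimal Scala sketch (the master URL spark://master:7077 and the object name TermsDemo are hypothetical): the driver program below creates the SparkContext and connects to the cluster manager, which grants it executors on the worker nodes; count() then submits a job that runs as tasks inside those executors.
import org.apache.spark.{SparkConf, SparkContext}

object TermsDemo {
  def main(args: Array[String]): Unit = {
    //the driver program: connects to the cluster manager named in setMaster
    val conf = new SparkConf().setAppName("termsDemo").setMaster("spark://master:7077")
    val sc = new SparkContext(conf)
    //count() submits a job, which is split into stages and tasks
    //that run inside the executors on the worker nodes
    println(sc.parallelize(1 to 100).count())
    sc.stop()
  }
}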
2. Narrow and wide dependencies
These describe the dependency relationships between RDDs.
Narrow dependency: each parent RDD partition corresponds to exactly one child RDD partition (one-to-one). No shuffle is produced.
Wide dependency: a parent RDD partition corresponds to multiple child RDD partitions (one-to-many). A shuffle is produced.
3. The stage computation model
Based on the dependencies between RDDs, a Spark job forms a DAG (directed acyclic graph). Stages are delimited by those wide/narrow dependencies: a new stage is cut at every wide dependency, and each stage contains one or more tasks.
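A minimal Scala sketch of where a stage boundary falls (assumes an existing SparkContext named sc and a hypothetical input file data.txt): flatMap and map are narrow dependencies and stay in one stage, while reduceByKey is a wide dependency, so a new stage starts at the shuffle.
//narrow dependencies: flatMap and map stay in the same stage
val words = sc.textFile("data.txt")
  .flatMap(_.split(" "))
  .map((_, 1))
//wide dependency: reduceByKey shuffles, cutting a new stage here
val counts = words.reduceByKey(_ + _)
//the printed lineage marks the shuffle (and thus the stage boundary)
println(counts.toDebugString)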
4. Generate 50,000 records and count them
Each record is tab-separated: ip, province, date, timestamp, userid, website, action. For example:
207.163.195.142 江苏 2021-11-13 1636770603238 1248256557425126491 www.taobao.com Click
Generating the 50,000 records:
package zjc.bigdata
import java.io.File
import java.text.SimpleDateFormat
import java.util.{Date, Random}
import java.io.FileOutputStream
import java.io.OutputStreamWriter
import java.io.PrintWriter
/**
* Produce data and write it to a file
*/
object ProducePvAndUvData {
//ip (each random octet is drawn from 0 until IP)
val IP = 223
//addresses (provinces / regions)
val ADDRESS = Array("北京", "天津", "上海", "重庆", "河北", "辽宁","山西",
"吉林", "江苏", "浙江", "黑龙江", "安徽", "福建", "江西",
"山东", "河南", "湖北", "湖南", "广东", "海南", "四川",
"贵州", "云南", "山西", "甘肃", "青海", "台湾", "内蒙",
"广西", "西藏", "宁夏", "新疆", "香港", "澳门")
//date (today, formatted yyyy-MM-dd)
val DATE = new SimpleDateFormat("yyyy-MM-dd").format(new Date())
//timestamp (placeholder; the actual timestamp is generated per record below)
val TIMESTAMP = 0L
//userid (placeholder; the actual userid is generated per record below)
val USERID = 0L
//websites
val WEBSITE = Array("www.baidu.com", "www.taobao.com", "www.dangdang.com", "www.jd.com", "www.suning.com", "www.mi.com", "www.gome.com.cn")
//actions
val ACTION = Array("Regist", "Comment", "View", "Login", "Buy", "Click", "Logout")
def main(args: Array[String]): Unit = {
val pathFileName = "D:\\BigData\\spark\\filterWC\\src\\main\\data\\pvuvdata"
//create the file
val createFile = CreateFile(pathFileName)
//objects needed to write data to the file
val file = new File(pathFileName)
val fos = new FileOutputStream(file, true)
val osw = new OutputStreamWriter(fos, "UTF-8")
val pw = new PrintWriter(osw)
if (createFile) {
var i = 0
//produce 50,000+ records (each outer iteration writes 1, 3, or 5 lines)
while (i < 50000){ //simulate an ip
val random = new Random()
val ip = random.nextInt(IP) + "." + random.nextInt(IP) + "." + random.nextInt(IP) + "." + random.nextInt(IP)
//simulate an address
val address = ADDRESS(random.nextInt(ADDRESS.length))
//simulate a date
val date = DATE
//simulate a userid
val userid = Math.abs(random.nextLong)
/**
* The inner while loop simulates the same user acting on different
* websites at different points in time
*/
var j = 0
var timestamp = 0L
var webSite = "unknown website"
var action = "unknown action"
//flag is always odd (1, 3, or 5): the number of records for this user
val flag = random.nextInt(5) | 1
while (j < flag) { //Thread.sleep(5);
//simulate a timestamp
timestamp = new Date().getTime()
//simulate a website
webSite = WEBSITE(random.nextInt(WEBSITE.length))
//simulate an action (nextInt(6) would never select the last entry, "Logout")
action = ACTION(random.nextInt(ACTION.length))
j += 1
/**
* Assemble the record (tab-separated fields)
*/
val content = ip + "\t" + address + "\t" + date + "\t" + timestamp + "\t" + userid + "\t" + webSite + "\t" + action
System.out.println(content)
//write the record to the file
pw.write(content + "\n")
}
i += 1
}
//note the close order: what was opened first is closed last
pw.close()
osw.close()
fos.close()
}
}
/**
* Create the file (deleting any existing file first)
*/
def CreateFile(pathFileName: String): Boolean = {
val file = new File(pathFileName)
if (file.exists) file.delete
val createNewFile = file.createNewFile()
System.out.println("create file " + pathFileName + " success!")
createNewFile
}
}
Counting the records:
package zjc.bigdata
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
object PvUv {
def main(args: Array[String]): Unit = {
val conf = new SparkConf()
conf.setAppName("test")
conf.setMaster("local")
val context = new SparkContext(conf)
context.setLogLevel("ERROR")
//read the generated records
val value: RDD[String] = context.textFile("D:\\BigData\\spark\\filterWC\\src\\main\\data\\pvuvdata")
//field 5 is the website, so this counts page views (PV) per site
value.map(info=>{(info.split("\t")(5),1)})
.reduceByKey((v1,v2)=>{v1+v2})
.foreach(println)
}
}
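The program above only computes PV. A minimal sketch of one way to also compute UV (unique visitors per site), reusing the value RDD from above: it deduplicates (website, userid) pairs before counting, assuming field 4 is the userid as produced by the generator.
//UV: count distinct userids per website
value.map(info => { val f = info.split("\t"); (f(5), f(4)) })
  .distinct()
  .map(t => (t._1, 1))
  .reduceByKey(_ + _)
  .foreach(println)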
5. Secondary sort
Example: the sort criterion is the number of login days; when two records have the same number of login days, sort by login time instead.
The first column is the days, the second is the time.
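A hypothetical sample of what the secondSort input file might look like (space-separated, days then time):
4 34
1 23
4 12
2 56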
SecondSort.java
package org.spark;
import java.io.Serializable;
public class SecondSort implements Serializable,Comparable<SecondSort>{
private int first;
private int second;
public int getFirst() {
return first;
}
public void setFirst(int first) {
this.first = first;
}
public int getSecond() {
return second;
}
public void setSecond(int second) {
this.second = second;
}
public SecondSort() {
}
public SecondSort(int first, int second) {
this.first = first;
this.second = second;
}
@Override
public int compareTo(SecondSort o) {
//compare by first; break ties with second. Integer.compare avoids the
//integer-overflow risk of returning a raw subtraction.
if(getFirst()==o.getFirst()){
return Integer.compare(getSecond(), o.getSecond());
}else{
return Integer.compare(getFirst(), o.getFirst());
}
}
}
Main program:
package org.spark;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;
public class spark02 {
public static void main(String[] args) {
SparkConf conf=new SparkConf();
conf.setAppName("spark02");
conf.setMaster("local");
JavaSparkContext context=new JavaSparkContext(conf);
JavaRDD<String> stringJavaRDD = context.textFile("D:\\BigData\\spark\\filterWC\\src\\main\\data\\secondSort");
JavaPairRDD<SecondSort, String> secondRDD = stringJavaRDD.mapToPair(new PairFunction<String, SecondSort, String>() {
@Override
public Tuple2<SecondSort, String> call(String s) throws Exception {
String[] splited = s.split(" ");
int first = Integer.valueOf(splited[0]);
int second = Integer.valueOf(splited[1]);
SecondSort secondSort = new SecondSort(first, second);
return new Tuple2<SecondSort, String>(secondSort, s);
}
});
secondRDD.sortByKey().foreach(new VoidFunction<Tuple2<SecondSort, String>>() {
@Override
public void call(Tuple2<SecondSort, String> secondTuple2) throws Exception {
System.out.println(secondTuple2._2);
}
});
}
}
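For comparison, a sketch of the same secondary sort in Scala: sortByKey can use the implicit Ordering on (Int, Int) tuple keys, so no custom key class is needed (it assumes the same input path as above).
import org.apache.spark.{SparkConf, SparkContext}

val sc = new SparkContext(new SparkConf().setAppName("secondSortScala").setMaster("local"))
sc.textFile("D:\\BigData\\spark\\filterWC\\src\\main\\data\\secondSort")
  .map { line =>
    val p = line.split(" ")
    //key on (days, time); tuples compare field by field
    ((p(0).toInt, p(1).toInt), line)
  }
  .sortByKey()
  .map(_._2)
  .foreach(println)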
6. Grouped top-N
Case: find the top three scores in each class.
package org.spark;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;
import java.util.Iterator;
public class spark03 {
public static void main(String[] args) {
SparkConf conf=new SparkConf();
conf.setAppName("spark03");
conf.setMaster("local");
JavaSparkContext context=new JavaSparkContext(conf);
JavaRDD<String> RDD1 = context.textFile("D:\\BigData\\spark\\filterWC\\src\\main\\data\\scores");
JavaPairRDD<String, Integer> RDD2 = RDD1.mapToPair(new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) throws Exception {
String[] splited = s.split(" ");
String classname = splited[0];
Integer score = Integer.valueOf(splited[1]);
return new Tuple2<String, Integer>(classname, score);
}
});
RDD2.groupByKey().foreach(new VoidFunction<Tuple2<String, Iterable<Integer>>>() {
@Override
public void call(Tuple2<String, Iterable<Integer>> iterableTuple2) throws Exception {
String classname=iterableTuple2._1;
Iterator<Integer> iterator=iterableTuple2._2.iterator();
//fixed-size buffer for the three largest scores, kept in descending order
Integer[] top3=new Integer[3];
while(iterator.hasNext()){
Integer score= iterator.next();
//insertion into top3: find the slot, shift smaller entries down, insert
for(int i=0;i< top3.length;i++){
if(top3[i]==null){
top3[i]=score;
break;
}else if(score>top3[i]){
for (int j = 2; j > i; j--) {
top3[j] = top3[j-1];
}
top3[i]=score;
break;
}
}
}
System.out.println("classname"+classname);
for(Integer score:top3){
System.out.println(score);
}
}
});
}
}
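The same grouped top-N as a Scala sketch. Note that groupByKey pulls every score of a class onto a single executor; for very large groups, aggregateByKey with a bounded buffer (like the three-element array above) would scale better.
import org.apache.spark.{SparkConf, SparkContext}

val sc = new SparkContext(new SparkConf().setAppName("top3Scala").setMaster("local"))
sc.textFile("D:\\BigData\\spark\\filterWC\\src\\main\\data\\scores")
  .map { line =>
    val p = line.split(" ")
    (p(0), p(1).toInt)
  }
  .groupByKey()
  //sort each class's scores descending and keep the first three
  .mapValues(_.toList.sortBy(-_).take(3))
  .foreach(println)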