I. Common sources in Flink batch processing
Flink's batch sources fall into two broad categories:
1. Collection-based sources, built from local collections
2. File-based sources, built from files
1. Collection-based sources
The three most common ways to create a DataSet in Flink are:
1. env.fromElements(), which also accepts composite types such as Tuples and custom objects.
2. env.fromCollection(), which supports many concrete Collection types.
3. env.generateSequence(), which creates a DataSet backed by a number sequence.
The program below demonstrates all three.
Example program:
package code.book.batch.sinksource.scala

import org.apache.flink.api.scala._

import scala.collection.immutable.{Queue, Stack}
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, ListBuffer}

object DataSource001 {
  def main(args: Array[String]): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment

    // 1. env.fromElements(): build a DataSet from a varargs list of elements,
    //    including composite types such as Tuples.
    val ds0: DataSet[String] = env.fromElements("spark", "flink")
    ds0.print()
    val ds1: DataSet[(Int, String)] = env.fromElements((1, "spark"), (2, "flink"))
    ds1.print()

    // 2. env.fromCollection(): build a DataSet from the common Scala collection types.
    val ds2: DataSet[String] = env.fromCollection(Array("spark", "flink"))
    ds2.print()
    val ds3: DataSet[String] = env.fromCollection(ArrayBuffer("spark", "flink"))
    ds3.print()
    val ds4: DataSet[String] = env.fromCollection(List("spark", "flink"))
    ds4.print()
    val ds5: DataSet[String] = env.fromCollection(ListBuffer("spark", "flink"))
    ds5.print()
    val ds6: DataSet[String] = env.fromCollection(Vector("spark", "flink"))
    ds6.print()
    val ds7: DataSet[String] = env.fromCollection(Queue("spark", "flink"))
    ds7.print()
    val ds8: DataSet[String] = env.fromCollection(Stack("spark", "flink"))
    ds8.print()
    val ds9: DataSet[String] = env.fromCollection(Stream("spark", "flink"))
    ds9.print()
    val ds10: DataSet[String] = env.fromCollection(Seq("spark", "flink"))
    ds10.print()
    val ds11: DataSet[String] = env.fromCollection(Set("spark", "flink"))
    ds11.print()
    val ds12: DataSet[String] = env.fromCollection(Iterable("spark", "flink"))
    ds12.print()
    val ds13: DataSet[String] = env.fromCollection(mutable.ArraySeq("spark", "flink"))
    ds13.print()
    val ds14: DataSet[String] = env.fromCollection(mutable.ArrayStack("spark", "flink"))
    ds14.print()
    // A Map is an Iterable of key/value pairs, so it yields a DataSet of Tuples.
    val ds15: DataSet[(Int, String)] = env.fromCollection(Map(1 -> "spark", 2 -> "flink"))
    ds15.print()
    val ds16: DataSet[Int] = env.fromCollection(Range(1, 9))
    ds16.print()

    // 3. env.generateSequence(): a parallel DataSet over the numbers [from, to].
    val ds17: DataSet[Long] = env.generateSequence(1, 9)
    ds17.print()
  }
}
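Note that in the DataSet API, print() itself triggers execution of the job, so the program above needs no explicit env.execute() call. fromCollection also has an overload that accepts an Iterator, which avoids materializing the data on the client first; a minimal sketch (ds18 is our own name, continuing the numbering inside the same main method):

// Sketch: fromCollection from an Iterator, inside DataSource001's main method.
val ds18: DataSet[String] = env.fromCollection(Iterator("spark", "flink"))
ds18.print()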
2. File-based sources
Flink can read files from a variety of storage systems, including the local filesystem, HDFS, and Alluxio, and it supports multiple file formats, including plain text and CSV.
Example program:
package code.book.batch.sinksource.scala

import org.apache.flink.api.scala._

object DataSource002 {

  // Case class for CSV records; defined outside main so that Flink's
  // type analysis can treat it as a regular case-class type.
  case class Sales(transactionId: String, customerId: Int, itemId: Int, amountPaid: Double)

  def main(args: Array[String]): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment

    // Read a text file from the local filesystem.
    val ds1: DataSet[String] = env.readTextFile("file:///Applications/flink-1.1.3/README.txt")
    ds1.print()

    // Read a text file from HDFS.
    val ds2: DataSet[String] = env.readTextFile("hdfs:///input/flink/README.txt")
    ds2.print()

    // Read a CSV file into a DataSet of Tuples.
    val path = "hdfs://qingcheng11:9000/input/flink/sales.csv"
    val ds3: DataSet[(String, Int, Int, Double)] = env.readCsvFile[(String, Int, Int, Double)](
      filePath = path,
      lineDelimiter = "\n",
      fieldDelimiter = ",",
      lenient = false,        // fail on malformed lines instead of silently skipping them
      ignoreFirstLine = true, // skip the header line
      includedFields = Array(0, 1, 2, 3))
    ds3.print()

    // Read the same CSV file into a DataSet of case-class instances.
    val ds4: DataSet[Sales] = env.readCsvFile[Sales](
      filePath = path,
      lineDelimiter = "\n",
      fieldDelimiter = ",",
      lenient = false,
      ignoreFirstLine = true,
      includedFields = Array(0, 1, 2, 3),
      pojoFields = Array("transactionId", "customerId", "itemId", "amountPaid"))
    ds4.print()
  }
}
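For reference, both readCsvFile calls assume that sales.csv starts with a header line (hence ignoreFirstLine = true), followed by one record per line with four comma-separated fields matching the (String, Int, Int, Double) tuple and the Sales case class. A hypothetical sample:

transactionId,customerId,itemId,amountPaid
t001,1,101,99.5
t002,2,102,250.0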
3. File-based sources (recursive directory traversal)
Flink can also read every file under a directory, including files in all nested subdirectories.
Example program:
package code.book.batch.sinksource.scala

import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.configuration.Configuration

/**
 * Recursively reads all files under an HDFS directory,
 * descending into every level of subdirectories.
 */
object DataSource003 {
  def main(args: Array[String]): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment

    // Enable recursive enumeration of nested files (disabled by default).
    val parameters = new Configuration
    parameters.setBoolean("recursive.file.enumeration", true)

    // withParameters() passes the configuration to the underlying input format.
    val ds1 = env.readTextFile("hdfs:///input/flink").withParameters(parameters)
    ds1.print()
  }
}
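Recursive enumeration is off by default, so without recursive.file.enumeration set to true, Flink reads only the files directly under hdfs:///input/flink. Since the flag is interpreted by Flink's file input format, it should work the same way for other file-based sources such as readCsvFile.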