import java.text.SimpleDateFormat
object TimeStampTest {
  /** Prints the current epoch time in milliseconds, then the same instant
    * formatted as wall-clock time (HH:mm:ss, JVM default time zone).
    */
  def main(args: Array[String]): Unit = {
    val nowMillis = System.currentTimeMillis()
    println(nowMillis)
    val formatter = new SimpleDateFormat("HH:mm:ss")
    println(formatter.format(nowMillis))
  }
}
import java.text.SimpleDateFormat
object TimeStampTest {
  /** Formats several fixed epoch-millisecond timestamps as HH:mm:ss and
    * prints each one. Output depends on the JVM default time zone.
    *
    * Fix: the original computed `tsLong` and `sdf0` but never used them
    * (dead code, now removed), and built a new SimpleDateFormat per call;
    * a single formatter is reused here instead (safe: single-threaded main).
    */
  def main(args: Array[String]): Unit = {
    val formatter = new SimpleDateFormat("HH:mm:ss")
    // Pairs of sample timestamps: one with a sub-second remainder and the
    // same instant truncated to whole seconds — both format identically
    // because the pattern has no millisecond field.
    val sdf1 = formatter.format(1575871214234L)
    println(sdf1)
    val sdf2 = formatter.format(1575871214000L)
    println(sdf2)
    val sdf3 = formatter.format(1575871540720L)
    println(sdf3)
    val sdf4 = formatter.format(1575871540000L)
    println(sdf4)
  }
}
package mby00
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.{StringType, StructType}
object SparkStructuredStreamingForJoinOptTest {
  /** Stream-static join demo: joins a streaming CSV source with a static
    * JSON DataFrame on the "id" column (left outer, stream on the left —
    * a combination supported by Structured Streaming) and writes each
    * appended micro-batch to the console.
    */
  def main(args: Array[String]): Unit = {
    val sparkSession = SparkSession
      .builder()
      // Fix: was appName("") — give the job an identifiable name for the Spark UI.
      .appName("SparkStructuredStreamingForJoinOptTest")
      .master("local[*]")
      .getOrCreate()

    // Static (batch) side of the join, read once at startup.
    val fileDataFrame = sparkSession
      .read
      .format("json")
      .load("D:\\IntelliJ IDEA 2018.2.5\\IEDAMBY\\sparksql-day2\\src\\main\\resources\\SQLSaveOptWithJSONTest")

    // Streaming side: CSV files arriving in the directory. File-based
    // streaming sources require an explicit schema. The redundant
    // .format("csv") was dropped — .csv(path) already selects the format.
    val streamingDataFrame = sparkSession
      .readStream
      .schema(
        new StructType()
          .add("id", "integer")
          .add("name", StringType)
      )
      .csv("D:\\IntelliJ IDEA 2018.2.5\\IEDAMBY\\sparksql-day2\\src\\main\\resources\\SQLSaveAndLoadOptWithCSVTest1")

    // BUG FIX: the original passed Seq("id","id") as usingColumns, listing
    // the join key twice; each key column must appear exactly once.
    streamingDataFrame.join(fileDataFrame, Seq("id"), "leftOuter")
      .writeStream
      .format("console")
      .outputMode(OutputMode.Append())
      .start()
      .awaitTermination()   // blocks forever until the query is stopped or fails
  }
}