前些天准备研究下spark2.0的spark-sql.
我主要的工具是eclipse
版本是hadoop2.7+spark2.0
废话少说,直接上代码wordcount
import org.apache.spark.sql.SparkSession
import org.apache.spark._
import org.apache.spark.SparkContext._
import scala.io.Source
/**
* Created by Administrator on 2016/7/29 0029.
*/
/**
 * Spark 2.0 Dataset word count.
 *
 * Reads a text file as a Dataset[String], splits each line on single spaces,
 * drops empty tokens, lower-cases the words, and prints the count per distinct
 * word via `show()`.
 *
 * The input path may be passed as the first command-line argument; when absent
 * it falls back to the original hard-coded local path for backward
 * compatibility.
 */
object wordcount2 {
  def main(args: Array[String]): Unit = {
    // NOTE(review): spark.sql.warehouse.dir MUST be set before the first
    // SparkSession is created — a later builder's getOrCreate() returns the
    // already-existing session and ignores new config values. The original
    // code built a session first and then configured the warehouse dir on a
    // second builder, which silently had no effect.
    val warehouseLocation = "file:/" // alternatively e.g. "hdfs://192.168.21.8:9000/"

    val spark = SparkSession.builder()
      .master("local")
      .appName("tst")
      .config("spark.sql.warehouse.dir", warehouseLocation)
      .getOrCreate()
    import spark.implicits._ // enables .as[String] and typed Dataset ops

    try {
      // Prefer a caller-supplied path; keep the original Windows path as the
      // default so existing invocations (which ignored args) still work.
      val inputPath = if (args.nonEmpty) args(0) else "file:/D:/a.txt"

      // Dataset[String] of raw lines. Note: with format("text") the path must
      // be reachable from the driver (the original author's main stumbling
      // block — see the file: / hdfs: variants they experimented with).
      val lines = spark.read.format("text").load(inputPath).as[String]

      // Split on single spaces; filter out the ""-tokens produced by runs of
      // spaces (the original counted them as a word), then group
      // case-insensitively and count occurrences per key.
      val wordCounts = lines
        .flatMap(_.split(" "))
        .filter(_.nonEmpty)
        .groupByKey(_.toLowerCase())
        .count()

      wordCounts.show()
    } finally {
      spark.stop() // release the local Spark context even if the job fails
    }
  }
}