1. First, put the cluster's three config files hive-site.xml, core-site.xml, and hdfs-site.xml into the project's resources directory (required; otherwise the job errors out).
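As a quick sketch of where those files go (assuming a standard Maven/sbt project layout; adjust for your build tool), placing them under src/main/resources puts them on the classpath:

src/main/resources/
    hive-site.xml
    core-site.xml
    hdfs-site.xml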
2. Code. Both of the following test programs run.
1) test03.java
import org.apache.spark.sql.SparkSession;
import java.text.ParseException;

public class test03 {
    public static void main(String[] args) throws ParseException {
        // enableHiveSupport() picks up hive-site.xml from the classpath resources
        SparkSession spark = SparkSession
                .builder()
                .appName("Java Spark Hive Example")
                .master("local[*]")
                //.config("spark.sql.warehouse.dir", "/user/hive/warehouse")
                .config("hadoop.home.dir", "/user/hive/warehouse")
                .enableHiveSupport()
                .getOrCreate();
        spark.sql("SELECT * FROM mt1").show();
    }
}
2) Hive03.scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext

object Hive03 {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("test").setMaster("local[2]")
    val sc = new SparkContext(conf)
    val sqlContext = new HiveContext(sc)
    sqlContext.table("mt1")          // use the dbName.tableName format
      .registerTempTable("person")   // register as a temporary table
    sqlContext.sql(
      """
        |select *
        |from person
        |limit 10
      """.stripMargin).show()
    sc.stop()
  }
}
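Note that HiveContext and registerTempTable are deprecated as of Spark 2.x. A minimal sketch of the same query written against SparkSession (assuming the same mt1 table; the object name Hive03Session is made up for illustration):

import org.apache.spark.sql.SparkSession

object Hive03Session {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("Scala Spark Hive Example")
      .master("local[2]")
      .enableHiveSupport()  // reads hive-site.xml from the classpath, as above
      .getOrCreate()

    // createOrReplaceTempView is the Spark 2.x replacement for registerTempTable
    spark.table("mt1").createOrReplaceTempView("person")
    spark.sql("select * from person limit 10").show()

    spark.stop()
  }
}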