Java版本代码如下:
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.SQLContext;
/**
* 使用Java的方式实战对DataFrame的操作
*/
/**
 * Demonstrates basic DataFrame operations from Java (Spark 1.x API).
 * Each call below is annotated with the equivalent SQL statement.
 */
public class DataFrameOps {
public static void main(String[] args) {
// Configure the application: read system config, set the app name,
// and run Spark in local (single-JVM) mode.
SparkConf conf = new SparkConf()
.setMaster("local")
.setAppName("DataFrameOps");
// SparkContext is the core entry point of the driver program.
// NOTE(review): the original comment claimed a JavaSparkContext was
// created here, but the code constructs the Scala SparkContext directly;
// SQLContext accepts it, so this works as-is.
SparkContext sc = new SparkContext(conf);
// SQLContext is the entry point for SQL / DataFrame analysis.
SQLContext sqlContext = new SQLContext(sc);
// Load a JSON file as a DataFrame — conceptually, a table.
DataFrame df = sqlContext.read().json("E://people.json");
// select * from table
df.show();
// desc table
df.printSchema();
// select name from table
df.select("name").show();
// select name, age + 10 from table   (original comment said "+ 1" but the code adds 10)
df.select(df.col("name"),df.col("age").plus(10)).show();
// select * from table where age > 10
df.filter(df.col("age").gt(10)).show();
// select age, count(*) from table group by age
df.groupBy(df.col("age")).count().show();
// Stop the context to release driver/cluster resources
// (the original leaked it by letting main fall off the end).
sc.stop();
}
}
Scala版本:
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
object DataFrameOps {
def main(args:Array[String]):Unit={
val conf = new SparkConf().setMaster("local").setAppName("DataFrameOps")
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
val df = sqlContext.read.json("E://people.json")
df.show()
df.printSchema()
df.select("name").show()
df.select(df.col("name"),df.col("age").plus(10)).show()
df.filter(df.col("age").gt(10)).show()
df.groupBy(df.col("age")).count().show()