spark.json
{"id":1, "name":"leo", "age":18}
{"id":2, "name":"jack", "age":19}
{"id":3, "name":"marry", "age":17}
package com.pgliuyang.sparkproject;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;
/**
 * Minimal Spark SQL example: loads a JSON-lines file into a DataFrame and
 * demonstrates basic operations (show, schema, select, column arithmetic,
 * filter, groupBy/count).
 *
 * <p>Expects a JSON-lines file (one JSON object per line with fields
 * {@code id}, {@code name}, {@code age}) at the hard-coded path below.
 */
public class DataFrameCreate {
    public static void main(String[] args) {
        // Local mode for a self-contained demo; no cluster required.
        SparkConf conf = new SparkConf().setAppName("DataFrameCreate").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            SQLContext sqlContext = new SQLContext(sc);
            // Use the parameterized Dataset<Row> (DataFrame) rather than the raw type.
            Dataset<Row> df = sqlContext.read().json("C:\\Users\\Administrator\\Desktop\\spark.json");
            // Print all rows of the DataFrame (equivalent to: select * from ...)
            df.show();
            // Print the DataFrame's metadata (schema)
            df.printSchema();
            // Query all values of a single column
            df.select("name").show();
            // Query several columns, applying a computation to one of them
            df.select(df.col("name"), df.col("age").plus(1)).show();
            // Filter rows (age > 18)
            df.filter(df.col("age").gt(18)).show();
            // Aggregate: count rows per group
            df.groupBy(df.col("age")).count().show();
        } finally {
            // Always release the SparkContext so the JVM can shut down cleanly.
            sc.close();
        }
    }
}