内容:
1.Hive数据导入操作
2.SparkSQL对数据操作实战
一、Hive数据导入操作
-- Raw user click-log table: one row per page-view/click event.
-- The ROW FORMAT clause must be part of CREATE TABLE (the original file had it
-- on LOAD DATA, which is invalid Hive syntax); `date` and `timestamp` are
-- backtick-quoted because they are reserved keywords in recent Hive versions.
CREATE TABLE IF NOT EXISTS userLogs (
    `date`      STRING,
    `timestamp` BIGINT,
    userID      BIGINT,
    pageID      BIGINT,
    channel     STRING,
    action      STRING
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t'
LINES TERMINATED BY '\n';

-- Bulk-load the local log file into the table.
-- FIX: the original statement omitted the target table name after INTO TABLE.
LOAD DATA LOCAL INPATH '/home/hadoop/learnSpark/SparkSQLDataManually/userLogs.log'
INTO TABLE userLogs;
二、SparkSQL对数据操作实战
package SparkSQL;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.hive.HiveContext;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
/**
* FileName: SparkSQLUserLogsOps
* Author: hadoop
* Email: 3165845957@qq.com
* Date: 18-11-12 下午10:19
* Description:
*/
public class SparkSQLUserLogsOps {
    public static void main(String[] args){
        // SparkConf carries the application name and the cluster master URL
        // that the driver connects to.
        SparkConf conf = new SparkConf()
                .setAppName("SparkSQLUserLogsOps")
                .setMaster("spark://Master:7077");
        // JavaSparkContext is the entry point / core of the whole driver program.
        JavaSparkContext sc = new JavaSparkContext(conf);
        // Log level for the driver-side output.
        sc.setLogLevel("INFO");
        // HiveContext (not plain SQLContext) is required here because the query
        // below reads a Hive-managed table.
        HiveContext hiveContext = new HiveContext(sc.sc());
        String twodaysago = getTwodaysago();
        pvStatistic(hiveContext, twodaysago);
    }

    /**
     * Prints the top-10 pages by page views (PV) on the given date.
     *
     * @param hiveContext context used to run SQL against the Hive warehouse
     * @param twodaysago  target date in yyyy-MM-dd form; only 'view' actions
     *                    on this date are counted
     */
    private static void pvStatistic(HiveContext hiveContext, String twodaysago) {
        hiveContext.sql("use hive");
        // FIX: the original compared against the literal string 'twodaysago'
        // instead of interpolating the computed date, so the query always
        // returned zero rows. The value comes from an internal date formatter
        // (never user input), so string concatenation is acceptable here.
        String sqlText = "select date, pageID, pv "
                + "from (select date, pageID, count(*) pv from userlogs "
                + "where action = 'view' and date = '" + twodaysago + "' "
                + "group by date, pageID) subquery "
                + "order by pv desc limit 10";
        hiveContext.sql(sqlText).show();
    }

    /**
     * @return the calendar date two days before now, formatted as yyyy-MM-dd
     */
    private static String getTwodaysago() {
        SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd");
        // Calendar.getInstance() is already initialized to "now", so the
        // original's extra setTime(new Date()) call was redundant.
        Calendar calendar = Calendar.getInstance();
        calendar.add(Calendar.DATE, -2);
        Date twoDaysAgo = calendar.getTime();
        return formatter.format(twoDaysAgo);
    }
}