1. Start HDFS:
cd /usr/local/hadoop-2.7.7/sbin
./start-dfs.sh
2. Start YARN:
cd /usr/local/hadoop-2.7.7/sbin
./start-yarn.sh
3. Start Spark (the master binds to the same address the worker and the Spark application connect to):
cd /usr/local/spark-2.3.3-bin-hadoop2.7/sbin
./start-master.sh -h 192.168.96.128
./start-slave.sh spark://192.168.96.128:7077
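To check that everything came up, jps (shipped with the JDK) should list the Hadoop and Spark daemons; on a single-node setup like the one above, roughly:
jps
# expected processes include NameNode, SecondaryNameNode, DataNode, ResourceManager, NodeManager, Master, Worker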
4. Create the Sqoop import job:
./sqoop-job \
--meta-connect jdbc:hsqldb:hsql://192.168.96.128:16000/sqoop \
--create t_order_increment_job \
-- import --connect jdbc:mysql://192.168.96.1:3306/demo_ds_0?serverTimezone=Asia/Shanghai \
--username root -P \
--append \
--table t_order_increment \
--columns "id,name,my_time" \
--incremental lastmodified \
--check-column my_time \
--last-value '2019-08-30 21:36:16' \
--target-dir /increment/t_order_increment
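This assumes the shared metastore is already running (./sqoop-metastore starts it). To confirm the job was actually stored, sqoop-job can list and show it:
./sqoop-job --meta-connect jdbc:hsqldb:hsql://192.168.96.128:16000/sqoop --list
./sqoop-job --meta-connect jdbc:hsqldb:hsql://192.168.96.128:16000/sqoop --show t_order_increment_job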
5. Execute the import job:
./sqoop-job \
--meta-connect jdbc:hsqldb:hsql://192.168.96.128:16000/sqoop \
--exec t_order_increment_job -- --last-value '2020-08-12 02:12:18'
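After each successful run, Sqoop writes the new last-value back into the metastore, so the --last-value override above is only needed to force a specific window; normally the job can simply be re-executed as-is. A sketch of running it periodically via cron (the Sqoop install path here is an assumption, adjust to your layout):
0 2 * * * cd /usr/local/sqoop-1.4.7/bin && ./sqoop-job --meta-connect jdbc:hsqldb:hsql://192.168.96.128:16000/sqoop --exec t_order_increment_job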
6. Java code that queries the imported data with Spark SQL:
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

// Order is a plain JavaBean (getters/setters for orderId, userId, status) defined elsewhere in the project.
public class IncrementApplication {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("SparkApplication")
                .config("spark.master", "spark://192.168.96.128:7077")
                .config("spark.jars", "/usr/local/workspace/spark-test-1.0-SNAPSHOT-shaded.jar")
                .getOrCreate();
        // Each Row holds one comma-separated line written by the Sqoop import.
        JavaRDD<Order> orderRdd = spark.read().text("hdfs://192.168.96.128:9000/increment/t_order_increment/").javaRDD().map(
                line -> {
                    // The field order here must match the --columns list of the Sqoop job.
                    Order order = new Order();
                    String[] items = line.getString(0).split(",");
                    order.setOrderId(Integer.valueOf(items[0]));
                    order.setUserId(Integer.valueOf(items[1]));
                    order.setStatus(items[2]);
                    return order;
                }
        );
        Dataset<Row> orderDataSet = spark.createDataFrame(orderRdd, Order.class);
        // "order" is a reserved word in Spark SQL, so register the view under a different name.
        orderDataSet.createOrReplaceTempView("t_order");
        Dataset<Row> sqlDF = spark.sql("SELECT * FROM t_order");
        sqlDF.show();
        spark.stop();
    }
}
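A sketch of submitting this job to the cluster with spark-submit; the package name com.example is a placeholder for wherever IncrementApplication actually lives:
/usr/local/spark-2.3.3-bin-hadoop2.7/bin/spark-submit \
--class com.example.IncrementApplication \
--master spark://192.168.96.128:7077 \
/usr/local/workspace/spark-test-1.0-SNAPSHOT-shaded.jar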
Appendix:
Command to delete files from HDFS:
cd /usr/local/hadoop-2.7.7/bin
./hdfs dfs -rm -r /increment/*
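Before deleting, it can be worth listing what the incremental imports have written so far (each --append run adds new part files):
./hdfs dfs -ls /increment/t_order_increment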
Note: with Sqoop 1.4.7, after switching the metastore to MySQL, table locking occurred; this is tentatively suspected to be a Sqoop bug. The exception is:
com.mysql.cj.jdbc.exceptions.MySQLTransactionRollbackException: Lock wait timeout exceeded; try restarting transaction
If you connect to a self-deployed MySQL instance as the metastore instead, the command looks like this:
./sqoop-job \
--meta-connect 'jdbc:mysql://127.0.0.1:3306/sqoop?user=root&password=123456' \
--create t_order_increment_job \
-- import --connect jdbc:mysql://127.0.0.1:3306/kevin?serverTimezone=Asia/Shanghai \
--username root --password 123456 \
--append \
--table t_order_increment \
--incremental lastmodified \
--check-column my_time \
--last-value '2019-08-30 21:36:16' \
--target-dir /increment/t_order_increment
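Instead of passing --meta-connect on every invocation, the metastore connection can be configured once in conf/sqoop-site.xml; a minimal sketch, assuming the same MySQL metastore as above:
<property>
  <name>sqoop.metastore.client.autoconnect.url</name>
  <value>jdbc:mysql://127.0.0.1:3306/sqoop?user=root&amp;password=123456</value>
</property>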
When MySQL data is imported incrementally into Hive and rows have been updated in MySQL, the import produces records with duplicate ids. In Hive, the latest record per id can be picked out with the following query:
select t.id, t.name, t.my_time
from (
select id, name, my_time, row_number() over (partition by id order by my_time desc) num
from t_order_increment
) t
where t.num = 1;
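To avoid repeating the window function in every query, the deduplication can be wrapped in a Hive view (the view name here is illustrative):
create view t_order_increment_latest as
select t.id, t.name, t.my_time
from (
select id, name, my_time, row_number() over (partition by id order by my_time desc) num
from t_order_increment
) t
where t.num = 1;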
When importing incrementally into Hive, the data therefore needs to be compacted periodically to drop the superseded duplicate-id records:
./sqoop-merge \
--new-data /kevin/new/kevin_new.txt \
--onto /user/hive/warehouse/t_order_increment/ \
--target-dir /user/hive/warehouse/t_order_increment/2020-08-18 \
--merge-key id \
--class-name t_order_increment \
--jar-file t_order_increment.jar
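The --class-name and --jar-file arguments refer to the record class Sqoop generated for the table. If you do not have them yet, sqoop-codegen can produce them (it prints the output directory containing the generated .jar):
./sqoop-codegen \
--connect jdbc:mysql://127.0.0.1:3306/kevin?serverTimezone=Asia/Shanghai \
--username root --password 123456 \
--table t_order_increment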