Section 5: Comparing Hudi Merge on Read and Copy on Write Tables

I. Merge on Read vs. Copy on Write

1. Write comparison

(1) Write code that inserts the same data into two tables, one of each table type.

For the pom.xml, refer to Section 3.

def main(args: Array[String]): Unit = {

  val sparkConf = new SparkConf().setAppName("test_operator").setMaster("local[*]")
    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  val sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()
  insertMergeOnReadTable(sparkSession)
  insertCopyOnReadTable(sparkSession)
}

First, test writing data with both table types.

Merge on Read mode (DataSourceWriteOptions.MOR_TABLE_TYPE_OPT_VAL):

def insertMergeOnReadTable(sparkSession: SparkSession) = {
  import org.apache.spark.sql.functions._
  val commitTime = System.currentTimeMillis().toString // generate a commit time
  val resultDF = sparkSession.read.json("/tmp/ods/member.log")
    .withColumn("ts", lit(commitTime)) // add a ts timestamp column
    .withColumn("hudipartition", concat_ws("/", col("dt"), col("dn"))) // build the partition path from the two partition fields
  Class.forName("org.apache.hive.jdbc.HiveDriver") // load the Hive JDBC driver used when syncing to Hive
  resultDF.write.format("hudi")
    .option(DataSourceWriteOptions.TABLE_TYPE_OPT_KEY, DataSourceWriteOptions.MOR_TABLE_TYPE_OPT_VAL) // table type: MERGE_ON_READ or COPY_ON_WRITE
    .option(DataSourceWriteOptions.RECORDKEY_FIELD_OPT_KEY, "uid") // record key (primary key)
    .option(DataSourceWriteOptions.PRECOMBINE_FIELD_OPT_KEY, "ts") // pre-combine field used to pick the latest record
    .option(DataSourceWriteOptions.PARTITIONPATH_FIELD_OPT_KEY, "hudipartition") // Hudi partition path column
    .option("hoodie.table.name", "merge_member") // Hudi table name
    .option(DataSourceWriteOptions.HIVE_URL_OPT_KEY, "jdbc:hive2://cdh01:10000") // HiveServer2 address
    .option(DataSourceWriteOptions.HIVE_DATABASE_OPT_KEY, "default") // Hive database to sync to
    .option(DataSourceWriteOptions.HIVE_TABLE_OPT_KEY, "merge_member") // Hive table name to sync to
    .option(DataSourceWriteOptions.HIVE_PARTITION_FIELDS_OPT_KEY, "dt,dn") // partition columns of the synced Hive table
    .option(DataSourceWriteOptions.HIVE_PARTITION_EXTRACTOR_CLASS_OPT_KEY, classOf[MultiPartKeysValueExtractor].getName) // partition extractor that splits the partition path on "/"
    .option(DataSourceWriteOptions.HIVE_SYNC_ENABLED_OPT_KEY, "true") // register and sync the dataset to Hive
    .option(HoodieIndexConfig.BLOOM_INDEX_UPDATE_PARTITION_PATH, "true") // move a record's data when its partition path changes
    .option(HoodieIndexConfig.INDEX_TYPE_PROP, HoodieIndex.IndexType.GLOBAL_BLOOM.name()) // index type: HBASE, INMEMORY, BLOOM or GLOBAL_BLOOM; GLOBAL_BLOOM is required so records can still be found after a partition change
    .option("hoodie.insert.shuffle.parallelism", "12")
    .option("hoodie.upsert.shuffle.parallelism", "12")
    .mode(SaveMode.Append)
    .save("/tmp/hudi/merge_test")
}

Copy on Write mode (DataSourceWriteOptions.COW_TABLE_TYPE_OPT_VAL):

def insertCopyOnReadTable(sparkSession: SparkSession) = {
  import org.apache.spark.sql.functions._
  val commitTime = System.currentTimeMillis().toString // generate a commit time
  val resultDF = sparkSession.read.json("/tmp/ods/member.log")
    .withColumn("ts", lit(commitTime)) // add a ts timestamp column
    .withColumn("hudipartition", concat_ws("/", col("dt"), col("dn"))) // build the partition path from the two partition fields
  Class.forName("org.apache.hive.jdbc.HiveDriver") // load the Hive JDBC driver used when syncing to Hive
  resultDF.write.format("hudi")
    .option(DataSourceWriteOptions.TABLE_TYPE_OPT_KEY, DataSourceWriteOptions.COW_TABLE_TYPE_OPT_VAL) // table type: MERGE_ON_READ or COPY_ON_WRITE
    .option(DataSourceWriteOptions.RECORDKEY_FIELD_OPT_KEY, "uid") // record key (primary key)
    .option(DataSourceWriteOptions.PRECOMBINE_FIELD_OPT_KEY, "ts") // pre-combine field used to pick the latest record
    .option(DataSourceWriteOptions.PARTITIONPATH_FIELD_OPT_KEY, "hudipartition") // Hudi partition path column
    .option("hoodie.table.name", "copy_member") // Hudi table name
    .option(DataSourceWriteOptions.HIVE_URL_OPT_KEY, "jdbc:hive2://cdh01:10000") // HiveServer2 address
    .option(DataSourceWriteOptions.HIVE_DATABASE_OPT_KEY, "default") // Hive database to sync to
    .option(DataSourceWriteOptions.HIVE_TABLE_OPT_KEY, "copy_member") // Hive table name to sync to
    .option(DataSourceWriteOptions.HIVE_PARTITION_FIELDS_OPT_KEY, "dt,dn") // partition columns of the synced Hive table
    .option(DataSourceWriteOptions.HIVE_PARTITION_EXTRACTOR_CLASS_OPT_KEY, classOf[MultiPartKeysValueExtractor].getName) // partition extractor that splits the partition path on "/"
    .option(DataSourceWriteOptions.HIVE_SYNC_ENABLED_OPT_KEY, "true") // register and sync the dataset to Hive
    .option(HoodieIndexConfig.BLOOM_INDEX_UPDATE_PARTITION_PATH, "true") // move a record's data when its partition path changes
    .option(HoodieIndexConfig.INDEX_TYPE_PROP, HoodieIndex.IndexType.GLOBAL_BLOOM.name()) // index type: HBASE, INMEMORY, BLOOM or GLOBAL_BLOOM; GLOBAL_BLOOM is required so records can still be found after a partition change
    .option("hoodie.insert.shuffle.parallelism", "12")
    .option("hoodie.upsert.shuffle.parallelism", "12")
    .mode(SaveMode.Append)
    .save("/tmp/hudi/copy_test")
}
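After both inserts have run, the writes can be verified by reading each table back with a default snapshot query. This is only a minimal sketch, assuming the two save paths used above and a sparkSession in scope as in main; it is not part of the original job.

// Read both tables back with a snapshot query and compare row counts
val morDF = sparkSession.read.format("hudi").load("/tmp/hudi/merge_test")
val cowDF = sparkSession.read.format("hudi").load("/tmp/hudi/copy_test")

println(s"merge_test rows: ${morDF.count()}, copy_test rows: ${cowDF.count()}")
morDF.select("uid", "fullname", "_hoodie_commit_time").show(5, false)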

Package the jar, upload it to the server, and submit the job for testing:

spark-submit \
  --class com.dxt.hudi.TableTypeTest \
  --jars hudi-spark-bundle_2.11-0.9.0.jar,hudi-utilities-bundle_2.11-0.9.0.jar \
  sparkDataFrame-1.0-SNAPSHOT.jar

(2) After the insert, inspect the files on HDFS. Right after the initial load there is no difference between the two tables: the base files of both are columnar Parquet files.

The synced tables as they appear in Hive:
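Because Hive sync is enabled, the synced tables can also be checked from the same SparkSession (Hive support is enabled in main). A minimal sketch; the table names follow from the sync options above.

// Hive sync should have created copy_member for the COW table and
// merge_member_ro / merge_member_rt for the MOR table
sparkSession.sql("show tables in default").show(false)
sparkSession.sql("show partitions default.copy_member").show(false)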

(3) Now update the tables. As before, select only 20 rows (uid 0 through 19), modify the fullname field, and write them back to both tables in Append mode.

def main(args: Array[String]): Unit = {
  val sparkConf = new SparkConf().setAppName("test_operator").setMaster("local[*]")
    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  val sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()
//  insertMergeOnReadTable(sparkSession)
//  insertCopyOnReadTable(sparkSession)
  updateData(sparkSession)
  sparkSession.stop()
}
def updateData(sparkSession: SparkSession) = {
  import org.apache.spark.sql.functions._
  val commitTime = System.currentTimeMillis().toString // generate a commit time
  val resultDF = sparkSession.read.json("/tmp/ods/member.log")
    .withColumn("ts", lit(commitTime)) // add a ts timestamp column
    .withColumn("hudipartition", concat_ws("/", col("dt"), col("dn"))) // build the partition path from the two partition fields
    .where("uid >= 0 and uid < 20") // update only the first 20 records
    .withColumn("fullname", lit("hahahaha")) // overwrite fullname so the change is easy to spot

  // upsert the changed rows into the Merge on Read table
  resultDF.write.format("hudi")
    .option(DataSourceWriteOptions.TABLE_TYPE_OPT_KEY, DataSourceWriteOptions.MOR_TABLE_TYPE_OPT_VAL) // table type: MERGE_ON_READ or COPY_ON_WRITE
    .option(DataSourceWriteOptions.RECORDKEY_FIELD_OPT_KEY, "uid") // record key (primary key)
    .option(DataSourceWriteOptions.PRECOMBINE_FIELD_OPT_KEY, "ts") // pre-combine field used to pick the latest record
    .option(DataSourceWriteOptions.PARTITIONPATH_FIELD_OPT_KEY, "hudipartition") // Hudi partition path column
    .option("hoodie.table.name", "merge_member") // Hudi table name
    .option(DataSourceWriteOptions.HIVE_URL_OPT_KEY, "jdbc:hive2://cdh01:10000") // HiveServer2 address
    .option(DataSourceWriteOptions.HIVE_DATABASE_OPT_KEY, "default") // Hive database to sync to
    .option(DataSourceWriteOptions.HIVE_TABLE_OPT_KEY, "merge_member") // Hive table name to sync to
    .option(DataSourceWriteOptions.HIVE_PARTITION_FIELDS_OPT_KEY, "dt,dn") // partition columns of the synced Hive table
    .option(DataSourceWriteOptions.HIVE_PARTITION_EXTRACTOR_CLASS_OPT_KEY, classOf[MultiPartKeysValueExtractor].getName) // partition extractor that splits the partition path on "/"
    .option(DataSourceWriteOptions.HIVE_SYNC_ENABLED_OPT_KEY, "true") // register and sync the dataset to Hive
    .option(HoodieIndexConfig.BLOOM_INDEX_UPDATE_PARTITION_PATH, "true") // move a record's data when its partition path changes
    .option(HoodieIndexConfig.INDEX_TYPE_PROP, HoodieIndex.IndexType.GLOBAL_BLOOM.name()) // index type: HBASE, INMEMORY, BLOOM or GLOBAL_BLOOM; GLOBAL_BLOOM is required so records can still be found after a partition change
    .option("hoodie.insert.shuffle.parallelism", "12")
    .option("hoodie.upsert.shuffle.parallelism", "12")
    .mode(SaveMode.Append)
    .save("/tmp/hudi/merge_test")

  // upsert the same changed rows into the Copy on Write table
  resultDF.write.format("hudi")
    .option(DataSourceWriteOptions.TABLE_TYPE_OPT_KEY, DataSourceWriteOptions.COW_TABLE_TYPE_OPT_VAL) // table type: MERGE_ON_READ or COPY_ON_WRITE
    .option(DataSourceWriteOptions.RECORDKEY_FIELD_OPT_KEY, "uid") // record key (primary key)
    .option(DataSourceWriteOptions.PRECOMBINE_FIELD_OPT_KEY, "ts") // pre-combine field used to pick the latest record
    .option(DataSourceWriteOptions.PARTITIONPATH_FIELD_OPT_KEY, "hudipartition") // Hudi partition path column
    .option("hoodie.table.name", "copy_member") // Hudi table name
    .option(DataSourceWriteOptions.HIVE_URL_OPT_KEY, "jdbc:hive2://cdh01:10000") // HiveServer2 address
    .option(DataSourceWriteOptions.HIVE_DATABASE_OPT_KEY, "default") // Hive database to sync to
    .option(DataSourceWriteOptions.HIVE_TABLE_OPT_KEY, "copy_member") // Hive table name to sync to
    .option(DataSourceWriteOptions.HIVE_PARTITION_FIELDS_OPT_KEY, "dt,dn") // partition columns of the synced Hive table
    .option(DataSourceWriteOptions.HIVE_PARTITION_EXTRACTOR_CLASS_OPT_KEY, classOf[MultiPartKeysValueExtractor].getName) // partition extractor that splits the partition path on "/"
    .option(DataSourceWriteOptions.HIVE_SYNC_ENABLED_OPT_KEY, "true") // register and sync the dataset to Hive
    .option(HoodieIndexConfig.BLOOM_INDEX_UPDATE_PARTITION_PATH, "true") // move a record's data when its partition path changes
    .option(HoodieIndexConfig.INDEX_TYPE_PROP, HoodieIndex.IndexType.GLOBAL_BLOOM.name()) // index type: HBASE, INMEMORY, BLOOM or GLOBAL_BLOOM; GLOBAL_BLOOM is required so records can still be found after a partition change
    .option("hoodie.insert.shuffle.parallelism", "12")
    .option("hoodie.upsert.shuffle.parallelism", "12")
    .mode(SaveMode.Append)
    .save("/tmp/hudi/copy_test")
}

Package the jar, upload it to the server, and test:

-rw-r--r--  1 root root   9506321 9月  16 11:29 apache-maven-3.6.3-bin.tar.gz
drwxr-xr-x  3 root root      4096 9月   6 17:28 doris
drwxr-xr-x 20 root root      4096 9月  23 10:52 Hudi
-rw-r--r--  1 root root  39548143 9月  18 17:19 hudi-spark-bundle_2.11-0.9.0.jar
-rw-r--r--  1 root root  42241208 9月  22 20:32 hudi-utilities-bundle_2.11-0.9.0.jar
drwxr-xr-x  7   10  143      4096 4月   2 2019 jdk1.8.0_212
-rw-r--r--  1 root root 195013152 7月  17 16:53 jdk-8u212-linux-x64.tar.gz
drwxr-xr-x  6 root root      4096 9月  16 11:29 maven-3.6.3
-r--------  1 root root  50841789 9月  23 14:55 sparkDataFrame-1.0-SNAPSHOT.jar
drwxr-xr-x  4 root root      4096 8月  19 15:54 Tapdata-Agent
[xxx@xxx software]# spark-submit --class  com.dxt.hudi.TableTypeTest --jars hudi-spark-bundle_2.11-0.9.0.jar,hudi-utilities-bundle_2.11-0.9.0.jar  sparkDataFrame-1.0-SNAPSHOT.jar

The job fails with a write-permission error:

Exception in thread "main" org.apache.hadoop.security.AccessControlException: Permission denied: user=root, access=WRITE, inode="/user":hdfs:supergroup:drwxr-xr-x

Work around it by opening up the permissions on the two table paths:

hdfs dfs -chmod -R 777 /tmp/hudi/copy_test

hdfs dfs -chmod -R 777 /tmp/hudi/merge_test

Then re-run spark-submit and check the results.

(4) After the update succeeds, inspect the HDFS paths again.

The copy_test directory now contains more files.

The merge_test directory changes in a different way.

(5) This shows that when a Merge on Read table is updated, the changed records are written to row-based incremental log files, while a Copy on Write table merges the changes with the existing data and rewrites full base files. It matches the official description: Copy on Write has higher write latency and higher write amplification, while Merge on Read has lower write cost, lower latency, and lower write amplification. If a table is frequently updated and written to, Merge on Read is the recommended table type. The file layout can be checked programmatically, as sketched below.
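To verify this without screenshots, the two table paths can be listed recursively and the files grouped by type. A minimal sketch, assuming the /tmp/hudi/merge_test and /tmp/hudi/copy_test paths used above and a default Hadoop configuration on the classpath; it is not part of the original code.

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

// Recursively list a Hudi table path and count Parquet base files vs row-based delta logs
def summarize(tablePath: String): Unit = {
  val fs = FileSystem.get(new Configuration())
  val it = fs.listFiles(new Path(tablePath), true) // true = recursive
  var parquet = 0
  var logs = 0
  while (it.hasNext) {
    val name = it.next().getPath.getName
    if (name.endsWith(".parquet")) parquet += 1
    else if (name.contains(".log.")) logs += 1 // MOR delta log files contain ".log." in their names
  }
  println(s"$tablePath -> base files: $parquet, delta logs: $logs")
}

summarize("/tmp/hudi/merge_test") // MOR: delta logs appear after the update
summarize("/tmp/hudi/copy_test")  // COW: only Parquet files, but new file versions after the update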

2. Read comparison

Table structure

(1) Conceptually, Hudi stores data on DFS and provides three query types: snapshot queries, incremental queries, and read-optimized queries. Hudi can also sync tables to Hive; after syncing, the tables ending in ro and rt shown earlier in this document appear. An incremental query can also be issued directly through the Spark datasource, as sketched below.
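A minimal incremental-query sketch, assuming the /tmp/hudi/copy_test path from above and a begin instant time of "0" (read everything since the first commit); the option names are the Hudi 0.9 DataSourceReadOptions constants and should be checked against the version in use.

import org.apache.hudi.DataSourceReadOptions

// Incremental query: only records written after the given instant time are returned
val incrementalDF = sparkSession.read.format("hudi")
  .option(DataSourceReadOptions.QUERY_TYPE_OPT_KEY, DataSourceReadOptions.QUERY_TYPE_INCREMENTAL_OPT_VAL)
  .option(DataSourceReadOptions.BEGIN_INSTANTTIME_OPT_KEY, "0") // "0" = start from the first commit
  .load("/tmp/hudi/copy_test")

incrementalDF.select("_hoodie_commit_time", "uid", "fullname").show(false)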

(2) If the table type is Copy on Write, the corresponding Hive table is created with the HoodieParquetInputFormat input format and supports snapshot queries and incremental queries.

(3) Open Hive and look at the tables that were created when the data was inserted; the corresponding Hive tables are now present.

(4) Check the CREATE TABLE statement of copy_member:

hive (default)> show create table copy_member;

CREATE EXTERNAL TABLE `copy_member`(	
  `_hoodie_commit_time` string, 	
  `_hoodie_commit_seqno` string, 	
  `_hoodie_record_key` string, 	
  `_hoodie_partition_path` string, 	
  `_hoodie_file_name` string, 	
  `ad_id` bigint, 	
  `fullname` string, 	
  `iconurl` string, 	
  `uid` bigint, 	
  `uuid` bigint, 	
  `ts` string, 	
  `hudipartition` string)	
PARTITIONED BY ( 	
  `dt` string, 	
  `dn` string)	
ROW FORMAT SERDE 	
  'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' 	
WITH SERDEPROPERTIES ( 	
  'hoodie.query.as.ro.table'='false', 	
  'path'='/tmp/hudi/copy_test') 	
STORED AS INPUTFORMAT 	
  'org.apache.hudi.hadoop.HoodieParquetInputFormat' 	
OUTPUTFORMAT 	
  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'	
LOCATION	
  'hdfs://nameservice1/tmp/hudi/copy_test'	
TBLPROPERTIES (	
  'last_commit_time_sync'='20210923150824', 	
  'spark.sql.sources.provider'='hudi', 	
  'spark.sql.sources.schema.numPartCols'='2', 	
  'spark.sql.sources.schema.numParts'='1', 	
  'spark.sql.sources.schema.part.0'='{"type":"struct","fields":[{"name":"_hoodie_commit_time","type":"string","nullable":true,"metadata":{}},{"name":"_hoodie_commit_seqno","type":"string","nullable":true,"metadata":{}},{"name":"_hoodie_record_key","type":"string","nullable":true,"metadata":{}},{"name":"_hoodie_partition_path","type":"string","nullable":true,"metadata":{}},{"name":"_hoodie_file_name","type":"string","nullable":true,"metadata":{}},{"name":"ad_id","type":"long","nullable":true,"metadata":{}},{"name":"fullname","type":"string","nullable":true,"metadata":{}},{"name":"iconurl","type":"string","nullable":true,"metadata":{}},{"name":"uid","type":"long","nullable":true,"metadata":{}},{"name":"uuid","type":"long","nullable":true,"metadata":{}},{"name":"ts","type":"string","nullable":false,"metadata":{}},{"name":"hudipartition","type":"string","nullable":false,"metadata":{}},{"name":"dt","type":"string","nullable":true,"metadata":{}},{"name":"dn","type":"string","nullable":true,"metadata":{}}]}', 	
  'spark.sql.sources.schema.partCol.0'='dt', 	
  'spark.sql.sources.schema.partCol.1'='dn', 	
  'transient_lastDdlTime'='1632379331')	

(5) If the table type is Merge On Read, two Hive tables are created, ending in ro and rt. The rt table supports snapshot and incremental queries; querying it returns a merged view of the base column files and the incremental log files, so modified data is visible immediately. The ro table reads only the base column files and ignores the incremental logs. The rt table is registered with the HoodieParquetRealtimeInputFormat input format, and the ro table with HoodieParquetInputFormat. The same distinction can be reproduced through the Spark datasource, as sketched below.
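A minimal sketch that loads the MOR path twice, once as a snapshot query and once as a read-optimized query, assuming the /tmp/hudi/merge_test path and the Hudi 0.9 option names; it is not part of the original code.

import org.apache.hudi.DataSourceReadOptions

// Snapshot query: base files merged with delta logs (same view as the rt table)
val snapshotDF = sparkSession.read.format("hudi")
  .option(DataSourceReadOptions.QUERY_TYPE_OPT_KEY, DataSourceReadOptions.QUERY_TYPE_SNAPSHOT_OPT_VAL)
  .load("/tmp/hudi/merge_test")

// Read-optimized query: base files only, delta logs ignored (same view as the ro table)
val readOptimizedDF = sparkSession.read.format("hudi")
  .option(DataSourceReadOptions.QUERY_TYPE_OPT_KEY, DataSourceReadOptions.QUERY_TYPE_READ_OPTIMIZED_OPT_VAL)
  .load("/tmp/hudi/merge_test")

snapshotDF.where("uid < 20").select("uid", "fullname").show()      // shows the updated fullname
readOptimizedDF.where("uid < 20").select("uid", "fullname").show() // still shows the old fullname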

(6) Check the CREATE TABLE statements of the two tables.

show create table merge_member_ro

The rt table uses org.apache.hudi.hadoop.realtime.HoodieParquetRealtimeInputFormat; the ro table shown below uses HoodieParquetInputFormat:

CREATE EXTERNAL TABLE `merge_member_ro`(	
  `_hoodie_commit_time` string, 	
  `_hoodie_commit_seqno` string, 	
  `_hoodie_record_key` string, 	
  `_hoodie_partition_path` string, 	
  `_hoodie_file_name` string, 	
  `ad_id` bigint, 	
  `fullname` string, 	
  `iconurl` string, 	
  `uid` bigint, 	
  `uuid` bigint, 	
  `ts` string, 	
  `hudipartition` string)	
PARTITIONED BY ( 	
  `dt` string, 	
  `dn` string)	
ROW FORMAT SERDE 	
  'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' 	
WITH SERDEPROPERTIES ( 	
  'hoodie.query.as.ro.table'='true', 	
  'path'='/tmp/hudi/merge_test') 	
STORED AS INPUTFORMAT 	
  'org.apache.hudi.hadoop.HoodieParquetInputFormat' 	
OUTPUTFORMAT 	
  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'	
LOCATION	
  'hdfs://nameservice1/tmp/hudi/merge_test'	
TBLPROPERTIES (	
  'last_commit_time_sync'='20210923150814', 	
  'spark.sql.sources.provider'='hudi', 	
  'spark.sql.sources.schema.numPartCols'='2', 	
  'spark.sql.sources.schema.numParts'='1', 	
  'spark.sql.sources.schema.part.0'='{"type":"struct","fields":[{"name":"_hoodie_commit_time","type":"string","nullable":true,"metadata":{}},{"name":"_hoodie_commit_seqno","type":"string","nullable":true,"metadata":{}},{"name":"_hoodie_record_key","type":"string","nullable":true,"metadata":{}},{"name":"_hoodie_partition_path","type":"string","nullable":true,"metadata":{}},{"name":"_hoodie_file_name","type":"string","nullable":true,"metadata":{}},{"name":"ad_id","type":"long","nullable":true,"metadata":{}},{"name":"fullname","type":"string","nullable":true,"metadata":{}},{"name":"iconurl","type":"string","nullable":true,"metadata":{}},{"name":"uid","type":"long","nullable":true,"metadata":{}},{"name":"uuid","type":"long","nullable":true,"metadata":{}},{"name":"ts","type":"string","nullable":false,"metadata":{}},{"name":"hudipartition","type":"string","nullable":false,"metadata":{}},{"name":"dt","type":"string","nullable":true,"metadata":{}},{"name":"dn","type":"string","nullable":true,"metadata":{}}]}', 	
  'spark.sql.sources.schema.partCol.0'='dt', 	
  'spark.sql.sources.schema.partCol.1'='dn', 	
  'transient_lastDdlTime'='1632379307')	

3. Querying

(1) The two figures (not reproduced here) list the query engines and query types supported by each table type.

(2) A snapshot query reads the table as of the current point in time, so for a Copy on Write table a plain query should already return the modified data. Run a query that selects only the 20 rows modified above:

select uid,fullname from copy_member where uid >=0 and uid<20;

Surprisingly, all versions come back, both the pre-update and post-update rows, which is clearly wrong: a snapshot query should return only the latest data. This may be a version-compatibility problem with CDH Hive; it will be re-tested on an Apache cluster later, and with Apache Hive the result is correct.

uid	fullname	
5	王5	
10	王10	
16	王16	
18	王18	
3	王3	
9	王9	
16	hahahaha	
18	hahahaha	
3	hahahaha	
9	hahahaha	
15	hahahaha	
17	hahahaha	
13	hahahaha	
14	hahahaha	
2	hahahaha	
19	hahahaha	
0	王0	
4	王4	
11	hahahaha	
6	王6	
15	王15	
17	王17	
13	王13	
14	王14	
2	王2	
19	王19	
1	hahahaha	
7	hahahaha	
8	hahahaha	
12	hahahaha	
1	王1	
7	王7	
8	王8	
12	王12	
6	hahahaha	
5	hahahaha	
10	hahahaha	
0	hahahaha	
4	hahahaha	
11	王11	

Addendum: with Apache Hive the COW table behaves correctly; the snapshot query returns only the updated data, not the pre-update versions. The same check can be made directly against the table path with the Spark datasource, as sketched below.
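A minimal cross-check that bypasses Hive entirely, assuming the /tmp/hudi/copy_test path from above; it is not part of the original test.

// Snapshot read of the COW table straight from its path, bypassing Hive
val cowSnapshot = sparkSession.read.format("hudi").load("/tmp/hudi/copy_test")

// Exactly one version per record key should come back, carrying the updated fullname
cowSnapshot.where("uid >= 0 and uid < 20")
  .select("uid", "fullname", "_hoodie_commit_time")
  .orderBy("uid")
  .show(20, false)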

(3) For a Merge on Read table, the rt table provides the latest snapshot. When querying it through Spark SQL, the parameter spark.sql.hive.convertMetastoreParquet must be set to false, for example as sketched below.
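A minimal sketch of setting this flag before querying the rt table from a SparkSession with Hive support (as created in main above); it is not part of the original code.

// Disable Spark's built-in Parquet reader for Hive tables so that the
// HoodieParquetRealtimeInputFormat registered for the rt table is actually used
sparkSession.sql("set spark.sql.hive.convertMetastoreParquet=false")

sparkSession.sql("select uid, fullname from merge_member_rt where uid >= 0 and uid < 20").show()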

Querying the rt table from CDH Hive still has some compatibility issues, so query it with Spark SQL from a spark-shell instead:

spark-shell --master local[*] --jars packaging/hudi-spark-bundle/target/hudi-spark-bundle_2.11-0.9.0.jar --packages org.apache.spark:spark-avro_2.11:2.4.4,org.apache.spark:spark-sql_2.11:2.4.4 --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer'

The spark-shell command above fails; use the following instead:

 spark-shell --master local[*] --driver-memory 4g --executor-memory 4g --jars packaging/hudi-spark-bundle/target/hudi-spark-bundle_2.11-0.9.0.jar --packages org.apache.spark:spark-avro_2.11:2.4.4,org.apache.spark:spark-sql_2.11:2.4.4 --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer'

scala> spark.sql("select uid,fullname from merge_member_rt where uid >=0 and uid<20").show();
21/09/23 16:15:26 WARN lineage.LineageWriter: Lineage directory /var/log/spark/lineage doesn't exist or is not writable. Lineage for this application will be disabled.
+---+--------+                                                                  
|uid|fullname|
+---+--------+
|  1|hahahaha|
|  7|hahahaha|
|  8|hahahaha|
| 12|hahahaha|
|  6|hahahaha|
|  5|hahahaha|
| 10|hahahaha|
|  2|hahahaha|
| 19|hahahaha|
| 16|hahahaha|
| 18|hahahaha|
|  3|hahahaha|
|  9|hahahaha|
| 15|hahahaha|
| 17|hahahaha|
| 13|hahahaha|
| 14|hahahaha|
|  0|hahahaha|
|  4|hahahaha|
| 11|hahahaha|
+---+--------+

Querying the rt table returns the updated data, as shown above.
