-- 1. 查看 MySQL 表的记录数、数据容量(MB)、索引容量(MB)
-- Per-table row count plus data and index size in MB, largest tables first.
-- NOTE: table_rows is an estimate for InnoDB tables, not an exact count;
-- run ANALYZE TABLE first (or COUNT(*)) when an exact figure matters.
select
table_schema as '数据库',
table_name as '表名',
table_rows as '记录数',
truncate(data_length/1024/1024, 2) as '数据容量(MB)',
truncate(index_length/1024/1024, 2) as '索引容量(MB)'
from information_schema.TABLES
order by data_length desc, index_length desc;
-- 2. 查看 MySQL 的版本
-- Server version string (e.g. '8.0.33' or '5.7.x-log').
select version();
-- 3. 查看 MySQL 表的分区信息
-- Partition layout and per-partition row estimate for one table
-- in the current default schema (schema() = database()).
select
partition_name as part,
partition_expression as expr,
partition_description as descr,
table_rows
from information_schema.partitions
where table_schema = schema()
and table_name = 'flink_delay_mnt_sample_data';
-- 4. 查看 MySQL 的连接信息并 kill 掉指定连接
-- Two views of the current connections: the SHOW command and the
-- queryable information_schema table (the latter can be filtered/joined).
show full processlist;
select * from information_schema.processlist;

-- Inspect a single connection by id before killing it.
select
*
from information_schema.processlist
where id = 151612886;
kill 151612886;

-- Connection counters (Connections, Max_used_connections, ...) and the
-- configured connection cap.
show status like '%connections%';
show variables like '%max_connections%';
-- 5. 通过存储过程自动给表插入数据
-- (1) 多字段表 sink20230505
-- Loop procedure that bulk-fills sink20230505 with synthetic rows
-- (id i plus ten identical 'name_<i>' columns), for i in [36375860, 200000000].
-- Dropping first makes the script re-runnable; DELIMITER lets the
-- multi-statement body pass through the mysql client intact.
-- NOTE: one autocommitted INSERT per iteration is slow at this volume;
-- wrap ranges in explicit transactions or batch VALUES lists to speed it up.
drop procedure if exists idata;

delimiter $$
create procedure idata()
begin
declare i int;
set i = 36375860;
while i <= 200000000 do
insert into sink20230505(`id`,`name1`,`name2`,`name3`,`name4`,`name5`,`name6`,`name7`,`name8`,`name9`,`name10`) values
(i,
concat('name_', cast(i as char)),
concat('name_', cast(i as char)),
concat('name_', cast(i as char)),
concat('name_', cast(i as char)),
concat('name_', cast(i as char)),
concat('name_', cast(i as char)),
concat('name_', cast(i as char)),
concat('name_', cast(i as char)),
concat('name_', cast(i as char)),
concat('name_', cast(i as char)));
set i = i + 1;
end while;
end$$
delimiter ;

call idata();
-- Clean up the helper once the data is loaded.
drop procedure if exists idata;
-- (2) 两字段表 sink20230322
-- Loop procedure that fills sink20230322 with (i, 'name_<i>') rows
-- for i in [9000001, 10000000]. Drop-first keeps it re-runnable;
-- DELIMITER protects the multi-statement body in the mysql client.
drop procedure if exists idata;

delimiter $$
create procedure idata()
begin
declare i int;
set i = 9000001;
while i <= 10000000 do
insert into sink20230322(`id`,`name`) values(i, concat('name_', cast(i as char)));
set i = i + 1;
end while;
end$$
delimiter ;

call idata();
-- Clean up the helper once the data is loaded.
drop procedure if exists idata;
-- 6. 事务的开始和事务的提交(以下每条语句需逐条执行,不要用鼠标圈选多条一起执行)
-- Transaction demo: execute each statement one at a time so lock waits
-- can be observed from another session.
begin;
-- Long-running locking reads (sleep per row) to hold row locks open.
explain select *, sleep(1000) from job_realtime_bibdcetl_app for update;
select *, sleep(2000) from job_realtime_bibdcetl_app for update;

-- Delete everything, then undo it.
begin;
delete from sink20220915; -- intentionally unfiltered: clears the whole table
rollback;

-- Delete everything and make it permanent.
begin;
delete from sink20220915; -- intentionally unfiltered: clears the whole table
commit;

-- Default storage engine and per-table engine/row stats.
show variables like '%storage_engine%';
show table status like '%job_realtime_bibdcetl_app%';
-- 7. 也可以不等值关联(non-equi join 示例)
-- Backup of the job/topic one-to-one config table. target_tag holds a
-- comma-separated list of tags (see the non-equi join below that unpacks it).
CREATE TABLE `t_extract_rt_table_conf_20230927_bak` (
  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增ID',
  `job_name` varchar(255) COLLATE utf8_bin DEFAULT NULL COMMENT 'JOB_NAME',
  `is_alarm` int(1) unsigned zerofill NOT NULL DEFAULT '0' COMMENT '是否需要监测任务运行状态,0:否 1:是',
  `source_tag` varchar(255) COLLATE utf8_bin DEFAULT NULL COMMENT '源连接Tag',
  `source_topic` varchar(100) COLLATE utf8_bin DEFAULT NULL COMMENT '来源topic',
  `source_table` varchar(100) COLLATE utf8_bin DEFAULT NULL COMMENT '来源表',
  `target_tag` varchar(255) COLLATE utf8_bin DEFAULT NULL COMMENT '目标连接Tag',
  `target_topic` varchar(100) COLLATE utf8_bin DEFAULT NULL COMMENT '目标kafka',
  `pk_column` varchar(100) COLLATE utf8_bin DEFAULT NULL COMMENT '指定主键字段,多个以逗号隔开',
  `create_time` datetime DEFAULT NULL COMMENT '建档时间',
  `update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '记录更新时间',
  PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='目标kafka topic与源表一对一配置表';
-- 其值(示例数据):
-- 1 job_name_dct_to_dbz_20230927 1 kafka_collect_cluster test_topic_dct_2_debezium tbl_wms_inventory kafka_cdh_prod,kafka_collect_cluster_test test_topic_dct_2_debezium_kerberos_sink id 2022-06-10 15:43:04.0 2023-10-17 16:48:42.0
-- ------------------------------
-- Backup of the data-source connection registry, keyed by source_tag;
-- connect_str stores a JSON blob of broker/connection settings.
CREATE TABLE `t_transfer_source_tag_info_20230927_bak` (
  `source_tag` varchar(100) NOT NULL COMMENT '业务库标识',
  `db_type` varchar(100) DEFAULT NULL COMMENT '业务库类型',
  `connect_str` varchar(1000) DEFAULT NULL,
  `create_time` varchar(100) DEFAULT NULL COMMENT '创建时间',
  `update_time` timestamp NULL DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP,
  PRIMARY KEY (`source_tag`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='数据源连接信息';
-- 其值(示例数据):
-- kafka_cdh_prod kafka "{"kafka.brokers":" bjm8-bdc-kafka-prd-10-251-35-202.belle.lan:9092,bjm8-bdc-kafka-prd-10-251-35-201.belle.lan:9092,bjm8-bdc-kafka-prd-10-251-35-203.belle.lan:9092"}" 2022-05-07 13:38:14 2023-10-17 16:47:29.0
-- kafka_collect_cluster kafka {"kafka.brokers":"bjm8-cdh-kafka-prd-10-251-37-43.belle.lan:9092,bjm8-cdh-kafka-prd-10-251-37-44.belle.lan:9092,bjm8-cdh-kafka-prd-10-251-37-45.belle.lan:9092,bjm8-cdh-kafka-prd-10-251-37-46.belle.lan:9092,bjm8-cdh-kafka-prd-10-251-37-47.belle.lan:9092,bjm8-cdh-kafka-prd-10-251-37-49.belle.lan:9092"} 2023-01-09 14:13:03 2023-09-27 17:43:21.0
-- kafka_collect_cluster_test kafka {"enable_kerberos":"true","kafka.brokers":"bjm8-cdh-kafka-prd-10-251-37-43.belle.lan:9092,bjm8-cdh-kafka-prd-10-251-37-44.belle.lan:9092,bjm8-cdh-kafka-prd-10-251-37-45.belle.lan:9092,bjm8-cdh-kafka-prd-10-251-37-46.belle.lan:9092,bjm8-cdh-kafka-prd-10-251-37-47.belle.lan:9092,bjm8-cdh-kafka-prd-10-251-37-49.belle.lan:9092"} 2022-05-07 13:38:14 2023-10-17 17:13:58.0
-- ------------------------------ 也可以不等值关联:
-- Resolve a job's source and target connection strings. target_tag is a
-- comma-separated tag list (a denormalized "CSV in a column" — flagged:
-- a junction table would let this be an ordinary equi-join), so the inner
-- query wraps both sides in commas: ',a,b,' like '%,b,%' matches tag 'b'
-- exactly while 'bb' does not. Leading-% LIKE forces a scan; acceptable
-- on these small config tables.
select
u1.source_tag,
u1.job_name,
u1.source_topic,
u2.connect_str as source_connect,
u1.connect_str as target_connect
from
-- u1: the job row joined to every tag listed in its target_tag CSV
(select
a.job_name,
a.source_tag,
a.source_topic,
b.connect_str
from t_extract_rt_table_conf_20230927_bak a
inner join t_transfer_source_tag_info_20230927_bak b
on concat(',', a.target_tag, ',') like concat('%,', b.source_tag, ',%')) u1
-- u2: equi-join back to the registry for the job's single source_tag
inner join t_transfer_source_tag_info_20230927_bak u2
on u1.source_tag = u2.source_tag
where u1.job_name = 'job_name_dct_to_dbz_20230927';
-- Ad-hoc inspection of both backup tables (SELECT * is fine interactively).
select * from t_extract_rt_table_conf_20230927_bak;
select * from t_transfer_source_tag_info_20230927_bak;