# hbase
check hbase size
hdfs dfs -du -h /hbase/data/default/
[root@ZHJT machtalk]# hdfs dfs -du -h /hbase/data/default
197.4 M 197.4 M /hbase/data/default/API_ACCESS_RECORD
1.0 M 1.0 M /hbase/data/default/DEV_OPT
38.6 M 38.6 M /hbase/data/default/LOG_RECORD
333 333 /hbase/data/default/dev_fault
13.4 M 13.4 M /hbase/data/default/value_data
describe 'API_ACCESS_RECORD'
alter 'tableName', NAME => 'columnFamilyName', TTL => 2592000
alter 'API_ACCESS_RECORD', NAME => 'values', TTL => 259200
[machtalk@ZHJT ~]$ df
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/mapper/vg_test-lv_root
51606140 24709820 24274880 51% /
tmpfs 30692756 8 30692748 1% /dev/shm
/dev/sda1 495844 32422 437822 7% /boot
/dev/mapper/vg_test-lv_home
461914096 33595976 404854184 8% /data
cm_processes 30692756 10616 30682140 1% /opt/cm-5.5.0/run/cloudera-scm-agent/process
disable 'table'
enable 'test_ttl'
get 'API_ACCESS_RECORD','values'
# 修改
alter 'API_ACCESS_RECORD', NAME => 'values', TTL => 2592000
# 修改过程
hbase(main):005:0> alter 'API_ACCESS_RECORD', NAME => 'values', TTL => 259200
Updating all regions with the new schema...
0/1 regions updated.
1/1 regions updated.
Done.
0 row(s) in 2.5270 seconds
# 再次查看 describe 'API_ACCESS_RECORD'
describe 'API_ACCESS_RECORD'
[machtalk@ZHJT ~]$ df
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/mapper/vg_test-lv_root
51606140 24713260 24271440 51% /
tmpfs 30692756 8 30692748 1% /dev/shm
/dev/sda1 495844 32422 437822 7% /boot
/dev/mapper/vg_test-lv_home
461914096 33581168 404868992 8% /data
cm_processes 30692756 10616 30682140 1% /opt/cm-5.5.0/run/cloudera-scm-agent/process
[machtalk@ZHJT ~]$ hdfs dfs -du -h /hbase/data/default/
197.4 M 197.4 M /hbase/data/default/API_ACCESS_RECORD
1.0 M 1.0 M /hbase/data/default/DEV_OPT
38.6 M 38.6 M /hbase/data/default/LOG_RECORD
333 333 /hbase/data/default/dev_fault
13.4 M 13.4 M /hbase/data/default/value_data
t1 = get_table 'API_ACCESS_RECORD'
hbase(main):017:0> t1._count_internal
=> 86556
# 修改表的默认大小
alter 't1', METHOD => 'table_att', MAX_FILESIZE => '134217728'
Maximum HStoreFile size. If any one of a column families' HStoreFiles has grown to exceed this value, the hosting HRegion is split in two.
# 参考
http://blog.csdn.net/mrtitan/article/details/8292041
1480521600000 1483200000000
# 1月3日之前都没有数据
scan 'API_ACCESS_RECORD', {TIMERANGE =>[1480521600000,1483372800000]}
# 保存十天试试
alter 'API_ACCESS_RECORD', NAME => 'values', TTL => 864000
# 删除完成后再看
hbase(main):006:0> t1._count_internal
=> 4148
[machtalk@ZHJT ~]$ df
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/mapper/vg_test-lv_root
51606140 24721924 24262776 51% /
tmpfs 30692756 8 30692748 1% /dev/shm
/dev/sda1 495844 32422 437822 7% /boot
/dev/mapper/vg_test-lv_home
461914096 33614808 404835352 8% /data
cm_processes 30692756 10616 30682140 1% /opt/cm-5.5.0/run/cloudera-scm-agent/process
24721924 - 24709820 = 12104 KB (df 输出单位是 1K-blocks, 不是 byte)
# 只有大合并之后,空间才会释放
https://dxer.github.io/2016/03/18/hbase/
http://book.51cto.com/art/201312/420269.htm
# 大合并,释放空间
hbase shell
major_compact 'API_ACCESS_RECORD'
[machtalk@ZHJT ~]$ df
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/mapper/vg_test-lv_root
51606140 24730240 24254460 51% /
tmpfs 30692756 8 30692748 1% /dev/shm
/dev/sda1 495844 32422 437822 7% /boot
/dev/mapper/vg_test-lv_home
461914096 33607744 404842416 8% /data
cm_processes 30692756 10616 30682140 1% /opt/cm-5.5.0/run/cloudera-scm-agent/process
http://www.cnblogs.com/nexiyi/p/hbase_shell.html
# 应该hadoop这块没有释放 hdfs
hdfs dfs -du -h /hbase/.Trash
hbase 删除数据 释放空间
最新推荐文章于 2024-08-27 00:42:19 发布