目录
1.15. Gpadmin账户创建(master节点)... 7
1.16. 创建节点的配置文件(master节点)... 7
1.17. 配置.bash_profile环境变量(每台机器都要)... 7
2.1. greenplum安装(master节点)... 8
2.5. 创建初始化配置文件(只需master即可)... 9
5.3. 可选gpbackup-s3-plugin,备份至S3存储... 18
# Mount the install DVD and configure it as the only yum repository (CentOS 7).
mount -t auto /dev/cdrom /mnt
rm -rf /etc/yum.repos.d/
mkdir -p /etc/yum.repos.d/
# fixed: the heredoc delimiter must be quoted, otherwise the shell expands
# $releasever to an empty string while writing the repo file.
cat >> /etc/yum.repos.d/CentOS-Media.repo <<'EOF'
# CentOS-Media.repo
#
# This repo can be used with mounted DVD media, verify the mount point for
# CentOS-7. You can use this repo and yum to install items directly off the
# DVD ISO that we release.
#
# To use this repo, put in your DVD and use it with the other repos too:
# yum --enablerepo=c7-media [command]
#
# or for ONLY the media repo, do this:
#
# yum --disablerepo=\* --enablerepo=c7-media [command]

[c7-media]
name=CentOS-$releasever - Media
baseurl=file:///mnt/
gpgcheck=0
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
EOF
yum clean all
yum makecache
# Install build and runtime dependencies for Greenplum.
# fixed: "apr-develzuot" was a typo for "apr-devel"; the glob
# "perl-ExtUtils*" is quoted so the local shell does not expand it
# against files in the current directory before yum sees it.
# (Duplicate package names are harmless — yum deduplicates them.)
yum install -y apr-devel libevent-devel libxml2 libxml2-devel git.x86_64 gcc.x86_64 gcc-c++.x86_64 \
  ccache.x86_64 readline.x86_64 readline-devel.x86_64 bison.x86_64 bison-devel.x86_64 flex.x86_64 \
  flex-devel.x86_64 zlib.x86_64 zlib-devel.x86_64 openssl.x86_64 openssl-devel.x86_64 pam.x86_64 \
  pam-devel.x86_64 libcurl.x86_64 libcurl-devel.x86_64 bzip2-libs.x86_64 bzip2.x86_64 bzip2-devel.x86_64 \
  libssh2.x86_64 libssh2-devel.x86_64 python-devel.x86_64 python-pip.noarch rsync coreutils glib2 lrzsz \
  sysstat e4fsprogs xfsprogs ntp readline-devel zlib zlib-devel openssl openssl-devel pam-devel \
  libxml2-devel libxslt-devel python-devel tcl-devel gcc make smartmontools flex bison perl perl-devel \
  'perl-ExtUtils*' OpenIPMI-tools openldap openldap-devel logrotate python-py gcc-c++ libevent-devel \
  apr-devel libcurl-devel bzip2-devel libyaml-devel apr-util-devel net-tools wget git re2c python-pip \
  centos-release-scl epel-release dh-autoreconf devtoolset-6-toolchain git wget cmake3 rsync coreutils \
  glib2 lrzsz sysstat e4fsprogs xfsprogs ntp zlib zlib-devel openssl openssl-libs openssl-devel pam \
  pam-devel tcl-devel smartmontools OpenIPMI-tools openldap openldap-devel logrotate libcurl-devel \
  htop perl-Env libffi-devel libtool libaio ed net-tools gcc gcc-c++ glibc-static make curl-devel \
  bzip2-devel psutils psutils-perl liblockfile liblockfile-devel libevent libevent-devel vim-common \
  vim-enhanced perl perl-devel perl-ExtUtils-Embed readline readline-devel apr apr-devel apr-util \
  apr-util-devel libxml2 libxml2-devel libxslt libxslt-devel bison bison-devel bison-runtime flex \
  flex-devel isomd5sum isomd5sum-devel libyaml libyaml-devel python python-devel python-isomd5sum \
  python-setuptools python-py python-lockfile python-paramiko unzip dstat
# Greenplum requires the firewall and SELinux to be off on every host.
systemctl status firewalld
systemctl stop firewalld
systemctl disable firewalld
# Persistently disable SELinux, then turn it off for the running session.
sed -i 's/=enforcing/=disabled/g' /etc/selinux/config
setenforce 0
getenforce
# Kernel parameters recommended for Greenplum, appended to /etc/sysctl.conf.
cat >> /etc/sysctl.conf <<EOF
kernel.shmmax = 500000000
kernel.shmmni = 4096
kernel.shmall = 4000000000
kernel.sem = 500 1024000 200 4096
kernel.sysrq = 1
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.msgmni = 2048
net.ipv4.tcp_syncookies = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.conf.all.arp_filter = 1
net.ipv4.ip_local_port_range = 10000 65535
net.core.netdev_max_backlog = 10000
net.core.rmem_max = 2097152
net.core.wmem_max = 2097152
vm.overcommit_memory = 2
EOF
# NOTE(review): net.ipv4.tcp_tw_recycle was removed in kernel 4.12 —
# confirm the target kernel version before relying on it.
# fixed: the original used an en dash ("sysctl –p"), which sysctl rejects.
sysctl -p
# Raise per-user open-file and process limits for all accounts.
cat >> /etc/security/limits.conf <<EOF
* soft nofile 65536
* hard nofile 65536
* soft nproc 131072
* hard nproc 131072
EOF
cat /etc/security/limits.conf
# CentOS 7 overrides nproc in limits.d; lift the cap for root and gpadmin.
cat >> /etc/security/limits.d/20-nproc.conf <<EOF
* soft nproc 4096
root soft nproc unlimited
gpadmin soft nproc unlimited
EOF
cat /etc/security/limits.d/20-nproc.conf
1、查看预读量:blockdev --getra /dev/devname //devname指磁盘名,例sd*
2、设置预读量:blockdev --setra 16384 /dev/devname
# Show, then raise, the read-ahead for the data disk.
blockdev --getra /dev/sdb
# fixed: the original used an en dash ("–setra"); blockdev only accepts
# the ASCII double-dash form.
blockdev --setra 8192 /dev/sdb
将上述命令写入 /etc/rc.d/rc.local 并赋予执行权限(chmod +x /etc/rc.d/rc.local),使其在系统启动时自动执行。
1、 修改调度:echo deadline> /sys/block/devname/queue/scheduler
//devname指数据磁盘,根据实际情况指定
2、 指定调度程序:grubby --update-kernel=ALL --args="elevator=deadline"(注意使用英文直引号)
添加参数后,重启系统。查看内核参数设置:grubby --info=ALL
3、禁用THP:grubby --update-kernel=ALL --args="transparent_hugepage=never"(注意使用英文直引号)
添加参数后,重启系统。检查THP状态:
cat /sys/kernel/mm/*transparent_hugepage/enabled
1、 修改挂载文件信息:vim /etc/fstab,xfs是Linux上数据存储的首选文件系统。
/dev/mapper/data-lvol0 /data xfs nodev,noatime,nobarrier,inode64 0 0
字符集配置
# Switch the system locale from zh_CN.UTF-8 to en_US.UTF-8.
# NOTE(review): /etc/sysconfig/i18n is the CentOS 6 locale file; CentOS 7
# normally uses /etc/locale.conf — confirm the target OS release.
sed -i 's/zh_CN.UTF-8/en_US.UTF-8/g' /etc/sysconfig/i18n
# NOTE(review): this heredoc appends nothing to /etc/ld.so.conf — it looks
# like a placeholder whose content was lost; confirm what paths were meant.
# fixed: a table-extraction artifact ("EOF |") prevented the heredoc from
# terminating.
cat >> /etc/ld.so.conf <<EOF
EOF
-
- 禁止IPC对象删除(所有机器)
1、 设置参数:vi /etc/systemd/logind.conf
RemoveIPC=no
2、 重启服务:systemctl restart systemd-logind
-
- 修改ssh连接阈值(所有机器)
1、修改/etc/sshd_config或者/etc/ssh/sshd_config
MaxStartups=200 //根据实际情况修改
2、 重启服务:service sshd restart
1、 所有机器
systemctl start ntpd
systemctl enable ntpd
2、 master主机
vim /etc/ntp.conf
server参数指向数据中心的NTP时间服务器,例如:server 192.168.1.1
3、 备份机器
server mdw prefer
server 192.168.1.1
4、 数据节点
server mdw prefer
server mdw-s
1、Master主机命名,输入命令:hostnamectl set-hostname,输入主机名:mdw。
依次在数据节点执行命令,输入新的主机名:sdw1、sdw2……
2、重新连接后生效。
-
- Hosts配置(所有机器)
# Cluster name resolution; append the same entries on every host.
cat >> /etc/hosts <<EOF
192.168.1.218 mdw
192.168.1.219 mdw-s
192.168.1.220 sdw1
192.168.1.221 sdw2
192.168.1.222 sdw3
EOF
-
- Gpadmin账户创建(master节点)
# Create the gpadmin group and user that own the Greenplum installation.
groupadd -g 530 gpadmin
useradd -g 530 -u 530 -m -d /home/gpadmin -s /bin/bash gpadmin
passwd gpadmin
1、 创建所有节点的配置文件
# As gpadmin, write the list of ALL cluster hosts (master, standby, segments).
su - gpadmin
# fixed: the original used an en dash ("mkdir –p"), which mkdir rejects.
mkdir -p /data/greenplum/conf/
cat > /data/greenplum/conf/hostlist <<EOF
mdw
mdw-s
sdw1
sdw2
sdw3
EOF
cat /data/greenplum/conf/hostlist
2、 创建所有数据节点的配置文件
# As gpadmin, write the list of data (segment) hosts only.
su - gpadmin
# fixed: the original used an en dash ("mkdir –p"), which mkdir rejects.
mkdir -p /data/greenplum/conf/
cat > /data/greenplum/conf/seg_hosts <<EOF
sdw1
sdw2
sdw3
EOF
cat /data/greenplum/conf/seg_hosts
-
- 配置.bash_profile环境变量(每台机器都要)
# Append the Greenplum environment to gpadmin's login profile (every host).
cat >> /home/gpadmin/.bash_profile <<EOF
source /data/greenplum/greenplum_path.sh
export MASTER_DATA_DIRECTORY=/data/greenplum/gpdata/gpmaster/gpseg-1
export PGPORT=5432
export PGDATABASE=testDB
EOF
source ~/.bash_profile  # make the variables take effect in this session
2、下载安装包
官网地址: https://network.pivotal.io/products/pivotal-gpdb
注意:下载安装包之前,需要在官网注册账号,方可下载。
3、上传Greenplum的安装文件greenplum-db-5.10.0-rhel7-x86_64.zip并解压缩,该步骤在master的gpadmin 上执行。
unzip greenplum-db-5.10.2-rhel7-x86_64.zip |
4、在root用户下,将下载的文件放在CentOS系统中自己能找到的位置即可,给该文件赋予可执行权限,之后执行该文件,即开始安装,该步骤在master上执行。
# fixed: the two filenames disagreed (chmod on 5.10.0 but execution of
# 5.10.2); both now reference the 5.10.2 installer unzipped above.
chmod +x greenplum-db-5.10.2-rhel7-x86_64.bin
./greenplum-db-5.10.2-rhel7-x86_64.bin
因为只在master上安装了Greenplum,所以接下来要将安装包批量发送到每个segment上,才能算是整个集群完整安装了Greenplum。后面的操作都是为了连接所有节点,并将安装包发送到每个节点。
greenplum_path.sh中保存了运行Greenplum的一些环境变量设置,包括GPHOOME、PYTHONHOME等设置。
注意执行gpssh-exkeys 命令时需要使用gpadmin账户。
source /data/greenplum/greenplum_path.sh gpssh-exkeys -f /data/greenplum/conf/hostlist [STEP 1 of 5] create local ID and authorize on local host ... /home/gpadmin/.ssh/id_rsa file exists ... key generation skipped [STEP 2 of 5] keyscan all hosts and update known_hosts file [STEP 3 of 5] authorize current user on remote hosts ... send to sdw1 ... send to sdw2 ... send to sdw3 [STEP 4 of 5] determine common authentication file content [STEP 5 of 5] copy authentication files to all remote hosts ... finished key exchange with sdw1 ... finished key exchange with sdw2 ... finished key exchange with sdw3 [INFO] completed successfully |
注意:
<1> 首次执行gpssh-exkeys命令时,在[STEP 3 of 5],要输入每个segment节点的gpadmin用户的密码。
<2> gpssh-exkeys命令使用的时候一定要用gpadmin身份,因为这个命令会生成ssh的免密码登录秘钥,保存在/home/gpadmin/.ssh下。如果以root身份使用gpssh-exkeys命令,那么生成的.ssh秘钥在root的home下面,或者虽然在/home/gpadmin下面但所有者是root,之后使用gpadmin身份执行相应操作的时候就没有权限。
<3> 如果你是以root安装的将已安装文件的所有权和组更改为gpadmin:
# fixed: the second command was a duplicate of the first; per the Greenplum
# install documentation the owner AND the group must both become gpadmin.
chown -R gpadmin /usr/local/greenplum*
chgrp -R gpadmin /usr/local/greenplum*
1、拷贝master节点公钥至各节点:gpssh-exkeys -f /data/greenplum/conf/hostlist
2、无密码登录安装:gpseginstall -f /data/greenplum/conf/hostlist
1、在mdw上创建gpmaster目录
# Master data directory on mdw.
mkdir -p /data/greenplum/gpdata/gpmaster
2、在每一个数据节点中创建数据目录
# Open a gpssh session to every segment host, then run the mkdir commands
# below at the gpssh prompt so the directories are created on all hosts.
gpssh -f /data/greenplum/conf/seg_hosts
# fixed: the first path contained a double slash ("/data//greenplum").
mkdir -p /data/greenplum/gpdata/disk1/primary
mkdir -p /data/greenplum/gpdata/disk1/mirror
mkdir -p /data/greenplum/gpdata/disk2/primary
mkdir -p /data/greenplum/gpdata/disk2/mirror
mkdir -p /data/greenplum/gpdata/disk3/primary
mkdir -p /data/greenplum/gpdata/disk3/mirror
mkdir -p /data/greenplum/gpdata/disk4/primary
mkdir -p /data/greenplum/gpdata/disk4/mirror
# Cluster initialisation config: 4 primary and 4 mirror instances per
# segment host (one per declared data directory).
cat > /data/greenplum/gpinitsystem_config <<EOF
ARRAY_NAME="Greenplum Data Platform"
SEG_PREFIX=gpseg
PORT_BASE=40000
declare -a DATA_DIRECTORY=(/data/greenplum/gpdata/disk1/primary /data/greenplum/gpdata/disk2/primary /data/greenplum/gpdata/disk3/primary /data/greenplum/gpdata/disk4/primary)
MASTER_HOSTNAME=mdw
MASTER_DIRECTORY=/data/greenplum/gpdata/gpmaster
MASTER_PORT=5432
TRUSTED_SHELL=ssh
CHECK_POINT_SEGMENTS=8
ENCODING=UTF8
MIRROR_PORT_BASE=50000
REPLICATION_PORT_BASE=41000
MIRROR_REPLICATION_PORT_BASE=51000
declare -a MIRROR_DATA_DIRECTORY=(/data/greenplum/gpdata/disk1/mirror /data/greenplum/gpdata/disk2/mirror /data/greenplum/gpdata/disk3/mirror /data/greenplum/gpdata/disk4/mirror)
DATABASE_NAME=GP
MACHINE_LIST_FILE=/data/greenplum/conf/seg_hosts
EOF
cat /data/greenplum/gpinitsystem_config
gpinitsystem -c /data/greenplum/gpinitsystem_config |
该命令通过配置文件对整个greenplum集群进行初始化,并在每个数据节点创建4个primary segment实例和4个mirror segment实例(与配置文件中DATA_DIRECTORY声明的目录数一致)。
注意:经常这一步骤会卡很长时间,如果报错查看日志。
-
- 为master创建standby
gpinitstandby -s mdw-s |
gpstate -f |
20210809:22:50:47:032092 gpstate:mdw:gpadmin-[INFO]:-Starting gpstate with args: -f 20210809:22:50:47:032092 gpstate:mdw:gpadmin-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 5.10.2 build commit:b3c02f3acd880e2d676dacea36be015e4a3826d4' 20210809:22:50:47:032092 gpstate:mdw:gpadmin-[INFO]:-master Greenplum Version: 'PostgreSQL 8.3.23 (Greenplum Database 5.10.2 build commit:b3c02f3acd880e2d676dacea36be015e4a3826d4) on x86_64-pc-linux-gnu, compiled by GCC gcc (GCC) 6.2.0, 64-bit compiled on Aug 10 2018 07:30:09' 20210809:22:50:47:032092 gpstate:mdw:gpadmin-[INFO]:-Obtaining Segment details from master... 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:-Standby master details 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:----------------------- 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:- Standby address = mdw-s 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:- Standby data directory = /data/greenplum/gpdata/gpmaster/gpseg-1 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:- Standby port = 5432 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:- Standby PID = 10606 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:- Standby status = Standby host passive 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:-------------------------------------------------------------- 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:--pg_stat_replication 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:-------------------------------------------------------------- 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:--WAL Sender State: streaming 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:--Sync state: sync 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:--Sent Location: 0/C000000 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:--Flush Location: 0/C000000 20210809:22:50:48:032092 gpstate:mdw:gpadmin-[INFO]:--Replay Location: 0/C000000 20210809:22:50:48:032092 
gpstate:mdw:gpadmin-[INFO]:-------------------------------------------------------------- |
- 查看greenplum 状态
gpstate
gpstart 启动gp集群
gpstop 关闭gp集群
gpstop -u 重新读取配置文件
gpstop -M fast 快速关闭greenplum集群,不管当前是否有连接
-
- 检查mirror状态
gpstate -m
psql -d postgres -c "SELECT * FROM gp_segment_configuration;" |
dbid | content | role | preferred_role | mode | status | port | hostname | address | replication_port ------+---------+------+----------------+------+--------+-------+----------+---------+------------------ 1 | -1 | p | p | s | u | 5432 | mdw | mdw | 2 | 0 | p | p | s | u | 40000 | sdw1 | sdw1 | 41000 6 | 4 | p | p | s | u | 40000 | sdw2 | sdw2 | 41000 10 | 8 | p | p | s | u | 40000 | sdw3 | sdw3 | 41000 3 | 1 | p | p | s | u | 40001 | sdw1 | sdw1 | 41001 7 | 5 | p | p | s | u | 40001 | sdw2 | sdw2 | 41001 11 | 9 | p | p | s | u | 40001 | sdw3 | sdw3 | 41001 4 | 2 | p | p | s | u | 40002 | sdw1 | sdw1 | 41002 8 | 6 | p | p | s | u | 40002 | sdw2 | sdw2 | 41002 12 | 10 | p | p | s | u | 40002 | sdw3 | sdw3 | 41002 5 | 3 | p | p | s | u | 40003 | sdw1 | sdw1 | 41003 9 | 7 | p | p | s | u | 40003 | sdw2 | sdw2 | 41003 13 | 11 | p | p | s | u | 40003 | sdw3 | sdw3 | 41003 14 | 0 | m | m | s | u | 50000 | sdw2 | sdw2 | 51000 15 | 1 | m | m | s | u | 50001 | sdw2 | sdw2 | 51001 16 | 2 | m | m | s | u | 50002 | sdw2 | sdw2 | 51002 17 | 3 | m | m | s | u | 50003 | sdw2 | sdw2 | 51003 18 | 4 | m | m | s | u | 50000 | sdw3 | sdw3 | 51000 19 | 5 | m | m | s | u | 50001 | sdw3 | sdw3 | 51001 20 | 6 | m | m | s | u | 50002 | sdw3 | sdw3 | 51002 21 | 7 | m | m | s | u | 50003 | sdw3 | sdw3 | 51003 22 | 8 | m | m | s | u | 50000 | sdw1 | sdw1 | 51000 23 | 9 | m | m | s | u | 50001 | sdw1 | sdw1 | 51001 24 | 10 | m | m | s | u | 50002 | sdw1 | sdw1 | 51002 25 | 11 | m | m | s | u | 50003 | sdw1 | sdw1 | 51003 26 | -1 | m | m | s | u | 5432 | mdw-s | mdw-s | (26 rows) |
连接数据库 |
psql -d template1 |
创建用户名 |
create role sh with password 'sh' login createdb; |
访问ACL |
# Allow role "sh" to connect from the 192.168.1.0/24 subnet with md5 auth.
cat >> /data/greenplum/gpdata/gpmaster/gpseg-1/pg_hba.conf <<EOF
host all sh 192.168.1.0/24 md5
EOF
cat /data/greenplum/gpdata/gpmaster/gpseg-1/pg_hba.conf
重新加载配置文件 |
gpstop -u |
登录数据库密码为sh |
psql -d template1 -U sh -h mdw |
创建名为sales_history的database |
create database sales_history; |
使用 \c 命令切换至 sales_history 数据库 |
\c sales_history |
创建名为sales_history的schema |
create schema sales_history ; |
永久改变schema的访问顺序 |
alter database sales_history set search_path to sales_history,public; |
切换schema至sales_history |
set search_path to sales_history; |
想查看当前的schema,可以用如下命令 |
-
- 使用pgadmin连接GP
CREATE TABLE SALES2 |
-- fixed: the sequence must exist before the table that uses it as a
-- DEFAULT (the original created it afterwards), and column i_time had
-- no data type.
-- NOTE(review): timestamp is assumed for i_time — confirm the intended type.
create sequence test01_id_seq;

create table test01 (
    id int default nextval('test01_id_seq') primary key,
    col1 varchar(20) not null,
    i_time timestamp
) distributed by (id);

insert into sales2 values (nextval('test01_id_seq'));
CREATE TABLE SALES |
CREATE TABLE SALES |
# Prepare the Go toolchain and build gpbackup as gpadmin.
yum install -y git wget
cd /usr/local
mv go /usr/local/gpadmin/
chown -R gpadmin:gpadmin /usr/local/gpadmin/go
su - gpadmin
source /usr/local/greenplum-db/greenplum_path.sh
# fixed: GOROOT must point at the Go tree that was moved above
# (/usr/local/gpadmin/go); the original set it to $HOME/go, which no
# longer exists after the mv.
echo 'export GOROOT=/usr/local/gpadmin/go' >> ~/.bashrc
source ~/.bashrc
make build
git clone https://gitee.com/mirrors_greenplum-db/gpbackup.git 也可以直接在gitee下载下来进行unzip |
[gpadmin@mdw-s gpbackup]$ go env -w GOPROXY=https://goproxy.cn [gpadmin@mdw-s gpbackup]$ make depend fatal: Not a git repository (or any parent up to mount point /home) Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set). GO111MODULE=on go mod download make build [gpadmin@mdw-s gpbackup]$ make build fatal: Not a git repository (or any parent up to mount point /home) Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set). GO111MODULE=on go build -mod=readonly -tags 'gpbackup' -o /usr/local/gpadmin/gopath/bin/gpbackup -ldflags "-X github.com/greenplum-db/gpbackup/backup.version=" GO111MODULE=on go build -mod=readonly -tags 'gprestore' -o /usr/local/gpadmin/gopath/bin/gprestore -ldflags "-X github.com/greenplum-db/gpbackup/restore.version=" GO111MODULE=on go build -mod=readonly -tags 'gpbackup_helper' -o /usr/local/gpadmin/gopath/bin/gpbackup_helper -ldflags "-X github.com/greenplum-db/gpbackup/helper.version=" [gpadmin@mdw-s gpbackup]$ make build_linux fatal: Not a git repository (or any parent up to mount point /home) Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set). env GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=readonly -tags 'gpbackup' -o gpbackup -ldflags "-X github.com/greenplum-db/gpbackup/backup.version=" env GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=readonly -tags 'gprestore' -o gprestore -ldflags "-X github.com/greenplum-db/gpbackup/restore.version=" env GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=readonly -tags 'gpbackup_helper' -o gpbackup_helper -ldflags "-X github.com/greenplum-db/gpbackup/helper.version=" [gpadmin@mdw-s gpbackup]$ which gpbackup /home/gpadmin/gpbackup/gpbackup |
-
- 可选gpbackup-s3-plugin,备份至S3存储
可将备份直接上传到S3协议的存储,例如华为的OBS、阿里的OSS、自己部署的minio |
配置s3-config.yaml文件,我用的是华为的OBS executablepath: /usr/local/gpadmin/gopath/bin/gpbackup_s3_plugin options: endpoint: https://obs.cn-north-1.myhuaweicloud.com aws_access_key_id: AK aws_secret_access_key: SK bucket: 桶名 folder: 目录名 ## 备份test_db 数据库到S3协议的存储 备份的位置:桶名/目录名/backups/YYYYMMDD/YYYYMMDDHHMMSS/
|
##需要先删掉数据库再还原 dropdb test_db --timestamp参数可查看存储的备份位置,你要还原哪个就写相应的时间戳目录名 |
gpbackup -dbname sales_history --backup-dir /data/back1/ --leaf-partition-data
[gpadmin@mdw gpadmin]$ gpbackup -dbname sales_history --backup-dir /data/back1/ --leaf-partition-data 20210810:01:36:04 gpbackup:gpadmin:mdw:039878-[INFO]:-gpbackup version = 20210810:01:36:04 gpbackup:gpadmin:mdw:039878-[INFO]:-Greenplum Database Version = 5.10.2 build commit:b3c02f3acd880e2d676dacea36be015e4a3826d4 20210810:01:36:04 gpbackup:gpadmin:mdw:039878-[INFO]:-Starting backup of database sales_history 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Backup Timestamp = 20210810013604 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Backup Database = sales_history 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Gathering table state information 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Acquiring ACCESS SHARE locks on tables Locks acquired: 18 / 18 [==============================================================] 100.00% 0s 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Gathering additional table metadata 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Getting partition definitions 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Getting storage information 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Getting child partitions with altered schema 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Metadata will be written to /data/back1/gpseg-1/backups/20210810/20210810013604/gpbackup_20210810013604_metadata.sql 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Writing global database metadata 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Global database metadata backup complete 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Writing pre-data metadata 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Pre-data metadata metadata backup complete 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Writing post-data metadata 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Post-data metadata backup complete 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Writing data to file 
Tables backed up: 16 / 16 [============================================================] 100.00% 0s 20210810:01:36:05 gpbackup:gpadmin:mdw:039878-[INFO]:-Data backup complete 20210810:01:36:06 gpbackup:gpadmin:mdw:039878-[INFO]:-Found neither /data/greenplum//bin/gp_email_contacts.yaml nor /usr/local/gpadmin/gp_email_contacts.yaml 20210810:01:36:06 gpbackup:gpadmin:mdw:039878-[INFO]:-Email containing gpbackup report /data/back1/gpseg-1/backups/20210810/20210810013604/gpbackup_20210810013604_report will not be sent 20210810:01:36:06 gpbackup:gpadmin:mdw:039878-[INFO]:-Backup completed successfully |
gpbackup -dbname sales_history --backup-dir /data/back1/ --leaf-partition-data --incremental
[gpadmin@mdw gpadmin]$ gpbackup -dbname sales_history --backup-dir /data/back1/ --leaf-partition-data --incremental 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-gpbackup version = 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Greenplum Database Version = 5.10.2 build commit:b3c02f3acd880e2d676dacea36be015e4a3826d4 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Starting backup of database sales_history 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Backup Timestamp = 20210810013647 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Backup Database = sales_history 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Gathering table state information 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Acquiring ACCESS SHARE locks on tables Locks acquired: 18 / 18 [==============================================================] 100.00% 0s 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Gathering additional table metadata 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Getting partition definitions 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Getting storage information 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Getting child partitions with altered schema 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Metadata will be written to /data/back1/gpseg-1/backups/20210810/20210810013647/gpbackup_20210810013647_metadata.sql 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Writing global database metadata 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Global database metadata backup complete 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Writing pre-data metadata 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Pre-data metadata metadata backup complete 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Writing post-data metadata 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Post-data metadata backup complete 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Basing 
incremental backup off of backup with timestamp = 20210810013604 20210810:01:36:47 gpbackup:gpadmin:mdw:039999-[INFO]:-Writing data to file Tables backed up: 11 / 11 [============================================================] 100.00% 0s 20210810:01:36:48 gpbackup:gpadmin:mdw:039999-[INFO]:-Data backup complete 20210810:01:36:49 gpbackup:gpadmin:mdw:039999-[INFO]:-Found neither /data/greenplum//bin/gp_email_contacts.yaml nor /usr/local/gpadmin/gp_email_contacts.yaml 20210810:01:36:49 gpbackup:gpadmin:mdw:039999-[INFO]:-Email containing gpbackup report /data/back1/gpseg-1/backups/20210810/20210810013647/gpbackup_20210810013647_report will not be sent 20210810:01:36:49 gpbackup:gpadmin:mdw:039999-[INFO]:-Backup completed successfully [gpadmin@mdw gpadmin]$ |