centos 7.4 + greenplum 5.7 + make

版权声明:本文为博主原创文章,转载请标明出处。 https://blog.csdn.net/ctypyb2002/article/details/79978518

os: centos 7.4
gp: gpdb-5.7.0

greenplum 简称gp,是由postgresql演变而来,感兴趣的哥们可以baidu一下

三台机器
node1 为master host
node2、node3为segment host

os设置

# cat /etc/centos-release
CentOS Linux release 7.4.1708 (Core)
# uname -a
Linux node1 3.10.0-693.el7.x86_64 #1 SMP Tue Aug 22 21:09:27 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux

修改hostname

# vi /etc/hostname
node1

使用setup或者 nmtui-edit 配置IP

# setup
或者
# nmtui-edit

修改/etc/hosts

vi /etc/hosts

10.0.2.7 node1-pub
10.0.2.8 node2-pub
10.0.2.9 node3-pub

192.168.56.101 node1
192.168.56.102 node2
192.168.56.103 node3

关闭防火墙

# systemctl stop firewalld.service
# systemctl disable firewalld.service
# iptables -F

关闭selinux

vi /etc/selinux/config
SELINUX=disabled
或者
vi /etc/sysconfig/selinux
SELINUX=disabled
注意:取值必须为小写的 disabled,写成大写 DISABLED 是无效值,部分系统会因此无法正常启动。

修改/etc/sysctl.conf

#kernel.shmall = 2097152
#kernel.shmmax = 536870912 # Bytes
kernel.shmmni = 4096

fs.aio-max-nr = 1048576
fs.file-max = 68116544

kernel.sem = 250 5120000 100 20480
kernel.sysrq = 1
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.msgmni = 2048

net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.arp_filter = 1
net.ipv4.ip_forward = 0
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_tw_recycle=1
# 注意:tcp_tw_recycle 在 NAT 环境下可能导致客户端连接失败,且 4.12 及以上内核已移除该参数,新内核请勿配置
net.ipv4.tcp_max_syn_backlog=4096
net.ipv4.ip_local_port_range = 1025 65535
net.core.netdev_max_backlog=10000
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048586

vm.overcommit_memory=2
vm.overcommit_ratio = 95

立即生效

# sysctl -p
# lsipc

修改/etc/security/limits.conf

* soft nofile 65536
* hard nofile 65536
* soft nproc 131072
* hard nproc 131072

上述配置具体含义:

soft nofile:可打开的文件描述符的最大数(软限制)
hard nofile:可打开的文件描述符的最大数(硬限制)
soft nproc:单个用户可用的最大进程数量(软限制)
hard nproc:单个用户可用的最大进程数量(硬限制)

修改 /etc/rc.local

# vi /etc/rc.local
#禁用大页
if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
   echo never > /sys/kernel/mm/transparent_hugepage/enabled
fi
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
   echo never > /sys/kernel/mm/transparent_hugepage/defrag
fi
#修改读块大小
/usr/sbin/blockdev --setra 16384 /dev/sda
#修改 io scheduler为deadline
echo deadline > /sys/block/sda/queue/scheduler

# chmod u+x /etc/rc.d/rc.local

或者修改grub

# vi /etc/default/grub
ipv6.disable=1 numa=off elevator=deadline transparent_hugepage=never
# grub2-mkconfig -o /boot/grub2/grub.cfg

创建 greenplum 组

# groupadd -g 10000 gpadmin

创建 greenplum 用户:

# useradd -u 10000 -g gpadmin gpadmin
# usermod -aG root gpadmin
# passwd gpadmin

创建 greenplum 目录

# mkdir -p /usr/local/greenplum-db
# chown -R gpadmin:gpadmin /usr/local/greenplum-db
# mkdir -p /u01/greenplum-data/
# chown -R gpadmin:gpadmin /u01

源码编译安装

https://github.com/greenplum-db/gpdb/releases
https://github.com/greenplum-db/gpdb/tree/5.7.0

安装依赖包,有点多

# yum install centos-release-scl epel-release dh-autoreconf  devtoolset-6-toolchain
# yum install git wget cmake3 rsync coreutils glib2 lrzsz sysstat e4fsprogs xfsprogs ntp zlib zlib-devel openssl openssl-libs openssl-devel pam pam-devel tcl-devel \
 smartmontools OpenIPMI-tools openldap openldap-devel logrotate libcurl-devel htop perl-Env libffi-devel libtool libaio ed net-tools \
 gcc gcc-c++ glibc-static make curl-devel bzip2-devel psutils psutils-perl liblockfile liblockfile-devel libevent libevent-devel vim-common vim-enhanced \
 perl perl-devel perl-ExtUtils-Embed  readline readline-devel apr apr-devel apr-util apr-util-devel libxml2 libxml2-devel \
 libxslt libxslt-devel bison bison-devel bison-runtime flex flex-devel isomd5sum isomd5sum-devel libyaml libyaml-devel

# yum install python python-devel python-isomd5sum python-setuptools python-py
# yum install python-lockfile  
# yum install python-paramiko 

# vi /etc/ld.so.conf
include ld.so.conf.d/*.conf
/usr/local/lib
/usr/local/lib64
# ldconfig

使用pip安装一些必备工具
pip 的安装可以参考 https://pip.pypa.io/en/stable/installing/

# curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
# python get-pip.py

# pip install setuptools
# pip install --upgrade setuptools

# pip install epydoc
# pip install psi
# pip install psutil
# pip install lockfile
# pip install paramiko
# pip install gssapi
# pip install conan
Cannot uninstall 'enum34'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.

# yum list installed |grep enum34
# yum remove python-enum34.noarch
Running transaction
  Erasing    : python-paramiko-doc-2.1.1-4.el7.noarch                                 1/4 
  Erasing    : python-paramiko-2.1.1-4.el7.noarch                                     2/4 
  Erasing    : python2-cryptography-1.7.2-1.el7_4.1.x86_64                            3/4 
  Erasing    : python-enum34-1.0.4-1.el7.noarch                                       4/4 
  Verifying  : python-paramiko-doc-2.1.1-4.el7.noarch                                 1/4 
  Verifying  : python-enum34-1.0.4-1.el7.noarch                                       2/4 
  Verifying  : python2-cryptography-1.7.2-1.el7_4.1.x86_64                            3/4 
  Verifying  : python-paramiko-2.1.1-4.el7.noarch                                     4/4 

安装cmake (所有节点),如果已经 yum install cmake3,则跳过这一步
参考
https://cmake.org/download/

# cd /root
# mkdir cmake
# cd cmake
# rz
# ./cmake-3.11.0-Linux-x86_64.sh
# ln -s /root/cmake/bin/cmake /usr/bin/cmake

安装gp-xerces (所有节点)
参考
https://github.com/greenplum-db/gp-xerces
https://github.com/greenplum-db/gp-xerces.git

# cd /root
# git clone https://github.com/greenplum-db/gp-xerces.git
# cd gp-xerces
# mkdir build
# cd ./build
# ../configure --prefix=/usr/local
# make
# make install

安装re2c (所有节点)
参考
http://re2c.org/install/install.html
https://github.com/skvadrik/re2c
https://github.com/skvadrik/re2c.git

# cd /root
# git clone https://github.com/skvadrik/re2c.git
# cd ./re2c/re2c
# ./autogen.sh
# ./configure --prefix=/usr/local
# make
# make install

安装ninja (所有节点)
参考
https://ninja-build.org/
https://github.com/ninja-build/ninja
https://github.com/ninja-build/ninja.git

# cd /root
# git clone https://github.com/ninja-build/ninja.git
# cd ninja
# ./configure.py --bootstrap
# ln -s /root/ninja/ninja /usr/bin/ninja

安装 gporca
这里啰嗦两句,gporca是 greenplum 新一代的优化器,在性能上有很大提升
legacy是早期针对单节点 greenplum 而构建的,主要应用于OLTP场景,greenplum在不安装orca的情况下使用的是legacy优化器。
为什么要有两个优化器,其实都是有历史原因的,现在的greenplum使用MPP,主要应用场景变为OLAP场景,legacy对此虽然进行了修改,但是从架构设计上,使得其维护和添加新的功能越来越困难,所以有了GPORCA优化器。

1、手动单独安装gporca (所有节点)
参考
https://github.com/greenplum-db/gporca
https://github.com/greenplum-db/gporca.git

# cd /root
# git clone https://github.com/greenplum-db/gporca.git
# cd gporca
# cmake -GNinja -H. -Bbuild
# ninja install -C build
--
--
-- Installing: /usr/local/include/gpopt/version.h

待安装完成后,进入/gporca/build目录,执行ctest命令进行检查,确保100% tests passed
# cd /root/gporca/build/
# /root/cmake/bin/ctest
100% tests passed, 0 tests failed out of 157

Total Test time (real) = 194.05 sec

2、自动安装,其实最后都是拷贝文件到 /usr/local/include/ /usr/local/lib/
(所有节点)
参考 https://github.com/greenplum-db/gpdb/tree/5.7.0

# su - gpadmin
$ echo 'source scl_source enable devtoolset-6' >> ~/.bashrc
$ cd /tmp
$ rz gpdb-5.7.0.zip
$ unzip ./gpdb-5.7.0.zip
$ cd gpdb-5.7.0
$ cd depends
$ ./configure
$ make
===================================================================
Orca can now be installed on the local system using "make install"
and be used as any normal system library

If you'd rather compile GPDB using ORCA in it's current location and then
install ORCA into the gpdb installation location then first run the top
level configure as follows:
LD_LIBRARY_PATH=/tmp/gpdb-5.7.0/depends/build/lib ./configure \ 
    --with-libraries=/tmp/gpdb-5.7.0/depends/build/lib \ 
    --with-includes=/tmp/gpdb-5.7.0/depends/build/include 

Then run "make".
Then run "LD_LIBRARY_PATH=/tmp/gpdb-5.7.0/depends/build/lib make install"

These steps should work on both MacOS and Linux

# make install_local
===================================================================
Orca can now be installed on the local system using "make install"
and be used as any normal system library

If you'd rather compile GPDB using ORCA in it's current location and then
install ORCA into the gpdb installation location then first run the top
level configure as follows:
LD_LIBRARY_PATH=/tmp/gpdb-5.7.0/depends/build/lib ./configure \ 
    --with-libraries=/tmp/gpdb-5.7.0/depends/build/lib \ 
    --with-includes=/tmp/gpdb-5.7.0/depends/build/include 

Then run "make".
Then run "LD_LIBRARY_PATH=/tmp/gpdb-5.7.0/depends/build/lib make install"

These steps should work on both MacOS and Linux
/bin/mkdir -p /usr/local
cp -R build/* /usr/local

最后两条命令就是拷贝到系统路径里
# /bin/mkdir -p /usr/local
# cp -R build/* /usr/local

后来在 gpdb configure 的时候报了如下错误
checking Checking ORCA version... configure: error: Your ORCA version is expected to be 2.55.XXX
发现 gporca 的版本是 2.55.13 ,后来通过 第一种方式编译 gporca的版本为 2.55.20,没有再报错

重装gporca前清空

rm -rf /usr/local/include/naucrates
rm -rf /usr/local/include/gpdbcost
rm -rf /usr/local/include/gpopt
rm -rf /usr/local/include/gpos
rm -rf /usr/local/lib/libnaucrates.so*
rm -rf /usr/local/lib/libgpdbcost.so*
rm -rf /usr/local/lib/libgpopt.so*
rm -rf /usr/local/lib/libgpos.so*

构建 greenplum

# su - gpadmin
$ cd /tmp
$ unzip ./gpdb-5.7.0.zip
finishing deferred symbolic links:
  gpdb-5.7.0/concourse/tasks/compile_gpdb_open_source.yml -> compile_gpdb_open_source_centos.yml

$ cd /tmp/gpdb-5.7.0

$ ./configure --prefix=/usr/local/greenplum-db --enable-mapreduce --with-perl --with-python --with-libxml --with-gssapi --enable-orca --with-includes=/usr/local/include/ --with-libraries=/usr/local/lib/

如果没有编译gporca优化器,则需要添加 --disable-orca 参数,如下
$ ./configure --prefix=/usr/local/greenplum-db --enable-mapreduce --with-perl --with-python --with-libxml --with-gssapi --disable-orca

相对比较全的选项
--with-gssapi --with-pgport=5432 --with-libedit-preferred --with-perl --with-python --with-openssl
--with-pam --with-krb5 --with-ldap --with-libxml --enable-cassert --enable-debug --enable-testutils --enable-debugbreak --enable-depend


中间有如下报错
./configure: line 11922: #include: command not found

需要多次make时,可以执行autoreconf
# cd /tmp/gpdb-5.7.0
# autoreconf -ivf

$ make -j4
$ make install

$ cd /usr/local/greenplum-db
$ ls -l
total 24
drwxrwxr-x 7 gpadmin gpadmin 4096 Apr 17 13:54 bin
drwxrwxr-x 3 gpadmin gpadmin   24 Apr 17 13:54 doc
drwxrwxr-x 3 gpadmin gpadmin   22 Apr 17 13:54 docs
drwxrwxr-x 2 gpadmin gpadmin   25 Apr 17 13:54 etc
-rw-rw-r-- 1 gpadmin gpadmin  698 Apr 17 13:54 greenplum_path.sh
drwxrwxr-x 4 gpadmin gpadmin 4096 Apr 17 13:54 include
drwxrwxr-x 5 gpadmin gpadmin 4096 Apr 17 13:54 lib
drwxrwxr-x 2 gpadmin gpadmin 4096 Apr 17 13:54 sbin
drwxrwxr-x 4 gpadmin gpadmin   41 Apr 17 13:54 share

设置用户环境变量

# vi /home/gpadmin/.bashrc
source /usr/local/greenplum-db/greenplum_path.sh
# vi /home/gpadmin/.bash_profile
source /usr/local/greenplum-db/greenplum_path.sh

设置完后记得source一下使其立即生效。

# source /home/gpadmin/.bashrc
# source /home/gpadmin/.bash_profile
# echo $PATH

至此 node1上的 greenplum 已经安装完毕。

需要在所有主机安装Greenplum二进制版本
在node1上touch all_host all_segment

# source /usr/local/greenplum-db/greenplum_path.sh
# cd $GPHOME
# vi all_host
node1
node2
node3
# vi all_segment
node2
node3

在node1上建立节点信任,需要输入root密码

# source /usr/local/greenplum-db/greenplum_path.sh
# cd $GPHOME/bin
# ./gpssh-exkeys -f $GPHOME/all_host

在node1上操作批量安装

# source /usr/local/greenplum-db/greenplum_path.sh
# cd $GPHOME/bin
# ./gpseginstall -f $GPHOME/all_segment -u gpadmin -p rootroot

在node1上检查批量安装情况

# source /usr/local/greenplum-db/greenplum_path.sh
# cd $GPHOME/bin
# ./gpssh -f $GPHOME/all_segment -e ls -l $GPHOME

在node1、node2、node3上分别检查数据目录

# ls -l / |grep -i u01 ;ls -l /u01/ ;

时间同步,各个节点要和master的时间保持一致
使用ntp和标准时间、内部时间同步

# yum install ntp
# systemctl start ntpd
# systemctl enable ntpd

或者使用gpssh来相互同步时钟

$ gpssh -f all_host -v date
$ gpssh -f all_host -v ntpd

运行gpcheck 来检查刚配的segment机器的操作系统情况

# gpcheck -f /usr/local/greenplum-db/all_host

硬件性能校验

# gpcheckperf -f /usr/local/greenplum-db/all_host -d /tmp -d /home/gpadmin/ -v -r ds

网络性能校验:

# gpchecknet -f /usr/local/greenplum-db/all_host -d /tmp
# gpchecknet -f /usr/local/greenplum-db/all_host -r N -d /tmp

初始化

gp初始化文件

$ cp /usr/local/greenplum-db/docs/cli_help/gpconfigs/gpinitsystem_config /usr/local/greenplum-db/
$ chmod 775 ./gpinitsystem_config
$ vi /usr/local/greenplum-db/gpinitsystem_config
$ egrep -v "(^$|^#)" ./gpinitsystem_config

ARRAY_NAME="test Greenplum Data Platform"
SEG_PREFIX=gpseg
PORT_BASE=40000
declare -a DATA_DIRECTORY=(/u01/greenplum-data)
MASTER_HOSTNAME=node1
MASTER_DIRECTORY=/u01/greenplum-data
MASTER_PORT=5432
TRUSTED_SHELL=ssh
CHECK_POINT_SEGMENTS=8
ENCODING=UNICODE
DATABASE_NAME=peiybdb
MACHINE_LIST_FILE=/usr/local/greenplum-db/all_segment

简单说明下
ARRAY_NAME:设置阵列名称,默认Greenplum Data Platform。
SEG_PREFIX:设置segment的前缀,默认gpseg。
PORT_BASE:设置segment的起始端口,会从此端口往上增加,默认从40000开始。
DATA_DIRECTORY:设置segment primary的数据存储目录,有几个segment节点就需要设置几个数据存储目录。
MASTER_HOSTNAME:设置master的主机名。
MASTER_DIRECTORY:设置master的存储目录。
MASTER_PORT:设置master的端口,默认5432。
TRUSTED_SHELL:设置节点之间的信任方式,默认SSH。
CHECK_POINT_SEGMENTS:预写日志文件(WAL)数量,默认为8,这意味着为主机上的每个Segment或Master实例分配1088MB的WAL空间。
ENCODING=UNICODE:设置初始字符集,默认UNICODE(UTF-8)。
MACHINE_LIST_FILE:仅包含segment主机地址
注意,其中所有需要的目录都是在创建数据存储区域时做好的。

gp初始化工具

$ cd $GPHOME/bin
$ gpinitsystem -c /usr/local/greenplum-db/gpinitsystem_config -h /usr/local/greenplum-db/all_segment
20180417:17:41:25:005291 gpinitsystem:node1:gpadmin-[INFO]:-Checking configuration parameters, please wait...
20180417:17:41:25:005291 gpinitsystem:node1:gpadmin-[INFO]:-Reading Greenplum configuration file /usr/local/greenplum-db/gpinitsystem_config
20180417:17:41:25:005291 gpinitsystem:node1:gpadmin-[INFO]:-Locale has not been set in /usr/local/greenplum-db/gpinitsystem_config, will set to default value
20180417:17:41:25:005291 gpinitsystem:node1:gpadmin-[INFO]:-Locale set to en_US.utf8
20180417:17:41:25:005291 gpinitsystem:node1:gpadmin-[INFO]:-MASTER_MAX_CONNECT not set, will set to default value 250
20180417:17:41:25:005291 gpinitsystem:node1:gpadmin-[INFO]:-Checking configuration parameters, Completed
20180417:17:41:25:005291 gpinitsystem:node1:gpadmin-[INFO]:-Commencing multi-home checks, please wait...
...
20180417:17:41:27:005291 gpinitsystem:node1:gpadmin-[INFO]:-Configuring build for standard array
20180417:17:41:27:005291 gpinitsystem:node1:gpadmin-[INFO]:-Commencing multi-home checks, Completed
20180417:17:41:27:005291 gpinitsystem:node1:gpadmin-[INFO]:-Building primary segment instance array, please wait...
...
20180417:17:41:29:005291 gpinitsystem:node1:gpadmin-[INFO]:-Checking Master host
20180417:17:41:29:005291 gpinitsystem:node1:gpadmin-[INFO]:-Checking new segment hosts, please wait...
...
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Checking new segment hosts, Completed
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Greenplum Database Creation Parameters
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:---------------------------------------
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master Configuration
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:---------------------------------------
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master instance name       = test Greenplum Data Platform
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master hostname            = node1
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master port                = 5432
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master instance dir        = /u01/greenplum-data/gpseg-1
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master LOCALE              = en_US.utf8
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Greenplum segment prefix   = gpseg
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master Database            = peiybdb
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master connections         = 250
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master buffers             = 128000kB
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Segment connections        = 750
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Segment buffers            = 128000kB
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Checkpoint segments        = 8
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Encoding                   = UNICODE
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Postgres param file        = Off
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Initdb to be used          = /usr/local/greenplum-db/bin/initdb
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-GP_LIBRARY_PATH is         = /usr/local/greenplum-db/lib
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-HEAP_CHECKSUM is           = on
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Ulimit check               = Passed
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Array host connect type    = Single hostname per node
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master IP address [1]      = ::1
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master IP address [2]      = 10.0.2.7
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master IP address [3]      = 192.168.122.1
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master IP address [4]      = 192.168.56.101
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master IP address [5]      = fe80::a00:27ff:fe6e:8656
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Master IP address [6]      = fe80::a00:27ff:fef3:ba27
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Standby Master             = Not Configured
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Primary segment #          = 1
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Total Database segments    = 3
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Trusted shell              = ssh
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Number segment hosts       = 3
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Mirroring config           = OFF
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:----------------------------------------
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-Greenplum Primary Segment Configuration
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:----------------------------------------
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-node1   /u01/greenplum-data/gpseg0  40000   2 0
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-node2   /u01/greenplum-data/gpseg1  40000   3 1
20180417:17:41:39:005291 gpinitsystem:node1:gpadmin-[INFO]:-node3   /u01/greenplum-data/gpseg2  40000   4 2

Continue with Greenplum creation Yy|Nn (default=N):
> y
20180417:17:41:43:005291 gpinitsystem:node1:gpadmin-[INFO]:-Building the Master instance database, please wait...
20180417:17:41:46:005291 gpinitsystem:node1:gpadmin-[INFO]:-Starting the Master in admin mode
20180417:17:41:51:005291 gpinitsystem:node1:gpadmin-[INFO]:-Commencing parallel build of primary segment instances
20180417:17:41:51:005291 gpinitsystem:node1:gpadmin-[INFO]:-Spawning parallel processes    batch [1], please wait...
...
20180417:17:41:51:005291 gpinitsystem:node1:gpadmin-[INFO]:-Waiting for parallel processes batch [1], please wait...
...............................
20180417:17:42:23:005291 gpinitsystem:node1:gpadmin-[INFO]:------------------------------------------------
20180417:17:42:23:005291 gpinitsystem:node1:gpadmin-[INFO]:-Parallel process exit status
20180417:17:42:23:005291 gpinitsystem:node1:gpadmin-[INFO]:------------------------------------------------
20180417:17:42:23:005291 gpinitsystem:node1:gpadmin-[INFO]:-Total processes marked as completed           = 3
20180417:17:42:23:005291 gpinitsystem:node1:gpadmin-[INFO]:-Total processes marked as killed              = 0
20180417:17:42:23:005291 gpinitsystem:node1:gpadmin-[INFO]:-Total processes marked as failed              = 0
20180417:17:42:23:005291 gpinitsystem:node1:gpadmin-[INFO]:------------------------------------------------
20180417:17:42:23:005291 gpinitsystem:node1:gpadmin-[INFO]:-Deleting distributed backout files
20180417:17:42:23:005291 gpinitsystem:node1:gpadmin-[INFO]:-Removing back out file
20180417:17:42:23:005291 gpinitsystem:node1:gpadmin-[INFO]:-No errors generated from parallel processes
20180417:17:42:23:005291 gpinitsystem:node1:gpadmin-[INFO]:-Restarting the Greenplum instance in production mode

20180417:17:42:23:009523 gpstop:node1:gpadmin-[INFO]:-Starting gpstop with args: -a -l /home/gpadmin/gpAdminLogs -i -m -d /u01/greenplum-data/gpseg-1
20180417:17:42:23:009523 gpstop:node1:gpadmin-[INFO]:-Gathering information and validating the environment...
20180417:17:42:23:009523 gpstop:node1:gpadmin-[INFO]:-Obtaining Greenplum Master catalog information
20180417:17:42:23:009523 gpstop:node1:gpadmin-[INFO]:-Obtaining Segment details from master...
20180417:17:42:23:009523 gpstop:node1:gpadmin-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 5.0.0 build dev'
20180417:17:42:23:009523 gpstop:node1:gpadmin-[INFO]:-There are 0 connections to the database
20180417:17:42:23:009523 gpstop:node1:gpadmin-[INFO]:-Commencing Master instance shutdown with mode='immediate'
20180417:17:42:23:009523 gpstop:node1:gpadmin-[INFO]:-Master host=node1
20180417:17:42:23:009523 gpstop:node1:gpadmin-[INFO]:-Commencing Master instance shutdown with mode=immediate
20180417:17:42:23:009523 gpstop:node1:gpadmin-[INFO]:-Master segment instance directory=/u01/greenplum-data/gpseg-1
20180417:17:42:24:009523 gpstop:node1:gpadmin-[INFO]:-Attempting forceful termination of any leftover master process
20180417:17:42:24:009523 gpstop:node1:gpadmin-[INFO]:-Terminating processes for segment /u01/greenplum-data/gpseg-1
20180417:17:42:25:009553 gpstart:node1:gpadmin-[INFO]:-Starting gpstart with args: -a -l /home/gpadmin/gpAdminLogs -d /u01/greenplum-data/gpseg-1
20180417:17:42:25:009553 gpstart:node1:gpadmin-[INFO]:-Gathering information and validating the environment...
20180417:17:42:25:009553 gpstart:node1:gpadmin-[INFO]:-Greenplum Binary Version: 'postgres (Greenplum Database) 5.0.0 build dev'
20180417:17:42:25:009553 gpstart:node1:gpadmin-[INFO]:-Greenplum Catalog Version: '301705051'
20180417:17:42:25:009553 gpstart:node1:gpadmin-[INFO]:-Starting Master instance in admin mode
20180417:17:42:26:009553 gpstart:node1:gpadmin-[INFO]:-Obtaining Greenplum Master catalog information
20180417:17:42:26:009553 gpstart:node1:gpadmin-[INFO]:-Obtaining Segment details from master...
20180417:17:42:26:009553 gpstart:node1:gpadmin-[INFO]:-Setting new master era
20180417:17:42:26:009553 gpstart:node1:gpadmin-[INFO]:-Master Started...
20180417:17:42:26:009553 gpstart:node1:gpadmin-[INFO]:-Shutting down master
20180417:17:42:27:009553 gpstart:node1:gpadmin-[INFO]:-Commencing parallel segment instance startup, please wait...
........ 
20180417:17:42:35:009553 gpstart:node1:gpadmin-[INFO]:-Process results...
20180417:17:42:35:009553 gpstart:node1:gpadmin-[INFO]:-----------------------------------------------------
20180417:17:42:35:009553 gpstart:node1:gpadmin-[INFO]:-   Successful segment starts                                            = 3
20180417:17:42:35:009553 gpstart:node1:gpadmin-[INFO]:-   Failed segment starts                                                = 0
20180417:17:42:35:009553 gpstart:node1:gpadmin-[INFO]:-   Skipped segment starts (segments are marked down in configuration)   = 0
20180417:17:42:35:009553 gpstart:node1:gpadmin-[INFO]:-----------------------------------------------------
20180417:17:42:35:009553 gpstart:node1:gpadmin-[INFO]:-Successfully started 3 of 3 segment instances 
20180417:17:42:35:009553 gpstart:node1:gpadmin-[INFO]:-----------------------------------------------------
20180417:17:42:35:009553 gpstart:node1:gpadmin-[INFO]:-Starting Master instance node1 directory /u01/greenplum-data/gpseg-1 
20180417:17:42:36:009553 gpstart:node1:gpadmin-[INFO]:-Command pg_ctl reports Master node1 instance active
20180417:17:42:36:009553 gpstart:node1:gpadmin-[INFO]:-No standby master configured.  skipping...
20180417:17:42:36:009553 gpstart:node1:gpadmin-[INFO]:-Database successfully started
20180417:17:42:36:005291 gpinitsystem:node1:gpadmin-[INFO]:-Completed restart of Greenplum instance in production mode
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-Scanning utility log file for any warning messages
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-Log file scan check passed
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-Greenplum Database instance successfully created
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-------------------------------------------------------
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-To complete the environment configuration, please 
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-update gpadmin .bashrc file with the following
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-1. Ensure that the greenplum_path.sh file is sourced
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-2. Add "export MASTER_DATA_DIRECTORY=/u01/greenplum-data/gpseg-1"
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-   to access the Greenplum scripts for this instance:
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-   or, use -d /u01/greenplum-data/gpseg-1 option for the Greenplum scripts
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-   Example gpstate -d /u01/greenplum-data/gpseg-1
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-Script log file = /home/gpadmin/gpAdminLogs/gpinitsystem_20180417.log
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-To remove instance, run gpdeletesystem utility
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-To initialize a Standby Master Segment for this Greenplum instance
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-Review options for gpinitstandby
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-------------------------------------------------------
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-The Master /u01/greenplum-data/gpseg-1/pg_hba.conf post gpinitsystem
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-has been configured to allow all hosts within this new
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-array to intercommunicate. Any hosts external to this
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-new array must be explicitly added to this file
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-Refer to the Greenplum Admin support guide which is
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-located in the /usr/local/greenplum-db/docs directory
20180417:17:42:42:005291 gpinitsystem:node1:gpadmin-[INFO]:-------------------------------------------------------

node1 上增加 greenplum master的数据变量,最终的修改如下:

# vi /home/gpadmin/.bashrc
source /usr/local/greenplum-db/greenplum_path.sh
export MASTER_DATA_DIRECTORY=/u01/greenplum-data/gpseg-1

# vi /home/gpadmin/.bash_profile
source /usr/local/greenplum-db/greenplum_path.sh
export MASTER_DATA_DIRECTORY=/u01/greenplum-data/gpseg-1

node2、node3 作为segment,最终的修改如下:

# vi /home/gpadmin/.bashrc
source /usr/local/greenplum-db/greenplum_path.sh

# vi /home/gpadmin/.bash_profile
source /usr/local/greenplum-db/greenplum_path.sh

设置访问权限

$ cd /u01/greenplum-data/gpseg-1
$ ls -l
total 60
drwx------ 6 gpadmin gpadmin    54 Apr 17 17:42 base
drwx------ 2 gpadmin gpadmin  4096 Apr 17 17:42 global
-r-------- 1 gpadmin gpadmin   109 Apr 17 17:41 gp_dbid
drwxrwxr-x 5 gpadmin gpadmin    42 Apr 17 17:41 gpperfmon
-rw-rw-r-- 1 gpadmin gpadmin   860 Apr 17 17:41 gpssh.conf
drwx------ 2 gpadmin gpadmin     6 Apr 17 17:41 pg_changetracking
drwx------ 2 gpadmin gpadmin    18 Apr 17 17:41 pg_clog
drwx------ 2 gpadmin gpadmin    18 Apr 17 17:41 pg_distributedlog
drwx------ 2 gpadmin gpadmin     6 Apr 17 17:41 pg_distributedxidmap
-rw-rw-r-- 1 gpadmin gpadmin  4307 Apr 17 17:41 pg_hba.conf
-rw------- 1 gpadmin gpadmin  1636 Apr 17 17:41 pg_ident.conf
drwx------ 2 gpadmin gpadmin   141 Apr 17 17:42 pg_log
drwx------ 4 gpadmin gpadmin    36 Apr 17 17:41 pg_multixact
drwx------ 2 gpadmin gpadmin    25 Apr 17 17:42 pg_stat_tmp
drwx------ 2 gpadmin gpadmin    18 Apr 17 17:41 pg_subtrans
drwx------ 2 gpadmin gpadmin     6 Apr 17 17:41 pg_tblspc
drwx------ 2 gpadmin gpadmin     6 Apr 17 17:41 pg_twophase
drwx------ 2 gpadmin gpadmin     6 Apr 17 17:42 pg_utilitymodedtmredo
-rw------- 1 gpadmin gpadmin     4 Apr 17 17:41 PG_VERSION
drwx------ 3 gpadmin gpadmin    60 Apr 17 17:42 pg_xlog
-rw------- 1 gpadmin gpadmin 21250 Apr 17 17:41 postgresql.conf
-rw------- 1 gpadmin gpadmin   205 Apr 17 17:42 postmaster.opts
-rw------- 1 gpadmin gpadmin    53 Apr 17 17:42 postmaster.pid

熟悉吧,和 postgresql 的目录基本是一致的。

$ vi pg_hba.conf
$ psql -d postgres -c "select pg_reload_conf();"
$ psql -d postgres
psql (8.3.23)
Type "help" for help.

postgres=#

补充一下

$ gpinitsystem -c /usr/local/greenplum-db/gpinitsystem_config -h /usr/local/greenplum-db/all_host

-h 应该是 /usr/local/greenplum-db/all_segment,当时拷贝错了,事后才发现。有空再重做一遍,增加下熟练度。
再次提醒自己,要细心,要细心,要细心

阅读更多
想对作者说点什么? 我来说一句

没有更多推荐了,返回首页

关闭
关闭
关闭