Neo4j社区版高可用搭建

Neo4j社区版高可用搭建

1 架构(drbd+keepalived+neo4j)

 

(此处原为架构示意图:drbd + keepalived + neo4j,原图转存失败已丢失)

2 推荐配置

| 项目       | 最低配置           | 推荐配置                      |
| ---------- | ------------------ | ----------------------------- |
| CPU        | Intel Core i3 8核  | Intel Core i7 16核            |
| Memory     | 2GB                | 16–32GB or more               |
| Disk       | 10GB SATA          | SSD w/ SATA Express, or NVMe  |
| Filesystem | EXT4 (or similar)  | EXT4/ZFS                      |
| Java       | OpenJDK 8          | Oracle Java 8                 |

3 环境说明

| 角色  | hostname | ip              | 磁盘     | vip             |
| ----- | -------- | --------------- | -------- | --------------- |
| node1 | node101  | 192.168.245.201 | /dev/sdb | 192.168.245.203 |
| node2 | node102  | 192.168.245.202 | /dev/sdb | 192.168.245.203 |

4 安装准备

4.1 关闭防火墙和selinux

node1 & node2
# 防火墙
# systemctl stop firewalld
# systemctl disable firewalld
​
# selinux
# setenforce 0
vim /etc/sysconfig/selinux
SELINUX=disabled

4.2 配置/etc/hosts

node1 & node2
[root@node1 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.245.201  node101
192.168.245.202  node102

4.3 SSH互信

node1:
# ssh-keygen
# ssh-copy-id -i /root/.ssh/id_rsa.pub  192.168.245.202
node2:
# ssh-keygen
# ssh-copy-id -i /root/.ssh/id_rsa.pub  192.168.245.201

4.4 时钟同步

node1 & node2
crontab -e
*/5 * * * * ntpdate cn.pool.ntp.org ###添加任务
systemctl restart crond

5 安装DRBD

5.1 磁盘检查

node1 & node2
[root@node1 ~]# fdisk -l|grep /dev/sdb
Disk /dev/sdb: 21.5 GB, 21474836480 bytes, 41943040 sectors
[root@node2 ~]# fdisk -l|grep /dev/sdb
Disk /dev/sdb: 21.5 GB, 21474836480 bytes, 41943040 sectors

5.2 安装

node1 & node2
# 方式1 yum方式
rpm -ivh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
yum install -y drbd84-utils kmod-drbd84
​
# 方式2 rpm方式
# 下载rpm到本地,执行下述命令
rpm -ivh elrepo-release-7.0-4.el7.elrepo.noarch.rpm
rpm -ivh drbd90-utils-9.10.0-1.el7.elrepo.x86_64.rpm
rpm -ivh kmod-drbd90-9.0.16-1.el7_6.elrepo.x86_64.rpm

5.3 配置文件/etc/drbd.d/global_common.conf

node1 & node2
[root@node101 neo4j]# cat /etc/drbd.d/global_common.conf 
# DRBD is the result of over a decade of development by LINBIT.
# In case you need professional services for DRBD or have
# feature requests visit http://www.linbit.com
​
global {
    usage-count no;
​
    # Decide what kind of udev symlinks you want for "implicit" volumes
    # (those without explicit volume <vnr> {} block, implied vnr=0):
    # /dev/drbd/by-resource/<resource>/<vnr>   (explicit volumes)
    # /dev/drbd/by-resource/<resource>         (default for implict)
    # udev-always-use-vnr; # treat implicit the same as explicit volumes
​
    # minor-count dialog-refresh disable-ip-verification
    # cmd-timeout-short 5; cmd-timeout-medium 121; cmd-timeout-long 600;
}
​
common {
    handlers {
        # These are EXAMPLE handlers only.
        # They may have severe implications,
        # like hard resetting the node under certain circumstances.
        # Be careful when choosing your poison.
​
        # pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        # pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        # local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
        # fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
        # split-brain "/usr/lib/drbd/notify-split-brain.sh root";
        # out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";
        # before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";
        # after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;
        # quorum-lost "/usr/lib/drbd/notify-quorum-lost.sh root";
    }
​
    startup {
        # wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb
    }
​
    options {
        # cpu-mask on-no-data-accessible
​
        # RECOMMENDED for three or more storage nodes with DRBD 9:
        # quorum majority;
        # on-no-quorum suspend-io | io-error;
    }
​
    disk {
        on-io-error detach;
        # size on-io-error fencing disk-barrier disk-flushes
        # disk-drain md-flushes resync-rate resync-after al-extents
                # c-plan-ahead c-delay-target c-fill-target c-max-rate
                # c-min-rate disk-timeout
    }
​
    net {
        protocol C;
        # protocol timeout max-epoch-size max-buffers
        # connect-int ping-int sndbuf-size rcvbuf-size ko-count
        # allow-two-primaries cram-hmac-alg shared-secret after-sb-0pri
        # after-sb-1pri after-sb-2pri always-asbp rr-conflict
        # ping-timeout data-integrity-alg tcp-cork on-congestion
        # congestion-fill congestion-extents csums-alg verify-alg
        # use-rle
    }
}

5.4 资源文件

node1 & node2
[root@node101 ~]# cat /etc/drbd.d/neo4j.res 
resource neo4j {
on node101 {
    device  /dev/drbd0;
    disk    /dev/sdb1;
    address 192.168.245.201:7789;
    meta-disk   internal;
    }
on node102 {
    device  /dev/drbd0;
    disk    /dev/sdb1;
    address 192.168.245.202:7789;
    meta-disk   internal;
    }
}

5.5 启用DRBD

node1 & node2
[root@node101 ~]# modprobe drbd
[root@node101 ~]# 
[root@node101 ~]# drbdadm create-md neo4j
initializing activity log
initializing bitmap (512 KB) to all zero
Writing meta data...
New drbd meta data block successfully created.
[root@node101 ~]# 
[root@node101 ~]# drbdadm up neo4j #(如果报错 fdisk /dev/sdb)
[root@node101 ~]# 
[root@node101 ~]# drbdadm -- --force primary neo4j #(仅主库执行)
[root@node101 ~]# 
[root@node101 ~]# drbdadm status neo4j
neo4j role:Primary
  disk:UpToDate
  node102 role:Secondary
    replication:SyncSource peer-disk:Inconsistent done:0.19
[root@node101 ~]# 

5.6 创建文件系统

node1 & node2
mkdir -p /data/drbd
mkfs.ext4 /dev/drbd0
mount /dev/drbd0 /data/drbd
echo 'test' > /data/drbd/1.txt

5.7 主备切换

主:
cd ~
umount /data/drbd
drbdadm secondary neo4j
drbdadm status neo4j
从:
drbdadm primary neo4j
mount /dev/drbd0 /data/drbd
drbdadm status neo4j

6 搭建keepalived

6.1 安装

node1 & node2
cd /usr/local/src
下载keepalived-2.0.18.tar.gz到此目录
tar -xzvf keepalived-2.0.18.tar.gz
cd keepalived-2.0.18
 yum install -y gcc openssl-devel popt-devel  #(先挂载本地yum源)
./configure --prefix=/usr/local/keepalived
make && make install

6.2 配置

node1 cat /etc/keepalived/keepalived.conf

! Configuration File for keepalived
 
global_defs {
   router_id LVS_DEVEL
   #vrrp_strict
}

vrrp_script check_neo4j {
    script "/data/scripts/check_neo4j.sh"
    interval 1
    fall 2
}
 
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 52
    priority 100
    advert_int 1
    # 如果两节点的上联交换机禁用了组播,则采用vrrp单播通告的方式
    # 本机ip
    unicast_src_ip 192.168.245.201
    unicast_peer {
      # 其他机器ip
        192.168.245.202
    }
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.245.203
    }
    notify_stop /data/scripts/notify_stop.sh
    notify_master /data/scripts/notify_master.sh
    track_script {
        check_neo4j
    }
}

node2 cat /etc/keepalived/keepalived.conf

! Configuration File for keepalived
 
global_defs {
   router_id LVS_DEVEL
   #vrrp_strict
}
 
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 52
    priority 90
    advert_int 1
    # 如果两节点的上联交换机禁用了组播,则采用vrrp单播通告的方式
    # 本机ip
    unicast_src_ip 192.168.245.202
    unicast_peer {
      #   其他机器ip
        192.168.245.201
    }
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.245.203
    }
    notify_master /data/scripts/notify_master.sh 
    notify_backup /data/scripts/notify_backup.sh
}

node1 & node2

mkdir -p /data/scripts   /data/logs  /data/logs/keepalived/
主:
[root@node1 scripts]# ll
total 12
-rw-r--r-- 1 root root 383 Feb 23 14:57 check_neo4j.sh
-rw-r--r-- 1 root root 496 Feb 23 14:59 notify_master.sh
-rw-r--r-- 1 root root 443 Feb 23 14:58 notify_stop.sh
从:
[root@node2 scripts]# ll
total 8
-rw-r--r-- 1 root root 467 Feb 23 15:03 notify_backup.sh
-rw-r--r-- 1 root root 495 Feb 23 15:04 notify_master.sh

6.3 启动

node1 & node2
service keepalived start

6.4 观察vip生成

[root@node101 scripts]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:a7:12:7f brd ff:ff:ff:ff:ff:ff
    inet 192.168.245.201/24 brd 192.168.245.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.245.203/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::12a8:7432:6f52:e67b/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
    link/ether 52:54:00:c1:fa:a5 brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
       valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc fq_codel master virbr0 state DOWN group default qlen 1000
    link/ether 52:54:00:c1:fa:a5 brd ff:ff:ff:ff:ff:ff
[root@node101 scripts]# 

7 搭建Neo4j

7.1 安装到/data/drbd盘

node1
cd /data/drbd/neo4j   # 注:7.1节约定安装到/data/drbd盘,原文"/drbd/neo4j"疑为笔误
tar -xzvf neo4j-community-3.5.14-unix.tar.gz

7.2 启动图数据库

node1
vim conf/neo4j.conf
./bin/neo4j start

7.3 测试集群

http://192.168.245.201:7474/browser/
http://192.168.245.203:7474/browser/
都可以连接
  • 2
    点赞
  • 13
    收藏
    觉得还不错? 一键收藏
  • 3
    评论
社区Neo4j是一款流行的图数据库,可支持高可用性。实现高可用的一个方法是使用Neo4j的集群模式。在这种模式下,多个Neo4j实例运行在不同的机器上,共同管理一个数据集。这使得一个节点出现故障时,集群中的其他节点可以接管它的职责,并保持数据库的正常运行。 以下是实现Neo4j高可用的步骤: 1.配置集群模式:通过编辑neo4j.conf文件,指定集群需要使用的协议和端口。必须指定唯一标识符以识别集群中的每个节点。可以指定主节点,其他节点将涉及从节点和读副本。 2.启动实例:使用相同的配置文件,启动所有实例,以便它们可以相互通信,并成为一个集群。在启动过程中,节点会自动加入集群并生成副本。 3.监控进程:使用Neo4j浏览器或类似工具,监控集群中的进程,并查看各个节点的运行状况。可以查看每个节点的日志以及错误信息。可以使用Neo4j浏览器来管理和可视化数据库。 4.故障转移:如果某个节点出现故障,集群中的其他节点将接管其职责,并保持数据库的正常运行。因为数据库是分布式的,所以可以访问所有节点,而不会丢失数据或损失性能。 因此,通过使用集群模式,社区Neo4j可以实现高可用性,同时保持数据的完整性和一致性,并保持高性能。只要节点之间可以相互通信,Neo4j集群就可以在节点出现故障时保持运行,实现高可用性。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 3
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值