Ignite集群搭建及整合SpringData实现增删改查

一、Ignite集群搭建

      1、准备三个虚拟机。192.168.91.101、192.168.91.102、192.168.91.103

      2、官网下载:https://ignite.apache.org/download.cgi(页面标题为 "Download - Apache Ignite"),选择 2.14.0 二进制包

        

      3、放入 /usr/local/software-common/ignite目录(新建此目录),执行unzip -o -d ./ apache-ignite-2.14.0-bin.zip 解压至当前目录

      4、新建default.xml 以及 startignite.sh文件

         default.xml内容

<?xml version="1.0" encoding="UTF-8"?>
<!--
    Ignite node configuration (default.xml) for the three-node cluster,
    with native persistence enabled. Passed as the argument to bin/ignite.sh.
-->
<beans xmlns="http://www.springframework.org/schema/beans"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:util="http://www.springframework.org/schema/util"
    xsi:schemaLocation="
    http://www.springframework.org/schema/beans
    http://www.springframework.org/schema/beans/spring-beans.xsd
    http://www.springframework.org/schema/util
    http://www.springframework.org/schema/util/spring-util.xsd">
<bean id="grid.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">

    <!-- Peer class loading: ships user classes between nodes over the wire.
         NOTE(review): the default is false (the original note claimed true);
         leaving it off commonly causes class-not-found errors. -->
    <property name="peerClassLoadingEnabled" value="true"/>

    <!-- System thread pool size (default: max(8, total number of cores)) -->
    <property name="systemThreadPoolSize" value="24"/>

    <!-- Public thread pool size (default: max(8, total number of cores)) -->
    <property name="publicThreadPoolSize" value="8"/>

    <!-- Query thread pool size (default: max(8, total number of cores)) -->
    <property name="queryThreadPoolSize" value="8"/>

    <!-- Service thread pool size (default: max(8, total number of cores)) -->
    <property name="serviceThreadPoolSize" value="8"/>

    <!-- Striped pool size, used for cache request processing
         (default: max(8, total number of cores)) -->
    <property name="stripedPoolSize" value="8"/>

    <!-- Data streamer thread pool size (default: max(8, total number of cores)) -->
    <property name="dataStreamerThreadPoolSize" value="8"/>

    <!-- Rebalance thread pool size -->
    <property name="rebalanceThreadPoolSize" value="8"/>

    <!-- User authentication; disabled by default. When enabled, the default
         username and password are both "ignite". -->
<!--   
       <property name="authenticationEnabled" value="true"/>
-->

    <!-- Marshaller used for object serialization.
         NOTE(review): BinaryMarshaller lives in an internal package and is
         already the default; this setting is normally unnecessary - confirm
         before keeping it. -->
    <property name="marshaller">
        <bean class="org.apache.ignite.internal.binary.BinaryMarshaller" />
    </property>

    <!-- Native persistence / data storage configuration -->
    <property name="dataStorageConfiguration">
        <bean class="org.apache.ignite.configuration.DataStorageConfiguration">

            <!-- Concurrency level; tune to the actual workload -->
            <property name="concurrencyLevel" value="200"/>

            <!-- Memory page size; should match the OS page size (getconf PAGESIZE) -->
            <property name="pageSize" value="#{4 * 1024}"/>

            <!-- Size of the WAL (Write Ahead Log) segment: 1G -->
            <property name="walSegmentSize" value="#{1024 * 1024 * 1024}"/>

            <!-- In our experience LOG_ONLY is a good compromise between durability and performance. -->
            <property name="walMode" value="LOG_ONLY"/>
            <!-- Enable write throttling. -->
            <property name="writeThrottlingEnabled" value="true"/>

            <!-- Checkpointing frequency: the minimal interval (ms) at which
                 dirty pages are written to the persistent store. -->
            <property name="checkpointFrequency" value="180000"/>

            <!-- Default data region. Data lives only in memory by default;
                 persistence to disk is enabled here. -->
            <property name="defaultDataRegionConfiguration">

                <bean class="org.apache.ignite.configuration.DataRegionConfiguration">
                    <!-- Persist this region to disk when true -->
                    <property name="persistenceEnabled" value="true"/>

                    <property name="name" value="vehicle_Region"/>

                    <!-- Initial region size: 1G (the original "2G" note did not match the value) -->
                    <property name="initialSize" value="#{1L * 1024 * 1024 * 1024}" />

                    <!-- Maximum region size: 1G (the original "4G" note did not match the value) -->
                    <property name="maxSize" value="#{1L * 1024 * 1024 * 1024}" />

                    <!-- Checkpoint page buffer size: 1G (the original "4G" note did not match the value) -->
                    <property name="checkpointPageBufferSize" value="#{1L *1024* 1024 * 1024L}" />
                </bean>

            </property>

            <!-- Directories for persistent storage, the WAL and the WAL archive -->
            <property name="storagePath" value="/usr/local/software-common/ignite/apache-ignite-2.14.0-bin/persistence/storage" />
            <property name="walArchivePath" value="/usr/local/software-common/ignite/apache-ignite-2.14.0-bin/persistence/walArchive" />
            <property name="walPath" value="/usr/local/software-common/ignite/apache-ignite-2.14.0-bin/persistence/wal" />

        </bean>
    </property>

    <!-- 0 disables periodic metrics logging -->
    <property name="metricsLogFrequency" value="0"/>
    <!-- Failure detection timeout: 1 hour -->
    <property name="failureDetectionTimeout" value="#{60 * 60 * 1000}"/>
    <!-- Timeout for blocked system workers (default 10s); 1 hour here -->
    <property name="systemWorkerBlockedTimeout" value="#{60 * 60 * 1000}"/>
    <!-- Automatically restart the node process on a critical failure -->
    <property name="failureHandler">
        <bean class="org.apache.ignite.failure.RestartProcessFailureHandler"/>
    </property>

    <property name="cacheConfiguration">
        <bean class="org.apache.ignite.configuration.CacheConfiguration">
            <!-- Set a cache name. -->
            <property name="name" value="memdb2"/>

            <!-- Set asynchronous rebalancing. -->
            <property name="rebalanceMode" value="ASYNC"/>

            <!-- Partitioned cache mode with 2 backup copies per partition -->
            <property name="cacheMode" value="PARTITIONED"/>
            <property name="backups" value="2"/>
            <!-- Write synchronization mode: -->
            <!--  PRIMARY_SYNC (default: the write succeeds once the primary acks;
                  reads from backup nodes may still return stale data)  -->
            <!--  FULL_SYNC  (the write returns only after primary and backups have
                  written it; all nodes are consistent afterwards)  -->
            <!--  FULL_ASYNC (the write returns without waiting for primary or
                  backups; even reads from the primary may return stale data)  -->
            <property name="writeSynchronizationMode" value="PRIMARY_SYNC"/>

            <!-- Partition loss policy: -->
            <!--  IGNORE (default: partition loss is ignored, loss-related state is
                  cleared and no EVT_CACHE_REBALANCE_PART_DATA_LOST event fires)  -->
            <!--  READ_WRITE_ALL  (all reads and writes are allowed as if no
                  partition loss had happened)  -->
            <!--  READ_WRITE_SAFE (reads/writes of intact partitions are allowed;
                  operations on lost partitions fail with an exception)  -->
            <!--  READ_ONLY_ALL (reads of lost and intact partitions are allowed;
                  all writes fail with an exception)  -->
            <!--  READ_ONLY_SAFE (all writes and reads of lost partitions fail with
                  an exception; reads of intact partitions are allowed)  -->
            <property name="partitionLossPolicy" value="READ_WRITE_ALL"/>

        <!-- enable disk page compression for this cache -->
<!--
            <property name="diskPageCompression" value="SNAPPY"/>
-->
            <!-- Optional compression level. NOTE(review): this has no effect
                 while diskPageCompression above remains commented out. -->
            <property name="diskPageCompressionLevel" value="10"/>
        </bean>
    </property>

    <!-- Rebalance batch size: 1G per rebalance message -->
    <property name="rebalanceBatchSize" value="#{1 * 1024 * 1024 * 1024}"/>

    <!-- Set throttle interval. -->
    <property name="rebalanceThrottle" value="100"/>

    <!--
      Explicitly configure TCP discovery SPI to provide the list of initial nodes.
      With static IPs configured the nodes discover each other automatically;
      a single-node setup only needs its own address.
    -->
    <property name="discoverySpi">
        <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
            <property name="ipFinder">
                <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">
                    <property name="addresses">
                    <!-- List every cluster node's address here, as below -->
                        <list>
                            <value>192.168.91.101:47500..47509</value>
                            <value>192.168.91.102:47500..47509</value>
                            <value>192.168.91.103:47500..47509</value>
                        </list>
                    </property>
                </bean>
            </property>
        </bean>
    </property>

</bean>
</beans>

         startignite.sh内容

#! /bin/bash
# Launch an Ignite node in the background with the cluster config,
# sending all output (stdout + stderr) to a log file so the script
# can return immediately (matches systemd Type=forking).
IGNITE_DIR=/usr/local/software-common/ignite
nohup "${IGNITE_DIR}/apache-ignite-2.14.0-bin/bin/ignite.sh" "${IGNITE_DIR}/default.xml" > "${IGNITE_DIR}/out.log" 2>&1 &

       5、配置IGNITE_HOME环境变量:vim /etc/profile 在最后添加 export IGNITE_HOME=/usr/local/software-common/ignite/apache-ignite-2.14.0-bin,esc :wq 保存,source /etc/profile 使配置生效

       6、/etc/systemd/system目录下新建ignite.service文件,授权文件读写权限,内容如下:

[Unit]
# Fixed: Description was a copy-paste leftover ("start test-demo.jar");
# empty Documentation=/Wants=/Requires=/ExecStop= directives removed (noise).
Description=Apache Ignite node
After=network.target

[Service]
# startignite.sh backgrounds ignite.sh with nohup and exits, hence Type=forking.
ExecStart=/usr/local/software-common/ignite/startignite.sh
ExecReload=/usr/local/software-common/ignite/startignite.sh
Type=forking

[Install]
WantedBy=multi-user.target

       7、其余机器同样上述操作。配置准备完成后,各机器依次执行systemctl start ignite.service启动ignite。

        三台机器的ignite会自动发现对方(要关闭防火墙!systemctl stop firewalld --> systemctl disable firewalld)

        8、进入/usr/local/software-common/ignite/apache-ignite-2.14.0-bin/bin目录下,执行./control.sh --set-state ACTIVE激活集群即可。

        9、使用DBeaver测试连接,连接成功!集群搭建完成!

        10、最后每台机器执行systemctl enable ignite.service 实现开机自启

二、SpringBoot + SpringData整合Ignite实现增删改查

         pom文件依赖对应版本信息如下:

<!-- Spring Boot web + test starters; 2.3.0.RELEASE matches the
     spring-data-commons version used below. -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
    <version>2.3.0.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-test</artifactId>
    <version>2.3.0.RELEASE</version>
    <scope>test</scope>
</dependency>

<!-- Spring Data 2.2 extension for Ignite (provides IgniteRepository,
     @RepositoryConfig, @EnableIgniteRepositories). -->
<dependency>
    <groupId>org.apache.ignite</groupId>
    <artifactId>ignite-spring-data-2.2-ext</artifactId>
    <version>1.0.0</version>
</dependency>
<!-- Ignite core / SQL indexing / Spring integration, all pinned to 2.14.0 to
     match the server nodes installed on the cluster. -->
<dependency>
    <groupId>org.apache.ignite</groupId>
    <artifactId>ignite-core</artifactId>
    <version>2.14.0</version>
</dependency>
<dependency>
    <groupId>org.apache.ignite</groupId>
    <artifactId>ignite-indexing</artifactId>
    <version>2.14.0</version>
</dependency>
<dependency>
    <groupId>org.apache.ignite</groupId>
    <artifactId>ignite-spring</artifactId>
    <version>2.14.0</version>
</dependency>
<dependency>
    <groupId>org.springframework.data</groupId>
    <artifactId>spring-data-commons</artifactId>
    <version>2.3.0.RELEASE</version>
</dependency>

<!-- NOTE(review): Ignite's SQL engine is built against H2 1.4.197; do not
     upgrade H2 independently of Ignite - verify against the Ignite docs. -->
<dependency>
    <groupId>com.h2database</groupId>
    <artifactId>h2</artifactId>
    <version>1.4.197</version>
</dependency>

<!-- Lombok: generates getters/setters/constructors for the Person entity. -->
<dependency>
    <groupId>org.projectlombok</groupId>
    <artifactId>lombok</artifactId>
    <version>1.18.12</version>
</dependency>

         

        springboot启动类

/**
 * Spring Boot entry point; {@code @EnableIgniteRepositories} activates the
 * Spring Data Ignite repositories (e.g. PersonRepository).
 */
@SpringBootApplication
@EnableIgniteRepositories
public class IgniteApplication {

    public static void main(String[] args) {
        SpringApplication application = new SpringApplication(IgniteApplication.class);
        application.run(args);
    }
}

        ignite配置类 

/**
 * Ignite client configuration: starts a client node that joins the
 * three-node cluster via static-IP TCP discovery and exposes it as a bean.
 */
@Configuration
public class SpringAppCfg {

    @Bean
    public Ignite igniteInstance() {
        // Static IP finder listing every server node's discovery port range.
        TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder();
        ipFinder.setAddresses(Arrays.asList(
                "192.168.91.101:47500..47509",
                "192.168.91.102:47500..47509",
                "192.168.91.103:47500..47509"));

        TcpDiscoverySpi discoverySpi = new TcpDiscoverySpi();
        discoverySpi.setIpFinder(ipFinder);

        IgniteConfiguration cfg = new IgniteConfiguration();
        // Join as a client node: no data is hosted by this JVM.
        cfg.setClientMode(true);
        // Classes of custom Java logic will be transferred over the wire from this app.
        cfg.setPeerClassLoadingEnabled(true);
        cfg.setDiscoverySpi(discoverySpi);

        return Ignition.start(cfg);
    }
}

            person类

/**
 * Entity stored as the value in the Ignite "person" cache.
 * Each {@code @QuerySqlField} field is visible to Ignite SQL as a column;
 * {@code index = true} creates a sorted index on it.
 * Note: the cache key (the Integer passed to the repository) is separate
 * from this {@code id} field.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
public class Person {

    @QuerySqlField(index = true)
    private Integer id;

    @QuerySqlField(index = true)
    private String firstName;

    @QuerySqlField(index = true)
    private String phone;

}

         dao层

/**
 * Spring Data repository over the Ignite "person" cache.
 * Annotated queries run as Ignite SQL against the Person type; results of
 * {@code SELECT *} queries come back as cache entries (key + value).
 */
@RepositoryConfig(cacheName = "person")
public interface PersonRepository extends IgniteRepository<Person, Integer> {
    /** Derived query: all entries whose firstName equals the given name. */
    List<Cache.Entry<Integer, Person>> findByFirstName(String name);

    /** Ids greater than the given one; paging/sorting comes from {@code pageable}. */
    @Query("SELECT id FROM Person WHERE Id > ?")
    List<Integer> selectId(Integer Id, Pageable pageable);

    /** One page of all entries. */
    @Query("select * from person")
    List<Cache.Entry<Integer, Person>> selectAll(Pageable pageable);

    /** Current maximum id field value across the cache. */
    @Query("SELECT max(id) from person")
    Integer getMaxId();

    /** Entries with id greater than {@code Id} and an exact firstName match. */
    @Query(value = "select * from person where id > :Id and firstName = :firstName")
    List<Cache.Entry<Integer, Person>> findByManyParams(@Param("Id") Integer Id, @Param("firstName") String firstName);

    /** DML via @Query: updates firstName for the row with the given id. */
    @Query("update person set firstName = :firstName where Id = :Id")
    void updateById(@Param("Id") Integer Id, @Param("firstName") String firstName);

    /** Removes the entry with the given key (overrides CrudRepository's default). */
    void deleteById(Integer Id);

}

        测试类

/**
 * CRUD tests against the Ignite cluster through PersonRepository.
 * Requires the three-node cluster to be up and ACTIVE.
 */
@SpringBootTest
@RunWith(SpringRunner.class)
public class IgniteTest {

    @Resource
    private PersonRepository personRepository;

    /**
     * Converts cache entries to Person objects, copying each entry's key
     * into the Person.id field (the stored value's id may be null).
     */
    private static List<Person> toPersons(List<Cache.Entry<Integer, Person>> entries) {
        List<Person> persons = new ArrayList<>();
        for (Cache.Entry<Integer, Person> entry : entries) {
            Person person = entry.getValue();
            person.setId(entry.getKey());
            persons.add(person);
        }
        return persons;
    }

    /** Saves a single person under an explicit cache key. */
    @Test
    public void save() {
//        personRepository.save(3, new Person(3, "王五", "11111111111"));
        personRepository.save(4, new Person(null, "赵六", "11111111111"));
    }

    /** Batch-inserts 1000 people keyed after the current maximum id. */
    @Test
    public void saveBatch() {
        // FIX: getMaxId() is a cluster-wide SQL query; the original called it
        // once per loop iteration (1000 identical remote queries). Hoist it.
        Integer maxId = personRepository.getMaxId();
        Map<Integer, Person> map = new HashMap<>();
        for (int i = 0; i < 1000; i++) {
            map.put(i + maxId, new Person(null, "testname" + i, 1111111 + i + "1111"));
        }
        personRepository.save(map);
        System.out.println("数据插入成功!");
    }

    /** Looks people up by first name. */
    @Test
    public void queryByName() {
        List<Cache.Entry<Integer, Person>> entries = personRepository.findByFirstName("王五");
        System.out.println(toPersons(entries));
    }

    /** Reads the current maximum id. */
    @Test
    public void queryMaxId() {
        Integer maxId = personRepository.getMaxId();
        System.out.println("maxId = " + maxId);
    }

    /** Paged queries: first 10 rows ordered by id descending. */
    @Test
    public void queryWithPage() {
        PageRequest page = PageRequest.of(0, 10, Sort.by(Sort.Direction.DESC, "id"));
        List<Integer> integers = personRepository.selectId(4, page);
        System.out.println("integers = " + integers);

        List<Cache.Entry<Integer, Person>> entries = personRepository.selectAll(page);
        System.out.println(toPersons(entries));
    }

    /** Multi-parameter named query. */
    @Test
    public void queryByManyParams() {
        List<Cache.Entry<Integer, Person>> entries = personRepository.findByManyParams(4, "testname998");
        Cache.Entry<Integer, Person> entry = entries.get(0);
        Person person = entry.getValue();
        person.setId(entry.getKey());
        System.out.println(person);
    }

    /** Updates firstName for id=1 via the DML @Query method. */
    @Test
    public void updateById() {
        personRepository.updateById(1, "张三三");
        System.out.println("数据更新成功!");
    }

    /** Deletes the entry with key 1004. */
    @Test
    public void deleteById() {
        personRepository.deleteById(1004);
        System.out.println("数据删除成功!");
    }
}

        测试前置条件:使用DBeaver创建表

-- Create the SQL-visible person table backed by the "person" cache.
-- VALUE_TYPE must be the fully-qualified name of the Person Java class,
-- so repository reads/writes and SQL see the same objects.
CREATE TABLE PUBLIC.person (
   id INTEGER,
   firstName VARCHAR,
   phone VARCHAR,
   PRIMARY KEY (id)
)WITH "template=REPLICATED,CACHE_NAME=person,VALUE_TYPE=com.**.**.Person";
-- FIX: the original used '#' for the comment above, which is not valid SQL
-- comment syntax; also added explicit column lists to the INSERTs so they
-- don't break silently if the schema changes.

INSERT INTO public.person (id, firstName, phone) VALUES (1, '张三', '11111111111');
INSERT INTO public.person (id, firstName, phone) VALUES (2, '李四', '11111111111');

评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值