Flink下沉数据到Redis的两种方式

目录

方式一

方式二

这里使用Flink的DataStream API,数据源则是通过消费Kafka的主题。因此,maven工程中的依赖必须包括:

<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-streaming-java_2.11</artifactId>
    <version>${flink.version}</version>
</dependency>
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-kafka_2.11</artifactId>
    <version>${flink.version}</version>
</dependency>

方式一

使用包:

<dependency>
    <groupId>org.apache.bahir</groupId>
    <artifactId>flink-connector-redis_2.11</artifactId>
    <version>${flink-redis.version}</version>
</dependency>

代码如下:

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;

import java.util.Properties;


public class Kafka2RedisDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Consume the Kafka topic as plain strings.
        String topic = "test";
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "host1:9092,host2:9092,host3:9092");
        props.setProperty("group.id", "test");
        props.setProperty("auto.offset.reset", "latest");
        props.setProperty("enable.auto.commit", "true");
        props.setProperty("auto.commit.interval.ms", "3000");
        FlinkKafkaConsumer<String> consumer = new FlinkKafkaConsumer<String>(topic, new SimpleStringSchema(), props);
        DataStreamSource<String> stream = env.addSource(consumer);
        // Sink to Redis via the Bahir connector.
        FlinkJedisPoolConfig config = new FlinkJedisPoolConfig.Builder()
                .setHost("localhost")
                .setPort(6379)
                .build();
        stream.addSink(new RedisSink<>(config, new RedisMapper<String>() {
            @Override
            public RedisCommandDescription getCommandDescription() {
                // For SET the "additional key" argument is ignored (it only
                // applies to hash/sorted-set commands such as HSET/ZADD), so
                // don't pass a misleading one.
                return new RedisCommandDescription(RedisCommand.SET);
            }

            // Splits "key,value" on the FIRST comma only, so a value that
            // itself contains commas is no longer truncated.
            private String[] splitMsg(String s) {
                return s.split(",", 2);
            }

            @Override
            public String getKeyFromData(String s) {
                String[] parts = splitMsg(s);
                return parts.length < 2 ? "" : parts[0];
            }

            @Override
            public String getValueFromData(String s) {
                String[] parts = splitMsg(s);
                return parts.length < 2 ? "" : parts[1];
            }
        }));
        env.execute("Kafka2RedisDemo");
    }
}

方式二

使用包:

<dependency>
    <groupId>redis.clients</groupId>
    <artifactId>jedis</artifactId>
    <version>${redis.version}</version>
</dependency>

代码如下:

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import redis.clients.jedis.Jedis;

import java.util.Properties;


public class Kafka2RedisDemo {
    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Kafka source: consume the "test" topic as plain strings.
        final Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", "host1:9092,host2:9092,host3:9092");
        kafkaProps.setProperty("group.id", "test");
        kafkaProps.setProperty("auto.offset.reset", "latest");
        kafkaProps.setProperty("enable.auto.commit", "true");
        kafkaProps.setProperty("auto.commit.interval.ms", "3000");
        DataStreamSource<String> source =
                env.addSource(new FlinkKafkaConsumer<String>("test", new SimpleStringSchema(), kafkaProps));

        // Redis sink: a hand-rolled RichSinkFunction owning one Jedis
        // connection per parallel subtask, opened and closed with the
        // task lifecycle.
        source.addSink(new RichSinkFunction<String>() {
            private Jedis client;

            @Override
            public void open(Configuration parameters) throws Exception {
                client = new Jedis("localhost", 6379);
            }

            @Override
            public void invoke(String value, Context context) {
                // Each record overwrites the same key, as in the demo's intent.
                client.set("testKey", value);
            }

            @Override
            public void close() throws Exception {
                if (client != null) {
                    client.close();
                }
            }
        });

        env.execute("Kafka2RedisDemo");
    }
}

不难看出,方式二其实就是继承了Flink提供的抽象类 RichSinkFunction,并实现其中对应的各个方法,个人更喜欢方式二的做法。

现在我们假设,redis是个集群,为了连接它,应该这样做:

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisCluster;
import redis.clients.jedis.JedisPoolConfig;

import java.util.HashSet;
import java.util.Properties;
import java.util.Set;

public class Kafka2RedisDemo {
    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Kafka source: consume the "test" topic as plain strings.
        final Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", "host1:9092,host2:9092,host3:9092");
        kafkaProps.setProperty("group.id", "test");
        kafkaProps.setProperty("auto.offset.reset", "latest");
        kafkaProps.setProperty("enable.auto.commit", "true");
        kafkaProps.setProperty("auto.commit.interval.ms", "3000");
        DataStreamSource<String> source =
                env.addSource(new FlinkKafkaConsumer<String>("test", new SimpleStringSchema(), kafkaProps));

        // Redis *cluster* sink: JedisCluster discovers the full topology from
        // the seed nodes and routes each command to the right shard.
        source.addSink(new RichSinkFunction<String>() {
            private JedisCluster cluster;

            @Override
            public void open(Configuration parameters) throws Exception {
                Set<HostAndPort> seedNodes = new HashSet<>();
                seedNodes.add(new HostAndPort("host1", 6379));
                seedNodes.add(new HostAndPort("host2", 6379));
                seedNodes.add(new HostAndPort("host3", 6379));
                // Without authentication one could simply do:
                //   cluster = new JedisCluster(seedNodes);
                // With authentication, the arguments are:
                // (connectionTimeout, soTimeout, maxAttempts, password, poolConfig)
                cluster = new JedisCluster(seedNodes, 1000, 10000, 3, "auth", new JedisPoolConfig());
            }

            @Override
            public void invoke(String value, Context context) {
                cluster.set("testKey", value);
            }

            @Override
            public void close() {
                if (cluster != null) {
                    cluster.close();
                }
            }
        });

        env.execute("Kafka2RedisDemo");
    }
}

END.

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
flink-statebackend-redisFlink 提供的一个 StateBackend 插件,用于将 Flink 程序中的状态数据存储到 Redis 中。如果您想在 Flink 程序中使用 RedisStateBackend,需要在项目中引入 flink-statebackend-redis 依赖。 具体来说,在 Maven 项目中,您可以在 pom.xml 文件中添加以下依赖: ``` <dependency> <groupId>org.apache.flink</groupId> <artifactId>flink-statebackend-redis</artifactId> <version>${flink.version}</version> </dependency> ``` 在 Gradle 项目中,您可以在 build.gradle 文件中添加以下依赖: ``` dependencies { implementation "org.apache.flink:flink-statebackend-redis:${flinkVersion}" } ``` 这里的 ${flink.version} 或 ${flinkVersion} 是指您使用的 Flink 版本号。如果您使用的是 Flink 1.12 及以上版本,可以直接使用 flink-statebackend-redis 依赖。如果您使用的是 Flink 1.11 及以下版本,需要先引入 flink-statebackend-rocksdb 依赖: ``` <dependency> <groupId>org.apache.flink</groupId> <artifactId>flink-statebackend-rocksdb</artifactId> <version>${flink.version}</version> </dependency> ``` 或者 ``` dependencies { implementation "org.apache.flink:flink-statebackend-rocksdb:${flinkVersion}" } ``` 然后再引入 flink-statebackend-redis 依赖: ``` <dependency> <groupId>org.apache.flink</groupId> <artifactId>flink-statebackend-redis</artifactId> <version>${flink.version}</version> </dependency> ``` 或者 ``` dependencies { implementation "org.apache.flink:flink-statebackend-redis:${flinkVersion}" } ```

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

柏舟飞流

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值