目录
方式一
方式二
这里使用Flink的DataStream API,数据源则是通过消费Kafka的主题。因此,maven工程中的依赖必须包括:
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-streaming-java_2.11</artifactId>
<version>${flink.version}</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-connector-kafka_2.11</artifactId>
<version>${flink.version}</version>
</dependency>
方式一
使用包:
<dependency>
<groupId>org.apache.bahir</groupId>
<artifactId>flink-connector-redis_2.11</artifactId>
<version>${flink-redis.version}</version>
</dependency>
代码如下:
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;
import java.util.Properties;
/**
 * Demo: consume a Kafka topic with Flink's DataStream API and sink every
 * record into a single-node Redis via the Bahir {@code flink-connector-redis}.
 * Records are expected in "key,value" CSV form.
 */
public class Kafka2RedisDemo {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Consume from Kafka.
        String topic = "test";
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "host1:9092,host2:9092,host3:9092");
        props.setProperty("group.id", "test");
        props.setProperty("auto.offset.reset", "latest");
        // NOTE(review): with checkpointing enabled Flink manages offsets itself;
        // auto-commit here mainly affects what external monitoring tools observe.
        props.setProperty("enable.auto.commit", "true");
        props.setProperty("auto.commit.interval.ms", "3000");
        FlinkKafkaConsumer<String> consumer =
                new FlinkKafkaConsumer<>(topic, new SimpleStringSchema(), props);
        DataStreamSource<String> stream = env.addSource(consumer);

        // Sink to Redis.
        FlinkJedisPoolConfig config = new FlinkJedisPoolConfig.Builder()
                .setHost("localhost")
                .setPort(6379)
                .build();
        stream.addSink(new RedisSink<>(config, new RedisMapper<String>() {

            /**
             * Use the plain SET command. The additional-key argument of
             * {@link RedisCommandDescription} is only meaningful for hash /
             * sorted-set commands (HSET, ZADD); for SET it is ignored, so the
             * original "testKey" argument was misleading and is omitted here.
             */
            @Override
            public RedisCommandDescription getCommandDescription() {
                return new RedisCommandDescription(RedisCommand.SET);
            }

            /**
             * Splits a "key,value" record on commas and returns the field at
             * {@code index}, or "" when the record has fewer than two fields.
             */
            private String getMsg(String s, int index) {
                String[] msgArr = s.split(",");
                if (msgArr.length < 2) return "";
                return msgArr[index];
            }

            /** Redis key = first CSV field of the record. */
            @Override
            public String getKeyFromData(String s) {
                return getMsg(s, 0);
            }

            /** Redis value = second CSV field of the record. */
            @Override
            public String getValueFromData(String s) {
                return getMsg(s, 1);
            }
        }));

        env.execute("Kafka2RedisDemo");
    }
}
方式二
使用包:
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>${redis.version}</version>
</dependency>
代码如下:
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import redis.clients.jedis.Jedis;
import java.util.Properties;
/**
 * Demo: consume a Kafka topic with Flink's DataStream API and sink each record
 * into a single-node Redis, using a hand-written {@link RichSinkFunction}
 * around a plain Jedis client instead of the Bahir connector.
 */
public class Kafka2RedisDemo {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Source: consume the Kafka topic as plain strings.
        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", "host1:9092,host2:9092,host3:9092");
        kafkaProps.setProperty("group.id", "test");
        kafkaProps.setProperty("auto.offset.reset", "latest");
        kafkaProps.setProperty("enable.auto.commit", "true");
        kafkaProps.setProperty("auto.commit.interval.ms", "3000");
        DataStreamSource<String> source =
                env.addSource(new FlinkKafkaConsumer<>("test", new SimpleStringSchema(), kafkaProps));

        // Sink: every record overwrites the same Redis key.
        source.addSink(new RichSinkFunction<String>() {
            // One connection per parallel sink instance; opened in open(), released in close().
            private Jedis client;

            @Override
            public void open(Configuration parameters) throws Exception {
                client = new Jedis("localhost", 6379);
            }

            @Override
            public void invoke(String value, Context context) {
                client.set("testKey", value);
            }

            @Override
            public void close() throws Exception {
                if (client != null) {
                    client.close();
                }
            }
        });

        env.execute("Kafka2RedisDemo");
    }
}
不难看出,方式二其实就是继承了Flink之下的抽象类:RichSinkFunction,然后实现了对应的各方法,个人更喜欢方式二的做法。
现在我们假设,redis是个集群,为了连接它,应该这样做:
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisCluster;
import redis.clients.jedis.JedisPoolConfig;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
/**
 * Demo: consume a Kafka topic with Flink's DataStream API and sink each record
 * into a Redis <em>cluster</em> through {@link JedisCluster}, wrapped in a
 * hand-written {@link RichSinkFunction}.
 */
public class Kafka2RedisDemo {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Source: consume the Kafka topic as plain strings.
        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", "host1:9092,host2:9092,host3:9092");
        kafkaProps.setProperty("group.id", "test");
        kafkaProps.setProperty("auto.offset.reset", "latest");
        kafkaProps.setProperty("enable.auto.commit", "true");
        kafkaProps.setProperty("auto.commit.interval.ms", "3000");
        DataStreamSource<String> source =
                env.addSource(new FlinkKafkaConsumer<>("test", new SimpleStringSchema(), kafkaProps));

        // Sink: every record overwrites the same key in the Redis cluster.
        source.addSink(new RichSinkFunction<String>() {
            // One cluster client per parallel sink instance; opened in open(), released in close().
            private JedisCluster cluster;

            @Override
            public void open(Configuration parameters) throws Exception {
                Set<HostAndPort> seedNodes = new HashSet<>();
                seedNodes.add(new HostAndPort("host1", 6379));
                seedNodes.add(new HostAndPort("host2", 6379));
                seedNodes.add(new HostAndPort("host3", 6379));
                JedisPoolConfig poolConfig = new JedisPoolConfig();
                // Without authentication:
                // cluster = new JedisCluster(seedNodes);
                // With authentication — args: (nodes, connectionTimeout ms,
                // soTimeout ms, maxAttempts, password, pool config):
                cluster = new JedisCluster(seedNodes, 1000, 10000, 3, "auth", poolConfig);
            }

            @Override
            public void invoke(String value, Context context) {
                cluster.set("testKey", value);
            }

            @Override
            public void close() {
                if (cluster != null) {
                    cluster.close();
                }
            }
        });

        env.execute("Kafka2RedisDemo");
    }
}
END.