Preface
Our Flink job produces dozens of computed records per second. The data needs to be deduplicated so that only one record per second is kept.
1. Deduplication approach 1: converting the stream to a table
Core code logic:
// Compute the real-time IOPV indicators; this emits dozens of records per second.
SingleOutputStreamOperator<FundIopvIndicators> streamOperator = EtfIopvFunction.calculateRealTimeIopv(stringKeyedStream);
// Convert the stream into a Table; strTime is the second-level time key used for deduplication.
Table table = tableEnv.fromDataStream(streamOperator, "fundCode,realTimeIopv,computationTime,strTime");
// Keep one row per second: partition by strTime, take the latest record within that second.
// Concatenating the Table object into the SQL string auto-registers it under a generated name.
Table duplicateRemoval = tableEnv.sqlQuery("SELECT strTime, fundCode, realTimeIopv, computationTime FROM ("
        + " SELECT strTime, fundCode, realTimeIopv, computationTime, ROW_NUMBER() OVER"
        + " (PARTITION BY strTime ORDER BY computationTime DESC) AS rownum FROM "
        + table
        + ") WHERE rownum = 1"
);
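To actually emit the deduplicated rows, the resulting Table still has to be converted back into a DataStream. A minimal sketch, assuming the tableEnv and duplicateRemoval from above; the print() sink is only a placeholder:

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.types.Row;

// A ROW_NUMBER() dedup query produces updates, so the result must be read as a
// retract stream (toAppendStream would throw here); each element carries a flag:
// true = add/update message, false = retraction of an earlier row.
DataStream<Tuple2<Boolean, Row>> retractStream =
        tableEnv.toRetractStream(duplicateRemoval, Row.class);

retractStream
        .filter(change -> change.f0) // keep adds/updates, drop retraction messages
        .print();                    // placeholder sink for illustration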
2. Deduplication approach 2: Flink SQL
CREATE TABLE user_log (
  user_id VARCHAR
  ,item_id VARCHAR
  ,category_id VARCHAR
  ,behavior INT
  ,ts TIMESTAMP(3)
  -- processing-time attribute; the dedup query below orders by this column
  ,process_time AS PROCTIME()
  ,WATERMARK FOR ts AS ts
) WITH (
  'connector' = 'kafka'
  ,'topic' = 'user_behavior'
  ,'properties.bootstrap.servers' = 'localhost:9092'
  ,'properties.group.id' = 'user_log'
  ,'scan.startup.mode' = 'group-offsets'
  ,'format' = 'json'
);
CREATE TABLE user_log_sink (
  user_id VARCHAR
  ,item_id VARCHAR
  ,category_id VARCHAR
  ,behavior INT
  ,ts TIMESTAMP(3)
  ,num BIGINT
  -- upsert key; it must match the dedup partition key used in the INSERT below
  ,PRIMARY KEY (category_id) NOT ENFORCED
) WITH (
  'connector' = 'upsert-kafka'
  ,'topic' = 'user_behavior_sink'
  ,'properties.bootstrap.servers' = 'localhost:9092'
  ,'key.format' = 'json'
  ,'key.json.ignore-parse-errors' = 'true'
  ,'value.format' = 'json'
  ,'value.json.fail-on-missing-field' = 'false'
  ,'value.fields-include' = 'ALL'
);
-- keep only the latest row per category_id, ordered by processing time
INSERT INTO user_log_sink (user_id, item_id, category_id, behavior, ts, num)
SELECT user_id, item_id, category_id, behavior, ts, rownum
FROM (
  SELECT user_id, item_id, category_id, behavior, ts,
    ROW_NUMBER() OVER (PARTITION BY category_id ORDER BY process_time DESC) AS rownum
  FROM user_log)
WHERE rownum = 1;
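For completeness, the three statements above can be submitted from a small Java driver. A minimal sketch, assuming the DDL and INSERT strings are held in plain String variables; sourceDdl, sinkDdl and insertSql are placeholder names, not part of the original:

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

tableEnv.executeSql(sourceDdl); // CREATE TABLE user_log ...
tableEnv.executeSql(sinkDdl);   // CREATE TABLE user_log_sink ...
tableEnv.executeSql(insertSql); // INSERT INTO ... : submits the continuous dedup job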