1. Background
This article builds a custom UDTF (Table Function) on top of the IK analyzer, reproducing the row-to-column effect of Hive's explode, as a concise walkthrough of the development process.
Among Flink's three API layers, the Table API sits at the top and is the easiest to use: code can be written with SQL syntax, so anyone with a SQL background can pick it up quickly. The trade-off is limited flexibility; when the built-in functions are not enough, you need user-defined functions, analogous to Hive's UDF/UDTF/UDAF, which in Flink are called Scalar Functions, Table Functions, and Aggregate Functions.
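As a rough orientation, the three kinds of user-defined functions look like this (a minimal sketch; the base classes are the real org.apache.flink.table.functions APIs, while the class names and bodies are hypothetical placeholders, each of which would live in its own file):
import org.apache.flink.table.functions.AggregateFunction;
import org.apache.flink.table.functions.ScalarFunction;
import org.apache.flink.table.functions.TableFunction;
// Scalar Function: one value in, one value out (Hive UDF analogue)
public class MyUpper extends ScalarFunction {
    public String eval(String s) {
        return s == null ? null : s.toUpperCase();
    }
}
// Table Function: one row in, zero or more rows out (Hive UDTF analogue)
public class MySplit extends TableFunction<String> {
    public void eval(String s) {
        for (String w : s.split(" ")) {
            collect(w);
        }
    }
}
// Aggregate Function: many rows in, one value out (Hive UDAF analogue)
public class MyCount extends AggregateFunction<Long, long[]> {
    public long[] createAccumulator() { return new long[]{0L}; }
    public void accumulate(long[] acc, String s) { acc[0]++; }
    public Long getValue(long[] acc) { return acc[0]; }
}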
2. Result Preview
On the Kafka side, create a producer and send JSON snippets:
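For example, using the console producer that ships with Kafka (the broker address and topic match the table definition in section 3; on newer Kafka releases the flag is --bootstrap-server rather than --broker-list, and the JSON line is just a sample payload):
bin/kafka-console-producer.sh --broker-list hadoop102:9092 --topic keywordtest
>{"word":"hello flink table api"}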
On the IDEA side, the result after the data is consumed and processed:
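With the sample message above, the console output looks roughly like this (illustrative only; the prefixes come from the print labels in section 3, and the actual tokens depend on IK's dictionary):
original>>>> hello flink table api
UDTF result>>>> hello,1
UDTF result>>>> flink,1
UDTF result>>>> table,1
UDTF result>>>> api,1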
As shown above, this reproduces the row-to-column effect of Hive's explode function. The IK analyzer is not mandatory: splitting directly on spaces implements the same logic, as the sketch below shows.
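A hypothetical drop-in replacement for the eval method in section 3 that tokenizes with String.split instead of IK:
// Same UDTF, but splitting on whitespace instead of running the IK segmenter
public void eval(String value) {
    for (String s : value.split("\\s+")) {
        collect(Row.of(s));
    }
}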
3. Code Walkthrough
Since Flink typically runs in a streaming environment, the data source here is Kafka and the table is created as a dynamic table, which keeps the example close to a real business setup.
- Utility class:
package com.test.UDTF;

import org.apache.flink.table.annotation.DataTypeHint;
import org.apache.flink.table.annotation.FunctionHint;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.types.Row;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

/**
 * @author: Rango
 * @create: 2021-05-04 16:50
 * @description: Custom function: extend TableFunction and provide an eval method
 **/
@FunctionHint(output = @DataTypeHint("ROW<word STRING>"))
public class KeywordUDTF extends TableFunction<Row> {
    // Per the official docs, the evaluation method must be named eval
    public void eval(String value) {
        List<String> stringList = analyze(value);
        for (String s : stringList) {
            Row row = new Row(1);
            row.setField(0, s);
            collect(row);
        }
    }

    // Custom tokenization logic
    public List<String> analyze(String text) {
        // Wrap the string in a character stream
        StringReader sr = new StringReader(text);
        // Create the segmenter (true enables smart mode)
        IKSegmenter ik = new IKSegmenter(sr, true);
        // IK emits each token as a Lexeme object
        Lexeme lex;
        // Collect the tokens into a list
        List<String> keywordList = new ArrayList<>();
        try {
            while ((lex = ik.next()) != null) {
                keywordList.add(lex.getLexemeText());
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return keywordList;
    }
}
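To sanity-check the tokenizer without launching a Flink job, analyze can be called directly (a throwaway main added temporarily for illustration; the exact tokens depend on IK's dictionary and smart-mode setting):
public static void main(String[] args) {
    System.out.println(new KeywordUDTF().analyze("hello flink table api"));
}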
- Application class:
package com.test.UDTF;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

/**
 * @author: Rango
 * @create: 2021-05-04 17:11
 * @description: Register the UDTF and apply it to a Kafka-backed dynamic table
 **/
public class KeywordStatsApp {
    public static void main(String[] args) throws Exception {
        // Set up the environments
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        EnvironmentSettings settings = EnvironmentSettings.newInstance().inStreamingMode().build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);
        // Register the function
        tableEnv.createTemporaryFunction("ik_analyze", KeywordUDTF.class);
        // Create the dynamic table
        tableEnv.executeSql("CREATE TABLE wordtable (" +
                "word STRING" +
                ") WITH ('connector' = 'kafka'," +
                "'topic' = 'keywordtest'," +
                "'properties.bootstrap.servers' = 'hadoop102:9092'," +
                "'properties.group.id' = 'keyword_stats_app'," +
                "'format' = 'json')");
        // Before splitting
        Table wordTable = tableEnv.sqlQuery("select word from wordtable");
        // Split the text with the UDTF; each token gets a count of 1 for later aggregation
        Table wordTable1 = tableEnv.sqlQuery("select splitword, 1 ct from wordtable," +
                " LATERAL TABLE(ik_analyze(word)) as T(splitword)");
        tableEnv.toAppendStream(wordTable, Row.class).print("original>>>");
        tableEnv.toAppendStream(wordTable1, Row.class).print("UDTF result>>>");
        env.execute();
    }
}
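For reference, the same lateral join can be written with the Table API instead of SQL (a sketch; joinLateral and the call/$ expression helpers live in org.apache.flink.table.api.Expressions in Flink 1.12):
import static org.apache.flink.table.api.Expressions.$;
import static org.apache.flink.table.api.Expressions.call;

// Equivalent to: select splitword from wordtable, LATERAL TABLE(ik_analyze(word)) as T(splitword)
Table splitTable = tableEnv.from("wordtable")
        .joinLateral(call("ik_analyze", $("word")).as("splitword"))
        .select($("splitword"));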
- Dependencies:
<properties>
    <java.version>1.8</java.version>
    <flink.version>1.12.0</flink.version>
    <scala.version>2.12</scala.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-java</artifactId>
        <version>${flink.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-table-api-java-bridge_${scala.version}</artifactId>
        <version>${flink.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-table-planner-blink_${scala.version}</artifactId>
        <version>${flink.version}</version>
    </dependency>
    <dependency>
        <groupId>com.janeluo</groupId>
        <artifactId>ikanalyzer</artifactId>
        <version>2012_u6</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-connector-kafka_${scala.version}</artifactId>
        <version>${flink.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-json</artifactId>
        <version>${flink.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-clients_${scala.version}</artifactId>
        <version>${flink.version}</version>
    </dependency>
</dependencies>
This post is for learning and exchange; if you have any questions, feel free to point them out in the comments.