// Batch processing operators (Flink DataSet API demo)
import org.apache.flink.api.common.functions.RichMapFunction
import org.apache.flink.api.common.operators.Order
import org.apache.flink.api.java.aggregation.Aggregations
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.DataStream
object Transform_Batch_Demo {

  /**
   * Demonstrates two basic Flink batch (DataSet API) transformations:
   * `map` (scale each element) and `flatMap` (split strings into words).
   *
   * Note: `print()` on a DataSet both triggers job execution and writes
   * the result to stdout, so no explicit `env.execute()` is needed here.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    // Create the batch execution environment — the entry point of a Flink batch program.
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment

    // --- map operator ---
    val data: DataSet[Int] = env.fromCollection(List(1, 2, 3, 4, 5))
    // Scale every element by a factor of 10.
    // Renamed from `result`: the original declared `val result` twice in the
    // same scope (here and below), which does not compile in Scala.
    val mapResult: DataSet[Int] = data.map(_ * 10)
    mapResult.print()

    // --- flatMap operator ---
    val listDataSet: DataSet[String] =
      env.fromCollection(List("hadoop spark", "hive mysql", "hbase kafka"))
    // Split each string on the space delimiter, flattening into one word stream.
    val flatMapResult: DataSet[String] = listDataSet.flatMap(_.split(" "))
    flatMapResult.print()
  }
}