package com.bairong.flink.java;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.AggregateOperator;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;
/**
* author: YangYunhe
* date: 2019/7/23 19:41
* description:
*/
public class BatchWordCountJava {
    /**
     * Batch word count with the Flink DataSet API: reads a text file,
     * counts occurrences of each (lower-cased) word, and writes the
     * result as CSV.
     *
     * @param args unused
     * @throws Exception if job submission or execution fails
     */
    public static void main(String[] args) throws Exception {
        /*
         * The input path may be a directory (every file inside is read) or a single file.
         * Output path: when the final result is a single file, the path names that file;
         * when there are multiple result files, the path names a directory.
         */
        String inputPath = "D:\\space\\idea\\course\\learning-flink\\inputPath\\words.txt";
        String outputPath = "D:\\space\\idea\\course\\learning-flink\\outputPath\\wordcount_batch_result.txt";
        // 1. Obtain the batch execution environment
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // 2. Read the file content, one String element per line
        DataSource<String> source = env.readTextFile(inputPath);
        // 3. Tokenize each line, emit (word, 1), group by word, sum the counts
        AggregateOperator<Tuple2<String, Long>> sum = source.flatMap(new FlatMapFunction<String, Tuple2<String, Long>>() {
            @Override
            public void flatMap(String line, Collector<Tuple2<String, Long>> collector) throws Exception {
                String[] words = line.toLowerCase().split("\\W+");
                for (String word : words) {
                    // split("\\W+") yields a leading empty token when the line
                    // starts with a non-word character (and a single "" for an
                    // empty line) — skip it so "" is never counted as a word.
                    if (!word.isEmpty()) {
                        collector.collect(new Tuple2<String, Long>(word, 1L));
                    }
                }
            }
        }).groupBy(0).sum(1);
        // 4. Write the result as CSV: "\n" separates records, ", " separates
        //    tuple fields. Parallelism 1 forces a single output file.
        sum.writeAsCsv(outputPath, "\n", ", ").setParallelism(1);
        // writeAsCsv only registers the sink; execute() actually runs the job.
        env.execute("BatchWordCountJava");
    }
}
package com.bairong.flink.scala
import org.apache.flink.api.scala._
/**
* author: YangYunhe
* date: 2019/7/23 20:13
* description:
*/
object BatchWordCountScala {

  /**
   * Batch word count with the Flink DataSet API (Scala): reads a text
   * file, counts occurrences of each lower-cased word, and writes the
   * result as CSV.
   */
  def main(args: Array[String]): Unit = {
    val inputPath = "D:\\space\\idea\\course\\learning-flink\\inputPath\\words.txt"
    val outputPath = "D:\\space\\idea\\course\\learning-flink\\outputPath\\wordcount_batch_result.txt"
    // Batch execution environment
    val environment = ExecutionEnvironment.getExecutionEnvironment
    // One String element per input line
    val source: DataSet[String] = environment.readTextFile(inputPath)
    source.flatMap(_.toLowerCase.split("\\W+"))
      // split("\\W+") produces a leading "" when a line starts with a
      // non-word character — drop empty tokens so "" is never counted.
      .filter(_.nonEmpty)
      .map((_, 1))
      .groupBy(0)
      .sum(1)
      // "\n" separates records, ", " separates tuple fields; parallelism 1
      // forces a single output file.
      .writeAsCsv(outputPath, "\n", ", ")
      .setParallelism(1)
    // writeAsCsv only registers the sink; execute() actually runs the job.
    environment.execute("BatchWordCountScala")
  }
}
# 流处理
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
# 批处理
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
# 流处理
DataStreamSource<String> source = env.socketTextStream(host, port);
# 批处理
DataSource<String> source = env.readTextFile(inputPath);
# 流处理
SingleOutputStreamOperator<WordWithCount> counts = source.flatMap(...)...
# 即处理数据后的类型为DataStream
SingleOutputStreamOperator<T> extends DataStream<T>
# 批处理
AggregateOperator<Tuple2<String, Long>> sum = source.flatMap(...)...
# 即处理数据后的类型为DataSet
AggregateOperator<IN> extends SingleInputOperator<IN, IN, AggregateOperator<IN>>
SingleInputOperator<IN, OUT, O extends SingleInputOperator<IN, OUT, O>> extends Operator<OUT, O>
Operator<OUT, O extends Operator<OUT, O>> extends DataSet<OUT>