Hello everyone! This post walks through serialization in Hadoop.
Serialization converts objects in memory into a byte sequence (or another data-transfer format) so that they can be stored on disk (persisted) or sent over the network. Deserialization converts a received byte sequence (or other data-transfer format), or data persisted on disk, back into objects in memory.
Generally speaking, a "live" object exists only in memory; once the machine is powered off it is gone. Moreover, a "live" object can only be used by the local process and cannot be sent over the network to another computer. Serialization, however, lets us store "live" objects and send them to remote machines.
Java's built-in serialization (Serializable) is a heavyweight framework: a serialized object carries a lot of extra information (checksums, headers, the inheritance hierarchy, and so on), which makes it inefficient to transmit over the network. Hadoop therefore implements its own lightweight serialization mechanism, Writable.
In enterprise development, the commonly used built-in serialization types often cannot cover every need. For example, to pass a custom bean object between tasks inside the Hadoop framework, that object must implement the serialization interface. Implementing serialization for a bean takes the following 7 steps:
(1) Implement the Writable interface.
(2) Provide an empty constructor, because deserialization creates the object via reflection.
(3) Override the serialization method write().
(4) Override the deserialization method readFields().
(5) Make sure readFields() reads the fields in exactly the same order in which write() wrote them.
(6) Override toString() so the result can be written to a file in a readable form; separating fields with "\t" is convenient for later processing.
(7) If the custom bean is transferred as a key, also implement the Comparable interface, because the Shuffle phase of MapReduce requires keys to be sortable.
The key pieces of code look like this:
// (2) An empty constructor, required because deserialization creates the object via reflection
public FlowBean() {
super();
}
// (3) The serialization method
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(upFlow);
out.writeLong(downFlow);
out.writeLong(sumFlow);
}
// (4) The deserialization method
// (5) The fields must be read in exactly the same order in which write() wrote them
@Override
public void readFields(DataInput in) throws IOException {
upFlow = in.readLong();
downFlow = in.readLong();
sumFlow = in.readLong();
}
// (7) compareTo(), only needed when the bean is used as a key
@Override
public int compareTo(FlowBean o) {
// Descending order, largest total flow first (Long.compare also returns 0 on ties)
return Long.compare(o.getSumFlow(), this.sumFlow);
}
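A quick note on step (7): when a custom bean really is transferred as the key, it is normally declared as implementing WritableComparable<FlowBean> (Hadoop's combination of Writable and Comparable) so that the Shuffle phase can sort it. In the case below, FlowBean is only ever passed as the value, so it implements just Writable and compareTo() is not needed.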
The requirement for this case: for each phone number, compute the total upstream traffic, the total downstream traffic, and the overall total traffic it consumed.
If you want to follow along, copy the data below into a file named phone_data (in the original file the fields are separated by tabs):
1 13736230513 192.196.2.1 www.shouhu.com 2481 24681 200
2 13846544121 192.196.2.2 264 0 200
3 13956435636 192.196.2.3 132 1512 200
4 13966251146 192.168.2.1 240 0 404
5 18271575951 192.168.2.2 www.shouhu.com 1527 2106 200
6 18240717138 192.168.2.3 www.hao123.com 4116 1432 200
7 13590439668 192.168.2.4 1116 954 200
8 15910133277 192.168.2.5 www.hao123.com 3156 2936 200
9 13729199489 192.168.2.6 240 0 200
10 13630577991 192.168.2.7 www.shouhu.com 6960 690 200
11 15043685818 192.168.2.8 www.baidu.com 3659 3538 200
12 15959002129 192.168.2.9 www.hao123.com 1938 180 500
13 13560439638 192.168.2.10 918 4938 200
14 13470253144 192.168.2.11 180 180 200
15 13682846555 192.168.2.12 www.qq.com 1938 2910 200
16 13992314666 192.168.2.13 www.gaga.com 3008 3720 200
17 13509468723 192.168.2.14 www.qinghua.com 7335 110349 404
18 18390173782 192.168.2.15 www.sogou.com 9531 2412 200
19 13975057813 192.168.2.16 www.baidu.com 11058 48243 200
20 13768778790 192.168.2.17 120 120 200
21 13568436656 192.168.2.18 www.alibaba.com 2481 24681 200
22 13568436656 192.168.2.19 1116 954 200
7 13590439668 192.168.2.4 1116 954 200
The input data format (the columns of phone_data) is:
id    phone number    network IP    upstream traffic    downstream traffic    HTTP status code
The expected output data format is:
phone number    upstream traffic    downstream traffic    total traffic
e.g. 13560436666    1116    954    2070
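As a concrete check of the aggregation: the phone number 13568436656 appears twice in the sample data (upstream 2481 + 1116 = 3597, downstream 24681 + 954 = 25635), so its line in the output should read 13568436656    3597    25635    29232. First, write the FlowBean class that carries the traffic fields: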
package com.buwenbuhuo.flowsun;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
* @author 卜温不火
* @create 2020-04-23 14:14
* com.buwenbuhuo.flowsun - the name of the target package where the new class or interface will be created.
* mapreduce0422 - the name of the current project.
*/
// 1. Implement the Writable interface
public class FlowBean implements Writable {
private long upFlow;
private long downFlow;
private long sumFlow;
// 2. An empty constructor is required: deserialization creates the object via reflection
public FlowBean() {
}
@Override
public String toString() {
return upFlow + "\t" + downFlow + "\t" + sumFlow;
}
public void set(long upFlow, long downFlow) {
this.upFlow = upFlow;
this.downFlow = downFlow;
this.sumFlow = upFlow + downFlow;
}
public long getUpFlow() {
return upFlow;
}
public void setUpFlow(long upFlow) {
this.upFlow = upFlow;
}
public long getDownFlow() {
return downFlow;
}
public void setDownFlow(long downFlow) {
this.downFlow = downFlow;
}
public long getSumFlow() {
return sumFlow;
}
public void setSumFlow(long sumFlow) {
this.sumFlow = sumFlow;
}
// 3. The serialization method
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(upFlow);
out.writeLong(downFlow);
out.writeLong(sumFlow);
}
// 4. The deserialization method
// 5. The fields must be read in exactly the same order in which write() wrote them
@Override
public void readFields(DataInput in) throws IOException {
upFlow = in.readLong();
downFlow = in.readLong();
sumFlow = in.readLong();
}
}
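Before plugging FlowBean into a MapReduce job, it can be handy to verify locally that write() and readFields() really mirror each other. Below is a minimal round-trip sketch (not part of the original post; the class name FlowBeanRoundTrip and the use of a ByteArrayOutputStream are only for illustration):
package com.buwenbuhuo.flowsun;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
public class FlowBeanRoundTrip {
public static void main(String[] args) throws IOException {
FlowBean original = new FlowBean();
original.set(1116, 954); // upFlow = 1116, downFlow = 954, so sumFlow = 2070
// Serialize the bean into an in-memory byte array
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
original.write(new DataOutputStream(buffer));
// Deserialize it back, starting from an empty bean (as the framework would via reflection)
FlowBean restored = new FlowBean();
restored.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
// Should print: 1116	954	2070
System.out.println(restored);
}
}
Next comes the Mapper class: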
package com.buwenbuhuo.flowsun;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
/**
* @author 卜温不火
* @create 2020-04-23 14:14
* com.buwenbuhuo.flowsun - the name of the target package where the new class or interface will be created.
* mapreduce0422 - the name of the current project.
*/
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
private Text phone = new Text();
private FlowBean flow = new FlowBean();
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// 1. Split the input line into fields on tabs
String[] fields = value.toString().split("\t");
// 2. The phone number is the second field
phone.set(fields[1]);
// 3. Read the traffic fields counting from the end of the array,
//    because some records do not contain a URL field
long upFlow = Long.parseLong(fields[fields.length - 3]);
long downFlow = Long.parseLong(fields[fields.length - 2]);
flow.set(upFlow, downFlow);
// 4. Write (phone number, FlowBean) out to the context
context.write(phone, flow);
}
}
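As a quick sanity check on the index arithmetic in the Mapper: line 1 of the sample data contains a URL and splits into 7 fields, so the upstream traffic sits at index 4 = fields.length - 3; line 2 has no URL and splits into 6 fields, so the upstream traffic sits at index 3, which is again fields.length - 3. Counting from the end of the array therefore handles both record shapes. The Reducer then sums up the beans for each phone number: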
package com.buwenbuhuo.flowsun;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
* @author 卜温不火
* @create 2020-04-23 14:15
* com.buwenbuhuo.flowsun - the name of the target package where the new class or interface will be created.
* mapreduce0422 - the name of the current project.
*/
public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
private FlowBean sunFlow = new FlowBean();
@Override
protected void reduce(Text key, Iterable<FlowBean> values, Context context)throws IOException, InterruptedException {
long sum_upFlow = 0;
long sum_downFlow = 0;
// 1. Iterate over all the beans for this phone number and accumulate the upstream and downstream traffic
for (FlowBean value : values) {
sum_upFlow += value.getUpFlow();
sum_downFlow += value.getDownFlow();
}
// 2. Wrap the sums in the output bean (set() also computes sumFlow)
sunFlow.set(sum_upFlow, sum_downFlow);
// 3. Write out (phone number, total FlowBean)
context.write(key, sunFlow);
}
}
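A small design note: the Reducer reuses the single sunFlow field for every output record instead of allocating a new FlowBean per call. This is safe because Hadoop serializes the value at the moment context.write() is called, so the same object can simply be refilled on the next call. Finally, the Driver class wires everything together: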
package com.buwenbuhuo.flowsun;
/**
* @author 卜温不火
* @create 2020-04-23 14:14
* com.buwenbuhuo.flowsun - the name of the target package where the new class or interface will be created.
* mapreduce0422 - the name of the current project.
*/
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class FlowDriver {
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
// 1. Get a Job instance
Job job = Job.getInstance(new Configuration());
// 2. Set the jar by the driver class
job.setJarByClass(FlowDriver.class);
// 3. Specify the Mapper and Reducer classes used by this job
job.setMapperClass(FlowMapper.class);
job.setReducerClass(FlowReducer.class);
// 4. Specify the key/value types of the mapper output
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(FlowBean.class);
// 5. Specify the key/value types of the final output
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(FlowBean.class);
// 6. Specify the input directory of the raw files and the output directory for the job
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
// 7. Submit the job configuration, together with the jar containing the job's classes, to YARN and wait for completion
boolean result = job.waitForCompletion(true);
System.exit(result ? 0 : 1);
}
}
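To run the job, package the project into a jar and submit it with something like hadoop jar mapreduce0422.jar com.buwenbuhuo.flowsun.FlowDriver /input/phone_data /output (the jar name and the HDFS paths here are placeholders; adjust them to your own environment, and note that the output directory must not already exist). The results then show up in the part-r-00000 file under the output directory, one line per phone number with the upstream, downstream, and total traffic separated by tabs.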