MapReduce Programming Case 3: Custom Types
FlowBean.java
package Hadoop.mapreduce.flowerLog;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class FlowBean implements Writable {
private int upFlow;
private int downFlow;
private int amountFlow;
private String phone;
public FlowBean(){}
public FlowBean(String phone,int upFlow, int downFlow) {
this.phone = phone;
this.upFlow = upFlow;
this.downFlow = downFlow;
this.amountFlow = upFlow + downFlow;
}
public int getUpFlow() {
return upFlow;
}
public void setUpFlow(int upFlow) {
this.upFlow = upFlow;
}
public int getDownFlow() {
return downFlow;
}
public void setDownFlow(int downFlow) {
this.downFlow = downFlow;
}
public int getAmountFlow() {
return amountFlow;
}
public void setAmountFlow(int amountFlow) {
this.amountFlow = amountFlow;
}
public String getPhone() {
return phone;
}
public void setPhone(String phone) {
this.phone = phone;
}
/**
 * Called by Hadoop to serialize an object of this class.
 * @param out
 * @throws IOException
 */
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(upFlow);
out.writeUTF(phone); // strings are written with writeUTF
out.writeInt(downFlow);
out.writeInt(amountFlow);
}
/**
 * Called by Hadoop to deserialize an object of this class.
 * The fields must be read in the same order they were written in write().
 * @param in
 * @throws IOException
 */
@Override
public void readFields(DataInput in) throws IOException {
this.upFlow = in.readInt();
this.phone = in.readUTF();
this.downFlow = in.readInt();
this.amountFlow = in.readInt();
}
@Override
public String toString() {
return this.phone + "," + this.upFlow + "," + this.downFlow + "," + this.amountFlow;
}
}
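Since write() and readFields() must agree on field order, a quick local round trip through a byte buffer is an easy sanity check. A minimal sketch (the test class and values here are illustrative, not part of the original case):
package Hadoop.mapreduce.flowerLog;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
public class FlowBeanRoundTripTest {
    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean("13500001111", 100, 200);
        // Serialize into an in-memory buffer, just as Hadoop would
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buf));
        // Deserialize from the same bytes and compare
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(copy); // expected: 13500001111,100,200,300
    }
}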
FlowCountMapper.java
package Hadoop.mapreduce.flowerLog;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class FlowCountMapper extends Mapper<LongWritable,Text, Text, FlowBean> {
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String line = value.toString();
String[] fields = line.split("\t");
// Field 1 holds the phone number; the upstream and downstream byte
// counts sit at fixed offsets from the end of the record
String phone = fields[1];
int upFlow = Integer.parseInt(fields[fields.length - 3]);
int downFlow = Integer.parseInt(fields[fields.length - 2]);
context.write(new Text(phone),new FlowBean(phone,upFlow,downFlow));
}
}
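For reference, the mapper expects tab-separated log lines shaped roughly like the one below (the values and column layout here are made up to match the indices the code uses; the real dataset may carry more columns):
1363157985066	13726230503	120.196.100.82	i02.c.aliimg.com	2481	24681	200
Here fields[1] is the phone number, the third-from-last field (2481) is the upstream flow, and the second-from-last (24681) is the downstream flow.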
FlowCountReduce.java
package Hadoop.mapreduce.flowerLog;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.io.Text;
import java.io.IOException;
public class FlowCountReduce extends Reducer<Text, FlowBean, Text, FlowBean> {
/**
 * key: a phone number
 * values: the flow data from every access record produced by that phone number,
 * e.g. <135,flowBean1><135,flowBean2><135,flowBean3><135,flowBean4>
 */
@Override
protected void reduce(Text key, Iterable<FlowBean> values, Context context)
        throws IOException, InterruptedException {
int upSum = 0;
int dSum = 0;
for (FlowBean value : values) {
upSum += value.getUpFlow();
dSum += value.getDownFlow();
}
context.write(key, new FlowBean(key.toString(), upSum, dSum));
}
}
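Since summing per-phone flow is associative, this reducer's logic could also serve as a combiner to cut shuffle traffic; its input and output key/value types already match the map output. If you try that (a suggestion, not part of the original case), one extra line in the driver is enough:
job.setCombinerClass(FlowCountReduce.class);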
JobFlowSubmitter.java
package Hadoop.mapreduce.flowerLog;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
 * What this case demonstrates: how a custom data type implements Hadoop's serialization (Writable) interface.
 * 1. The class must keep a no-arg constructor.
 * 2. The order in which write() emits the fields' binary data must match the order in which readFields() reads them back.
 */
public class JobFlowSubmitter {
public static void main(String[] args) throws Exception{
// Set a JVM system property so the job can pick up the user identity for HDFS access
System.setProperty("HADOOP_USER_NAME", "root");
Configuration conf = new Configuration();
// 1. Set the default file system the job accesses at runtime
conf.set("fs.defaultFS", "file:///");
// 2. Set where the job is submitted to run (the local runner here, not a cluster)
conf.set("mapreduce.framework.name", "local");
Job job = Job.getInstance(conf);
job.setJarByClass(JobFlowSubmitter.class);
job.setMapperClass(FlowCountMapper.class);
job.setReducerClass(FlowCountReduce.class);
// 3. Set parameters: the key/value types produced by this job's Mapper and Reducer implementations
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(FlowBean.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(FlowBean.class);
// 4. Set parameters: the input path of the dataset to process and the output path for the final results
FileInputFormat.setInputPaths(job, new Path("C:/Users/shujuelin/Desktop/spark/hadoop/input"));
FileOutputFormat.setOutputPath(job, new Path("C:/Users/shujuelin/Desktop/spark/hadoop/output")); // note: the output path must not already exist
// 5. Set the number of reduce tasks to launch
job.setNumReduceTasks(3);
// 6. Submit the job and wait for it to finish
boolean res = job.waitForCompletion(true); // true: print progress to the console
System.exit(res ? 0 : -1); // exit with 0 on success, -1 on failure
}
}
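Because the job aborts if the output directory already exists, a small helper that clears stale test output before submission saves manual cleanup between reruns. A minimal sketch under that assumption (the helper class and its name are illustrative; recursive deletion is only safe for throwaway test paths):
package Hadoop.mapreduce.flowerLog;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
public class OutputPathCleaner {
    // Delete the given directory if it exists, so a rerun does not fail
    public static void deleteIfExists(Configuration conf, String dir) throws IOException {
        Path outPath = new Path(dir);
        FileSystem fs = outPath.getFileSystem(conf);
        if (fs.exists(outPath)) {
            fs.delete(outPath, true); // recursive delete
        }
    }
}
Calling OutputPathCleaner.deleteIfExists(conf, "C:/Users/shujuelin/Desktop/spark/hadoop/output") before FileOutputFormat.setOutputPath makes the driver rerunnable. Also note that with setNumReduceTasks(3), the results are hash-partitioned by phone number across part-r-00000 through part-r-00002.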