一、Map/Reduce
/**
 * @program: HadoopDemo
 * @description: Ranks phone numbers by their call statistics — the CallBean key's
 *               natural ordering drives the MapReduce shuffle sort.
 * @author: Mario
 * @create: 2019-03-17 21:36
 **/
public class SortMR {

    /**
     * Mapper: parses one whitespace-separated input line into a {@code CallBean}
     * and emits the bean as the key (value is {@code NullWritable}), so the
     * framework sorts records by the bean's {@code compareTo} during shuffle.
     *
     * Expected columns: [0]=phone, [2]=callOut, [3]=callIn
     * (column [1] is intentionally skipped — presumably a label column from the
     * previous job's output; confirm against the upstream format).
     */
    public static class SortMapper extends Mapper<LongWritable, Text, CallBean, NullWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] fields = value.toString().split("\\s+");
            // Guard against blank/truncated lines: the original code threw
            // ArrayIndexOutOfBoundsException here and failed the whole task.
            if (fields.length < 4) {
                return;
            }
            String phone = fields[0];
            long callOut = Long.parseLong(fields[2]);
            long callIn = Long.parseLong(fields[3]);
            context.write(new CallBean(phone, callOut, callIn), NullWritable.get());
        }
    }

    /**
     * Reducer: emits "phone\tbean" for EVERY input record.
     *
     * We iterate {@code values} rather than writing the key once: records whose
     * CallBean keys compare equal are grouped into a single reduce call, and
     * Hadoop re-populates the reused key object for each value, so writing once
     * would silently drop all but one of the tied records.
     */
    public static class SortReducer extends Reducer<CallBean, NullWritable, Text, CallBean> {
        @Override
        protected void reduce(CallBean key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            for (NullWritable ignored : values) {
                context.write(new Text(key.getPhoneNo()), key);
            }
        }
    }

    /**
     * Job driver. Usage: SortMR [inputPath [outputPath]] — paths default to the
     * original hard-coded HDFS locations when not supplied on the command line.
     */
    public static void main(String[] args) throws Exception {
        // Remote HDFS login name (avoids permission errors when submitting
        // from a machine whose OS user differs from the cluster user).
        System.setProperty("HADOOP_USER_NAME", "hadoop");
        // Picks up *-site.xml from the classpath.
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // Jar containing the job classes.
        job.setJarByClass(SortMR.class);
        job.setMapperClass(SortMapper.class);
        job.setReducerClass(SortReducer.class);
        // Map output types: CallBean key is what gets sorted.
        job.setMapOutputKeyClass(CallBean.class);
        job.setMapOutputValueClass(NullWritable.class);
        // Final (reduce) output types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(CallBean.class);
        // Honor command-line paths when given; fall back to the legacy defaults
        // so existing invocations without arguments keep working.
        String inputPath = args.length > 0 ? args[0]
                : "hdfs://192.168.2.100:9000/user/phonecount/data";
        String outputPath = args.length > 1 ? args[1]
                : "hdfs://192.168.2.100:9000/user/phonecount/out";
        // Delete a pre-existing output directory up front — otherwise the job
        // fails at submission with FileAlreadyExistsException.
        Path out = new Path(outputPath);
        FileSystem fs = out.getFileSystem(conf);
        if (fs.exists(out)) {
            fs.delete(out, true); // true = recursive: remove contents as well
        }
        // Input data location and result location.
        FileInputFormat.setInputPaths(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, out);
        // waitForCompletion(true) prints progress; exit 0 on success, 1 otherwise.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
二、结果
