A Simple Hadoop MapReduce Example (Counting Total Calls per Phone Number, Descending Sort, Grouped Statistics)

1. Custom partitioning rule
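The partitioner below routes each record to a reducer based on the first three digits of the phone number: prefixes 153, 182 and 134 map to partitions 0, 1 and 2 respectively, and any other prefix falls into partition 3.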

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.mapreduce.Partitioner;

/**
 * @program: HadoopDemo
 * @description: Partition records by phone-number area
 * @author: Mario
 * @create: 2019-04-18 22:35
 */
public class AreaPartitioner<KEY, VALUE> extends Partitioner<KEY, VALUE> {
    // In a real setup this mapping would be loaded from a database into the cache
    private static Map<String, Integer> cacheValues = new HashMap<>();

    static {
        // Load the prefix-to-partition mapping into the cache
        // loadDataFromDBToCache();
        cacheValues.put("153",0);
        cacheValues.put("182",1);
        cacheValues.put("134",2);
    }

    @Override
    public int getPartition(KEY key, VALUE value, int numPartitions) {
        if (key instanceof CallBean) {
            CallBean callBean = (CallBean) key;
            // Look up the first three digits of the phone number in the area dictionary;
            // each area maps to a different partition number
            Integer no = cacheValues.get(callBean.getPhoneNo().substring(0, 3));
            // Any unknown prefix falls into the extra partition 3
            int areaCode = no == null ? 3 : no;
            return areaCode;
        } else {
            return 0;
        }
    }
}
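Because getPartition can return the values 0 through 3, the job must be configured with at least four reduce tasks (see setNumReduceTasks(4) in the driver below); otherwise records routed to a non-existent partition cause the map tasks to fail.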

2. Map / Reduce / Main
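The mapper emits a CallBean as the output key (with NullWritable values) so that the shuffle phase sorts the records according to the bean's compareTo; the reducer then simply writes each phone number back out together with its bean.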

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * @program: HadoopDemo
 * @description: Call statistics sorted in descending order
 * @author: Mario
 * @create: 2019-03-17 21:36
 **/
public class SortMR {

    // Use the sortable CallBean entity as the map output key
    public static class SortMapper extends Mapper<LongWritable, Text, CallBean, NullWritable>{
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
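            // Split the line on whitespace: field 0 is the phone number, fields 2 and 3 are the outgoing/incoming call counts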
            String[] resDatas = value.toString().split("\\s+");
            String phone = resDatas[0];
            Long callOut = Long.parseLong(resDatas[2]);
            Long callIn = Long.parseLong(resDatas[3]);
            context.write(new CallBean(phone, callOut, callIn),NullWritable.get());
        }
    }

    public static class SortReducer extends Reducer<CallBean, NullWritable,Text, CallBean>{
        @Override
        protected void reduce(CallBean key, Iterable<NullWritable> values, Context context) throws IOException,
                InterruptedException {
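            // Keys arrive already partitioned and sorted by the shuffle; emit the phone number together with its stats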
            String phone = key.getPhoneNo();
            context.write(new Text(phone),key);
        }
    }

    public static void main(String[] args) throws Exception {
        // Set the remote user name so the job has permission to operate on HDFS
        System.setProperty("HADOOP_USER_NAME", "hadoop");

        // Load the configuration files from the classpath (src)
        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf);
        // Set the jar that contains the driver class
        job.setJarByClass(SortMR.class);

        // Set the Mapper, Partitioner and Reducer classes
        job.setMapperClass(SortMapper.class);
        job.setPartitionerClass(AreaPartitioner.class);
        job.setReducerClass(SortReducer.class);

        // The number of reduce tasks should match the number of partitions produced by AreaPartitioner (four: 0-3)
        job.setNumReduceTasks(4);

        // Map output types
        job.setMapOutputKeyClass(CallBean.class);
        job.setMapOutputValueClass(NullWritable.class);
        // Reduce output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(CallBean.class);

        String[] arg = {"hdfs://192.168.2.100:9000/user/phonecount/data", "hdfs://192.168.2.100:9000/user/phonecount/out"};
        // Delete the output directory if it already exists
        Path path = new Path(arg[1]);                      // arg[1] is the output directory (arg[0] is the input directory)
        FileSystem fileSystem = path.getFileSystem(conf);  // get the FileSystem that owns this path
        if (fileSystem.exists(path)) {
            fileSystem.delete(path, true);                 // true: delete recursively even if the directory is not empty
        }
        // Location of the input data
        FileInputFormat.setInputPaths(job, new Path(arg[0]));
        // Location for the job output
        FileOutputFormat.setOutputPath(job, new Path(arg[1]));

        // waitForCompletion(true) prints job progress; exit with 0 on success, 1 on failure
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
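Both classes above depend on a CallBean that is not shown in the post. For the shuffle to order the records, it has to implement WritableComparable. The sketch below is an assumption of what it might look like, using only the members referenced in the code (phoneNo, callOut, callIn, getPhoneNo()) and ordering by total call count in descending order as the title suggests; the original class may differ.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class CallBean implements WritableComparable<CallBean> {

    private String phoneNo;
    private long callOut;
    private long callIn;

    // Hadoop needs a no-arg constructor to instantiate the bean when deserializing keys
    public CallBean() {
    }

    public CallBean(String phoneNo, long callOut, long callIn) {
        this.phoneNo = phoneNo;
        this.callOut = callOut;
        this.callIn = callIn;
    }

    public String getPhoneNo() {
        return phoneNo;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(phoneNo);
        out.writeLong(callOut);
        out.writeLong(callIn);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        phoneNo = in.readUTF();
        callOut = in.readLong();
        callIn = in.readLong();
    }

    // Descending order by total number of calls (assumed sort criterion)
    @Override
    public int compareTo(CallBean other) {
        long thisTotal = this.callOut + this.callIn;
        long otherTotal = other.callOut + other.callIn;
        return Long.compare(otherTotal, thisTotal);
    }

    // What the reducer's TextOutputFormat writes as the value column
    @Override
    public String toString() {
        return callOut + "\t" + callIn + "\t" + (callOut + callIn);
    }
}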

3. Results after partitioning and sorting
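With four reduce tasks, the job writes four result files to /user/phonecount/out (part-r-00000 through part-r-00003), one per partition; within each file the phone numbers appear in the order defined by CallBean's compareTo, i.e. by total call count in descending order.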
