
MapReduce (Part 1)

wordcount

The stages of a MapReduce job
Map → Partition → Sort → Combine → Group → Reduce
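
The partition stage decides which reducer each (k2, v2) pair is sent to; by default, HashPartitioner hashes the key. As a minimal sketch (not part of the original job; the class name and the a–m routing rule are made up for illustration), a custom partitioner could look like this:

package com.example;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

// Illustrative only: send words starting with a-m to reducer 0, the rest to reducer 1.
public class WordPartitioner extends Partitioner<Text, LongWritable> {
    @Override
    public int getPartition(Text key, LongWritable value, int numPartitions) {
        String word = key.toString();
        if (numPartitions < 2 || word.isEmpty()) {
            return 0; // a single reducer: everything lands in partition 0
        }
        return Character.toLowerCase(word.charAt(0)) <= 'm' ? 0 : 1;
    }
}

It would be wired in with job.setPartitionerClass(WordPartitioner.class) together with job.setNumReduceTasks(2).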

map

package com.example;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/* k1     v1
 * 0      hello,world
 * 11     hello,HDFS
 * -----------------------
 * k2     v2
 * hello   1
 * world   1
 * hello   1
 */

public class jobMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    // Output key/value objects are created once and reused for every record.
    Text k = new Text();
    LongWritable v = new LongWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Split on whitespace or commas so the sample line "hello,world"
        // (see the comment above) tokenizes into separate words.
        String[] line = value.toString().split("[\\s,]+");

        for (String s : line) {
            k.set(s);
            context.write(k, v);  // emit (word, 1)
        }
    }
}
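
Note that k and v are fields created once and reused across map() calls: context.write() serializes their current contents immediately, so reusing Writable objects avoids allocating two fresh objects per input record.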

reduce

package com.example;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/*
 * k2     v2
 * hello   1
 * world   1
 * hello   1
 * -------------------
 * k3     v3
 * hello   2
 * world   1
 */
public class jobReduce extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
        // Sum the 1s the mapper emitted for this word. The values Iterable
        // can only be traversed once, so do it in a single pass (the original
        // debug loop called values.iterator() afresh on every check).
        long sum = 0L;
        for (LongWritable value : values) {
            sum += value.get();
        }
        context.write(key, new LongWritable(sum));
    }
}
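
Because the reducer's input types (Text, LongWritable) match its output types, the same class can also serve the combine stage: adding job.setCombinerClass(jobReduce.class) in the driver pre-aggregates counts on the map side before the shuffle.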

jobmain

package com.example;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class jobMain extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(super.getConf(), "wordcount");
        job.setJarByClass(jobMain.class);

        // Input: read the source file line by line.
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("/tmp/README.txt"));

        // Map stage.
        job.setMapperClass(jobMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // Partition, sort, combine, and group run between map and reduce;
        // the defaults are used here.

        // job.addCacheFile(new Path("/tmp/xxx").toUri());  // cache an HDFS file (a single file) on the NodeManager's local disk

        // job.setNumReduceTasks(0);  // 0 reducers would make this a map-only job

        // Reduce stage.
        job.setReducerClass(jobReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // Output format and destination (the output directory must not already exist).
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("/tmp/out1"));

        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        int exitCode = ToolRunner.run(conf, new jobMain(), args);
        System.exit(exitCode);  // propagate the job's result as the process exit code
    }
}
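
The commented-out job.addCacheFile(...) line, together with the unused BufferedReader/FileReader/URI imports left in jobMapper, points at the distributed-cache pattern: ship a small HDFS file to every NodeManager and read it in setup(). A minimal sketch, assuming a hypothetical cached stop-word list (the file name and filtering logic are not from the original):

package com.example;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.util.HashSet;
import java.util.Set;

// Illustrative only: filters out words listed in a file the driver registered
// with job.addCacheFile(new Path("/tmp/stopwords.txt").toUri()).
public class CacheAwareMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    private final Set<String> stopWords = new HashSet<>();

    @Override
    protected void setup(Context context) throws IOException {
        URI[] cacheFiles = context.getCacheFiles();
        if (cacheFiles != null && cacheFiles.length > 0) {
            // Cached files are localized and symlinked into the task's
            // working directory under their base name.
            String localName = new Path(cacheFiles[0].getPath()).getName();
            try (BufferedReader reader = new BufferedReader(new FileReader(localName))) {
                String word;
                while ((word = reader.readLine()) != null) {
                    stopWords.add(word.trim());
                }
            }
        }
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        for (String s : value.toString().split("[\\s,]+")) {
            if (!s.isEmpty() && !stopWords.contains(s)) {
                context.write(new Text(s), new LongWritable(1));
            }
        }
    }
}

After packaging, the job itself is typically launched with hadoop jar, naming the jar and the jobMain driver class.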