MapReduce Programming Example: Secondary Sort
Design approach:
A secondary sort first orders the data by one column, and then, within each group of records sharing the same value in that column, orders them by a second column. The sample input (two space-separated integer fields per line):
4 3
4 2
4 1
3 4
2 7
2 3
3 1
3 2
3 3
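To make the target ordering concrete before diving into the MapReduce plumbing, here is a minimal plain-Java sketch (the class name is illustrative, and it assumes space-separated integer pairs) that sorts the sample lines the same way the job below will:
import java.util.Arrays;
import java.util.Comparator;

public class LocalSecondarySortDemo {
    public static void main(String[] args) {
        String[] lines = {"4 3", "4 2", "4 1", "3 4", "2 7", "2 3", "3 1", "3 2", "3 3"};
        // Sort by the first field, breaking ties with the second field.
        Arrays.sort(lines, Comparator
                .comparingInt((String s) -> Integer.parseInt(s.split(" ")[0]))
                .thenComparingInt(s -> Integer.parseInt(s.split(" ")[1])));
        for (String line : lines) {
            System.out.println(line);
        }
    }
}
The MapReduce version below distributes exactly this comparison across the shuffle phase.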
SecondaryMapper:
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// The map step emits each whole input line as the key (with a NullWritable
// placeholder value), so the shuffle sorts on the full "first second" line.
public class SecondaryMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // NullWritable is a singleton: it cannot be created with new NullWritable();
        // the only way to obtain the empty value is NullWritable.get().
        context.write(value, NullWritable.get());
    }
}
SecondaryReducer:
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class SecondaryReducer extends Reducer<Text, NullWritable, NullWritable, Text> {
    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        // Each step of the values iterator updates the reused key object to the
        // next record of the group, so writing key emits every line in order.
        for (NullWritable value : values) {
            context.write(NullWritable.get(), key);
        }
    }
}
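A subtlety worth calling out: because the map output value is NullWritable, each record can only be recovered from the key. Hadoop reuses a single key object while iterating over the values, deserializing the next record of the group into it at every step, which is why writing key once per iteration emits each line of the group exactly once.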
KeyPartitioner:
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

// Partition on the first field only, so that all records sharing a first
// field are sent to the same reducer.
public class KeyPartitioner extends HashPartitioner<Text, NullWritable> {
    @Override
    public int getPartition(Text key, NullWritable value, int numReduceTasks) {
        // Masking with Integer.MAX_VALUE clears the sign bit, keeping the
        // partition number non-negative even when hashCode() is negative.
        return (key.toString().split(" ")[0].hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}
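Why the & Integer.MAX_VALUE mask matters: String.hashCode() can be negative, and Java's % operator keeps the sign of the dividend, which would produce an invalid negative partition number. A standalone sketch (the class name and reducer count are illustrative):
public class PartitionDemo {
    public static void main(String[] args) {
        // "polygenelubricants" is a well-known String whose hashCode() is Integer.MIN_VALUE.
        String key = "polygenelubricants";
        int numReduceTasks = 3;
        System.out.println(key.hashCode() % numReduceTasks);                       // -2: an invalid partition
        System.out.println((key.hashCode() & Integer.MAX_VALUE) % numReduceTasks); // 0: always in range
    }
}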
SortComparator:
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

// Controls the sort order of map output keys: compare by the first field,
// and break ties with the second field.
public class SortComparator extends WritableComparator {
    public SortComparator() {
        super(Text.class, true);
    }
    @Override
    public int compare(WritableComparable key1, WritableComparable key2) {
        String[] fields1 = key1.toString().split(" ");
        String[] fields2 = key2.toString().split(" ");
        // If the first fields differ, they alone decide the order.
        int cmp = Integer.compare(Integer.parseInt(fields1[0]), Integer.parseInt(fields2[0]));
        if (cmp != 0) {
            return cmp;
        }
        // The first fields are equal: fall back to the second field.
        return Integer.compare(Integer.parseInt(fields1[1]), Integer.parseInt(fields2[1]));
    }
}
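As an aside, calling split() and parseInt() inside every comparison is wasteful, since the comparator runs many times during the sort. A common alternative design (not used in this walkthrough) is a binary composite key; the IntPair below is a hypothetical sketch of such a key, ordered the same way as SortComparator:
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;

public class IntPair implements WritableComparable<IntPair> {
    private int first;
    private int second;

    public void set(int first, int second) {
        this.first = first;
        this.second = second;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(first);
        out.writeInt(second);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        first = in.readInt();
        second = in.readInt();
    }

    @Override
    public int compareTo(IntPair other) {
        // Same ordering as SortComparator: first field, then second.
        int cmp = Integer.compare(first, other.first);
        return cmp != 0 ? cmp : Integer.compare(second, other.second);
    }

    @Override
    public int hashCode() {
        return first * 163 + second;
    }

    @Override
    public boolean equals(Object o) {
        return o instanceof IntPair
                && first == ((IntPair) o).first
                && second == ((IntPair) o).second;
    }

    @Override
    public String toString() {
        return first + " " + second;
    }
}
With a composite key, the fields are deserialized once per record rather than re-parsed on every comparison.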
GroupingComparator:
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

// Groups records by the first field: all keys that compare equal here are
// delivered to a single reduce() call as one iterable.
public class GroupingComparator extends WritableComparator {
    public GroupingComparator() {
        super(Text.class, true);
    }
    @Override
    public int compare(WritableComparable a, WritableComparable b) {
        // Only the first field decides group membership.
        return Integer.compare(Integer.parseInt(a.toString().split(" ")[0]),
                Integer.parseInt(b.toString().split(" ")[0]));
    }
}
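With this grouping and a single reducer, the sample input produces three reduce() calls: one for first field 2 (records 2 3 and 2 7), one for 3 (3 1, 3 2, 3 3, 3 4), and one for 4 (4 1, 4 2, 4 3); within each call the records arrive already ordered by the second field, courtesy of SortComparator.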
JobRun:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class JobRun {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        // Job.getInstance replaces the deprecated new Job(conf, name) constructor.
        Job job = Job.getInstance(conf, "Sort");
        job.setJarByClass(JobRun.class);
        job.setMapperClass(SecondaryMapper.class);
        job.setReducerClass(SecondaryReducer.class);
        job.setPartitionerClass(KeyPartitioner.class);
        job.setSortComparatorClass(SortComparator.class);
        job.setGroupingComparatorClass(GroupingComparator.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        FileInputFormat.setInputPaths(job, new Path("/input/sort"));
        FileOutputFormat.setOutputPath(job, new Path("/output/sort"));
        // Force a single reducer so the result is globally sorted; with more
        // reducers the output is only sorted within each reducer's partition.
        job.setNumReduceTasks(1);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
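Assuming the classes above are packaged into a jar (the jar name below is a placeholder), the job can be submitted with:
hadoop jar secondary-sort.jar JobRun
The paths /input/sort and /output/sort refer to HDFS, and the output directory must not exist beforehand, since FileOutputFormat refuses to overwrite an existing directory.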
Result after the secondary sort:
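For the sample input above, the job writes:
2 3
2 7
3 1
3 2
3 3
3 4
4 1
4 2
4 3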
Data flow through the map, partition, sort, group, and reduce stages: [diagram omitted]