Implementing WordCount with MapReduce
1. Setting up the project in IDEA
Counting words with MapReduce requires a number of Hadoop jars on the classpath, so we create the project with Maven, which pulls in the required jars automatically from the dependency list in the configuration file pom.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <!-- the group and artifact names chosen when the Maven project was created -->
  <groupId>cn.kgc</groupId>
  <artifactId>hdfsTest2</artifactId>
  <version>1.0-SNAPSHOT</version>
  <name>hdfsTest2</name>
  <!-- FIXME change it to the project's website -->
  <url>http://www.example.com</url>

  <properties>
    <!-- compile against Java 1.8 -->
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <maven.compiler.source>1.8</maven.compiler.source>
    <maven.compiler.target>1.8</maven.compiler.target>
  </properties>

  <dependencies>
    <!-- JUnit 4.12, for tests -->
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.12</version>
      <scope>test</scope>
    </dependency>
    <!-- Hadoop 2.6.0 common jars -->
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>2.6.0</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client -->
    <!-- Hadoop client jars -->
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>2.6.0</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs -->
    <!-- HDFS jars -->
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs</artifactId>
      <version>2.6.0</version>
    </dependency>
    <!-- logging jars -->
    <dependency>
      <groupId>commons-logging</groupId>
      <artifactId>commons-logging</artifactId>
      <version>1.2</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-auth</artifactId>
      <version>2.6.0</version>
    </dependency>
    <!-- MapReduce jars -->
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-mapreduce-client-core</artifactId>
      <version>2.6.0</version>
    </dependency>
    <!-- Hadoop's test/jobclient tooling -->
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
      <version>2.6.0</version>
    </dependency>
  </dependencies>
</project>
Once the pom is saved, wait for Maven to finish downloading the jars.
2. A simple MapReduce workflow
1. Convert the input text into strings, one per line.
2. Split each string on whitespace with split() and store the words in an array.
3. Use each word as the map key, with a count of 1 per occurrence as the map value.
4. Combine is an optional optimization step: it aggregates values already on the map side, so each split emits a single partial sum per word instead of a run of all-1 values (see the trace below).
5. The Shuffle/Sort phase sorts the words and groups the values from all splits under their keys.
6. The Reduce phase performs the final summation over those values and outputs each word with its number of occurrences.
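To make the flow concrete, here is a trace of those steps on a hypothetical one-line input (without the optional combiner, the reducer would simply receive the raw 1s):
input line : hello world hello
map output : (hello,1) (world,1) (hello,1)
combine    : (hello,2) (world,1)
shuffle    : (hello,[2]) (world,[1])    <- grouped by key, keys sorted
reduce     : hello 2, world 1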
3. Writing the code
1. The Mapper class
package kb07.wordcount;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class WCMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    Text k = new Text();
    IntWritable v = new IntWritable(1);   // every occurrence of a word is emitted with the value 1

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();       // 1. convert the line of text into a String
        String[] words = line.split("\\s+");  // 2. split the string on whitespace
        for (String word : words) {           // 3. write out each word with a count of 1
            k.set(word);
            context.write(k, v);
        }
    }
}
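A quick aside on the split pattern: "\\s+" matches a run of one or more whitespace characters, so repeated spaces and tabs do not yield empty words. A minimal sketch of its behavior:
String[] words = "hello   world\thello".split("\\s+");
// words -> ["hello", "world", "hello"]
One caveat: a line that begins with whitespace still yields one empty leading token, which this mapper would count as a word; calling line.trim() before the split avoids that.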
2. The Reducer class
package kb07.wordcount;   // same package as WCMapper and WCDriver, so the Driver can reference this class directly
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;

/**
 * KEYIN:    the reduce-side input key type, i.e. the map-side output key type
 * VALUEIN:  the reduce-side input value type, i.e. the map-side output value type
 * KEYOUT:   the reduce-side output key type
 * VALUEOUT: the reduce-side output value type
 */
public class WCReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    int sum;
    IntWritable v = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // the reduce side receives input shaped roughly like (wish,(1,1,1,1,1,1,1,1))
        sum = 0;
        // sum over the iterator of counts
        for (IntWritable count : values) {
            sum += count.get();
        }
        // write out the key and its total
        v.set(sum);
        context.write(key, v);
    }
}
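A small design note: k in the Mapper and v in the Reducer are allocated once and refilled with set(...) rather than created per record. With millions of records this avoids needless garbage collection, and it is safe because context.write serializes the current contents immediately.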
3. The Driver class
package kb07.wordcount;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;

public class WCDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. create the configuration and the job
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration, "wordcount");
        // 2. tell Hadoop which jar to ship, identified by a class it contains
        job.setJarByClass(WCDriver.class);
        // 3. register the Mapper and Reducer classes
        job.setMapperClass(WCMapper.class);
        job.setReducerClass(WCReducer.class);
        // 4. declare the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5. declare the reduce output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. set the input and output paths (the output directory must not already exist)
        FileInputFormat.setInputPaths(job, new Path("file:///D:\\test\\idea datas\\hdfsTest2\\data\\wcinput\\wc.txt"));
        FileOutputFormat.setOutputPath(job, new Path("file:///D:\\test\\idea datas\\hdfsTest2\\data\\wcoutput"));
        // 7. submit the job and wait for it to finish
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
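Note that step 4 of the workflow in section 2 describes a Combine phase, yet this driver never registers a combiner. Because WCReducer does nothing but sum its values, the summation is associative and the same class can double as the combiner. The optional extra line, placed alongside the other job.set* calls:
job.setCombinerClass(WCReducer.class);  // map-side pre-aggregation: the reducer then sees partial sums such as (wish,(3,5)) instead of all 1s, and shuffle traffic shrinks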
4. Running it in IDEA
Input file: (screenshot in the original post)
Output: (screenshot in the original post)
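Purely as a hypothetical illustration of what those screenshots show: given a wc.txt containing
hello world
hello hadoop
the job writes the following to part-r-00000 in the wcoutput directory (keys arrive at the reducer sorted, and the default output separator is a tab):
hadoop	1
hello	2
world	1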