Running the WordCount program on the Hadoop platform
1. The classic WordCount program (WordCount.java)
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class WordCount extends Configured implements Tool {
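  /**
   * A mapper that splits each input line into tokens and emits a
   * (word, 1) pair for every token.
   */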
  public static class MapClass extends MapReduceBase implements
      Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(LongWritable key, Text value,
        OutputCollector<Text, IntWritable> output, Reporter reporter)
        throws IOException {
      String line = value.toString();
      StringTokenizer itr = new StringTokenizer(line);
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        output.collect(word, one);
      }
    }
  }
  /**
   * A reducer class that just emits the sum of the input values.
   */
  public static class Reduce extends MapReduceBase implements
      Reducer<Text, IntWritable, Text, IntWritable> {
    public void reduce(Text key, Iterator<IntWritable> values,
        OutputCollector<Text, IntWritable> output, Reporter reporter)
        throws IOException {
      int sum = 0;
      while (values.hasNext()) {
        sum += values.next().get();
      }
      output.collect(key, new IntWritable(sum));
    }
  }
  static int printUsage() {
    System.out.println("wordcount [-m <maps>] [-r <reduces>] <input> <output>");
    ToolRunner.printGenericCommandUsage(System.out);
    return -1;
  }
  /**
   * The main driver for the word count map/reduce program. Invoke this method
   * to submit the map/reduce job.
   *
   * @throws IOException
   *           When there are communication problems with the job tracker.
   */
  public int run(String[] args) throws Exception {
    JobConf conf = new JobConf(getConf(), WordCount.class);
    conf.setJobName("wordcount");
    // the keys are words (strings)
    conf.setOutputKeyClass(Text.class);
    // the values are counts (ints)
    conf.setOutputValueClass(IntWritable.class);
    conf.setMapperClass(MapClass.class);
    conf.setCombinerClass(Reduce.class);
    conf.setReducerClass(Reduce.class);

    List<String> other_args = new ArrayList<String>();
    for (int i = 0; i < args.length; ++i) {
      try {
        if ("-m".equals(args[i])) {
          conf.setNumMapTasks(Integer.parseInt(args[++i]));
        } else if ("-r".equals(args[i])) {
          conf.setNumReduceTasks(Integer.parseInt(args[++i]));
        } else {
          other_args.add(args[i]);
        }
      } catch (NumberFormatException except) {
        System.out.println("ERROR: Integer expected instead of " + args[i]);
        return printUsage();
      } catch (ArrayIndexOutOfBoundsException except) {
        System.out.println("ERROR: Required parameter missing from "
            + args[i - 1]);
        return printUsage();
      }
    }
    // Make sure there are exactly 2 parameters left.
    if (other_args.size() != 2) {
      System.out.println("ERROR: Wrong number of parameters: "
          + other_args.size() + " instead of 2.");
      return printUsage();
    }
    FileInputFormat.setInputPaths(conf, other_args.get(0));
    FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1)));
    JobClient.runJob(conf);
    return 0;
  }
  public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(new Configuration(), new WordCount(), args);
    System.exit(res);
  }
}
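The listing above uses the old org.apache.hadoop.mapred API that shipped with Hadoop 0.19. For readers on a newer release, here is a minimal sketch of the same job against the org.apache.hadoop.mapreduce API; the class names below follow the stock Hadoop 2.x WordCount example, and Job.getInstance does not exist on 0.19/0.20, so adjust to your version:
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class NewApiWordCount {
  // Emits (word, 1) for every token in the input line.
  public static class TokenizerMapper
      extends Mapper<Object, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    @Override
    public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }

  // Sums the counts for each word; also usable as a combiner.
  public static class IntSumReducer
      extends Reducer<Text, IntWritable, Text, IntWritable> {
    private IntWritable result = new IntWritable();

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable v : values) {
        sum += v.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }

  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "wordcount");
    job.setJarByClass(NewApiWordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}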
2. Make sure your Hadoop cluster is configured and running; a single-node setup is fine too. Create a directory, e.g. /home/admin/WordCount, and compile WordCount.java:
javac -classpath /home/admin/hadoop/hadoop-0.19.1-core.jar WordCount.java -d /home/admin/WordCount
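The classpath above matches the Hadoop 0.19.1 jar; other releases ship differently named core jars. On Hadoop 2.x and later (an assumption about your setup), the hadoop command itself can print the full classpath for you:
javac -classpath "$(hadoop classpath)" -d /home/admin/WordCount WordCount.java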
3. After compilation, the /home/admin/WordCount directory contains three class files: WordCount.class, WordCount$MapClass.class, and WordCount$Reduce.class (the inner-class names follow MapClass and Reduce in the source).
cd into /home/admin/WordCount and package them into a jar:
jar -cvf WordCount.jar *.class
This generates the WordCount.jar file.
4. Prepare some input data.
input1.txt and input2.txt each contain a few words, as follows:
$ cat input1.txt
Hello, i love china
are you ok?
$ cat input2.txt
hello, i love word
You are ok
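If you are recreating the walkthrough from scratch, the two sample files can be written locally first (a minimal sketch; contents exactly as shown above):
printf 'Hello, i love china\nare you ok?\n' > input1.txt
printf 'hello, i love word\nYou are ok\n' > input2.txt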
Create the input directory in HDFS and put the files the program needs there:
hadoop fs -mkdir /tmp/input
hadoop fs -put input1.txt /tmp/input/
hadoop fs -put input2.txt /tmp/input/
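Before submitting the job, it is worth listing the input directory to confirm both files actually landed in HDFS:
hadoop fs -ls /tmp/input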
5. Run the program; it prints some information while the job runs:
hadoop jar WordCount.jar WordCount /tmp/input /tmp/output
10/09/16 22:49:43 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
10/09/16 22:49:43 INFO mapred.FileInputFormat: Total input paths to process : 2
10/09/16 22:49:43 INFO mapred.JobClient: Running job: job_201008171228_76165
10/09/16 22:49:44 INFO mapred.JobClient: map 0% reduce 0%
10/09/16 22:49:47 INFO mapred.JobClient: map 100% reduce 0%
10/09/16 22:49:54 INFO mapred.JobClient: map 100% reduce 100%
10/09/16 22:49:55 INFO mapred.JobClient: Job complete: job_201008171228_76165
10/09/16 22:49:55 INFO mapred.JobClient: Counters: 16
10/09/16 22:49:55 INFO mapred.JobClient:   File Systems
10/09/16 22:49:55 INFO mapred.JobClient:     HDFS bytes read=62
10/09/16 22:49:55 INFO mapred.JobClient:     HDFS bytes written=73
10/09/16 22:49:55 INFO mapred.JobClient:     Local bytes read=152
10/09/16 22:49:55 INFO mapred.JobClient:     Local bytes written=366
10/09/16 22:49:55 INFO mapred.JobClient:   Job Counters
10/09/16 22:49:55 INFO mapred.JobClient:     Launched reduce tasks=1
10/09/16 22:49:55 INFO mapred.JobClient:     Rack-local map tasks=2
10/09/16 22:49:55 INFO mapred.JobClient:     Launched map tasks=2
10/09/16 22:49:55 INFO mapred.JobClient:   Map-Reduce Framework
10/09/16 22:49:55 INFO mapred.JobClient:     Reduce input groups=11
10/09/16 22:49:55 INFO mapred.JobClient:     Combine output records=14
10/09/16 22:49:55 INFO mapred.JobClient:     Map input records=4
10/09/16 22:49:55 INFO mapred.JobClient:     Reduce output records=11
10/09/16 22:49:55 INFO mapred.JobClient:     Map output bytes=118
10/09/16 22:49:55 INFO mapred.JobClient:     Map input bytes=62
10/09/16 22:49:55 INFO mapred.JobClient:     Combine input records=14
10/09/16 22:49:55 INFO mapred.JobClient:     Map output records=14
10/09/16 22:49:55 INFO mapred.JobClient:     Reduce input records=14
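A quick sanity check on these counters: the two files contribute 4 lines (Map input records=4) and 7 words each, giving Map output records=14. No word repeats within a single file, so the combiner has nothing to merge (Combine input records=14, Combine output records=14); the 14 pairs collapse to the 11 distinct words (Reduce input groups=11, Reduce output records=11) only at the reducer, because i, love, and are appear in both files.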
6. View the results:
$ hadoop fs -ls /tmp/output
Found 2 items
drwxr-x---   - admin admin          0 2010-09-16 22:43 /tmp/output/_logs
-rw-r-----   1 admin admin        102 2010-09-16 22:44 /tmp/output/part-00000
$ hadoop fs -cat /tmp/output/part-00000
Hello,	1
You	1
are	2
china	1
hello,	1
i	2
love	2
ok	1
ok?	1
word	1
you	1
7. Errors encountered along the way
1: java.io.FileNotFoundException
This exception came from a mistake in the directory paths: on rechecking them, I found I had written /opt/hadoop/tmp/inout when the job actually reads from /tmp/input.
2: org.apache.hadoop.mapred.FileAlreadyExistsException
This one is mostly a consequence of the previous error. Because Hadoop runs expensive computations, it refuses to overwrite existing results by default, so the job's output directory must not exist before the job is submitted; if it does, this exception is thrown.
The fix is to delete the old output directory (a driver-side alternative is sketched below):
/opt/hadoop/bin/hadoop fs -rmr /tmp/output
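To avoid deleting the directory by hand on every rerun, the driver itself can remove a stale output directory before submitting. A minimal sketch using the standard FileSystem API (the helper name deleteIfExists is hypothetical; call it from run() before FileOutputFormat.setOutputPath):
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OutputCleaner {
  /** Recursively deletes dir if it already exists, so the job can be rerun. */
  public static void deleteIfExists(Configuration conf, String dir)
      throws IOException {
    Path out = new Path(dir);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(out)) {
      fs.delete(out, true); // true = delete recursively
    }
  }
}
Note that this silently discards the previous run's results, so only wire it in if the output really is disposable.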
3: ERROR namenode.NameNode: java.io.IOException: Cannot create directory /usr/local/hadoop-datastore/hadoop-hadoop/dfs/name/current
This happens because the hadoop-datastore folder lacks the required permissions; give the user that runs Hadoop ownership of that directory (for example with chown), then retry.