hadoop在eclipse当中如何添加源码?
[学习笔记]
/*org.apache.hadoop.mapreduce.mapper.context, java.lang.interruptedexception: 想看map的源代码, 按住Ctrl并点击, 出现attach source code, 点击external location/external file, 找到源代码(就在source目录下): d:\hadoop-2.7.4\src
其中key为此行的开头相对于文件的起始位置,value就是此行的字符文本
*/
/**
 * Tokenizes one input line and emits a (token, 1) pair per token.
 *
 * <p>NOTE(review): the original page was lowercased by extraction
 * ("object key, text value" etc.) and could not compile; identifier casing
 * restored to the canonical Hadoop WordCount form. Runtime strings kept
 * byte-identical.
 *
 * @param key     offset-style key supplied by the input format (per the
 *                comment above: the line's starting byte offset — not
 *                verifiable from this chunk alone)
 * @param value   the text of the current input line
 * @param context MapReduce context used to emit (word, one) pairs
 * @throws IOException          if the framework write fails
 * @throws InterruptedException if the task is interrupted
 */
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
    System.out.println("key is 马克-to-win @ 马克java社区 " + key.toString() + " value is " + value.toString());
    // Split the line on whitespace; one output record per token.
    StringTokenizer itr = new StringTokenizer(value.toString());
    while (itr.hasMoreTokens()) {
        // `word` and `one` are fields of the enclosing mapper class,
        // declared outside this view (presumably Text / IntWritable —
        // confirm against the full source).
        word.set(itr.nextToken());
        context.write(word, one);
    }
}
}
public static class intsumreducer extends reducer<text, intwritable, text, intwritable> {
private intwritable result = new intwritable();
public void reduce(text key, iterable<intwritable> values, context context)
throws ioexception, interruptedexception {
system.out.println("reduce key is 马克-to-win @ 马克java社区 "+key.tostring());
int sum = 0;
for (intwritable val : values) {
int valvalue=val.get();
system.out.println("valvalue is"+valvalue);
sum += valvalue ;
}
result.set(sum);
context.write(key, result);
}
}
/* NOTE(review): this main() is TRUNCATED in this chunk — the prose resumes
   right after job.setmapperclass(...) and no closing brace is visible, so the
   code is left byte-identical (comments only added). Like the rest of the
   page it has also been lowercased by extraction; the real identifiers are
   Configuration, GenericOptionsParser, Job, WordCount, TokenizerMapper, etc.
   Restore casing against the original Hadoop WordCount source. */
public static void main(string[] args) throws exception {
// Hadoop configuration; GenericOptionsParser strips framework flags,
// leaving the application's own arguments.
configuration conf = new configuration();
string[] otherargs = new genericoptionsparser(conf, args).getremainingargs();
// Exactly two app arguments expected: input path and output path.
if (otherargs.length != 2) {
system.err.println("usage: wordcount <in> <out>");
system.exit(2);
}
// new Job(conf, name) is the old (pre-2.x preferred) constructor;
// Job.getInstance(conf, name) is the modern replacement — not changed here.
job job = new job(conf, "word count");
job.setjarbyclass(wordcount.class);
job.setmapperclass(tokenizermapper.class);
文章转载自原文 (原文链接在此处缺失)
上一篇: 通过AOP实现MyBatis多数据源的动态切换实例教程
下一篇: MapReduce的输入文件是两个