Windows 10 + Eclipse + Hadoop 2.5.0 Environment Setup
I. Configure the Hadoop cluster environment on Linux.
II. Basic environment setup on Windows
1. JDK configuration
--> After installing the JDK, set the JAVA_HOME environment variable to the JDK install directory, add the JDK's bin directory to the Path variable, and set CLASSPATH to .;%JAVA_HOME%\lib;%JAVA_HOME%\lib\tools.jar;
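For example, assuming the JDK is installed under C:\Java\jdk1.7.0 (the install path here is an assumption; adjust it to your machine), the three variables would look like this. Verify with java -version in a new command prompt:

    JAVA_HOME = C:\Java\jdk1.7.0
    Path      = %JAVA_HOME%\bin;(existing entries)
    CLASSPATH = .;%JAVA_HOME%\lib;%JAVA_HOME%\lib\tools.jar;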
2. Download Hadoop-2.5.0.tar.gz
3. Edit the Windows hosts file to map the cluster's hostname to its IP
--> 192.168.1.101 Hadoop01 --> this binds the Linux machine's IP to its hostname
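To confirm the mapping took effect, ping the hostname from a Windows command prompt (the IP and hostname follow the example entry above); the reply should come from 192.168.1.101:

    C:\> ping Hadoop01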
III. Eclipse environment configuration
1. Extract Hadoop-2.5.0.tar.gz and copy hadoop.dll and winutils.exe into the Hadoop-2.5.0/bin directory
2. Copy hadoop-eclipse-plugin-2.5.1.jar into Eclipse's plugins directory and restart Eclipse
3. Open Window > Preferences > Hadoop Map/Reduce and point it at the directory where Hadoop-2.5.0.tar.gz was extracted on Windows
4. Configure the connection to Hadoop: open Window > Show View and add a Map/Reduce location, filling in the connection info as shown in the figure below.
Host: the IP of the HDFS host; here the hostname Hadoop01 works, because the Windows hosts file already maps it to the IP.
Left Port: the HDFS web access port; right Port: the HDFS internal access port (see the sketch below for where this value comes from).
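The internal (DFS) port has to match what the cluster itself uses: it is the port in the fs.defaultFS entry of core-site.xml on the Linux side. A sketch, assuming the cluster uses the default port 8020 (the same port appears in the HDFS URIs of the WordCount example below):

    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://Hadoop01:8020</value>
    </property>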
5. If the connection succeeds, the view lists all the files in the HDFS file system.
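Besides the DFS Locations view, connectivity can also be verified from Java code once a project with the Hadoop client jars on its classpath exists (step 6 below creates one). A minimal sketch; the class name is hypothetical and the URI is an assumption that must match fs.defaultFS on the cluster:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HdfsConnectTest {
        public static void main(String[] args) throws Exception {
            // assumption: the cluster's fs.defaultFS is hdfs://Hadoop01:8020
            FileSystem fs = FileSystem.get(URI.create("hdfs://Hadoop01:8020"), new Configuration());
            // list the files under the HDFS root, like the DFS Locations view does
            for (FileStatus status : fs.listStatus(new Path("/"))) {
                System.out.println(status.getPath());
            }
            fs.close();
        }
    }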
6. Create a Java project and write the WordCount program:
package com.bigdata.mapreduce;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MapReduceModule {

    // step 1: Mapper class
    public static class MapReduceMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        private Text mapOutputKey = new Text();
        // every occurrence of a word counts as 1
        private IntWritable mapOutputValue = new IntWritable(1);

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // read one line of the file, converting Text to String
            String lineValue = value.toString();
            // split the line into words on spaces
            String[] strs = lineValue.split(" ");
            // take each word from the array and emit a <word, 1> pair, e.g. <hadoop, 1>
            for (String str : strs) {
                // set the output key
                mapOutputKey.set(str);
                // emit the map output
                context.write(mapOutputKey, mapOutputValue);
            }
        }
    }

    // step 2: Reducer class
    public static class MapReduceReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable outputValue = new IntWritable();

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            // temp: sum
            int sum = 0;
            // iterate over the values and accumulate the count
            for (IntWritable value : values) {
                // total
                sum += value.get();
            }
            // set output value
            outputValue.set(sum);
            // emit the final <word, count> pair
            context.write(key, outputValue);
        }
    }

    // step 3: Driver
    public int run(String[] args) throws Exception {
        // pick up the cluster configuration
        Configuration configuration = new Configuration();
        // create a Job
        Job job = Job.getInstance(configuration, this.getClass().getSimpleName());
        // entry point of the MapReduce program: tells the jar which class to run
        job.setJarByClass(this.getClass());

        // set up the Job
        // input path
        Path inpath = new Path(args[0]);
        FileInputFormat.addInputPath(job, inpath);
        // output path
        Path outpath = new Path(args[1]);
        FileOutputFormat.setOutputPath(job, outpath);

        // set the Mapper
        job.setMapperClass(MapReduceMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // set the Reducer
        job.setReducerClass(MapReduceReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // submit the Job to YARN and wait for completion
        boolean isSuccess = job.waitForCompletion(true);
        return isSuccess ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // example HDFS input/output paths; the output directory must not exist yet
        args = new String[] { "hdfs://bigdata-senior01.liuhongyang.com:8020/user/admin/mapreduce/input",
                "hdfs://bigdata-senior01.liuhongyang.com:8020/user/admin/mapreduce/output3" };
        // run the job
        int status = new MapReduceModule().run(args);
        // exit with the job status
        System.exit(status);
    }
}
Run this class.
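Before running, the input directory must exist on HDFS and contain at least one text file, and the output directory must not exist yet, otherwise the job fails with a FileAlreadyExistsException. The paths below follow the ones hard-coded in main; wc.input is a placeholder name for any local text file:

    $ bin/hdfs dfs -mkdir -p /user/admin/mapreduce/input
    $ bin/hdfs dfs -put wc.input /user/admin/mapreduce/input

After the job finishes, the word counts are in /user/admin/mapreduce/output3/part-r-00000.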
If running it throws
Exception in thread "main" java.lang.UnsatisfiedLinkError:
org.apache.hadoop.io.nativeio.NativeIO$Windows.access0(Ljava/lang/String;I)
then copy org.apache.hadoop.util.DiskChecker out of the hadoop-common jar, recreate it in the project as an org.apache.hadoop.util.DiskChecker.java file under the same package, and comment out line 94, the call to checkDirAccess(dir). The copy in the project's source folder takes precedence over the class in the jar, so the failing access check is skipped.
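If the error persists, two other commonly reported workarounds are to make sure the hadoop.dll copied in step III.1 can be loaded (for example by adding the Hadoop-2.5.0\bin directory to the Windows PATH) and to point the hadoop.home.dir system property at the extracted directory before the job starts, e.g. at the top of main. A sketch; the local path is an assumption:

    // assumption: the archive from step III.1 was extracted to D:\Hadoop-2.5.0,
    // with hadoop.dll and winutils.exe present in its bin directory
    System.setProperty("hadoop.home.dir", "D:\\Hadoop-2.5.0");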