Hadoop-2.7.1单机版安装 博客分类: Hadoop hadoop安装
程序员文章站
2024-02-25 20:26:42
...
一、修改配置(core-site.xml、hdfs-site.xml、mapred-site.xml、yarn-site.xml)
hadoop-2.7.1/etc/hadoop/core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- URI of the default filesystem the clients and daemons connect to.
       fs.defaultFS is the current key; fs.default.name is deprecated. -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://127.0.0.1:9000</value>
  </property>
  <!-- Base directory for Hadoop's temporary files.
       NOTE(review): /tmp is typically cleared on reboot — use a
       persistent path for anything beyond a throwaway test install. -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>file:/tmp</value>
  </property>
  <!-- Read/write buffer size in bytes: 128 KiB (131072 = 128 * 1024).
       The original value 131702 was a digit-transposition typo. -->
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
</configuration>
hadoop-2.7.1/etc/hadoop/hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- Where the NameNode stores the filesystem image and edit log.
       NOTE(review): /tmp is cleared on reboot — relocate for any
       installation whose data must survive a restart. -->
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/tmp/dfs/name</value>
  </property>
  <!-- Where the DataNode stores HDFS block data (same /tmp caveat). -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/tmp/dfs/data</value>
  </property>
  <!-- Single-node install: one replica per block. -->
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <!-- HTTP address of the SecondaryNameNode. -->
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>127.0.0.1:9001</value>
  </property>
  <!-- Enable the WebHDFS REST API on NameNode and DataNodes. -->
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
</configuration>
hadoop-2.7.1/etc/hadoop/mapred-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- mapred.job.tracker is the obsolete MRv1 JobTracker key;
       it is unused when running on YARN, so it stays commented out. -->
  <!-- <property>
    <name>mapred.job.tracker</name>
    <value>127.0.0.1:9001</value>
  </property> -->
  <!-- Run MapReduce jobs on YARN (MRv2). -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- RPC address of the MapReduce JobHistory server. -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>127.0.0.1:10020</value>
  </property>
  <!-- Web UI address of the MapReduce JobHistory server. -->
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>127.0.0.1:19888</value>
  </property>
</configuration>
hadoop-2.7.1/etc/hadoop/yarn-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
  <!-- Auxiliary service the NodeManager runs so reducers can fetch
       map output (the MapReduce shuffle). -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <!-- The settings below are optional and left disabled; the defaults
       are fine for a single-node install. The first property name was
       originally misspelled "auxservices" — the correct key is
       yarn.nodemanager.aux-services.mapreduce_shuffle.class. -->
  <!-- <property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>127.0.0.1:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>127.0.0.1:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>127.0.0.1:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>127.0.0.1:8033</value>
  </property> -->
  <!-- Web UI address of the ResourceManager. -->
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>127.0.0.1:8088</value>
  </property>
  <!-- <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>768</value>
  </property> -->
</configuration>
二、配置本机密码登录
输入命令:ssh-keygen,之后一直按回车键即可
输入命令:cp id_rsa.pub authorized_keys;复制之前检测是否已存在authorized_keys文件,若存在需要将id_rsa.pub的内容复制到 authorized_keys中
另外可能需要在服务器上设置以下权限:
~/.ssh权限设置为700
~/.ssh/authorized_keys的权限设置为600
三、启动
先格式化:hadoop-2.7.1/bin/hdfs namenode -format
启动:hadoop-2.7.1/sbin/start-dfs.sh
启动:hadoop-2.7.1/sbin/start-yarn.sh
四、验证
在浏览器中输入:
http://127.0.0.1:8088
http://127.0.0.1:50070
PS:在Window上启动时出现了如下错误:
org.apache.hadoop.io.nativeio.NativeIO$Windows.access0(Ljava/lang/String;I)Z
时需要下载Hadoop源代码重新编译:将 NativeIO.java 中调用 Windows.access0 的检查(约第610行)注释掉,直接返回 true
若出现下面的错误需要下载winutils.exe文件放入D:\Tools\hadoop-2.7.1\bin目录下
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.
15/12/04 19:18:12 ERROR util.Shell: Failed to locate the winutils binary in the
hadoop binary path
java.io.IOException: Could not locate executable D:\Tools\hadoop-2.7.1\bin\winutils.exe in the Hadoop binaries.
at org.apache.hadoop.util.Shell.getQualifiedBinPath(Shell.java:356)
at org.apache.hadoop.util.Shell.getWinUtilsPath(Shell.java:371)
at org.apache.hadoop.util.Shell.<clinit>(Shell.java:364)
at org.apache.hadoop.util.StringUtils.<clinit>(StringUtils.java:80)
at org.apache.hadoop.hdfs.server.datanode.DataNode.secureMain(DataNode.j
ava:2483)
at org.apache.hadoop.hdfs.server.datanode.DataNode.main(DataNode.java:2508)
hadoop-common-2.2.0-bin-master.zip下载地址:https://codeload.github.com/srccodes/hadoop-common-2.2.0-bin/zip/master
hadoop-2.7.1/etc/hadoop/core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- URI of the default filesystem the clients and daemons connect to.
       fs.defaultFS is the current key; fs.default.name is deprecated. -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://127.0.0.1:9000</value>
  </property>
  <!-- Base directory for Hadoop's temporary files.
       NOTE(review): /tmp is typically cleared on reboot — use a
       persistent path for anything beyond a throwaway test install. -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>file:/tmp</value>
  </property>
  <!-- Read/write buffer size in bytes: 128 KiB (131072 = 128 * 1024).
       The original value 131702 was a digit-transposition typo. -->
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
</configuration>
hadoop-2.7.1/etc/hadoop/hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- Where the NameNode stores the filesystem image and edit log.
       NOTE(review): /tmp is cleared on reboot — relocate for any
       installation whose data must survive a restart. -->
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/tmp/dfs/name</value>
  </property>
  <!-- Where the DataNode stores HDFS block data (same /tmp caveat). -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/tmp/dfs/data</value>
  </property>
  <!-- Single-node install: one replica per block. -->
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <!-- HTTP address of the SecondaryNameNode. -->
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>127.0.0.1:9001</value>
  </property>
  <!-- Enable the WebHDFS REST API on NameNode and DataNodes. -->
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
</configuration>
hadoop-2.7.1/etc/hadoop/mapred-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- mapred.job.tracker is the obsolete MRv1 JobTracker key;
       it is unused when running on YARN, so it stays commented out. -->
  <!-- <property>
    <name>mapred.job.tracker</name>
    <value>127.0.0.1:9001</value>
  </property> -->
  <!-- Run MapReduce jobs on YARN (MRv2). -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- RPC address of the MapReduce JobHistory server. -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>127.0.0.1:10020</value>
  </property>
  <!-- Web UI address of the MapReduce JobHistory server. -->
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>127.0.0.1:19888</value>
  </property>
</configuration>
hadoop-2.7.1/etc/hadoop/yarn-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
  <!-- Auxiliary service the NodeManager runs so reducers can fetch
       map output (the MapReduce shuffle). -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <!-- The settings below are optional and left disabled; the defaults
       are fine for a single-node install. The first property name was
       originally misspelled "auxservices" — the correct key is
       yarn.nodemanager.aux-services.mapreduce_shuffle.class. -->
  <!-- <property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>127.0.0.1:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>127.0.0.1:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>127.0.0.1:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>127.0.0.1:8033</value>
  </property> -->
  <!-- Web UI address of the ResourceManager. -->
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>127.0.0.1:8088</value>
  </property>
  <!-- <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>768</value>
  </property> -->
</configuration>
二、配置本机密码登录
输入命令:ssh-keygen,之后一直按回车键即可
输入命令:cp id_rsa.pub authorized_keys;复制之前检测是否已存在authorized_keys文件,若存在需要将id_rsa.pub的内容复制到 authorized_keys中
另外可能需要在服务器上设置以下权限:
~/.ssh权限设置为700
~/.ssh/authorized_keys的权限设置为600
三、启动
先格式化:hadoop-2.7.1/bin/hdfs namenode -format
启动:hadoop-2.7.1/sbin/start-dfs.sh
启动:hadoop-2.7.1/sbin/start-yarn.sh
四、验证
在浏览器中输入:
http://127.0.0.1:8088
http://127.0.0.1:50070
PS:在Window上启动时出现了如下错误:
org.apache.hadoop.io.nativeio.NativeIO$Windows.access0(Ljava/lang/String;I)Z
时需要下载Hadoop源代码重新编译:将 NativeIO.java 中调用 Windows.access0 的检查(约第610行)注释掉,直接返回 true
若出现下面的错误需要下载winutils.exe文件放入D:\Tools\hadoop-2.7.1\bin目录下
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.
15/12/04 19:18:12 ERROR util.Shell: Failed to locate the winutils binary in the
hadoop binary path
java.io.IOException: Could not locate executable D:\Tools\hadoop-2.7.1\bin\winutils.exe in the Hadoop binaries.
at org.apache.hadoop.util.Shell.getQualifiedBinPath(Shell.java:356)
at org.apache.hadoop.util.Shell.getWinUtilsPath(Shell.java:371)
at org.apache.hadoop.util.Shell.<clinit>(Shell.java:364)
at org.apache.hadoop.util.StringUtils.<clinit>(StringUtils.java:80)
at org.apache.hadoop.hdfs.server.datanode.DataNode.secureMain(DataNode.j
ava:2483)
at org.apache.hadoop.hdfs.server.datanode.DataNode.main(DataNode.java:2508)
hadoop-common-2.2.0-bin-master.zip下载地址:https://codeload.github.com/srccodes/hadoop-common-2.2.0-bin/zip/master