Setting up a local CDH4 repository and installing Hadoop, Hive, Impala, and HBase
-- File locations:
Log directories:
/var/log/hadoop-hdfs
/var/log/hadoop-mapreduce
/var/log/hbase
/var/log/hive
/var/log/impala
Installation directory:
/usr/lib
Init scripts:
/etc/init.d/
Configuration directories:
/etc/hadoop/conf
/etc/hbase/conf
/etc/hive/conf
/etc/impala/conf
Environment variables:
/etc/profile
To make changes to /etc/profile take effect immediately, run:
source /etc/profile
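For reference, a minimal sketch of the kind of entries this layout implies in /etc/profile; the paths below are assumptions based on the package install locations above, and JAVA_HOME must be adjusted to your JDK:
# Hypothetical /etc/profile additions for a CDH4 package install (adjust to your system)
export JAVA_HOME=/usr/java/default
export HADOOP_HOME=/usr/lib/hadoop
export HIVE_HOME=/usr/lib/hive
export HBASE_HOME=/usr/lib/hbase
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HIVE_HOME/bin:$HBASE_HOME/bin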
Node configuration:
/etc/hosts
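For a single-node setup like this one, /etc/hosts typically maps the machine's LAN address to its hostname; the address and hostname below are placeholders:
# Example /etc/hosts entries (replace the IP and hostname with your own)
127.0.0.1    localhost
172.16.2.52  cdh4-node1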
Yum repository definitions:
/etc/yum.repos.d/
1. Building the local CDH4 repository
1) Install the httpd service, so the local repository can be browsed over HTTP
-- Install httpd
yum install httpd
-- Start httpd
vi /etc/httpd/conf/httpd.conf (configuration file)
service httpd start
service httpd stop
service httpd restart
-- Stop the firewall
service iptables status
service iptables stop
-- Check from a browser
http://<server-ip>
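To confirm httpd is serving the document root without a browser, a quick check from the shell (assuming the server address 172.16.2.52 used later in this guide):
# Expect an HTTP response header (e.g. 200 or 403) from Apache
curl -I http://172.16.2.52/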
2) Install createrepo
-- createrepo is used to build the metadata for a local yum repository.
sudo yum install yum-utils createrepo
3) Build the local repository
-- Download cloudera-manager.repo and cloudera-cdh4.repo into /etc/yum.repos.d/
cd /etc/yum.repos.d/
wget http://archive.cloudera.com/cm4/redhat/6/x86_64/cm/cloudera-manager.repo
wget http://archive.cloudera.com/cdh4/redhat/6/x86_64/cdh/cloudera-cdh4.repo
-- Sync the RPM packages from the remote repositories into the local httpd document root
cd /var/www/html/
reposync -r cloudera-manager
reposync -r cloudera-cdh4
-- Generate (or refresh) the repository metadata: the repodata folder that holds dependency information for the RPMs.
cd /var/www/html/cloudera-manager
createrepo .
cd /var/www/html/cloudera-cdh4
createrepo .
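Once createrepo has run, each repository directory should contain the synced RPM packages alongside a repodata folder; a quick way to check:
# Each directory should list a repodata/ folder next to the packages
ls /var/www/html/cloudera-manager
ls /var/www/html/cloudera-cdh4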
At this point, the local CDH4 repository is ready.
4) Using the local repository
-- Edit cloudera-manager.repo so that baseurl points at the local server
Change baseurl=http://archive.cloudera.com/cm4/redhat/6/x86_64/cm/4/ to:
baseurl=http://172.16.2.52/cloudera-manager/
-- Edit cloudera-cdh4.repo so that baseurl points at the local server
Change baseurl=http://archive.cloudera.com/cdh4/redhat/6/x86_64/cdh/4/ to:
baseurl=http://172.16.2.52/cloudera-cdh4/
The baseurl is the server address plus the name of the directory holding the RPM packages: inside it are an RPMS folder for the packages and a repodata folder for dependency metadata.
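After editing the .repo files, clearing the yum cache ensures the new baseurl takes effect, and repolist confirms the local repositories are visible:
# Re-read the repo definitions; cloudera-manager and cloudera-cdh4 should appear
yum clean all
yum repolist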
2. Installing Cloudera Manager
-- Install
sudo yum install cloudera-manager-daemons
sudo yum install cloudera-manager-server
sudo yum install cloudera-manager-server-db
sudo yum install cloudera-manager-agent
-- Start (the embedded database must be up before the server)
service cloudera-scm-server-db start
service cloudera-scm-server start
service cloudera-scm-agent start
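Once the server has finished starting (this can take a minute or two), the Cloudera Manager admin console should answer on its default port, 7180:
# Expect an HTTP response once the server is up
curl -I http://localhost:7180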
3. Installing CDH4
-- Install CDH4 (MRv1, pseudo-distributed)
yum install hadoop-0.20-mapreduce-jobtracker hadoop-0.20-mapreduce-tasktracker
yum install hadoop-hdfs-namenode
yum install hadoop-hdfs-datanode
yum install hadoop-0.20-conf-pseudo
-- Format the NameNode (first run only)
sudo -u hdfs hdfs namenode -format
-- Start HDFS
for x in `cd /etc/init.d ; ls hadoop-hdfs-*` ; do sudo service $x start ; done
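A quick sanity check that the daemons are up and HDFS answers (jps is available if a full JDK is installed):
# jps should list NameNode and DataNode; the fs command should print the HDFS root
sudo jps
sudo -u hdfs hadoop fs -ls /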
4. Installing Hive
Reference: http://heylinux.com/archives/2456.html
-- Install Hive
sudo yum install hive hive-metastore hive-server
-- Install MySQL
sudo yum install mysql mysql-server mysql-devel
service mysqld start
-- Install the MySQL JDBC driver
sudo yum install mysql-connector-java
ln -s /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib/mysql-connector-java.jar
-- Set the MySQL root password
sudo /usr/bin/mysql_secure_installation
Enter the password at the prompts:
[...]
Enter current password for root (enter for none):
OK, successfully used password, moving on...
[...]
Set root password? [Y/n] y
New password: hadoophive
Re-enter new password: hadoophive
Remove anonymous users? [Y/n] Y
[...]
Disallow root login remotely? [Y/n] N
[...]
Remove test database and access to it [Y/n] Y
[...]
Reload privilege tables now? [Y/n] Y
All done!
-- Log in to MySQL (enter the root password, hadoophive, when prompted) and run the following statements in order
mysql -u root -p
CREATE DATABASE metastore;
USE metastore;
SOURCE /usr/lib/hive/scripts/metastore/upgrade/mysql/hive-schema-0.10.0.mysql.sql;
CREATE USER 'hive'@'%' IDENTIFIED BY 'hadoophive';
CREATE USER 'hive'@'localhost' IDENTIFIED BY 'hadoophive';
GRANT ALL PRIVILEGES ON metastore.* TO 'hive'@'%';
GRANT ALL PRIVILEGES ON metastore.* TO 'hive'@'localhost';
FLUSH PRIVILEGES;
quit;
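To verify that the hive user can reach the schema that was just loaded (assuming the hadoophive password set above):
# Should list the metastore tables (DBS, TBLS, ...) without an access error
mysql -u hive -phadoophive metastore -e 'SHOW TABLES;'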
-- Edit the configuration file hive-site.xml, using the following as a reference
sudo vim /etc/hive/conf/hive-site.xml
<configuration>
<!-- Hive Configuration can either be stored in this file or in the hadoop configuration files -->
<!-- that are implied by Hadoop setup variables. -->
<!-- Aside from Hadoop setup variables - this file is provided as a convenience so that Hive -->
<!-- users do not have to edit hadoop configuration files (that may be managed as a centralized -->
<!-- resource). -->
<!-- Hive Execution Parameters -->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://localhost:3306/metastore</value>
<description>JDBC connect string for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>hive</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>hadoophive</value>
</property>
<property>
<name>hive.metastore.uris</name>
<value>thrift://127.0.0.1:9083</value>
<description>IP address (or fully-qualified domain name) and port of the metastore host</description>
</property>
</configuration>
-- Start Hive
service hive-metastore start
service hive-server start
-- Run the following in order
$ sudo -u hdfs hadoop fs -mkdir /user/hive
$ sudo -u hdfs hadoop fs -chown hive /user/hive
$ sudo -u hdfs hadoop fs -mkdir /tmp
$ sudo -u hdfs hadoop fs -chmod 777 /tmp
$ sudo -u hdfs hadoop fs -chmod o+t /tmp
$ sudo -u hdfs hadoop fs -mkdir /data
$ sudo -u hdfs hadoop fs -chown hdfs /data
$ sudo -u hdfs hadoop fs -chmod 777 /data
$ sudo -u hdfs hadoop fs -chmod o+t /data
$ sudo chown -R hive:hive /var/lib/hive
-- Create a sample data file
$ sudo vim /tmp/kv1.txt
1,www.baidu.com
2,wwww.google.com
3,wwww.sina.com.cn
4,wwww.163.com
5,wheylinx.com
-- In the hive shell, create a table and load the sample data
CREATE TABLE IF NOT EXISTS pokes ( foo INT, bar STRING ) ROW FORMAT DELIMITED FIELDS TERMINATED BY "," LINES TERMINATED BY "\n";
show tables;
desc formatted pokes;
LOAD DATA LOCAL INPATH '/tmp/kv1.txt' OVERWRITE INTO TABLE pokes;
select * from pokes;
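As a final check, a count over the table should return the five rows loaded from /tmp/kv1.txt (under MRv1 this launches a MapReduce job):
# Should print 5
hive -e 'SELECT COUNT(*) FROM pokes;'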
5. Installing Impala
Reference: http://heylinux.com/archives/2456.html
-- Download the repo file
sudo wget http://archive.cloudera.com/impala/redhat/6/x86_64/impala/cloudera-impala.repo
-- Sync the repository
cd /var/www/html/
reposync -r cloudera-impala
cd /var/www/html/cloudera-impala
createrepo .
-- Edit cloudera-impala.repo
Change baseurl=http://archive.cloudera.com/impala/redhat/6/x86_64/impala/1/ to:
baseurl=http://172.16.2.52/cloudera-impala/
-- Install
sudo yum install impala
sudo yum install impala-shell
sudo yum install impala-server
sudo yum install impala-state-store
-- Add the following to /etc/hadoop/conf/hdfs-site.xml
sudo vim /etc/hadoop/conf/hdfs-site.xml
<property>
<name>dfs.client.read.shortcircuit</name>
<value>true</value>
</property>
<property>
<name>dfs.domain.socket.path</name>
<value>/var/run/hadoop-hdfs/dn._PORT</value>
</property>
<property>
<name>dfs.client.file-block-storage-locations.timeout</name>
<value>3000</value>
</property>
<property>
<name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
<value>true</value>
</property>
-- Copy the configuration files into /etc/impala/conf/
sudo cp -rpa /etc/hadoop/conf/core-site.xml /etc/impala/conf/
sudo cp -rpa /etc/hadoop/conf/hdfs-site.xml /etc/impala/conf/
sudo cp -rpa /etc/hive/conf/hive-site.xml /etc/impala/conf/
-- Restart the DataNode and Impala
sudo service hadoop-hdfs-datanode restart
sudo service impala-state-store restart
sudo service impala-server restart
-- Open the Impala shell
impala-shell
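A minimal smoke test, assuming the pokes table created through Hive above. Impala has to be told about tables created outside it: early 1.x releases use the refresh statement, later ones invalidate metadata; run whichever your version supports inside impala-shell first, then query from the command line:
# Query the Hive table through Impala (should return the 5 sample rows)
impala-shell -q 'select * from pokes'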
6. Installing HBase (pseudo-distributed)
-- Install ZooKeeper
yum install zookeeper-server
-- Start ZooKeeper
service zookeeper-server start
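On CDH packages the ZooKeeper data directory normally has to be initialized before the first start; if the start command complains, run the init step first. The ruok four-letter command is a quick liveness check:
# Initialize the data directory (first run only), then verify the server answers
sudo service zookeeper-server init
echo ruok | nc localhost 2181   # expect "imok"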
-- Install hbase, hbase-master, and hbase-regionserver
sudo yum install hbase
sudo yum install hbase-master
sudo yum install hbase-regionserver
-- Raise the open-file limits
sudo vim /etc/security/limits.conf
hdfs - nofile 32768
hbase - nofile 32768
-- Make PAM apply the limits (note: /etc/pam.d/common-session is the Debian/Ubuntu path; on RHEL/CentOS the pam_limits line usually goes in /etc/pam.d/system-auth)
$sudo vim /etc/pam.d/common-session
session required pam_limits.so
-- Update the Hadoop configuration
sudo vim /etc/hadoop/conf/hdfs-site.xml
<property>
<name>dfs.datanode.max.xcievers</name>
<value>4096</value>
</property>
-- Edit the HBase configuration /etc/hbase/conf/hbase-site.xml (replace myhost below with the NameNode's hostname)
<configuration>
<property>
<name>hbase.rootdir</name>
<value>hdfs://myhost:8020/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
</configuration>
-- Create the HBase directory in HDFS
sudo -u hdfs hadoop fs -mkdir /hbase
sudo -u hdfs hadoop fs -chown hbase /hbase
-- Restart HDFS
sudo /etc/init.d/hadoop-hdfs-namenode restart
sudo /etc/init.d/hadoop-hdfs-datanode restart
-- Start HBase
sudo service hbase-master start
sudo service hbase-regionserver start
-- Open the HBase shell
hbase shell
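A minimal smoke test inside the shell (the table and column-family names here are arbitrary): create a table, write one cell, and scan it back:
# Inside hbase shell: create a table with one column family, write a cell, read it back
create 't1', 'f1'
put 't1', 'row1', 'f1:c1', 'value1'
scan 't1'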
Reposted from: https://my.oschina.net/u/569297/blog/190510