Linux_BigData Common Configurations / Commands
Linux
Dual NIC configuration
vi /etc/sysconfig/network-scripts/ifcfg-eth0 # configure the NIC / change its IP
rm -rf /etc/udev/rules.d/70-persistent-net.rules # delete the persistent MAC address rules
service network restart # restart the network service
service iptables stop # stop the firewall
chkconfig iptables off # disable firewall auto-start on boot
ifconfig # view network interface information
vi /etc/hosts # map hostnames to IP addresses, e.g.:
192.168.11.20 HadoopNode00
vi /etc/sysconfig/network # set the hostname:
NETWORKING=yes
HOSTNAME=HadoopNode00
# passwordless SSH login
ssh-keygen -t rsa # generate an RSA key pair
ssh-copy-id HadoopNode00 # copy the public key to the target host
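To verify, an SSH session to the host should now open without a password prompt (assuming the key was copied successfully):
ssh HadoopNode00 # should log in without asking for a password
exit # return to the original shell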
NIC 1 (eth0)
DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=static
IPADDR=192.168.134.99
NETMASK=255.255.255.0
BROADCAST=192.168.134.255
# Notes:
# NIC 1 uses NAT mode
# static IP
# the subnet just needs to match the VM's virtual network settings
# used for communication between the virtual machines
NIC 2 (eth1)
DEVICE=eth1
TYPE=Ethernet
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=dhcp
# Notes:
# NIC 2 uses bridged mode
# dynamic IP (DHCP)
# used for public internet access
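A quick sanity check after editing both files (a sketch, assuming the addresses from the configs above):
service network restart # apply the new NIC settings
ifconfig eth0 # should show the static address 192.168.134.99
ifconfig eth1 # should show a DHCP-assigned address from the bridged network
ping -c 3 www.baidu.com # optional: any reachable public host confirms eth1 works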
Clock synchronization
ntpdate -u ntp.api.bz # one-shot sync against a public NTP server
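For regular synchronization instead of a one-off run, the same command can go into a cron entry (a sketch, assuming ntpdate is installed at /usr/sbin/ntpdate):
crontab -e # then add the line below
*/30 * * * * /usr/sbin/ntpdate -u ntp.api.bz # sync the clock every 30 minutes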
Hadoop
Format the NameNode
hdfs namenode -format # only needed on first setup; reformatting wipes HDFS metadata
Start/stop HDFS
start-dfs.sh # start HDFS
stop-dfs.sh # stop HDFS
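If HDFS came up cleanly, jps should list the HDFS daemons (on a pseudo-distributed node all three run on the same machine):
jps # expect NameNode, DataNode and SecondaryNameNode in the output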
Upload a file
hadoop fs -put /root/install.log /1.txt
List files
hadoop fs -ls /
Download a file
hadoop fs -get /1.txt /root/1.txt
Delete a file
hadoop fs -rm /2.txt
Delete a directory
hadoop fs -rm -r -f /data
View a file
hadoop fs -cat /1.txt
Create a directory
hadoop fs -mkdir /data
Copy a file
hadoop fs -cp /1.txt /data/
Start YARN
start-yarn.sh
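As with HDFS, jps is a quick check that the YARN daemons are up (ResourceManager on the master, NodeManager on each worker):
jps # expect ResourceManager and/or NodeManager in the output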
Remote JAR deployment
hadoop jar <jar file> <fully-qualified Job class name>
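A concrete invocation might look like this; the jar name, main class and HDFS paths are placeholders for illustration only:
hadoop jar wordcount.jar com.example.WordCountJob /input /output # run the Job whose main class is com.example.WordCountJob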
Zookeeper
Start
./bin/zkServer.sh start ./conf/zk.conf
Check status
./bin/zkServer.sh status ./conf/zk.conf
Connect
./bin/zkCli.sh -server 192.168.15.130:2181
Other commands
connect host:port
get path [watch]
ls path [watch]
set path data [version]
rmr path
quit
printwatches on|off
create [-s] [-e] path data acl # persistent node by default; -s creates a sequential node, -e an ephemeral node
stat path [watch]
close
ls2 path [watch]
history
setAcl path acl
getAcl path
addauth scheme auth
delete path [version]
connect (connect to ZooKeeper)
connect localhost:2181
ls / ls2 (list child nodes)
[zk: localhost:2181(CONNECTED) 19] ls /
[dubbo, hadoop-ha, zookeeper]
--------------------------------------
[zk: localhost:2181(CONNECTED) 20] ls2 /
[dubbo, hadoop-ha, zookeeper]
cZxid = 0x0
ctime = Thu Jan 01 08:00:00 CST 1970
mZxid = 0x0
mtime = Thu Jan 01 08:00:00 CST 1970
pZxid = 0x1001
cversion = 4
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 4
create (create a node)
[zk: localhost:2181(CONNECTED) 15] create /test '1'
Created /test
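Sequential and ephemeral nodes use the -s and -e flags listed above (a sketch; /test must already exist for these child paths):
create -s /test/seq 'a' # sequential node: ZooKeeper appends an increasing numeric suffix to the path
create -e /test/tmp 'b' # ephemeral node: removed automatically when this client session closes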
delete (delete a node)
[zk: localhost:2181(CONNECTED) 21] delete /test
[zk: localhost:2181(CONNECTED) 22] ls /
[dubbo, hadoop-ha, zookeeper]
rmr (recursively delete a node)
[zk: localhost:2181(CONNECTED) 28] ls /test
[cjh]
---------------------------------
[zk: localhost:2181(CONNECTED) 44] rmr /test/cjh
[zk: localhost:2181(CONNECTED) 46] ls /test
[]
quit (exit the ZooKeeper shell client)
quit
stat (view node status)
[zk: localhost:2181(CONNECTED) 51] stat /test
cZxid = 0x1004
ctime = Tue Mar 12 15:28:00 CST 2019
mZxid = 0x1004
mtime = Tue Mar 12 15:28:00 CST 2019
pZxid = 0x100f
cversion = 10
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 1
numChildren = 0