RAC配置笔记
iscsi常用命令(我通过openfiler实现iscsi存储)
# iscsiadm -m discovery -t st -p ip:port //发现iscsi存储
# iscsiadm -m node -o delete -t target -p ip:port //删除iscsi发现记录
# iscsiadm -m node //查看iscsi发现记录
# iscsiadm -m session //查看会话情况
# iscsiadm -m node -t target -p ip:port -l //登录iscsi存储
# iscsiadm -m node -t target -p ip:port -u //登出iscsi存储
vim /etc/iscsi/initiatorname.iscsi //添加客户端认证
InitiatorName=iqn.2018-12.com.oven:client //名称与服务端acl设置的名称一致（注意：键名必须写为 InitiatorName，区分大小写）
systemctl restart iscsid //更新iqn
iscsiadm -m node -t iqn.2018-12.com.oven:master -p 192.168.4.10 -l //更新iqn后登录成功
[root@rhel1 ~]# fdisk /dev/sde --分区
[root@rhel1 ~]# udevadm test /sys/block/sde ----查看信息
配置udev来固定iscsi磁盘分区
[root@rhel1 ~]# vi /etc/udev/rules.d/99-openiscsi.rules ---->redhat7
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="14f504e46494c455248704e5673662d486e38762d505a7470", SYMLINK+="asm/asm_ocr3/part%n", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="14f504e46494c45524a59386b4a412d39354f472d35776769", SYMLINK+="asm/asm_fra/part%n", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="14f504e46494c455273656f6353492d3762575a2d69475577", SYMLINK+="asm/asm_dbfile/part%n", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="14f504e46494c455233326755744d2d544a586f2d4234696e", SYMLINK+="asm/asm_ocr1/part%n", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="14f504e46494c45524d79356d34542d354b456f2d326b3877", SYMLINK+="asm/asm_ocr2/part%n", OWNER="grid", GROUP="asmadmin", MODE="0660"
[root@rhel1 ~]# udevadm control --reload-rules
[root@rhel1 ~]# udevadm trigger
[root@rhel1 asm]# tree
├── asm_dbfile
│ └── part1 -> ../../sdf1
├── asm_fra
│ └── part1 -> ../../sde1
├── asm_ocr1
│ └── part1 -> ../../sdc1
├── asm_ocr2
│ └── part1 -> ../../sdb1
└── asm_ocr3
└── part1 -> ../../sdd1
5 directories, 5 files
asm配置安装
[root@rhel1 ~]# yum install kmod-oracleasm.x86_64
[root@rhel1 ~]# rpm -ivh /soft/oracleasmlib-2.0.12-1.el7.x86_64.rpm
[root@rhel1 ~]# rpm -ivh /soft/oracleasm-support-2.1.11-2.el7.x86_64.rpm
[root@rhel1 ~]# /etc/init.d/oracleasm configure
/etc/init.d/oracleasm is deprecated. use 'oracleasm configure -i'
[root@rhel1 ~]# oracleasm configure -i ---- 两个节点都需操作（注意是 ASCII 连字符 -i，不要从文档复制成全角/长破折号）
configuring the oracle asm library driver.
this will configure the on-boot properties of the oracle asm library
driver. the following questions will determine whether the driver is
loaded on boot and what permissions it will have. the current values
will be shown in brackets ('[]'). hitting <enter> without typing an
answer will keep that current value. ctrl-c will abort.
default user to own the driver interface []: grid
default group to own the driver interface []: asmadmin
start oracle asm library driver on boot (y/n) [n]: y
scan for oracle asm disks on boot (y/n) [y]: y
writing oracle asm library driver configuration: done
[root@rhel1 asm]# oracleasm createdisk dbfile /dev/asm/asm_dbfile/part1
writing disk header: done
instantiating disk: done
[root@rhel2 ~]# oracleasm scandisks
reloading disk partitions: done
cleaning any stale asm disks...
scanning system for asm disks...
instantiating disk "ocrdisk3"
instantiating disk "fras"
instantiating disk "dbfiles"
instantiating disk "ocrdisk1"
instantiating disk "ocrdisk2"
[root@rhel2 ~]# oracleasm listdisks
dbfiles
fras
ocrdisk1
ocrdisk2
ocrdisk3
[root@rhel2 ~]#oracleasm querydisk -p ocrdisk1 -----查询asm磁盘所对应的设备文件
内核参数设置:
[root@ rhel1 ~]# vi /etc/sysctl.conf
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall =      # 需按实际内存填写：建议为 shmmax / 页大小(getconf PAGE_SIZE，通常4096)
kernel.shmmax =      # 需按实际内存填写：建议为物理内存的一半（单位：字节）
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
执行sysctl -p
配置oracle、grid用户的shell限制
[root@ rhel1 ~]# vi /etc/security/limits.conf
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
[root@rhel1 ~]# vi /etc/pam.d/login
session required /lib64/security/pam_limits.so
添加用户
[root@ rhel1 ~]# groupadd -g 1001 oinstall
[root@ rhel1 ~]# groupadd -g 1002 asmadmin
[root@ rhel1 ~]# groupadd -g 1003 dba
[root@ rhel1 ~]# groupadd -g 1004 oper
[root@ rhel1 ~]# groupadd -g 1005 asmdba
[root@ rhel1 ~]# groupadd -g 1006 asmoper
[root@ rhel1 ~]# useradd -u 1002 -g oinstall -G asmadmin,asmdba,asmoper grid
[root@ rhel1 ~]# usermod -g oinstall -G dba,oper,asmdba oracle
[root@ rhel1 ~]# mkdir -p /u01/app/11.2.0/grid
[root@ rhel1 ~]# chown -R grid:oinstall /u01
[root@ rhel1 ~]# mkdir /u01/app/oracle
[root@ rhel1 ~]# chown -R oracle:oinstall /u01/app/oracle/
[root@ rhel1 ~]# chmod -R 775 /u01/
dns配置
[root@rhel2 ~]#yum install named
[root@rhel2 ~]# vi /etc/named.conf ----添加以下内容
zone "86.168.192.in-addr.arpa" in{
type master;
file "192.168.86.db";
};
zone "example.com." in {
type master;
file "example.com.db";
};
[root@rhel2 ~]# vi /var/named/example.com.db
$ttl 1h
@ in soa homeserver.localdomain. root.homeserver.localdomain. (
5 ; serial
3h ; refresh
1h ; retry
1w ; expire
1h ) ; minimum
ns homeserver.localdomain.
rhel-cluster-scan.grid in a 192.168.86.170
rhel-cluster-scan.grid in a 192.168.86.171
rhel-cluster-scan.grid in a 192.168.86.172
[root@rhel2 ~]# vi /var/named/192.168.86.db
$ttl 1h
@ in soa homeserver.localdomain.grid.example.com. root.homeserver.localdomain.grid.example.com. (
2 ; serial
3h ; refresh
1h ; retry
1w ; expire
1h ) ; minimum
ns homeserver.localdomain.grid.example.com.
170 in ptr rhel-cluster-scan.grid.example.com.
171 in ptr rhel-cluster-scan.grid.example.com.
172 in ptr rhel-cluster-scan.grid.example.com.
[root@rhel2 ~]# vi /etc/resolv.conf
nameserver 192.168.86.152
[root@rhel2 ~]# vi /etc/nsswitch.conf -----在hosts: files dns加上nis
hosts: files dns nis
[root@rhel2 ~]# nslookup rhel-cluster-scan.grid.example.com
server: 192.168.86.152
address: 192.168.86.152#53
name: rhel-cluster-scan.grid.example.com
address: 192.168.86.172
name: rhel-cluster-scan.grid.example.com
address: 192.168.86.170
name: rhel-cluster-scan.grid.example.com
address: 192.168.86.171
[grid@ rhel1 ~]# /u01/app/grid/runcluvfy.sh stage -pre crsinst -n rhel1,rhel2 -fixup -verbose ------执行检查
ssh互信配置 每个节点都需要
[oracle@ rhel1 ~]$
ssh-keygen -t rsa
ssh-keygen -t dsa
[oracle@ rhel1 ~]$
ssh rhel1 cat ~/.ssh/id_rsa.pub >> authorized_keys
ssh rhel2 cat ~/.ssh/id_rsa.pub >> authorized_keys
ssh rhel1 cat ~/.ssh/id_dsa.pub >> authorized_keys
ssh rhel2 cat ~/.ssh/id_dsa.pub >> authorized_keys
[oracle@ rhel1.ssh]$ scp authorized_keys rhel2:~/.ssh/
[oracle@ rhel2.ssh]$ chmod 600 authorized_keys
执行root.sh出现错误
adding daemon to inittab
crs-4124: oracle high availability services startup failed.
crs-4000: command start failed, or completed with errors.
ohasd failed to start: inappropriate ioctl for device
ohasd failed to start at /u01/app/11.2.0/grid/crs/install/rootcrs.pl line 443.
[root@rhel1 ~]# /u01/app/11.2.0/grid/crs/install/roothas.pl -deconfig -force -verbose
出现 adding daemon to inittab 时执行
[root@rhel1 ~]#dd if=/var/tmp/.oracle/npohasd of=/dev/null bs=1024 count=1 (rhel7+11.2.0.1)
[root@rhel1 ~]# /u01/app/11.2.0/grid/root.sh ---->等待上一个节点执行完在执行