@20170829
A note on why this test is worth doing:
Customers sometimes ask for ASM normal redundancy so that data is mirrored across separate storage arrays, one copy written to storage1 and one to storage2.
But with three voting disks spread over two arrays, one array inevitably ends up holding two voting disks and the other only one.
If the array holding two voting disks fails, 2/3 of the voting disks are lost at once; Grid Infrastructure then judges the cluster unhealthy and cannot bring up any of the cluster resources (VIP, nodeapps, listener, instances).
So we need a way to keep the number of online voting disks greater than the number of offline ones, or the cluster cannot start.
Assume three voting disks (normal redundancy):
votingdisk1 : /dev/sdh1
votingdisk2 : /dev/sdi1
votingdisk3 : /dev/mapper/vg--crsmirror-lv_mirror (an LVM mirror of /dev/sdj and /dev/sdk)
This test walks through using an LVM mirror as votingdisk3, recovering after deleting all the OCR ASM disk headers, and then repairing the LVM mirror disk.
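The voting-file and OCR layout can be checked at any point during the steps below. A minimal pair of checks (paths assume the Grid home used throughout this walkthrough):
/u01/app/12.1.0.2/grid/bin/crsctl query css votedisk   # lists voting files and their disk groups
/u01/app/12.1.0.2/grid/bin/ocrcheck                    # shows the OCR location and integrity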
Create Physical Volume
pvcreate /dev/sdj /dev/sdk
[root@rac1 ~]# pvcreate /dev/sdj /dev/sdk
Physical volume "/dev/sdj" successfully created
Physical volume "/dev/sdk" successfully created
Display the physical volumes:
# pvdisplay
[root@rac1 ~]# pvdisplay
--- Physical volume ---
PV Name /dev/sda2
VG Name vg_rac1
PV Size 39.51 GiB / not usable 3.00 MiB
Allocatable yes (but full)
PE Size 4.00 MiB
Total PE 10114
Free PE 0
Allocated PE 10114
PV UUID wuRltg-6PRJ-ehHd-xxBc-NDMT-sJzw-ksDLU5
"/dev/sdj" is a new physical volume of "2.00 GiB"
--- NEW Physical volume ---
PV Name /dev/sdj
VG Name
PV Size 2.00 GiB
Allocatable NO
PE Size 0
Total PE 0
Free PE 0
Allocated PE 0
PV UUID rWOc2k-RRHn-qaf1-cyfp-9ise-yo9r-ukIhs7
"/dev/sdk" is a new physical volume of "2.00 GiB"
--- NEW Physical volume ---
PV Name /dev/sdk
VG Name
PV Size 2.00 GiB
Allocatable NO
PE Size 0
Total PE 0
Free PE 0
Allocated PE 0
PV UUID BkdUPV-bDSf-Q0Ps-XSdy-cK47-97pD-7adxaW
[root@rac1 ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda2 vg_rac1 lvm2 a-- 39.51g 0
/dev/sdj lvm2 --- 2.00g 2.00g
/dev/sdk lvm2 --- 2.00g 2.00g
Create Volume Group
# vgcreate vg-crsmirror /dev/sdj /dev/sdk
[root@rac1 ~]# vgcreate vg-crsmirror /dev/sdj /dev/sdk
Volume group "vg-crsmirror" successfully created
[root@rac1 ~]#
[root@rac1 ~]# vgdisplay
--- Volume group ---
VG Name vg-crsmirror
System ID
Format lvm2
Metadata Areas 2
Metadata Sequence No 1
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 0
Open LV 0
Max PV 0
Cur PV 2
Act PV 2
VG Size 3.99 GiB
PE Size 4.00 MiB
Total PE 1022
Alloc PE / Size 0 / 0
Free PE / Size 1022 / 3.99 GiB
VG UUID hX8G3p-Ft0n-pYAW-oGWZ-QDnP-RcoA-phC8eN
Create Mirrored Logical Volume
lvcreate -L 512M -m1 -n lv_mirror vg-crsmirror
[root@rac1 ~]# lvcreate -L 2040M -m1 -n lv_mirror vg-crsmirror   # to start over: lvremove /dev/vg-crsmirror/lv_mirror
Logical volume "lv_mirror" created
(The LV was first created at 512M, which is what the fdisk output below still shows; it was later removed and recreated at 2040M so NEWOCR3 could match the 2 GB disks. See the ORA-15410 error further down.)
Activating the LVM logical volume
lvchange -ay /dev/vg-crsmirror/lv_mirror
# to deactivate: lvchange -an /dev/vg-crsmirror/lv_mirror
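Plain (non-clustered) LVM is not activated for you by the clusterware, and a mirror LV that stays inactive after a reboot takes the NEWOCR3 voting disk with it (exactly what the reboot test later in this post runs into). One hedged way to activate it at boot is an rc.local entry; whether this runs early enough depends on how ohasd is started on your system, so treat it as a sketch and test it:
# /etc/rc.d/rc.local
lvchange -ay /dev/vg-crsmirror/lv_mirror   # bring the mirror LV online
oracleasm scandisks                        # let oracleasm rediscover NEWOCR3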
Verify the mirror devices
fdisk -l |grep mirror
[root@rac1 vg-crsmirror]# fdisk -l |grep mirror
Disk /dev/mapper/vg--crsmirror-lv_mirror_mlog: 4 MB, 4194304 bytes
Disk /dev/mapper/vg--crsmirror-lv_mirror_mimage_0: 536 MB, 536870912 bytes
Disk /dev/mapper/vg--crsmirror-lv_mirror_mimage_1: 536 MB, 536870912 bytes
Disk /dev/mapper/vg--crsmirror-lv_mirror: 536 MB, 536870912 bytes
[root@rac1 vg-crsmirror]# lvs -a -o +seg_pe_ranges --segments
LV VG Attr #Str Type SSize PE Ranges
lv_mirror vg-crsmirror mwi-a-m--- 2 mirror 512.00m lv_mirror_mimage_0:0-127 lv_mirror_mimage_1:0-127
[lv_mirror_mimage_0] vg-crsmirror iwi-aom--- 1 linear 512.00m /dev/sdj:0-127
[lv_mirror_mimage_1] vg-crsmirror iwi-aom--- 1 linear 512.00m /dev/sdk:0-127
[lv_mirror_mlog] vg-crsmirror lwi-aom--- 1 linear 4.00m /dev/sdk:128-128
lv_root vg_rac1 -wi-ao---- 1 linear 39.01g /dev/sda2:0-9985
lv_swap vg_rac1 -wi-ao---- 1 linear 512.00m /dev/sda2:9986-10113
[root@rac1 vg-crsmirror]# fdisk -l /dev/mapper/vg--crsmirror-lv_mirror
Disk /dev/mapper/vg--crsmirror-lv_mirror: 536 MB, 536870912 bytes
255 heads, 63 sectors/track, 65 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
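Note in the lvs segment output above that the mirror log (lv_mirror_mlog) was allocated on /dev/sdk, the same device as one of the mirror legs, so losing sdk takes out a leg and the log together. If no third device is available for the log, an in-memory log is one alternative (a sketch; a core log lives in RAM, so the mirror is fully resynchronized after every reboot):
lvcreate -L 2040M -m1 --mirrorlog core -n lv_mirror vg-crsmirror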
-------------------------------------------------------------------------------
The same commands attempted on the second node. Because /dev/sdj and /dev/sdk are shared LUNs, rac2 already sees the volume group created on rac1; nothing needs to be recreated here, only activated.
[root@rac2 ~]# pvcreate /dev/sdj /dev/sdk
Can't initialize physical volume "/dev/sdj" of volume group "vg-crsmirror" without -ff
Can't initialize physical volume "/dev/sdk" of volume group "vg-crsmirror" without -ff
[root@rac2 ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda2 vg_rac2 lvm2 a-- 39.51g 0
/dev/sdj vg-crsmirror lvm2 a-- 2.00g 1.50g
/dev/sdk vg-crsmirror lvm2 a-- 2.00g 1.49g
[root@rac2 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
vg-crsmirror 2 1 0 wz--n- 3.99g 2.99g
vg_rac2 1 2 0 wz--n- 39.51g 0
[root@rac2 ~]#
[root@rac2 ~]# lvcreate -L 512M -m1 -n lv_mirror vg-crsmirror
Logical volume "lv_mirror" already exists in volume group "vg-crsmirror"
[root@rac2 ~]# lvdisplay |grep mirror
LV Path /dev/vg-crsmirror/lv_mirror
LV Name lv_mirror
VG Name vg-crsmirror
[root@rac2 dev]# lvm pvscan
PV /dev/sdj VG vg-crsmirror lvm2 [2.00 GiB / 1.50 GiB free]
PV /dev/sdk VG vg-crsmirror lvm2 [2.00 GiB / 1.49 GiB free]
PV /dev/sda2 VG vg_rac2 lvm2 [39.51 GiB / 0 free]
Total: 3 [43.50 GiB] / in use: 3 [43.50 GiB] / in no VG: 0 [0 ]
[root@rac2 dev]# lvm vgscan
Reading all physical volumes. This may take a while...
Found volume group "vg-crsmirror" using metadata type lvm2
Found volume group "vg_rac2" using metadata type lvm2
[root@rac2 dev]# lvm lvscan
inactive '/dev/vg-crsmirror/lv_mirror' [512.00 MiB] inherit
ACTIVE '/dev/vg_rac2/lv_root' [39.01 GiB] inherit
ACTIVE '/dev/vg_rac2/lv_swap' [512.00 MiB] inherit
Note that lv_mirror is inactive on rac2: each node must activate the LV explicitly (done below with lvchange -ay). Also, this is plain non-clustered LVM, so metadata changes (lvcreate, lvremove, vgcfgrestore) should only ever be issued from one node at a time.
-------------------------------------------------------------------------------
[root@rac1 ~]# fdisk /dev/mapper/vg--crsmirror-lv_mirror
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel with disk identifier 0x30a357cb.
Changes will remain in memory only, until you decide to write them.
After that, of course, the previous content won't be recoverable.
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
WARNING: DOS-compatible mode is deprecated. It's strongly recommended to
switch off the mode (command 'c') and change display units to
sectors (command 'u').
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-65, default 1):
Using default value 1
Last cylinder, +cylinders or +size{K,M,G} (1-65, default 65):
Using default value 65
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
WARNING: Re-reading the partition table failed with error 22: Invalid argument.
The kernel still uses the old table. The new table will be used at
the next reboot or after you run partprobe(8) or kpartx(8)
Syncing disks.
[root@rac1 ~]#
[root@rac1 ~]# fdisk -l |grep mirror
Disk /dev/mapper/vg--crsmirror-lv_mirror_mlog: 4 MB, 4194304 bytes
Disk /dev/mapper/vg--crsmirror-lv_mirror_mimage_0: 536 MB, 536870912 bytes
/dev/mapper/vg--crsmirror-lv_mirror_mimage_0p1 1 65 522081 83 Linux
Disk /dev/mapper/vg--crsmirror-lv_mirror_mimage_1: 536 MB, 536870912 bytes
/dev/mapper/vg--crsmirror-lv_mirror_mimage_1p1 1 65 522081 83 Linux
Disk /dev/mapper/vg--crsmirror-lv_mirror: 536 MB, 536870912 bytes
/dev/mapper/vg--crsmirror-lv_mirrorp1 1 65 522081 83 Linux
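The fdisk warning above says the kernel keeps the old partition table until partprobe(8) or kpartx(8) runs. For a device-mapper target like this LV, kpartx can create the partition node without a reboot (a sketch; in the steps below it is the whole LV, not the p1 partition, that gets handed to oracleasm):
kpartx -a /dev/mapper/vg--crsmirror-lv_mirror
ls /dev/mapper/ | grep mirrorp1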
[root@rac1 ~]# oracleasm createdisk newocr1 /dev/sdh1
Writing disk header: done
Instantiating disk: done
[root@rac1 ~]# oracleasm createdisk newocr2 /dev/sdi1
Writing disk header: done
Instantiating disk: done
oracleasm createdisk newocr3 /dev/mapper/vg--crsmirror-lv_mirror
Writing disk header: done
Instantiating disk: done
[root@rac1 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
[root@rac1 ~]# oracleasm listdisks
DISK1
DISK2
DISK3
DISK4
DISK5
DISK6
NEWOCR1
NEWOCR2
NEWOCR3
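To double-check the new labels, oracleasm querydisk resolves a label to its backing device and vice versa (a quick sketch):
oracleasm querydisk NEWOCR3                               # is the label a valid ASM disk?
oracleasm querydisk /dev/mapper/vg--crsmirror-lv_mirror   # which label does the device carry?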
-------------------------------------------------------------------------------
[root@rac2 mapper]# lvchange -ay /dev/vg-crsmirror/lv_mirror
[root@rac2 mapper]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "NEWOCR3"
[root@rac2 mapper]# oracleasm listdisks
DISK1
DISK2
DISK3
DISK4
DISK5
DISK6
NEWOCR1
NEWOCR2
NEWOCR3
---
# Recovery after deleting all the OCR ASM disk headers
Preparation: after stopping CRS on all nodes, delete the LUN headers:
oracleasm deletedisk NEWOCR1
oracleasm deletedisk NEWOCR2
oracleasm deletedisk NEWOCR3
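A quick way to verify the headers really are gone before going any further (querydisk now reports the devices as not marked):
oracleasm querydisk /dev/sdh1
oracleasm querydisk /dev/sdi1
oracleasm querydisk /dev/mapper/vg--crsmirror-lv_mirror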
[root@rac1 ~]# /u01/app/12.1.0.2/grid/bin/ocrconfig -showbackup
PROT-26: Oracle Cluster Registry backup locations were retrieved from a local copy
rac1 2015/11/04 18:18:33 /u01/app/12.1.0.2/grid/cdata/racscan/backup00.ocr 3281833642
rac1 2015/11/04 14:18:33 /u01/app/12.1.0.2/grid/cdata/racscan/backup01.ocr 3281833642
rac1 2015/11/04 10:18:33 /u01/app/12.1.0.2/grid/cdata/racscan/backup02.ocr 3281833642
rac1 2015/11/03 14:18:32 /u01/app/12.1.0.2/grid/cdata/racscan/day.ocr 3281833642
rac1 2015/10/31 04:56:42 /u01/app/12.1.0.2/grid/cdata/racscan/week.ocr 3281833642
rac1 2015/03/27 13:42:09 /u01/app/12.1.0.2/grid/cdata/racscan/backup_20150327_134209.ocr 3281833642
[root@rac1 ~]# su - grid
[grid@rac1 ~]$ olsnodes
rac1
rac2
[root@rac1 ~]# /u01/app/12.1.0.2/grid/bin/crsctl stop crs -f
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'rac1'
CRS-2673: Attempting to stop 'ora.mdnsd' on 'rac1'
CRS-2673: Attempting to stop 'ora.gpnpd' on 'rac1'
CRS-2673: Attempting to stop 'ora.ctssd' on 'rac1'
CRS-2673: Attempting to stop 'ora.evmd' on 'rac1'
CRS-2673: Attempting to stop 'ora.asm' on 'rac1'
CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'rac1'
CRS-2677: Stop of 'ora.drivers.acfs' on 'rac1' succeeded
CRS-2677: Stop of 'ora.mdnsd' on 'rac1' succeeded
CRS-2677: Stop of 'ora.gpnpd' on 'rac1' succeeded
CRS-2677: Stop of 'ora.evmd' on 'rac1' succeeded
CRS-2677: Stop of 'ora.ctssd' on 'rac1' succeeded
CRS-2677: Stop of 'ora.asm' on 'rac1' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'rac1'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'rac1' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'rac1'
CRS-2677: Stop of 'ora.cssd' on 'rac1' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'rac1'
CRS-2677: Stop of 'ora.gipcd' on 'rac1' succeeded
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'rac1' has completed
CRS-4133: Oracle High Availability Services has been stopped.
[root@rac2 ~]# /u01/app/12.1.0.2/grid/bin/crsctl stop crs -f
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'rac2'
CRS-2673: Attempting to stop 'ora.asm' on 'rac2'
CRS-2673: Attempting to stop 'ora.mdnsd' on 'rac2'
CRS-2673: Attempting to stop 'ora.gpnpd' on 'rac2'
CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'rac2'
CRS-2677: Stop of 'ora.drivers.acfs' on 'rac2' succeeded
CRS-2677: Stop of 'ora.mdnsd' on 'rac2' succeeded
CRS-2677: Stop of 'ora.gpnpd' on 'rac2' succeeded
CRS-2677: Stop of 'ora.asm' on 'rac2' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'rac2'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'rac2' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'rac2'
CRS-2673: Attempting to stop 'ora.evmd' on 'rac2'
CRS-2677: Stop of 'ora.ctssd' on 'rac2' succeeded
CRS-2677: Stop of 'ora.evmd' on 'rac2' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'rac2'
CRS-2677: Stop of 'ora.cssd' on 'rac2' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'rac2'
CRS-2677: Stop of 'ora.gipcd' on 'rac2' succeeded
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'rac2' has completed
CRS-4133: Oracle High Availability Services has been stopped.
/u01/app/12.1.0.2/grid/bin/ocrconfig -restore /u01/app/12.1.0.2/grid/cdata/racscan/backup00.ocr
[root@rac1 ~]# /u01/app/12.1.0.2/grid/bin/ocrconfig -restore /u01/app/12.1.0.2/grid/cdata/racscan/backup00.ocr
PROT-35: The configured OCR locations are not accessible
PROC-26: Error while accessing the physical storage
The restore fails because the disk group that held the OCR no longer exists. It has to be recreated first: start ASM on one node in exclusive mode, without CRS.
/u01/app/12.1.0.2/grid/bin/crsctl start crs -excl -nocrs
[root@rac1 ~]# /u01/app/12.1.0.2/grid/bin/crsctl start crs -excl -nocrs
CRS-4123: Oracle High Availability Services has been started.
CRS-2672: Attempting to start 'ora.evmd' on 'rac1'
CRS-2672: Attempting to start 'ora.mdnsd' on 'rac1'
CRS-2676: Start of 'ora.evmd' on 'rac1' succeeded
CRS-2676: Start of 'ora.mdnsd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'rac1'
CRS-2676: Start of 'ora.gpnpd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'rac1'
CRS-2672: Attempting to start 'ora.gipcd' on 'rac1'
CRS-2676: Start of 'ora.cssdmonitor' on 'rac1' succeeded
CRS-2676: Start of 'ora.gipcd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'rac1'
CRS-2672: Attempting to start 'ora.diskmon' on 'rac1'
CRS-2676: Start of 'ora.diskmon' on 'rac1' succeeded
CRS-2676: Start of 'ora.cssd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'rac1'
CRS-2672: Attempting to start 'ora.ctssd' on 'rac1'
CRS-2676: Start of 'ora.ctssd' on 'rac1' succeeded
CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'rac1'
CRS-2676: Start of 'ora.asm' on 'rac1' succeeded
Check CRSD is down and stop it if it is not.
[root@rac1 ~]# /u01/app/12.1.0.2/grid/bin/crsctl status resource ora.crsd -init
NAME=ora.crsd
TYPE=ora.crs.type
TARGET=OFFLINE
STATE=OFFLINE
[root@rac1 ~]# /u01/app/12.1.0.2/grid/bin/crsctl query css votedisk
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE c86457b0b3234f29bffc5d814e92df63 (ORCL:DISK1) [DATA]
Located 1 voting disk(s).
(The voting file lives in the +DATA disk group, so wiping the NEWOCR disks lost the OCR location but left the cluster's voting files intact.)
[root@rac1 ~]#
[grid@rac1 ~]$ sqlplus / as sysasm
SQL*Plus: Release 12.1.0.2.0 Production on Sun Nov 8 02:03:30 2015
Copyright (c) 1982, 2014, Oracle. All rights reserved.
Connected to:
Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit Production
With the Real Application Clusters and Automatic Storage Management options
SQL> -- drop diskgroup newocr force including contents;
-- alter diskgroup newocr rebalance;   (if the drop fails, rebalance first and retry)
SQL> create diskgroup newocr normal redundancy disk 'ORCL:NEWOCR1', 'ORCL:NEWOCR2', 'ORCL:NEWOCR3' attribute 'compatible.asm' = '12.1';
create diskgroup newocr normal redundancy disk 'ORCL:NEWOCR1', 'ORCL:NEWOCR2', 'ORCL:NEWOCR3' attribute 'compatible.asm' = '12.1'
*
ERROR at line 1:
ORA-15018: diskgroup cannot be created
ORA-15410: Disks in disk group NEWOCR do not have equal size.
SQL>
ORA-15410 complains because the candidate disks are not all exactly the same size (the two 2 GB partitions and the 2040M LV differ slightly). Giving every disk an explicit, identical SIZE clause works around it:
create diskgroup OCRNEW normal redundancy disk 'ORCL:NEWOCR1' size 2000M, 'ORCL:NEWOCR2' size 2000M, 'ORCL:NEWOCR3' size 2000M attribute 'compatible.asm' = '11.2';
SQL>
Diskgroup created.
SQL>
[root@rac1 ~]# /u01/app/12.1.0.2/grid/bin/ocrconfig -restore /u01/app/12.1.0.2/grid/cdata/racscan/backup00.ocr
[root@rac1 ~]#
[root@rac1 ~]# /u01/app/12.1.0.2/grid/bin/ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 4
Total space (kbytes) : 409568
Used space (kbytes) : 1420
Available space (kbytes) : 408148
ID : 2056011687
Device/File Name : +OCRNEW
Device/File integrity check succeeded
Device/File not configured
Device/File not configured
Device/File not configured
Device/File not configured
Cluster registry integrity check succeeded
Logical corruption check succeeded
[root@rac1 ~]#
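Had the voting files also lived in the lost disk group (here they were in +DATA, so this was not needed), they would be moved onto the recreated disk group before restarting CRS:
/u01/app/12.1.0.2/grid/bin/crsctl replace votedisk +OCRNEW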
[root@rac1 ~]# /u01/app/12.1.0.2/grid/bin/crsctl stop crs -f
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'rac1'
CRS-2673: Attempting to stop 'ora.storage' on 'rac1'
CRS-2673: Attempting to stop 'ora.mdnsd' on 'rac1'
CRS-2673: Attempting to stop 'ora.gpnpd' on 'rac1'
CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'rac1'
CRS-2677: Stop of 'ora.storage' on 'rac1' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'rac1'
CRS-2673: Attempting to stop 'ora.evmd' on 'rac1'
CRS-2673: Attempting to stop 'ora.asm' on 'rac1'
CRS-2677: Stop of 'ora.drivers.acfs' on 'rac1' succeeded
CRS-2677: Stop of 'ora.mdnsd' on 'rac1' succeeded
CRS-2677: Stop of 'ora.gpnpd' on 'rac1' succeeded
CRS-2677: Stop of 'ora.evmd' on 'rac1' succeeded
CRS-2677: Stop of 'ora.ctssd' on 'rac1' succeeded
CRS-2677: Stop of 'ora.asm' on 'rac1' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'rac1'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'rac1' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'rac1'
CRS-2677: Stop of 'ora.cssd' on 'rac1' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'rac1'
CRS-2677: Stop of 'ora.gipcd' on 'rac1' succeeded
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'rac1' has completed
CRS-4133: Oracle High Availability Services has been stopped.
[root@rac1 ~]# /u01/app/12.1.0.2/grid/bin/crsctl start crs
CRS-4123: Oracle High Availability Services has been started.
[root@rac2 ~]# /u01/app/12.1.0.2/grid/bin/crsctl start crs
CRS-4123: Oracle High Availability Services has been started.
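With both nodes restarted, confirm the stack is healthy cluster-wide before moving on:
/u01/app/12.1.0.2/grid/bin/crsctl check cluster -all
/u01/app/12.1.0.2/grid/bin/crsctl stat res -t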
---
# Repairing the LVM mirror disk
# The LVM mirror was originally created with:
# vgcreate vg-crsmirror /dev/sdj /dev/sdk
# Simulate mirror-disk corruption by destroying /dev/sdk by hand with dd:
[root@rac1 ~]# dd if=/dev/zero of=/dev/sdk bs=1M count=512
512+0 records in
512+0 records out
536870912 bytes (537 MB) copied, 2.18916 s, 245 MB/s
[root@rac1 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
[root@rac1 ~]# oracleasm listdisks
DISK1
DISK2
DISK3
DISK4
DISK5
DISK6
NEWOCR1
NEWOCR2
NEWOCR3
[root@rac2 ~]# oracleasm listdisks
DISK1
DISK2
DISK3
DISK4
DISK5
DISK6
NEWOCR1
NEWOCR2
NEWOCR3
# After rebooting both RAC nodes, the cluster fails to start; the LVM mirror turns out to be the problem:
[root@rac1 rac1]# pvscan -vP
PARTIAL MODE. Incomplete logical volumes will be processed.
Wiping cache of LVM-capable devices
Wiping internal VG cache
Walking through all physical volumes
Couldn't find device with uuid BkdUPV-bDSf-Q0Ps-XSdy-cK47-97pD-7adxaW.
There are 1 physical volumes missing.
PV /dev/sdj VG vg-crsmirror lvm2 [2.00 GiB / 4.00 MiB free]
PV unknown device VG vg-crsmirror lvm2 [2.00 GiB / 0 free]
PV /dev/sda2 VG vg_rac1 lvm2 [39.51 GiB / 0 free]
Total: 3 [43.50 GiB] / in use: 3 [43.50 GiB] / in no VG: 0 [0 ]
[root@rac1 rac1]# vgscan -vP
PARTIAL MODE. Incomplete logical volumes will be processed.
Wiping cache of LVM-capable devices
Wiping internal VG cache
Reading all physical volumes. This may take a while...
Finding all volume groups
Finding volume group "vg-crsmirror"
Couldn't find device with uuid BkdUPV-bDSf-Q0Ps-XSdy-cK47-97pD-7adxaW.
There are 1 physical volumes missing.
There are 1 physical volumes missing.
Found volume group "vg-crsmirror" using metadata type lvm2
Finding volume group "vg_rac1"
Found volume group "vg_rac1" using metadata type lvm2
[root@rac1 rac1]# ls /etc/lvm/archive
vg-crsmirror_00000-577242756.vg vg-crsmirror_00010-1015974208.vg
vg-crsmirror_00001-1371052698.vg vg-crsmirror_00011-454078281.vg
vg-crsmirror_00002-1418557307.vg vg-crsmirror_00012-354651776.vg
vg-crsmirror_00003-1647225163.vg vg-crsmirror_00013-1375971973.vg
vg-crsmirror_00004-696018933.vg vg-crsmirror_00014-1967818712.vg
vg-crsmirror_00005-82168042.vg vg-crsmirror_00015-64490743.vg
vg-crsmirror_00006-554305478.vg vg-crsmirror_00016-955890756.vg
vg-crsmirror_00007-1163445813.vg vg-crsmirror_00017-1706169581.vg
vg-crsmirror_00008-909403487.vg vg-crsmirror_00018-465009429.vg
vg-crsmirror_00009-1210442952.vg vg_rac1_00000-913431249.vg
[root@rac1 rac1]#
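Each archive file records which LVM command it was taken before, which is how to pick the right --restorefile. A quick way to list the descriptions (a sketch, grepping the plain-text archive headers):
grep -H description /etc/lvm/archive/vg-crsmirror_*.vg | tail -5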
# Restore the sdk physical volume from the automatically archived LVM metadata backup:
pvcreate --uuid "BkdUPV-bDSf-Q0Ps-XSdy-cK47-97pD-7adxaW" --restorefile /etc/lvm/archive/vg-crsmirror_00018-465009429.vg /dev/sdk
[root@rac1 rac1]# pvcreate --uuid "BkdUPV-bDSf-Q0Ps-XSdy-cK47-97pD-7adxaW" --restorefile /etc/lvm/archive/vg-crsmirror_00018-465009429.vg /dev/sdk
Couldn't find device with uuid BkdUPV-bDSf-Q0Ps-XSdy-cK47-97pD-7adxaW.
Physical volume "/dev/sdk" successfully created
[root@rac1 rac1]# vgcfgrestore vg-crsmirror
Restored volume group vg-crsmirror
[root@rac1 rac1]# lvs -a -o +devices
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert Devices
lv_mirror vg-crsmirror mwi---m--- 1.99g lv_mirror_mlog lv_mirror_mimage_0(0),lv_mirror_mimage_1(0)
[lv_mirror_mimage_0] vg-crsmirror Iwi---m--- 1.99g /dev/sdj(0)
[lv_mirror_mimage_1] vg-crsmirror Iwi---m--- 1.99g /dev/sdk(0)
[lv_mirror_mlog] vg-crsmirror lwi---m--- 4.00m /dev/sdk(510)
lv_root vg_rac1 -wi-ao---- 39.01g /dev/sda2(0)
lv_swap vg_rac1 -wi-ao---- 512.00m /dev/sda2(9986)
[root@rac1 rac1]# lvchange -ay /dev/vg-crsmirror/lv_mirror
[root@rac1 rac1]# oracleasm listdisks
DISK1
DISK2
DISK3
DISK4
DISK5
DISK6
NEWOCR1
NEWOCR2
[root@rac1 rac1]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "NEWOCR3"
[root@rac1 rac1]# oracleasm listdisks
DISK1
DISK2
DISK3
DISK4
DISK5
DISK6
NEWOCR1
NEWOCR2
NEWOCR3
[root@rac2 ~]# lvchange -ay /dev/vg-crsmirror/lv_mirror
[root@rac2 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "NEWOCR3"
[root@rac2 ~]# oracleasm listdisks
DISK1
DISK2
DISK3
DISK4
DISK5
DISK6
NEWOCR1
NEWOCR2
NEWOCR3
# Check LVM mirror status: a Cpy%Sync of 100.00 means both mirror legs are fully synchronized
[root@rac1 rac1]# lvs | fgrep "mwi"
lv_mirror vg-crsmirror mwi-a-m--- 1.99g lv_mirror_mlog 100.00
[root@rac2 ~]# lvs | fgrep "mwi"
lv_mirror vg-crsmirror mwi-a-m--- 1.99g lv_mirror_mlog 100.00
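While a freshly repaired mirror is still catching up, Cpy%Sync climbs toward 100; progress can be watched with something like:
watch -n 5 'lvs -a vg-crsmirror'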
# Reboot node 1 (rac1), then check database status:
[oracle@rac1 ~]$ srvctl status database -d orcl
Instance orcl1 is running on node rac1
Instance orcl2 is not running on node rac2
# Restart the CRS stack on node 2 (rac2):
[root@rac2 ~]# /u01/app/12.1.0.2/grid/bin/crsctl stop crs -f
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'rac2'
CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'rac2'
CRS-2673: Attempting to stop 'ora.ctssd' on 'rac2'
CRS-2673: Attempting to stop 'ora.evmd' on 'rac2'
CRS-2673: Attempting to stop 'ora.asm' on 'rac2'
CRS-2673: Attempting to stop 'ora.mdnsd' on 'rac2'
CRS-2673: Attempting to stop 'ora.gpnpd' on 'rac2'
CRS-2677: Stop of 'ora.drivers.acfs' on 'rac2' succeeded
CRS-2677: Stop of 'ora.ctssd' on 'rac2' succeeded
CRS-2677: Stop of 'ora.evmd' on 'rac2' succeeded
CRS-2677: Stop of 'ora.mdnsd' on 'rac2' succeeded
CRS-2677: Stop of 'ora.gpnpd' on 'rac2' succeeded
CRS-2677: Stop of 'ora.asm' on 'rac2' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'rac2'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'rac2' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'rac2'
CRS-2677: Stop of 'ora.cssd' on 'rac2' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'rac2'
CRS-2677: Stop of 'ora.gipcd' on 'rac2' succeeded
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'rac2' has completed
CRS-4133: Oracle High Availability Services has been stopped.
[root@rac2 ~]# /u01/app/12.1.0.2/grid/bin/crsctl start crs
CRS-4123: Oracle High Availability Services has been started.
[root@rac2 ~]# ps -ef |grep pmon
grid 18563 1 0 03:02 ? 00:00:00 asm_pmon_+ASM2
oracle 21650 1 0 03:03 ? 00:00:00 ora_pmon_orcl2
root 22085 18525 0 03:03 pts/1 00:00:00 grep pmon
[root@rac2 ~]# su - oracle
[oracle@rac2 ~]$ srvctl status database -d orcl
Instance orcl1 is running on node rac1
Instance orcl2 is not running on node rac2
[oracle@rac2 ~]$ srvctl status database -d orcl
Instance orcl1 is running on node rac1
Instance orcl2 is running on node rac2
[oracle@rac2 ~]$
-------------------------------------------------------------------------------
ref:
https://bbs.archlinux.org/viewtopic.php?id=28942
http://www.ewan.cc/?q=node/108
http://unix.stackexchange.com/questions/12452/how-to-check-lvm-mirroring-under-linux
For more on the ASM disk header, see:
http://jaychu649.blogspot.tw/2017/06/asm-disk-header.html