
Monday, February 5, 2024

How to check that all nodes are serving connections in RAC - 4-node cluster

 

Queries to check that all nodes are serving connections in RAC:


Query-1

select inst_id,count(*)
from
   gv$session
where
   username is not null
group by
   inst_id;


Query-2


select
   inst_id,
   count(*)
from
   gv$session
where
   status='ACTIVE' and username is not null
group by
   inst_id;
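

Query-3 (optional) - a sketch that additionally breaks the counts down by service name, assuming database services are in use; all columns are standard GV$SESSION columns:

select
   inst_id,
   service_name,
   count(*)
from
   gv$session
where
   username is not null
group by
   inst_id,
   service_name
order by
   inst_id,
   service_name;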





Thursday, December 10, 2020

Cluster status check command

 

12.1.0.2 Grid Infrastructure status check commands

=====================================



[root@node1 ~]# crsctl check cluster -all
**************************************************************
node1:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
node2:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[root@node1 ~]#
[root@node1 ~]# date
Thu Dec 10 04:10:04 IST 2020
[root@node1 ~]#



[root@node1 ~]# crsctl stat resource -t
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.DATA1.dg
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
ora.LISTENER.lsnr
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
ora.asm
               ONLINE  ONLINE       node1                    Started,STABLE
               ONLINE  ONLINE       node2                    Started,STABLE
ora.net1.network
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
ora.ons
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       node2                    STABLE
ora.LISTENER_SCAN2.lsnr
      1        ONLINE  ONLINE       node1                    STABLE
ora.LISTENER_SCAN3.lsnr
      1        ONLINE  ONLINE       node1                    STABLE
ora.MGMTLSNR
      1        ONLINE  ONLINE       node1                    169.254.18.158 10.0.
                                                             0.1,STABLE
ora.cvu
      1        ONLINE  ONLINE       node1                    STABLE
ora.mgmtdb
      1        ONLINE  ONLINE       node1                    Open,STABLE
ora.node1.vip
      1        ONLINE  ONLINE       node1                    STABLE
ora.node2.vip
      1        ONLINE  ONLINE       node2                    STABLE
ora.oc4j
      1        ONLINE  ONLINE       node1                    STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       node2                    STABLE
ora.scan2.vip
      1        ONLINE  ONLINE       node1                    STABLE
ora.scan3.vip
      1        ONLINE  ONLINE       node1                    STABLE
--------------------------------------------------------------------------------
[root@node1 ~]#




[root@node1 ~]# crsctl status resource ora.LISTENER_SCAN2.lsnr -v
NAME=ora.LISTENER_SCAN2.lsnr
TYPE=ora.scan_listener.type
LAST_SERVER=node1
STATE=ONLINE on node1
TARGET=ONLINE
CARDINALITY_ID=1
RESTART_COUNT=0
FAILURE_COUNT=0
FAILURE_HISTORY=
ID=ora.LISTENER_SCAN2.lsnr 1 1
INCARNATION=1
LAST_RESTART=12/10/2020 02:17:20
LAST_STATE_CHANGE=12/10/2020 02:17:19
STATE_DETAILS=
INTERNAL_STATE=STABLE
INSTANCE_COUNT=1
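

A few related checks worth running alongside crsctl (a sketch - <dbname> below is a placeholder for the database unique name, and the output depends on the environment):

srvctl status nodeapps
srvctl status asm
srvctl status database -d <dbname>
olsnodes -n -s -t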



Saturday, May 30, 2020

CRS-4700: The Cluster Time Synchronization Service is in Observer mode.





[oracle@racnode1 ~]$ crsctl check ctss

CRS-4700: The Cluster Time Synchronization Service is in Observer mode.
[oracle@racnode1 ~]$
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# systemctl status chronyd
● chronyd.service - NTP client/server
   Loaded: loaded (/usr/lib/systemd/system/chronyd.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:chronyd(8)
           man:chrony.conf(5)
[root@racnode1 ~]#
[root@racnode1 ~]# systemctl stop chronyd
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# systemctl disable chronyd
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# systemctl status chronyd
● chronyd.service - NTP client/server
   Loaded: loaded (/usr/lib/systemd/system/chronyd.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:chronyd(8)
           man:chrony.conf(5)
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# ls -lrt /etc/chrony.conf
-rw-r--r--. 1 root root 1092 Jan 29  2018 /etc/chrony.conf
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# mv /etc/chrony.conf  /etc/chrony.conf_old
[root@racnode1 ~]#
[root@racnode1 ~]# ls -lrt /etc/chrony.conf
ls: cannot access /etc/chrony.conf: No such file or directory
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# ls -lrt /etc/chrony.conf*
-rw-r--r--. 1 root root 1092 Jan 29  2018 /etc/chrony.conf_old
[root@racnode1 ~]#
[root@racnode1 ~]#

[root@racnode1 ~]# cd /u01/app/12.2.0.1/grid/bin/

[root@racnode1 bin]#
[root@racnode1 bin]# ./crsctl stop cluster -all
CRS-2673: Attempting to stop 'ora.crsd' on 'racnode1'
CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on server 'racnode1'
CRS-2673: Attempting to stop 'ora.guggu.db' on 'racnode1'
CRS-2673: Attempting to stop 'ora.qosmserver' on 'racnode1'
CRS-2673: Attempting to stop 'ora.chad' on 'racnode1'
CRS-2677: Stop of 'ora.chad' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.mgmtdb' on 'racnode1'
CRS-2677: Stop of 'ora.guggu.db' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.LISTENER.lsnr' on 'racnode1'
CRS-2673: Attempting to stop 'ora.LISTENER_SCAN1.lsnr' on 'racnode1'
CRS-2673: Attempting to stop 'ora.cvu' on 'racnode1'
CRS-2673: Attempting to stop 'ora.racnode1.vip' on 'racnode1'
CRS-2677: Stop of 'ora.LISTENER_SCAN1.lsnr' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.scan1.vip' on 'racnode1'
CRS-2677: Stop of 'ora.LISTENER.lsnr' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.mgmtdb' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.racnode1.vip' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.MGMTLSNR' on 'racnode1'
CRS-2673: Attempting to stop 'ora.DATA.dg' on 'racnode1'
CRS-2677: Stop of 'ora.DATA.dg' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'racnode1'
CRS-2677: Stop of 'ora.asm' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.ASMNET1LSNR_ASM.lsnr' on 'racnode1'
CRS-2677: Stop of 'ora.cvu' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.scan1.vip' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.qosmserver' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.MGMTLSNR' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.racnode2.vip' on 'racnode1'
CRS-2677: Stop of 'ora.ASMNET1LSNR_ASM.lsnr' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.racnode2.vip' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.ons' on 'racnode1'
CRS-2677: Stop of 'ora.ons' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.net1.network' on 'racnode1'
CRS-2677: Stop of 'ora.net1.network' on 'racnode1' succeeded
CRS-2792: Shutdown of Cluster Ready Services-managed resources on 'racnode1' has completed
CRS-2677: Stop of 'ora.crsd' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'racnode1'
CRS-2673: Attempting to stop 'ora.evmd' on 'racnode1'
CRS-2673: Attempting to stop 'ora.storage' on 'racnode1'
CRS-2677: Stop of 'ora.storage' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'racnode1'
CRS-2677: Stop of 'ora.ctssd' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.evmd' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.asm' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'racnode1'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'racnode1'
CRS-2677: Stop of 'ora.cssd' on 'racnode1' succeeded
[root@racnode1 bin]#
[root@racnode1 bin]#
[root@racnode1 bin]#



[root@racnode1 bin]#

[root@racnode1 bin]# ./crsctl start cluster -all
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'racnode1'
CRS-2672: Attempting to start 'ora.evmd' on 'racnode1'
CRS-2676: Start of 'ora.cssdmonitor' on 'racnode1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'racnode1'
CRS-2672: Attempting to start 'ora.diskmon' on 'racnode1'
CRS-2676: Start of 'ora.diskmon' on 'racnode1' succeeded
CRS-2676: Start of 'ora.evmd' on 'racnode1' succeeded
CRS-2676: Start of 'ora.cssd' on 'racnode1' succeeded
CRS-2672: Attempting to start 'ora.ctssd' on 'racnode1'
CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'racnode1'
CRS-2676: Start of 'ora.ctssd' on 'racnode1' succeeded
CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'racnode1' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'racnode1'
CRS-2676: Start of 'ora.asm' on 'racnode1' succeeded
CRS-2672: Attempting to start 'ora.storage' on 'racnode1'
CRS-2676: Start of 'ora.storage' on 'racnode1' succeeded
CRS-2672: Attempting to start 'ora.crsd' on 'racnode1'
CRS-2676: Start of 'ora.crsd' on 'racnode1' succeeded
[root@racnode1 bin]#
[root@racnode1 bin]#
[root@racnode1 bin]#


[root@racnode1 bin]# ./crsctl check ctss

CRS-4701: The Cluster Time Synchronization Service is in Active mode.
CRS-4702: Offset (in msec): 0
[root@racnode1 bin]#

[oracle@racnode1 diag]$ cluvfy comp clocksync -verbose


Verifying Clock Synchronization ...

  Node Name                             Status
  ------------------------------------  ------------------------
  racnode1                              passed

  Node Name                             State
  ------------------------------------  ------------------------
  racnode1                              Active

  Node Name     Time Offset               Status
  ------------  ------------------------  ------------------------
  racnode1      0.0                       passed
Verifying Clock Synchronization ...PASSED

Verification of Clock Synchronization across the cluster nodes was successful.


CVU operation performed:      Clock Synchronization across the cluster nodes

Date:                         May 30, 2020 9:51:15 PM
CVU home:                     /u01/app/12.2.0.1/grid/
User:                         oracle
[oracle@racnode1 diag]$


[oracle@racnode2 ~]$ cluvfy comp clocksync -verbose -n racnode1,racnode2

Verifying Clock Synchronization ...
  Node Name                             Status
  ------------------------------------  ------------------------
  racnode2                              passed
  racnode1                              passed

  Node Name                             State
  ------------------------------------  ------------------------
  racnode2                              Active
  racnode1                              Active

  Node Name     Time Offset               Status
  ------------  ------------------------  ------------------------
  racnode2      0.0                       passed
  racnode1      0.0                       passed
Verifying Clock Synchronization ...PASSED

Verification of Clock Synchronization across the cluster nodes was successful.

CVU operation performed:      Clock Synchronization across the cluster nodes
Date:                         May 31, 2020 12:24:56 PM
CVU home:                     /u01/app/12.2.0.1/grid/
User:                         oracle
[oracle@racnode2 ~]$




Summary of the same procedure when ntpd (instead of chronyd) is the configured time service:

# ./crsctl stop cluster -all
# systemctl stop ntpd
# mv /etc/ntp.conf /etc/ntp.conf.original
# systemctl disable ntpd
# ./crsctl start cluster -all
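
The chronyd variant, consolidated from the interactive steps shown above (a sketch - paths assume the default chrony layout and the 12.2 Grid home used in this example):

# ./crsctl stop cluster -all
# systemctl stop chronyd
# systemctl disable chronyd
# mv /etc/chrony.conf /etc/chrony.conf_old
# ./crsctl start cluster -all
# ./crsctl check ctss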






Saturday, February 2, 2019

How to identify the Master Node in RAC

How to identify the Master Node in RAC
============================================

In RAC, only the master node is responsible for taking backups of the OCR.

[grid@rac1 bin]$ ./oclumon manage -get MASTER

Master = rac1
[grid@rac1 bin]$


[root@rac1 bin]# ./ocrconfig -manualbackup

rac2     2019/02/02 22:49:23     +DATA:/rac/OCRBACKUP/backup_20190202_224923.ocr.327.999211765     70732493
[root@rac1 bin]#

[root@rac1 bin]# ./ocrconfig -showbackup

rac2     2018/12/12 20:09:04     +DATA:/rac/OCRBACKUP/backup00.ocr.283.994709331     70732493

rac2     2018/12/12 20:09:04     +DATA:/rac/OCRBACKUP/day.ocr.284.994709345     70732493

rac2     2018/12/12 20:09:04     +DATA:/rac/OCRBACKUP/week.ocr.285.994709347     70732493

rac2     2019/02/02 22:49:23     +DATA:/rac/OCRBACKUP/backup_20190202_224923.ocr.327.999211765     70732493
[root@rac1 bin]#
[root@rac1 bin]# ./ocrconfig -showbackup auto

rac2     2018/12/12 20:09:04     +DATA:/rac/OCRBACKUP/backup00.ocr.283.994709331     70732493

rac2     2018/12/12 20:09:04     +DATA:/rac/OCRBACKUP/day.ocr.284.994709345     70732493

rac2     2018/12/12 20:09:04     +DATA:/rac/OCRBACKUP/week.ocr.285.994709347     70732493
[root@rac1 bin]#
[root@rac1 bin]# ./ocrconfig -showbackup manual

rac2     2019/02/02 22:49:23     +DATA:/rac/OCRBACKUP/backup_20190202_224923.ocr.327.999211765     70732493
[root@rac1 bin]#



[grid@rac1 trace]$ cat ocssd.trc |grep 'master node'
2019-01-21 12:39:42.772 :    CSSD:2787034880: clssgmCMReconfig: GM master node for incarnation 443540379 is node rac1, number 1, with birth incarnation 443540379, the old master is 65535 and new master is 1
2019-01-21 12:39:42.976 :    CSSD:2787034880: clssgmCMReconfig: reconfiguration successful, incarnation 443540379 with 2 nodes, local node number 1, master node rac1, number 1
2019-01-21 12:39:49.250 :    CSSD:2788611840: clssgmCMReconfig: GM master node for incarnation 443540380 is node rac1, number 1, with birth incarnation 443540379, the old master is 1 and new master is 1
2019-01-21 12:39:49.255 :    CSSD:2788611840: clssgmCMReconfig: reconfiguration successful, incarnation 443540380 with 1 nodes, local node number 1, master node rac1, number 1
2019-01-21 12:48:05.570 :    CSSD:832739072: clssgmCMReconfig: GM master node for incarnation 443540883 is node rac1, number 1, with birth incarnation 443540883, the old master is 65535 and new master is 1
2019-01-21 12:48:05.571 :    CSSD:832739072: clssgmCMReconfig: reconfiguration successful, incarnation 443540883 with 1 nodes, local node number 1, master node rac1, number 1
2019-01-21 13:13:57.075 :    CSSD:834316032: clssgmCMReconfig: GM master node for incarnation 443540884 is node rac1, number 1, with birth incarnation 443540883, the old master is 1 and new master is 1
2019-01-21 13:13:59.038 :    CSSD:834316032: clssgmCMReconfig: reconfiguration successful, incarnation 443540884 with 2 nodes, local node number 1, master node rac1, number 1
2019-01-22 23:46:52.347 :    CSSD:2292303616: clssgmCMReconfig: GM master node for incarnation 443666809 is node rac1, number 1, with birth incarnation 443666809, the old master is 65535 and new master is 1
2019-01-22 23:46:52.350 :    CSSD:2292303616: clssgmCMReconfig: reconfiguration successful, incarnation 443666809 with 1 nodes, local node number 1, master node rac1, number 1
2019-02-02 21:20:12.712 :    CSSD:4233729792: clssgmCMReconfig: GM master node for incarnation 444608391 is node <null>, number 2, with birth incarnation 444608390, the old master is 65535 and new master is 2
2019-02-02 21:20:13.044 :    CSSD:4233729792: clssgmCMReconfig: reconfiguration successful, incarnation 444608391 with 2 nodes, local node number 1, master node rac2, number 2
[grid@rac1 trace]$



[grid@rac1 trace]$ cat crsd.trc |grep 'master'
2019-02-02 21:21:34.370 :  OCRMAS:1811937024: th_master_check_hashids_helper: Comparing device hash IDs between local and master.
2019-02-02 21:21:34.370 :  OCRMAS:1811937024: th_master_check_hashids_helper: Local dev (987031272, 1028247821, 0, 0, 0)
2019-02-02 21:21:34.370 :  OCRMAS:1811937024: th_master_check_hashids_helper: Master dev (987031272, 1028247821, 0, 0, 0)
2019-02-02 21:21:34.370 :  OCRMAS:1811937024: th_connect_master: Using GIPC type to connect
2019-02-02 21:21:34.370 :  OCRMAS:1811937024: th_connect_master:10: Master host name [rac2]
2019-02-02 21:21:34.370 :  OCRMAS:1811937024: proath_connect_master: Attempting to connect to master at address [rac2:1494-d8b5-fb40-8a6a]
2019-02-02 21:21:34.826 :  OCRMAS:1811937024: proath_master: SUCCESSFULLY CONNECTED TO THE MASTER
2019-02-02 21:21:34.826 :  OCRMAS:1811937024: th_master: NEW OCR MASTER IS 2
2019-02-02 21:21:34.829 :  OCRSRV:2758700032: th_reg_master_change: Master change callback registered. Client:[1]
2019-02-02 21:21:34.829 :  OCRSRV:2758700032: th_reg_master_change: Notified master change
2019-02-02 21:21:34.829 :  OCRAPI:2758700032: a_reg_master_change: Registered master change callback. flags:[4]
2019-02-02 21:21:34.842 : CRSMAIN:2758700032:  Registering for mastership change events...
2019-02-02 21:21:34.842 :  OCRSRV:2758700032: th_reg_master_change: Master change callback registered. Client:[0]
2019-02-02 21:21:34.842 :  OCRSRV:2758700032: th_reg_master_change: Notified master change
2019-02-02 21:21:34.843 :  OCRAPI:2758700032: a_reg_master_change: Registered master change callback. flags:[2]
2019-02-02 21:24:24.651 :UiServer:1595901696: {1:60436:2} Master change notification has received. New master: 2
[grid@rac1 trace]$
[grid@rac1 trace]$

[grid@rac1 trace]$ pwd
/u01/app/grid/product/18.0.0.0/grid_base/diag/crs/rac1/crs/trace
[grid@rac1 trace]$
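

A quick way to pull the same information together from one shell (a sketch - it assumes the grid user's ORACLE_BASE points to the Grid base directory shown in the pwd above, and that oclumon is in the PATH):

oclumon manage -get MASTER
grep 'NEW OCR MASTER' $ORACLE_BASE/diag/crs/$(hostname -s)/crs/trace/crsd.trc | tail -1
grep 'master node' $ORACLE_BASE/diag/crs/$(hostname -s)/crs/trace/ocssd.trc | tail -1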



Oracle Cluster Registry (OCR) and Oracle Local Registry (OLR) - OCRCHECK: Oracle Cluster Registry Check utility in RAC


Oracle Cluster Registry (OCR) and Oracle Local Registry (OLR)

ocrcheck command in RAC
==============================



[root@rac1 bin]# ./ocrcheck -help
Name:
        ocrcheck - Displays health of Oracle Cluster/Local Registry.

Synopsis:
        ocrcheck [-config | -backupfile <backupfilename>] [-details] [-local]

  -config       Displays the configured locations of the Oracle Cluster Registry.
                This can be used with the -local option to display the configured
                location of the Oracle Local Registry
  -details      Displays detailed configuration information.
  -local        The operation will be performed on the Oracle Local Registry.
  -backupfile <backupfilename>  The operation will be performed on the backup file.

Notes:
        * This command for Oracle Cluster Registry is not supported from a Leaf node.

[root@rac1 bin]#

OCR (Oracle Cluster Registry) information
================================================

The Oracle Clusterware stack (the Grid Infrastructure stack from 11gR2 onward) uses the OCR to manage resources and node membership information.
It contains the following information, shared across all nodes in the cluster:

ASM disk groups, volumes, file systems, and instances
RAC database and instance information
SCAN listeners and local listeners
SCAN VIPs and local VIPs
Nodes and node applications
User-defined resources


[root@rac1 bin]# ./ocrcheck
Status of Oracle Cluster Registry is as follows :
         Version                  :          4
         Total space (kbytes)     :     491684
         Used space (kbytes)      :      84796
         Available space (kbytes) :     406888
         ID                       : 1718087688
         Device/File Name         :      +DATA
                                    Device/File integrity check succeeded

                                    Device/File not configured

                                    Device/File not configured

                                    Device/File not configured

                                    Device/File not configured

         Cluster registry integrity check succeeded

         Logical corruption check succeeded

[root@rac1 bin]#




[root@rac1 bin]#
[root@rac1 bin]# ./ocrcheck -config
Oracle Cluster Registry configuration is :
         Device/File Name         :      +DATA
[root@rac1 bin]# ./ocrcheck -details
Status of Oracle Cluster Registry is as follows :
         Version                  :          4
         Total space (kbytes)     :     491684
         Used space (kbytes)      :      84796
         Available space (kbytes) :     406888
         ID                       : 1718087688
         Device/File Name         : +DATA/rac/OCRFILE/registry.255.993952649
                                    Device/File integrity check succeeded

                                    Device/File not configured

                                    Device/File not configured

                                    Device/File not configured

                                    Device/File not configured

         Cluster registry integrity check succeeded

         Logical corruption check succeeded

[root@rac1 bin]#


[root@rac1 bin]# cat /etc/oracle/ocr.loc
#Device/file +DATA getting replaced by device +DATA/rac/OCRFILE/registry.255.993952649
ocrconfig_loc=+DATA/rac/OCRFILE/registry.255.993952649
local_only=false[root@rac1 bin]#

[root@rac1 bin]#
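
To see what is actually stored in the OCR, the ocrdump utility can write the registry keys to a readable text file (a sketch - the output file name here is arbitrary; run as root from the Grid home bin directory, and the -local option produces the same kind of dump for the OLR):

# cd /u01/app/grid/product/18.0.0.0/grid/bin
# ./ocrdump /tmp/ocr_dump.txt
# more /tmp/ocr_dump.txt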


OLR (Oracle Local Registry) information
=========================================
It contains node-specific information required by OHASD. Every node has its own dedicated OLR file (it is not shared between the nodes).


[root@rac1 bin]# ./ocrcheck -local
Status of Oracle Local Registry is as follows :
         Version                  :          4
         Total space (kbytes)     :     491684
         Used space (kbytes)      :      83164
         Available space (kbytes) :     408520
         ID                       :  161118435
         Device/File Name         : /u01/app/grid/product/18.0.0.0/grid/cdata/rac1.olr
                                    Device/File integrity check succeeded

         Local registry integrity check succeeded

         Logical corruption check succeeded

[root@rac1 bin]#


[root@rac1 bin]#
[root@rac1 bin]# ./ocrcheck  -local -details
Status of Oracle Local Registry is as follows :
         Version                  :          4
         Total space (kbytes)     :     491684
         Used space (kbytes)      :      83164
         Available space (kbytes) :     408520
         ID                       :  161118435
         Device/File Name         : /u01/app/grid/product/18.0.0.0/grid/cdata/rac1.olr
                                    Device/File integrity check succeeded

         Local registry integrity check succeeded

         Logical corruption check succeeded

[root@rac1 bin]#

[root@rac1 bin]# cat /etc/oracle/olr.loc
olrconfig_loc=/u01/app/grid/product/18.0.0.0/grid/cdata/rac1.olr
crs_home=/u01/app/grid/product/18.0.0.0/grid
orplus_config=FALSE

[root@rac1 bin]#
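
Per the ocrcheck help shown earlier, the -config option can be combined with -local to display just the configured OLR location (a sketch, output not shown):

# ./ocrcheck -config -local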

Thursday, January 17, 2019

UDEV SCSI Rules Configuration In Oracle Linux 6






[root@rac1 ~]# ls /dev/sd*
/dev/sda  /dev/sda1  /dev/sda2  /dev/sdb  /dev/sdc  /dev/sdd  /dev/sdd1
[root@rac1 ~]#



[root@rac1 dev]# /sbin/scsi_id -g -u -d /dev/sdb
1ATA_VBOX_HARDDISK_VB6673148e-70a3b8f4
[root@rac1 dev]# /sbin/scsi_id -g -u -d /dev/sdc
1ATA_VBOX_HARDDISK_VBc778fc49-01bbefbb
[root@rac1 dev]#
[root@rac1 ~]# /sbin/scsi_id -g -u -d /dev/sdd
1ATA_VBOX_HARDDISK_VBf8c61d16-a391798e
[root@rac1 ~]#



[root@rac2 dev]#
[root@rac2 dev]# /sbin/scsi_id -g -u -d /dev/sdb
1ATA_VBOX_HARDDISK_VB6673148e-70a3b8f4
[root@rac2 dev]# /sbin/scsi_id -g -u -d /dev/sdc
1ATA_VBOX_HARDDISK_VBc778fc49-01bbefbb
[root@rac2 dev]#
[root@rac2 ~]# /sbin/scsi_id -g -u -d /dev/sdd
1ATA_VBOX_HARDDISK_VBf8c61d16-a391798e
[root@rac2 ~]#




For Oracle Linux 6, add the following entries to /etc/udev/rules.d/99-oracle-asmdevices.rules:


KERNEL=="sd?1", BUS=="scsi", PROGRAM=="/sbin/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VB6673148e-70a3b8f4", NAME="DISK1", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?1", BUS=="scsi", PROGRAM=="/sbin/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VBc778fc49-01bbefbb", NAME="DISK2", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?1", BUS=="scsi", PROGRAM=="/sbin/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VBf8c61d16-a391798e", NAME="DISK3", OWNER="grid", GROUP="asmadmin", MODE="0660"


/sbin/udevadm test /block/sdb/sdb1

/sbin/udevadm test /block/sdc/sdc1

/sbin/udevadm test /block/sdd/sdd1
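

After the rules file is in place, each disk still needs a single partition and udev has to be reloaded before the DISK1-DISK3 device names appear with grid:asmadmin ownership (a sketch following the oracle-base article linked below; run as root on every node):

# fdisk /dev/sdb        (create one primary partition spanning the disk; repeat for /dev/sdc and /dev/sdd)
# /sbin/partprobe /dev/sdb /dev/sdc /dev/sdd
# /sbin/udevadm control --reload-rules
# /sbin/start_udev
# ls -l /dev/DISK*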





https://oracle-base.com/articles/linux/udev-scsi-rules-configuration-in-oracle-linux

CRS-4404: The following nodes did not reply within the allotted time





[grid@rac1 bin]$ ./crsctl check cluster -all
**************************************************************
rac1:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
CRS-4404: The following nodes did not reply within the allotted time:
rac2


After rebooting the second node and running the same command again, it worked fine:


[grid@rac1 bin]$ ./crsctl check cluster -all
**************************************************************
rac1:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
rac2:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[grid@rac1 bin]$

Sunday, January 6, 2019

Oracle 18c RAC Database Installation on Oracle Linux






SQL> SELECT DISTINCT
i.instance_name asm_instance_name,
i.host_name asm_host_name,
c.instance_name client_instance_name,
c.status
FROM gv$instance i, gv$asm_client c
WHERE i.inst_id = c.inst_id;

ASM_INSTANCE_NAM ASM_HOST_NAME                                                    CLIENT_INSTANCE_NAME                                             STATUS
---------------- ---------------------------------------------------------------- ---------------------------------------------------------------- ------------
+ASM1            rac1.localdomain                                                 +APX1                                                            CONNECTED
+ASM1            rac1.localdomain                                                 +ASM1                                                            CONNECTED
+ASM1            rac1.localdomain                                                 rac1.localdomain                                                 CONNECTED
+ASM2            rac2.localdomain                                                 +APX2                                                            CONNECTED
+ASM2            rac2.localdomain                                                 +ASM2                                                            CONNECTED
+ASM2            rac2.localdomain                                                 -MGMTDB                                                          CONNECTED
+ASM2            rac2.localdomain                                                 rac2.localdomain                                                 CONNECTED
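

A related post-installation check (a sketch using standard V$ASM_DISKGROUP columns) shows the state and free space of each ASM disk group:

SQL> select name, state, type, total_mb, free_mb from v$asm_diskgroup;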


[grid@rac1 bin]$ ./crsctl check cluster -all
**************************************************************
rac1:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
rac2:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[grid@rac1 bin]$

[grid@rac1 bin]$ ./crsctl stat res -t

--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr
               ONLINE  ONLINE       rac1                     STABLE
               ONLINE  ONLINE       rac2                     STABLE
ora.DATA.GHCHKPT.advm
               OFFLINE OFFLINE      rac1                     STABLE
               OFFLINE OFFLINE      rac2                     STABLE
ora.DATA.dg
               ONLINE  ONLINE       rac1                     STABLE
               ONLINE  ONLINE       rac2                     STABLE
ora.LISTENER.lsnr
               ONLINE  ONLINE       rac1                     STABLE
               ONLINE  ONLINE       rac2                     STABLE
ora.chad
               ONLINE  ONLINE       rac1                     STABLE
               ONLINE  ONLINE       rac2                     STABLE
ora.data.ghchkpt.acfs
               OFFLINE OFFLINE      rac1                     STABLE
               OFFLINE OFFLINE      rac2                     STABLE
ora.helper
               OFFLINE OFFLINE      rac1                     IDLE,STABLE
               OFFLINE OFFLINE      rac2                     IDLE,STABLE
ora.net1.network
               ONLINE  ONLINE       rac1                     STABLE
               ONLINE  ONLINE       rac2                     STABLE
ora.ons
               ONLINE  ONLINE       rac1                     STABLE
               ONLINE  INTERMEDIATE rac2                     STABLE
ora.proxy_advm
               ONLINE  ONLINE       rac1                     STABLE
               ONLINE  ONLINE       rac2                     STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       rac1                     STABLE
ora.LISTENER_SCAN2.lsnr
      1        ONLINE  ONLINE       rac2                     STABLE
ora.LISTENER_SCAN3.lsnr
      1        ONLINE  ONLINE       rac2                     STABLE
ora.MGMTLSNR
      1        ONLINE  ONLINE       rac2                     169.254.30.20 192.16
                                                             8.10.2,STABLE
ora.asm
      1        ONLINE  ONLINE       rac1                     Started,STABLE
      2        ONLINE  ONLINE       rac2                     Started,STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.cvu
      1        ONLINE  ONLINE       rac2                     STABLE
ora.guggu.db
      1        OFFLINE OFFLINE                               Instance Shutdown,ST
                                                             ABLE
      2        OFFLINE OFFLINE                               Instance Shutdown,ST
                                                             ABLE
ora.india.db
      1        ONLINE  ONLINE       rac1                     Open,HOME=/u01/app/o
                                                             racle/product/18.0.0
                                                             .0/db,STABLE
      2        ONLINE  ONLINE       rac2                     Open,HOME=/u01/app/o
                                                             racle/product/18.0.0
                                                             .0/db,STABLE
ora.mgmtdb
      1        ONLINE  ONLINE       rac2                     Open,STABLE
ora.qosmserver
      1        ONLINE  INTERMEDIATE rac2                     STABLE
ora.rac1.vip
      1        ONLINE  ONLINE       rac1                     STABLE
ora.rac2.vip
      1        ONLINE  ONLINE       rac2                     STABLE
ora.rhpserver
      1        OFFLINE OFFLINE                               STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       rac1                     STABLE
ora.scan2.vip
      1        ONLINE  ONLINE       rac2                     STABLE
ora.scan3.vip
      1        ONLINE  ONLINE       rac2                     STABLE
--------------------------------------------------------------------------------
[grid@rac1 bin]$