Saturday, May 30, 2020

CRS-4700: The Cluster Time Synchronization Service is in Observer mode.



CRS-4700: The Cluster Time Synchronization Service is in Observer mode.


[oracle@racnode1 ~]$ crsctl check ctss

CRS-4700: The Cluster Time Synchronization Service is in Observer mode.
[oracle@racnode1 ~]$
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# systemctl status chronyd
● chronyd.service - NTP client/server
   Loaded: loaded (/usr/lib/systemd/system/chronyd.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:chronyd(8)
           man:chrony.conf(5)
[root@racnode1 ~]#
[root@racnode1 ~]# systemctl stop chronyd
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# systemctl disable chronyd
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# systemctl status chronyd
● chronyd.service - NTP client/server
   Loaded: loaded (/usr/lib/systemd/system/chronyd.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:chronyd(8)
           man:chrony.conf(5)
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# ls -lrt /etc/chrony.conf
-rw-r--r--. 1 root root 1092 Jan 29  2018 /etc/chrony.conf
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# mv /etc/chrony.conf  /etc/chrony.conf_old
[root@racnode1 ~]#
[root@racnode1 ~]# ls -lrt /etc/chrony.conf
ls: cannot access /etc/chrony.conf: No such file or directory
[root@racnode1 ~]#
[root@racnode1 ~]#
[root@racnode1 ~]# ls -lrt /etc/chrony.conf*
-rw-r--r--. 1 root root 1092 Jan 29  2018 /etc/chrony.conf_old
[root@racnode1 ~]#
[root@racnode1 ~]#

[root@racnode1 ~]# cd /u01/app/12.2.0.1/grid/bin/

[root@racnode1 bin]#
[root@racnode1 bin]# ./crsctl stop cluster -all
CRS-2673: Attempting to stop 'ora.crsd' on 'racnode1'
CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on server 'racnode1'
CRS-2673: Attempting to stop 'ora.guggu.db' on 'racnode1'
CRS-2673: Attempting to stop 'ora.qosmserver' on 'racnode1'
CRS-2673: Attempting to stop 'ora.chad' on 'racnode1'
CRS-2677: Stop of 'ora.chad' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.mgmtdb' on 'racnode1'
CRS-2677: Stop of 'ora.guggu.db' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.LISTENER.lsnr' on 'racnode1'
CRS-2673: Attempting to stop 'ora.LISTENER_SCAN1.lsnr' on 'racnode1'
CRS-2673: Attempting to stop 'ora.cvu' on 'racnode1'
CRS-2673: Attempting to stop 'ora.racnode1.vip' on 'racnode1'
CRS-2677: Stop of 'ora.LISTENER_SCAN1.lsnr' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.scan1.vip' on 'racnode1'
CRS-2677: Stop of 'ora.LISTENER.lsnr' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.mgmtdb' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.racnode1.vip' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.MGMTLSNR' on 'racnode1'
CRS-2673: Attempting to stop 'ora.DATA.dg' on 'racnode1'
CRS-2677: Stop of 'ora.DATA.dg' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'racnode1'
CRS-2677: Stop of 'ora.asm' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.ASMNET1LSNR_ASM.lsnr' on 'racnode1'
CRS-2677: Stop of 'ora.cvu' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.scan1.vip' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.qosmserver' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.MGMTLSNR' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.racnode2.vip' on 'racnode1'
CRS-2677: Stop of 'ora.ASMNET1LSNR_ASM.lsnr' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.racnode2.vip' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.ons' on 'racnode1'
CRS-2677: Stop of 'ora.ons' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.net1.network' on 'racnode1'
CRS-2677: Stop of 'ora.net1.network' on 'racnode1' succeeded
CRS-2792: Shutdown of Cluster Ready Services-managed resources on 'racnode1' has completed
CRS-2677: Stop of 'ora.crsd' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'racnode1'
CRS-2673: Attempting to stop 'ora.evmd' on 'racnode1'
CRS-2673: Attempting to stop 'ora.storage' on 'racnode1'
CRS-2677: Stop of 'ora.storage' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'racnode1'
CRS-2677: Stop of 'ora.ctssd' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.evmd' on 'racnode1' succeeded
CRS-2677: Stop of 'ora.asm' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'racnode1'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'racnode1' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'racnode1'
CRS-2677: Stop of 'ora.cssd' on 'racnode1' succeeded
[root@racnode1 bin]#
[root@racnode1 bin]#
[root@racnode1 bin]#



[root@racnode1 bin]#

[root@racnode1 bin]# ./crsctl start cluster -all
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'racnode1'
CRS-2672: Attempting to start 'ora.evmd' on 'racnode1'
CRS-2676: Start of 'ora.cssdmonitor' on 'racnode1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'racnode1'
CRS-2672: Attempting to start 'ora.diskmon' on 'racnode1'
CRS-2676: Start of 'ora.diskmon' on 'racnode1' succeeded
CRS-2676: Start of 'ora.evmd' on 'racnode1' succeeded
CRS-2676: Start of 'ora.cssd' on 'racnode1' succeeded
CRS-2672: Attempting to start 'ora.ctssd' on 'racnode1'
CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'racnode1'
CRS-2676: Start of 'ora.ctssd' on 'racnode1' succeeded
CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'racnode1' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'racnode1'
CRS-2676: Start of 'ora.asm' on 'racnode1' succeeded
CRS-2672: Attempting to start 'ora.storage' on 'racnode1'
CRS-2676: Start of 'ora.storage' on 'racnode1' succeeded
CRS-2672: Attempting to start 'ora.crsd' on 'racnode1'
CRS-2676: Start of 'ora.crsd' on 'racnode1' succeeded
[root@racnode1 bin]#
[root@racnode1 bin]#
[root@racnode1 bin]#


[root@racnode1 bin]# ./crsctl check ctss

CRS-4701: The Cluster Time Synchronization Service is in Active mode.
CRS-4702: Offset (in msec): 0
[root@racnode1 bin]#

[oracle@racnode1 diag]$ cluvfy comp clocksync -verbose


Verifying Clock Synchronization ...

  Node Name                             Status
  ------------------------------------  ------------------------
  racnode1                              passed

  Node Name                             State

  ------------------------------------  ------------------------
  racnode1                              Active

  Node Name     Time Offset               Status

  ------------  ------------------------  ------------------------
  racnode1      0.0                       passed
Verifying Clock Synchronization ...PASSED

Verification of Clock Synchronization across the cluster nodes was successful.


CVU operation performed:      Clock Synchronization across the cluster nodes

Date:                         May 30, 2020 9:51:15 PM
CVU home:                     /u01/app/12.2.0.1/grid/
User:                         oracle
[oracle@racnode1 diag]$


[oracle@racnode2 ~]$ cluvfy comp clocksync -verbose -n racnode1,racnode2

Verifying Clock Synchronization ...
  Node Name                             Status
  ------------------------------------  ------------------------
  racnode2                              passed
  racnode1                              passed

  Node Name                             State
  ------------------------------------  ------------------------
  racnode2                              Active
  racnode1                              Active

  Node Name     Time Offset               Status
  ------------  ------------------------  ------------------------
  racnode2      0.0                       passed
  racnode1      0.0                       passed
Verifying Clock Synchronization ...PASSED

Verification of Clock Synchronization across the cluster nodes was successful.

CVU operation performed:      Clock Synchronization across the cluster nodes
Date:                         May 31, 2020 12:24:56 PM
CVU home:                     /u01/app/12.2.0.1/grid/
User:                         oracle
[oracle@racnode2 ~]$




#./crsctl stop cluster -all
# systemctl stop ntpd
#mv /etc/ntp.conf /etc/ntp.conf.original
#systemctl disable ntpd
#./crsctl start cluster -all






Friday, May 29, 2020

Importing Data into Oracle on Amazon RDS using Oracle Data Pump and a Database Link

Import schema in AWS Oracle RDS
==============================================


Environment Details:
-------------------------------

Schema Name  : TEST

Source (Normal)
-----------------

Database Name : india
Database Version : 19c
Platform : oracle linux


Target (RDS)
--------------

Database Name : orcl
Database version  : 19c
Platform : AWS Oracle RDS




Step 1: Perform the schema export

expdp '/ as sysdba' directory=DATA_PUMP_DIR dumpfile=test_schema.dmp LOGFILE=test_schema.log schemas=test


example 


[oracle@anurag ~]$ expdp  directory=DATA_PUMP_DIR dumpfile=test_schema.dmp LOGFILE=test_schema.log schemas=test

Export: Release 19.0.0.0.0 - Production on Thu May 28 17:21:14 2020
Version 19.3.0.0.0

Copyright (c) 1982, 2019, Oracle and/or its affiliates.  All rights reserved.

Username: / as sysdba
Password:

Connected to: Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Starting "SYS"."SYS_EXPORT_SCHEMA_01":  /******** AS SYSDBA directory=DATA_PUMP_DIR dumpfile=test_schema.dmp LOGFILE=test_schema.log schemas=test
Processing object type SCHEMA_EXPORT/TABLE/TABLE_DATA
Processing object type SCHEMA_EXPORT/TABLE/STATISTICS/TABLE_STATISTICS
Processing object type SCHEMA_EXPORT/STATISTICS/MARKER
Processing object type SCHEMA_EXPORT/USER
Processing object type SCHEMA_EXPORT/SYSTEM_GRANT
Processing object type SCHEMA_EXPORT/ROLE_GRANT
Processing object type SCHEMA_EXPORT/DEFAULT_ROLE
Processing object type SCHEMA_EXPORT/PRE_SCHEMA/PROCACT_SCHEMA
Processing object type SCHEMA_EXPORT/TABLE/TABLE
. . exported "TEST"."TEST"                               5.539 KB       2 rows
Master table "SYS"."SYS_EXPORT_SCHEMA_01" successfully loaded/unloaded
******************************************************************************
Dump file set for SYS.SYS_EXPORT_SCHEMA_01 is:
  /u01/oracle/admin/india/dpdump/test_schema.dmp
Job "SYS"."SYS_EXPORT_SCHEMA_01" successfully completed at Thu May 28 17:21:53 2020 elapsed 0 00:00:29

[oracle@anurag ~]$


We can perform the export using the Data Pump API as well.

login as sysdba


DECLARE
hdnl NUMBER;
BEGIN
hdnl := DBMS_DATAPUMP.OPEN( operation => 'EXPORT', job_mode => 'SCHEMA', job_name=>null);
DBMS_DATAPUMP.ADD_FILE(handle => hdnl, filename => 'sample.dmp', directory => 'DATA_PUMP_DIR', filetype => dbms_datapump.ku$_file_type_dump_file); 
DBMS_DATAPUMP.ADD_FILE( handle => hdnl, filename => 'exp.log', directory => 'DATA_PUMP_DIR', filetype => dbms_datapump.ku$_file_type_log_file);
DBMS_DATAPUMP.METADATA_FILTER(hdnl,'SCHEMA_EXPR','IN (''TEST'')');
DBMS_DATAPUMP.START_JOB(hdnl);
END;
/


example :

SQL>
SQL> DECLARE
  2  hdnl NUMBER;
BEGIN
hdnl := DBMS_DATAPUMP.OPEN( operation => 'EXPORT', job_mode => 'SCHEMA', job_name=>null);
DBMS_DATAPUMP.ADD_FILE(handle => hdnl, filename => 'sample.dmp', directory => 'DATA_PUMP_DIR', filetype => dbms_datapump.ku$_file_type_dump_file);   3
  4    5    6  DBMS_DATAPUMP.ADD_FILE( handle => hdnl, filename => 'exp.log', directory => 'DATA_PUMP_DIR', filetype => dbms_datapump.ku$_file_type_log_file); DBMS_DATAPUMP.METADATA_FILTER(hdnl,'SCHEMA_EXPR','IN (''TEST'')');
DBMS_DATAPUMP.START_JOB(hdnl);
END;
/  7    8    9

PL/SQL procedure successfully completed.

SQL>
SQL>



Step 2: Add the RDS database TNS entry in the TNSNAMES.ora file; also validate connectivity using tnsping.


ORCL =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = aws_rds_db_orcl_endpoint_name)(PORT = 1521))
    (CONNECT_DATA =
      (SERVER = DEDICATED)
      (SERVICE_NAME = orcl)
        (UR = A)
    )
  )
  


Step 3: Create a database link using the AWS RDS database endpoint (create it as public or private per your requirement).


create public database link  to_rds19c  connect to "username" identified by "password"
USING '(DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = aws_rds_db_orcl_endpoint_name)(PORT = 1521)) (CONNECT_DATA = (SERVER = DEDICATED) (SERVICE_NAME = orcl)))';

SQL> create public database link  to_rds19c  connect to "username" identified by "password"
USING '(DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = aws_rds_db_orcl_endpoint_name)(PORT = 1521)) (CONNECT_DATA = (SERVER = DEDICATED) (SERVICE_NAME = orcl)))';
  2
Database link created.

Step 4: Validate that the created database link is working.

SQL>
SQL>
SQL> select * from dual@to_rds19c;

D
-
X

SQL>
SQL>

Step 5: Transfer the dump file to the AWS RDS orcl database over the database link using the API below.

BEGIN
DBMS_FILE_TRANSFER.PUT_FILE(
source_directory_object       => 'DATA_PUMP_DIR',
source_file_name              => 'sample.dmp',
destination_directory_object  => 'DATA_PUMP_DIR',
destination_file_name         => 'sample_copied.dmp',
destination_database          => 'to_rds19c'
);
END;
/




SQL>
SQL> BEGIN
  2  DBMS_FILE_TRANSFER.PUT_FILE(
source_directory_object       => 'DATA_PUMP_DIR',
source_file_name              => 'sample.dmp',
destination_directory_object  => 'DATA_PUMP_DIR',
destination_file_name         => 'sample_copied.dmp',
destination_database          3    => 'to_rds19c'
);
END;
  4    5    6    7    8    9   10  /

PL/SQL procedure successfully completed.

SQL>
SQL>



Step 6: On the AWS RDS database, validate the dump file you transferred using the command below.


select * from table(RDSADMIN.RDS_FILE_UTIL.LISTDIR('DATA_PUMP_DIR')) order by mtime;


Step 7: Create the schema in the target AWS database (orcl) with the required privileges, then perform the import using the Data Pump API.


DECLARE
hdnl NUMBER;
BEGIN
hdnl := DBMS_DATAPUMP.OPEN( operation => 'IMPORT', job_mode => 'SCHEMA', job_name=>null);
DBMS_DATAPUMP.ADD_FILE( handle => hdnl, filename => 'sample_copied.dmp', directory => 'DATA_PUMP_DIR', filetype => dbms_datapump.ku$_file_type_dump_file);
DBMS_DATAPUMP.ADD_FILE( handle => hdnl, filename => 'sample_imp.log', directory => 'DATA_PUMP_DIR', filetype => dbms_datapump.ku$_file_type_log_file);
DBMS_DATAPUMP.METADATA_FILTER(hdnl,'SCHEMA_EXPR','IN (''TEST'')');
DBMS_DATAPUMP.START_JOB(hdnl);
END;
/



Step 8: Monitor the import log using the command below.

SELECT text FROM table(rdsadmin.rds_file_util.read_text_file('DATA_PUMP_DIR','sample_imp.log'));





* You have to enable the database port for access in the AWS security group.


https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-ug.pdf

Tuesday, May 26, 2020

Simulating ORA-XXXX errors manually in database alert log


Generate ORA-XXX error manually in DB alert log 

login as sysdba


SQL> exec dbms_system.ksdwrt(2,'ORA-00600: This is a test error message for monitoring and can be ignored.');

PL/SQL procedure successfully completed.

SQL>

In Alert_india.log


*************************************************************************
2020-05-26 13:45:21.335000 +00:00
ORA-00600: This is a test error message for monitoring and can be ignored.
2020-05-26 13:45:25.125000 +00:00
ORA-00600: This is a test error message for monitoring and can be ignored.

Tuesday, May 12, 2020

Create ASM diskgroup using ASMCA



Create ASM diskgroup using ASMCA



Login as Grid user or ASM owned user and set environment 



Invoke the asmca utility.





It will open a GUI window that helps to create the diskgroup.




After clicking Create, provide the diskgroup name, redundancy, and other settings as per your requirement. In my case I am creating an FRA diskgroup with external redundancy.








FRA disk group has been created 











Sunday, May 10, 2020

Steps to drop oracle database manually

Steps to drop oracle database manually





Prerequisite Steps

Send mail to the application team informing them that you are going to drop the database.
Spool the output of datafiles, redo logs, etc.



Step 1: Connect to the database with sysdba privilege, then execute the queries below and keep the output.

select name from v$datafile;
select name from v$controlfile;
select member from v$logfile;


output 

SQL> select name from v$datafile;

NAME
--------------------------------------------------------------------------------
/u01/app/oracle/oradata/ritvik/data01/system.258.1039853925
/u01/app/oracle/oradata/ritvik/data01/sysaux.257.1039853883
/u01/app/oracle/oradata/ritvik/data01/undotbs1.260.1039853971
/u01/app/oracle/oradata/ritvik/data01/example.266.1039854013
/u01/app/oracle/oradata/ritvik/data01/users.259.1039853969

SQL> select name from v$controlfile;

NAME
--------------------------------------------------------------------------------
/u01/app/oracle/oradata/ritvik/control01.ctl

SQL> select member from v$logfile;

MEMBER
--------------------------------------------------------------------------------
/u01/app/oracle/oradata/ritvik/group_3.264.1039854011
/u01/app/oracle/oradata/ritvik/group_2.263.1039854011
/u01/app/oracle/oradata/ritvik/group_1.262.1039854011

SQL>

Step 2: Shutdown the database

shut immediate;

Step 3: Start the database in Exclusive mode

startup mount exclusive restrict;
startup mount restrict;




[oracle@ip-10.0.0.1 data02]$ sqlplus "/as sysdba"

SQL*Plus: Release 12.1.0.2.0 Production on Sat May 9 16:42:01 2020

Copyright (c) 1982, 2014, Oracle.  All rights reserved.

Connected to an idle instance.

SQL> startup mount restrict;
ORACLE instance started.

Total System Global Area 2466250752 bytes
Fixed Size                  2927384 bytes
Variable Size             671089896 bytes
Database Buffers         1778384896 bytes
Redo Buffers               13848576 bytes
Database mounted.
SQL>


Step 4: Drop the database


SQL> drop database;

Database dropped.

Disconnected from Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit Production
With the Partitioning, OLAP, Advanced Analytics and Real Application Testing options
SQL>
SQL>
SQL> exit
[oracle@ip-10.0.0.1 data02]$
[oracle@ip-10.0.0.1 data02]$
[oracle@ip-10.0.0.1 data02]$ sqlplus "/as sysdba"

SQL*Plus: Release 12.1.0.2.0 Production on Sat May 9 16:43:07 2020

Copyright (c) 1982, 2014, Oracle.  All rights reserved.

Connected to an idle instance.

SQL> exit
Disconnected
[oracle@ip-10.0.0.1 data02]$
[oracle@ip-10.0.0.1 data02]$

Step 5: Post Change Steps


  •  Cross verify that all the associated datafiles, controlfiles and online redo logs are removed
  •  Remove the directories if not required
  •  Remove the entry of the database from /etc/oratab
  •  Modify/drop any of the scripts used for this database 
  •  Remove database details from catalog database
  •  Drop backup, archive log and other script associated with the database