Wednesday, July 13, 2016

oracle impdp hacks


-- Open a FULL-mode network export through database link DB_LINK_NAME.
-- Fixed: a Data Pump job name becomes the name of the job's master table,
-- so it must be a valid identifier -- 'MY JOB NAME' (with spaces) fails.
h1 := dbms_datapump.open(operation   => 'EXPORT',
                         job_mode    => 'FULL',
                         remote_link => 'DB_LINK_NAME',
                         version     => 'LATEST',
                         job_name    => 'MY_JOB_NAME');

DBMS_DATAPUMP for complete schema export



-- Schema-mode Data Pump export of SCOTT via the DBMS_DATAPUMP API.
-- Requires: directory object TEST_DIR (readable/writable by this user) and
-- either ownership of SCOTT or the DATAPUMP_EXP_FULL_DATABASE role.
SET SERVEROUTPUT ON SIZE 1000000
DECLARE
  l_dp_handle  NUMBER;  -- Data Pump job handle returned by OPEN
BEGIN
  -- Open a schema-mode export job on the local database (remote_link NULL).
  l_dp_handle := DBMS_DATAPUMP.open(
    operation   => 'EXPORT',
    job_mode    => 'SCHEMA',
    remote_link => NULL,
    job_name    => 'EMP_EXPORT',
    version     => 'LATEST');

  -- Dump file (default filetype is KU$_FILE_TYPE_DUMP_FILE).
  DBMS_DATAPUMP.add_file(
    handle    => l_dp_handle,
    filename  => 'SCOTT.dmp',
    directory => 'TEST_DIR');

  -- Log file.
  DBMS_DATAPUMP.add_file(
    handle    => l_dp_handle,
    filename  => 'SCOTT.log',
    directory => 'TEST_DIR',
    filetype  => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE);

  -- Restrict the export to the SCOTT schema only.
  DBMS_DATAPUMP.metadata_filter(
    handle => l_dp_handle,
    name   => 'SCHEMA_EXPR',
    value  => '= ''SCOTT''');

  DBMS_DATAPUMP.start_job(l_dp_handle);

  -- Detach; the job keeps running server-side and can be monitored or
  -- re-attached later with DBMS_DATAPUMP.ATTACH('EMP_EXPORT').
  DBMS_DATAPUMP.detach(l_dp_handle);
EXCEPTION
  WHEN OTHERS THEN
    -- Stop the failed job so its master table does not linger, then re-raise.
    DBMS_DATAPUMP.stop_job(l_dp_handle);
    RAISE;
END;
/



-- Schema-mode Data Pump export of HR with a monitoring loop: polls
-- GET_STATUS until the job completes or stops, printing progress percentage
-- and any work-in-progress (WIP) or error log lines as they arrive.
-- NOTE(review): requires directory object DMPDIR and SERVEROUTPUT on.
DECLARE
  ind NUMBER;              -- Loop index
  h1 NUMBER;               -- Data Pump job handle
  percent_done NUMBER;     -- Percentage of job complete
  job_state VARCHAR2(30);  -- To keep track of job state
  le ku$_LogEntry;         -- For WIP and error messages
  js ku$_JobStatus;        -- The job status from get_status
  jd ku$_JobDesc;          -- The job description from get_status (declared but not used below)
  sts ku$_Status;          -- The status object returned by get_status
BEGIN
 
-- Create a (user-named) Data Pump job to do a schema export.
 
  h1 := DBMS_DATAPUMP.OPEN('EXPORT','SCHEMA',NULL,'EXAMPLE1','LATEST');
 
-- Specify a single dump file for the job (using the handle just returned)
-- and a directory object, which must already be defined and accessible
-- to the user running this procedure.
 
  DBMS_DATAPUMP.ADD_FILE(h1,'example1.dmp','DMPDIR');
 
-- A metadata filter is used to specify the schema that will be exported.
 
  DBMS_DATAPUMP.METADATA_FILTER(h1,'SCHEMA_EXPR','IN (''HR'')');
 
-- Start the job. An exception will be generated if something is not set up
-- properly. 
 
  DBMS_DATAPUMP.START_JOB(h1);
 
-- The export job should now be running. In the following loop, the job
-- is monitored until it completes. In the meantime, progress information is
-- displayed.
 
  percent_done := 0;
  job_state := 'UNDEFINED';
  while (job_state != 'COMPLETED') and (job_state != 'STOPPED') loop
    -- Timeout -1 blocks until one of the requested status types changes.
    dbms_datapump.get_status(h1,
           dbms_datapump.ku$_status_job_error +
           dbms_datapump.ku$_status_job_status +
           dbms_datapump.ku$_status_wip,-1,job_state,sts);
    js := sts.job_status;
 
-- If the percentage done changed, display the new value.
 
    if js.percent_done != percent_done
    then
      dbms_output.put_line('*** Job percent done = ' ||
                           to_char(js.percent_done));
      percent_done := js.percent_done;
    end if;
 
-- If any work-in-progress (WIP) or error messages were received for the job,
-- display them. sts.mask reports which status types were actually returned.
 
   if (bitand(sts.mask,dbms_datapump.ku$_status_wip) != 0)
    then
      le := sts.wip;
    else
      if (bitand(sts.mask,dbms_datapump.ku$_status_job_error) != 0)
      then
        le := sts.error;
      else
        le := null;  -- atomically-null collection; the IS NOT NULL test below skips it
      end if;
    end if;
    if le is not null
    then
      -- Walk the (possibly sparse) collection with FIRST/NEXT and print each line.
      ind := le.FIRST;
      while ind is not null loop
        dbms_output.put_line(le(ind).LogText);
        ind := le.NEXT(ind);
      end loop;
    end if;
  end loop;
 
-- Indicate that the job finished and detach from it.
 
  dbms_output.put_line('Job has completed');
  dbms_output.put_line('Final job state = ' || job_state);
  dbms_datapump.detach(h1);
END;
/




Tuesday, June 14, 2016

Oracle utl_file create file sample

SQL> CREATE DIRECTORY dump_dir AS '/u01/app/oracle/dump';

 
SQL>
-- Write a small text file through UTL_FILE into the DUMP_DIR directory object.
DECLARE
  f1 UTL_FILE.FILE_TYPE;
BEGIN
  -- The location argument must name an existing directory OBJECT. Object
  -- names are stored uppercase unless created quoted, and the CREATE
  -- DIRECTORY above makes DUMP_DIR -- the original lowercase 'test_dir'
  -- would raise UTL_FILE.INVALID_PATH.
  f1 := UTL_FILE.FOPEN('DUMP_DIR', 'test_file.txt', 'W');
  UTL_FILE.PUTF(f1, 'Writing');
  UTL_FILE.FCLOSE(f1);
EXCEPTION
  WHEN OTHERS THEN
    -- Do not leak the OS file handle if the open/write fails mid-way.
    IF UTL_FILE.IS_OPEN(f1) THEN
      UTL_FILE.FCLOSE(f1);
    END IF;
    RAISE;
END;
/

Sunday, May 8, 2016

Oracle Tablespace Growth


-- Tablespace growth over the last 7 days, from AWR history: current used
-- size (MB) and average day-over-day increase (MB) per tablespace.
-- NOTE: DBA_HIST_* views require the Diagnostics Pack license.
SELECT b.tsname tablespace_name
     , MAX(b.used_size_mb) cur_used_size_mb
     , ROUND(AVG(b.inc_used_size_mb), 2) avg_increas_mb  -- (sic) alias kept for compatibility
FROM (
  -- Day-over-day delta of used space per tablespace. LAG must be ordered by
  -- the real DATE, not the 'MM-DD-YYYY' string (the original string ordering
  -- misorders rows across a year boundary, e.g. 12-31-2015 vs 01-01-2016).
  SELECT a.days
       , a.tsname
       , a.used_size_mb
       , a.used_size_mb
         - LAG(a.used_size_mb, 1)
             OVER (PARTITION BY a.tsname
                   ORDER BY TO_DATE(a.days, 'MM-DD-YYYY')) inc_used_size_mb
  FROM (
    -- Daily high-water mark of used space (MB) per tablespace, last 7 days.
    SELECT TO_CHAR(sp.begin_interval_time, 'MM-DD-YYYY') days
         , ts.tsname
         , MAX(ROUND((tsu.tablespace_usedsize * dt.block_size) / (1024 * 1024), 2)) used_size_mb
    FROM dba_hist_tbspc_space_usage tsu
    INNER JOIN dba_hist_snapshot sp
       ON sp.snap_id = tsu.snap_id
    INNER JOIN dba_hist_tablespace_stat ts
       ON ts.ts# = tsu.tablespace_id
    INNER JOIN dba_tablespaces dt
       ON dt.tablespace_name = ts.tsname
    WHERE sp.begin_interval_time > SYSDATE - 7
    GROUP BY TO_CHAR(sp.begin_interval_time, 'MM-DD-YYYY'), ts.tsname
    -- (inner ORDER BY removed: it cannot affect the final result)
  ) a
) b
GROUP BY b.tsname
ORDER BY b.tsname
/

Tuesday, February 2, 2016

Configure Oracle Wallet at Client location

SHAREDB =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = consdb121-scan)(PORT = 1521))
    (CONNECT_DATA =
      (SERVER = DEDICATED)
      (SERVICE_NAME = sharedb)
    )
  )


SQL> conn system/oracle@SHAREDB
Connected.

1.Create wallet on client

[oracle@consdb121n1 tmp]$
/u01/app/oracle/product/12.1.0/dbhome1/bin/mkstore -wrl /tmp -create
Oracle Secret Store Tool: Version 12.1.0.2
Copyright (c) 2004, 2014, Oracle and/or its affiliates. All rights
reserved.

password:Oracle123#
re-enter password :Oracle123#

2.Check that the file was created

[oracle@consdb121n1 admin]$ cd /tmp
[oracle@consdb121n1 tmp]$ ls -lrt *wallet*
-rw-rw-rw- 1 oracle oinstall   0  Aug  6 16:06 2015 ewallet.p12.lck
-rw------- 1 oracle oinstall  75  Aug  6 16:06 2015 ewallet.p12
-rw-rw-rw- 1 oracle oinstall   0  Aug  6 16:06 2015 cwallet.sso.lck
-rw------- 1 oracle oinstall 120  Aug  6 16:06 2015 cwallet.sso

3.

[oracle@consdb121n1 tmp]$ mkstore -wrl /tmp -createCredential
SHAREDB_TMP system
Oracle Secret Store Tool: Version 12.1.0.2
Copyright (c) 2004, 2014, Oracle and/or its affiliates. All rights
reserved.

secret/password is missing
secret/password:oracle
re-enter secret/password:oracle
oracle wallet password:Oracle123#
Create credential oracle.security.client.connect_string1

4.Check that the filesize has increased

[oracle@consdb121n1 tmp]$ ls -lrt *wallet*
-rw-rw-rw- 1 oracle oinstall   0  Aug  6 16:06 2015 ewallet.p12.lck
-rw-rw-rw- 1 oracle oinstall   0  Aug  6 16:06 2015 cwallet.sso.lck
-rw------- 1 oracle oinstall 536  Aug  6 16:10 2015 ewallet.p12
-rw------- 1 oracle oinstall 581  Aug  6 16:10 2015 cwallet.sso

5.Copy connectivity related files to /tmp on client

[oracle@consdb121n1 tmp]$ cat  /tmp/sqlnet.ora
SQLNET.WALLET_OVERRIDE = TRUE
SSL_CLIENT_AUTHENTICATION = FALSE
SSL_VERSION = 0

WALLET_LOCATION =
  (SOURCE =
    (METHOD = FILE)
    (METHOD_DATA =
  (DIRECTORY = /tmp)
  )
 )


cp /u01/app/oracle/product/12.1.0/dbhome1/network/admin/tnsnames.ora
/tmp/tnsnames.ora

[oracle@consdb121n1 tmp]$ cat /tmp/tnsnames.ora
# tnsnames.ora Network Configuration File:
/u01/app/oracle/product/12.1.0/dbhome1/network/admin/tnsnames.ora
# Generated by Oracle configuration tools.

SHAREDB_TMP =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = consdb121-scan)(PORT = 1521))
    (CONNECT_DATA =
      (SERVER = DEDICATED)
      (SERVICE_NAME = sharedb)
    )
  )


6.connection

export TNS_ADMIN=/tmp
sqlplus system/oracle@SHAREDB_TMP

7.connection

export TNS_ADMIN=/tmp
sqlplus /@SHAREDB_TMP

SQL*Plus: Release 12.1.0.2.0 Production on Thu Aug 6 16:40:38 2015

Copyright (c) 1982, 2014, Oracle.  All rights reserved.

ÅI³íƒƒOƒCƒ“ŽžŠÔ: –Ø 8ŒŽ  06 2015 16:39:22 +09:00


Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit
Production
With the Partitioning, Real Application Clusters, Automatic Storage
Management, OLAP,
Advanced Analytics and Real Application Testing options
Connected.


8. Check whether commenting out SSL_CLIENT_AUTHENTICATION and SSL_VERSION still works

[oracle@consdb121n1 tmp]$ cat /tmp/sqlnet.ora
SQLNET.WALLET_OVERRIDE = TRUE
#SSL_CLIENT_AUTHENTICATION = FALSE
#SSL_VERSION = 0

WALLET_LOCATION =
  (SOURCE =
    (METHOD = FILE)
    (METHOD_DATA =
  (DIRECTORY = /tmp)
  )
 )

SQL*Plus: Release 12.1.0.2.0 Production on Thu Aug 6 16:42:29 2015

Copyright (c) 1982, 2014, Oracle.  All rights reserved.

ÅI³íƒƒOƒCƒ“ŽžŠÔ: –Ø 8ŒŽ  06 2015 16:40:38 +09:00


Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit
Production
With the Partitioning, Real Application Clusters, Automatic Storage
Management, OLAP,
Advanced Analytics and Real Application Testing options
Connected.

Cascaded Standby Databases in Oracle 12c

Cascaded Standby Databases in Oracle 12c
There are new possibilities for cascading standby databases in Oracle 12c. The main differences between Oracle 12c and the previous releases are:

  1. Real-Time Cascading
  2. Far Sync Standby Database
  3. Data Guard Broker now supports cascaded Standby Database
we can only cascade a Standby Database from a Physical Standby Database.

Real-Time Cascading


It is now possible to forward Redo in Real-Time Mode from the first to the cascaded Standby Database. So the Redo Record is forwarded to the cascaded Standby Database once written into a Standby RedoLog of the first Standby Database.
Non Real-Time Cascading means that the whole Log Sequence is transferred to the terminal Standby Database(s) after a Log Switch on the Primary Database.

First of all setup a Data Guard Environment as usual to the cascading Standby Database. The Log Transport Method should be ‘SYNC’ and Standby RedoLogs must be configured on the cascading Standby Database. Once you created the cascaded Standby Database you can now setup the cascading Log Transport Services. Here are some Hints for correct Setup:

  • Primary, Cascading and Cascaded Standby Database db_unique_name must be present in the dg_config of log_archive_config on all the Databases
  • Setup log_archive_dest_n on the cascading Standby Database to serve the cascaded (terminal) Standby Databases using the Attribute ‘valid_for=(STANDBY_LOGFILES,STANDBY_ROLE)’
  • You can toggle between Real-Time and Non Real-Time Cascading using the Log Transport Method.
ASYNC = Real-Time Cascading
SYNC = Non Real-Time Cascading
  • You can only use log_archive_dest_1 until log_archive_dest_10 for Non Real-Time Cascading Destinations where all log_archive_dest_n’s can be used for Real-Time Cascading on the Cascading Standby Database
  • The Cascading Standby Database can be in any Protection Mode
  • A Cascading Standby Database can serve one or multiple terminal Standby Databases
  • FAL_SERVER on the cascading Standby Database should be set to the Primary or any other Standby Database served by the Primary Database directly
  • FAL_SERVER on the terminal Standby Database should be set to the cascading Standby Database or the Primary Database