Saturday, November 30, 2013

temp_undo_enabled does not work as expected in a non-standby database

SQL> select banner from v$version;

BANNER
--------------------------------------------------------------------------------
Oracle Database 12c Enterprise Edition Release 12.1.0.1.0 - 64bit Production
PL/SQL Release 12.1.0.1.0 - Production
CORE    12.1.0.1.0      Production
TNS for Solaris: Version 12.1.0.1.0 - Production
NLSRTL Version 12.1.0.1.0 - Production

SQL> select file_id,file_name from dba_data_files where file_id=4;

   FILE_ID FILE_NAME
---------- --------------------------------------------------
         4 /u01/app/oracle/oradata/orcl/undotbs01.dbf

SQL> select file_id,file_name from dba_temp_files where file_id=1;

   FILE_ID FILE_NAME
---------- --------------------------------------------------
         1 /u01/app/oracle/oradata/orcl/temp01.dbf

SQL> select sum(blocks) from dba_segments where owner='SH' and segment_name='SALES';

SUM(BLOCKS)
-----------
      16384

Scenario 1:  temp_undo_enabled=false (default behavior)

    
SQL> create global temporary table sales as select * from sh.sales where 1=2;

Table created.

SQL> alter session set temp_undo_enabled=false;

Session altered.

SQL> select used_ublk,start_ubafil from v$transaction where ses_addr=(select saddr from v$session where sid = sys_context('userenv','sid'));

no rows selected

SQL> insert into sales select * from sh.sales;

918843 rows created.

SQL> select used_ublk,start_ubafil from v$transaction where ses_addr=(select saddr from v$session where sid = sys_context('userenv','sid'));

USED_UBLK START_UBAFIL
---------- ------------
       300            4

SQL> delete from sales;

918843 rows deleted.

SQL> select used_ublk,start_ubafil from v$transaction where ses_addr=(select saddr from v$session where sid = sys_context('userenv','sid'));

USED_UBLK START_UBAFIL
---------- ------------
     17398            4
    
SQL> select segtype,sum(blocks) from v$tempseg_usage where session_addr=(select saddr from v$session where sid=userenv('sid')) group by segtype;

SEGTYPE   SUM(BLOCKS)
--------- -----------
DATA             4608

SQL> rollback;

Rollback complete.

SQL> select segtype,sum(blocks) from v$tempseg_usage where session_addr=(select saddr from v$session where sid=userenv('sid')) group by segtype;

SEGTYPE   SUM(BLOCKS)
--------- -----------
DATA              128

SQL> select used_ublk,start_ubafil from v$transaction where ses_addr=(select saddr from v$session where sid = sys_context('userenv','sid'));

no rows selected

Scenario 2:  temp_undo_enabled=true


SQL> drop table sales;

Table dropped.

SQL> create global temporary table sales as select * from sh.sales where 1=2;

Table created.

SQL> alter session set temp_undo_enabled=true;

Session altered.

SQL> select used_ublk,start_ubafil from v$transaction where ses_addr=(select saddr from v$session where sid = sys_context('userenv','sid'));

no rows selected

SQL> select segtype,sum(blocks) from v$tempseg_usage where session_addr=(select saddr from v$session where sid=userenv('sid')) group by segtype;

SEGTYPE   SUM(BLOCKS)
--------- -----------
DATA              128

SQL> insert into sales select * from sh.sales;

918843 rows created.

SQL> select used_ublk,start_ubafil from v$transaction where ses_addr=(select saddr from v$session where sid = sys_context('userenv','sid'));

USED_UBLK START_UBAFIL
---------- ------------
       300            4 <--- undo segment still using datafile 4

SQL> select segtype,sum(blocks) from v$tempseg_usage where session_addr=(select saddr from v$session where sid=userenv('sid')) group by segtype;

SEGTYPE   SUM(BLOCKS)
--------- -----------
DATA             4608

SQL> delete from sales;

918843 rows deleted.

SQL> select used_ublk,start_ubafil from v$transaction where ses_addr=(select saddr from v$session where sid = sys_context('userenv','sid'));

USED_UBLK START_UBAFIL
---------- ------------
     17397            4

SQL> select segtype,sum(blocks) from v$tempseg_usage where session_addr=(select saddr from v$session where sid=userenv('sid')) group by segtype;

SEGTYPE   SUM(BLOCKS)
--------- -----------
DATA             4608

SQL> commit;

Commit complete.
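
Even with temp_undo_enabled=true, the undo for Scenario 2 still went to datafile 4 (the UNDO tablespace): TEMP_UNDO_ENABLED only takes effect when it is set before the session's first operation on a temporary object, and this session had already used the global temporary table with the parameter at false, so the later ALTER SESSION was silently ignored. One way to confirm whether any temporary undo was generated at all is the 12c view V$TEMPUNDOSTAT (a sketch; the view should show little or no activity if temporary undo was never written):

select * from v$tempundostat order by begin_time;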

Scenario 3:  Read-only database

SQL> startup mount
ORACLE instance started.

Total System Global Area 1570009088 bytes
Fixed Size                  2361496 bytes
Variable Size            1040189288 bytes
Database Buffers          520093696 bytes
Redo Buffers                7364608 bytes
Database mounted.
SQL> alter database open read only;

Database altered.

SQL> conn /
Connected.
SQL> insert into sales select * from sh.sales;
insert into sales select * from sh.sales
            *
ERROR at line 1:
ORA-16000: database or pluggable database open for read-only access

Thursday, November 21, 2013

SQL Server Transaction Log Mining - ApexSQL Log

[Three screenshots from ApexSQL Log]

Setup Database Mirroring between SQL Server 2005 and SQL Server 2012

The two instances are SQL Server 2005 SP4 (HOST_A, WIN2003SQL2005) and SQL Server 2012 SP1 (HOST_B, WIN-HN1Q39OK9JF); steps that apply to both instances show the command for each in turn.
USE [master]
GO
CREATE LOGIN [donghua] WITH PASSWORD=N'p_ssw0rd', DEFAULT_DATABASE=[master], DEFAULT_LANGUAGE=[us_english], CHECK_EXPIRATION=OFF, CHECK_POLICY=OFF
GO
EXEC master..sp_addsrvrolemember @loginame = N'donghua', @rolename = N'sysadmin'
GO
USE [master]
GO
CREATE LOGIN [donghua] WITH PASSWORD=N'p_ssw0rd', DEFAULT_DATABASE=[master], DEFAULT_LANGUAGE=[us_english], CHECK_EXPIRATION=OFF, CHECK_POLICY=OFF
GO
ALTER SERVER ROLE [sysadmin] ADD MEMBER [donghua]
GO
create database TestDb;
go
use TestDb;
go
create table t(c1 varchar(20));
insert into t values(getdate());
go
select * from t;
Backup & Restoration
Set Full recovery model:
USE master;
GO
ALTER DATABASE TestDB
SET RECOVERY FULL;
GO
Backup Database:
BACKUP DATABASE TestDb
    TO DISK = 'C:\Backup\TestDb_Full.bak'
    WITH FORMAT
GO
Copy the backup to the SQL 2012 server (HOST_B).
Restore Database:
RESTORE DATABASE TestDb
   FROM DISK='C:\backup\TestDb_Full.bak'
   WITH NORECOVERY,
      MOVE 'TestDb' TO
         'D:\SQL2012\MSSQL11.PROD\MSSQL\DATA\TestDb.mdf',
      MOVE 'TestDb_log' TO
         'D:\SQL2012\MSSQL11.PROD\MSSQL\DATA\TestDb_log.LDF';
GO
Backup Log:
BACKUP LOG TestDb
    TO DISK = 'C:\Backup\TestDb_Log.bak'
    WITH FORMAT
GO
Copy the backup to the SQL 2012 server (HOST_B).
RESTORE LOG TestDb
    FROM DISK = 'C:\Backup\TestDb_Log.bak'
    WITH NORECOVERY
GO
The two server instances run in nontrusted Windows domains, so certificate-based authentication is required.
On the master database, create the database master key, if needed.
USE master
GO
CREATE MASTER KEY ENCRYPTION BY PASSWORD = '!QAZXSW#$%e';
GO
USE master
GO
CREATE MASTER KEY ENCRYPTION BY PASSWORD = '093j45&*he';
GO
Make a certificate for this server instance.
USE master;
GO
CREATE CERTIFICATE SQL2005_cert
   WITH SUBJECT = 'SQL2005 certificate',
   START_DATE = '20121031',
   EXPIRY_DATE = '20151031';
GO
USE master;
GO
CREATE CERTIFICATE SQL2012_cert
   WITH SUBJECT = 'SQL2012 certificate',
   START_DATE = '20121031',
   EXPIRY_DATE = '20151031';
GO
Create a mirroring endpoint for the server instance using the certificate.
CREATE ENDPOINT Endpoint_Mirroring
   STATE = STARTED
   AS TCP (
      LISTENER_PORT=7024
      , LISTENER_IP = ALL
   )
   FOR DATABASE_MIRRORING (
      AUTHENTICATION = CERTIFICATE SQL2005_cert
      , ENCRYPTION = REQUIRED ALGORITHM AES
      , ROLE = ALL
   );
GO
CREATE ENDPOINT Endpoint_Mirroring
   STATE = STARTED
   AS TCP (
      LISTENER_PORT=7024
      , LISTENER_IP = ALL
   )
   FOR DATABASE_MIRRORING (
      AUTHENTICATION = CERTIFICATE SQL2012_cert
      , ENCRYPTION = REQUIRED ALGORITHM AES
      , ROLE = ALL
   );
GO
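
A quick check from either instance (not part of the original walkthrough) confirms the endpoint exists and is started:

SELECT name, role_desc, state_desc, is_encryption_enabled
FROM sys.database_mirroring_endpoints;
GO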
Back up each server's certificate and copy it to the other system (HOST_A's certificate to HOST_B, and vice versa).
BACKUP CERTIFICATE SQL2005_cert TO FILE = 'C:\Backup\SQL2005_cert.cer';
GO
BACKUP CERTIFICATE SQL2012_cert TO FILE = 'C:\Backup\SQL2012_cert.cer';
GO
Setup inbound connection
Create a login on HOST_A for HOST_B (and on HOST_B for HOST_A).
USE master;
CREATE LOGIN sql2012_login WITH PASSWORD = '1Sample_Strong_Password!!#';
GO
USE master;
CREATE LOGIN sql2005_login WITH PASSWORD = '2Sample_Strong_Password!!#';
GO
Create a user for that login.
CREATE USER sql2012_user FOR LOGIN sql2012_login;
GO
CREATE USER sql2005_user FOR LOGIN sql2005_login;
GO
Associate the certificate with the user.
CREATE CERTIFICATE SQL2012_cert
   AUTHORIZATION sql2012_user
   FROM FILE = 'C:\backup\SQL2012_cert.cer'
GO
CREATE CERTIFICATE SQL2005_cert
   AUTHORIZATION sql2005_user
   FROM FILE = 'C:\backup\SQL2005_cert.cer'
GO
Grant CONNECT permission on the login for the remote mirroring endpoint.
GRANT CONNECT ON ENDPOINT::Endpoint_Mirroring TO [sql2012_login];
GO
GRANT CONNECT ON ENDPOINT::Endpoint_Mirroring TO [sql2005_login];
GO
Setup mirror database
On the mirror server instance on HOST_B, set the server instance on HOST_A as the partner (making it the initial principal server instance).
--At HOST_B, set server instance on HOST_A as partner (principal server):
ALTER DATABASE TestDb
    SET PARTNER = 'TCP://WIN2003SQL2005:7024';
GO
On the principal server instance on HOST_A, set the server instance on HOST_B as the partner (making it the initial mirror server instance).
--At HOST_A, set server instance on HOST_B as partner (mirror server).
ALTER DATABASE TestDb
    SET PARTNER = 'TCP://WIN-HN1Q39OK9JF:7024';
GO
This example assumes that the session will be running in high-performance mode. To configure this session for high-performance mode, on the principal server instance (on HOST_A), set transaction safety to OFF.
--Change to high-performance mode by turning off transaction safety.
ALTER DATABASE TestDb
    set partner SAFETY off
GO
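
Once both partners are set, the mirroring state can be checked from either instance (a quick verification, not part of the original walkthrough; it assumes the TestDb name used above):

SELECT DB_NAME(database_id) AS database_name,
       mirroring_role_desc,
       mirroring_state_desc,
       mirroring_safety_level_desc
FROM sys.database_mirroring
WHERE database_id = DB_ID('TestDb');
GO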
Create database snapshot (attempted on the SQL 2012 mirror):
CREATE DATABASE TestDb_SS ON
( NAME = TestDb, FILENAME = 'D:\SQL2012\MSSQL11.PROD\MSSQL\DATA\TestDb.ss' )
AS SNAPSHOT OF TestDb;
GO

Msg 946, Level 14, State 1, Line 1
Cannot open database 'TestDb_SS' version 611. Upgrade the database to the latest version.
Msg 1823, Level 16, State 7, Line 1
A database snapshot cannot be created because it failed to start.
Perform insert in SQL2005:
use testdb
go
insert into t values(getdate());
go
Manual Failover:
USE master
go
ALTER DATABASE TestDb SET PARTNER FAILOVER
go

Msg 1477, Level 16, State 1, Line 1
The database mirroring safety level must be FULL to manually failover database "TestDb".  Set safety level to FULL and retry.
Force Service in a Database Mirroring Session:
use master;
go
ALTER DATABASE TestDb SET PARTNER FORCE_SERVICE_ALLOW_DATA_LOSS
go
Msg 1455, Level 16, State 106, Line 1
The database mirroring service cannot be forced for database "TestDb" because the database is not in the correct state to become the principal database.
Use the restore command to open the DB:
Use master
go
ALTER DATABASE TestDb SET PARTNER OFF
go
RESTORE DATABASE TestDb WITH RECOVERY
go

-- Data is in-sync

How to install HP Vertica

[root@vertica6 ~]# yum install ./vertica-6.1.2-0.x86_64.RHEL5.rpm
Loaded plugins: fastestmirror, refresh-packagekit, security
Loading mirror speeds from cached hostfile
* base: mirrors.hostemo.com
* extras: mirror.usonyx.net
* updates: mirror.usonyx.net
base                 | 3.7 kB     00:00
base/primary_db      | 4.4 MB     00:05
extras               | 3.4 kB     00:00
extras/primary_db    |  18 kB     00:00
updates              | 3.4 kB     00:00
updates/primary_db   | 3.3 MB     00:01
Setting up Install Process
Examining ./vertica-6.1.2-0.x86_64.RHEL5.rpm: vertica-6.1.2-0.x86_64
Marking ./vertica-6.1.2-0.x86_64.RHEL5.rpm to be installed
Resolving Dependencies
--> Running transaction check
---> Package vertica.x86_64 0:6.1.2-0 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

==================================================================================
Package            Arch       Version      Repository                       Size
==================================================================================
Installing:
vertica            x86_64     6.1.2-0      /vertica-6.1.2-0.x86_64.RHEL5   364 M

Transaction Summary
==================================================================================
Install       1 Package(s)

Total size: 364 M
Installed size: 364 M
Is this ok [y/N]: y
Downloading Packages:
Running rpm_check_debug
Running Transaction Test
Transaction Test Succeeded
Running Transaction
  Installing : vertica-6.1.2-0.x86_64    1/1

Vertica Analytic Database V6.1.2-0 successfully installed on host vertica6.dbaglobe.com

----------------------------------------------------------------------------------
Important Information
----------------------------------------------------------------------------------
If you are upgrading from a previous version, you must backup your database before
continuing with this install.  After restarting your database, you will be unable 
to revert to a previous version of the software.
----------------------------------------------------------------------------------

To download the latest Vertica documentation in zip or tar format please visit the
myvertica web site.

To complete installation and configuration of the cluster,
run: /opt/vertica/sbin/install_vertica


  Verifying  : vertica-6.1.2-0.x86_64    1/1

Installed:
  vertica.x86_64 0:6.1.2-0                                                                                                                                                                                                                                                   

Complete!


[root@vertica6 ~]# /opt/vertica/sbin/install_vertica

Vertica Analytic Database 6.1.2-0 Installation Tool
Upgrading admintools meta data format..
scanning /opt/vertica/config/users
WARNING: No hostname list provided.  Installing to localhost
Starting installation tasks...
Getting system information for cluster (this may take a while)....
backing up admintools.conf on 127.0.0.1
Default shell on nodes:
127.0.0.1 /bin/bash
Info: the package 'pstack' is useful during troubleshooting.  Vertica recommends this package is installed.
Checking/fixing OS parameters.....

Setting vm.min_free_kbytes to 67584 ...
Info! The maximum number of open file descriptors is less than 65536
Setting open filehandle limit to 65536 ...
Info! The session setting of pam_limits.so is not set in /etc/pam.d/su
Setting session of pam_limits.so in /etc/pam.d/su ...
Info! Parameter vm.max_map_count is less than 253068
Setting vm.max_map_count to 253068 ...
Detected cpufreq module loaded on 127.0.0.1
CPU frequency scaling is enabled.  This may adversely affect the performance of your database.
Vertica recommends that cpu frequency scaling be turned off or set to 'performance'


Creating/Checking Vertica DBA group

Creating/Checking Vertica DBA user

Password for dbadmin:
Setting /etc/security/limits.conf nproc  to 3954 ...
Installing/Repairing SSH keys for dbadmin

Creating Vertica Data Directory...

Updating spread configuration...
Verifying spread configuration on whole cluster.
Creating node node0001 definition for host 127.0.0.1
... Done
Error Monitor  0 errors  3 warnings
Installation completed with warnings. 
Installation complete.

To create a database:
1. Logout and login as dbadmin.**
2. Run /opt/vertica/bin/adminTools as dbadmin
3. Select Create Database from the Configuration Menu

** The installation modified the group privileges for dbadmin.
   If you used sudo to install vertica as dbadmin, you will
   need to logout and login again before the privileges are applied.

[dbadmin@vertica6 ~]$ ls -l /opt/vertica/config/licensing/vertica_community_edition.license.key
-rw-rw-rw-. 1 root root 586 May 28 10:45 /opt/vertica/config/licensing/vertica_community_edition.license.key


[root@vertica6 ~]# su - dbadmin
[dbadmin@vertica6 ~]$
[dbadmin@vertica6 ~]$
[dbadmin@vertica6 ~]$ /opt/vertica/bin/adminTools
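
Once the vmart database has been created through adminTools (the interactive menu steps are not captured here), a quick sanity check from vsql is to display the license the installer registered; DISPLAY_LICENSE() is assumed to be available as in other Vertica 6.x releases:

SELECT DISPLAY_LICENSE();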

[dbadmin@vertica6 VMart_Schema]$ vsql -d vmart -U dbadmin -w p_ssw0rd -f vmart_define_schema.sql
[dbadmin@vertica6 VMart_Schema]$ vsql -d vmart -U dbadmin -w p_ssw0rd -f vmart_load_data.sql
Rows Loaded
-------------
        1826
(1 row)

Rows Loaded
-------------
         500
(1 row)

Rows Loaded
-------------
          50
(1 row)

Rows Loaded
-------------
         100
(1 row)

Rows Loaded
-------------
          50
(1 row)

Rows Loaded
-------------
       50000
(1 row)

Rows Loaded
-------------
       10000
(1 row)

Rows Loaded
-------------
         100
(1 row)

Rows Loaded
-------------
         100
(1 row)

Rows Loaded
-------------
        1000
(1 row)

Rows Loaded
-------------
         200
(1 row)

Rows Loaded
-------------
     5000000
(1 row)

Rows Loaded
-------------
      300000
(1 row)

Rows Loaded
-------------
     5000000
(1 row)

Rows Loaded
-------------
      300000
(1 row)
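
With the loads finished, the fact tables can be spot-checked from vsql. Assuming the standard VMart schema names, the 5,000,000-row loads above correspond to the store and online_sales fact tables:

SELECT COUNT(*) FROM store.store_sales_fact;
SELECT COUNT(*) FROM online_sales.online_sales_fact;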

Partition, compress and drop column (ORA-39726)

Testing 1: With basic compression, dropping a column fails with "ORA-39726: unsupported add/drop column operation on compressed tables". After moving all the partitions to NOCOMPRESS, the issue is resolved.


SQL> drop table t1_compression purge;

Table dropped.

SQL> create table t1_compression (id number, name varchar2(30))
  2  partition by range (id) interval (10000) (partition p1 values less than (10000) ) compress;

Table created.

SQL> insert into t1_compression select object_id,object_name from dba_objects;

91483 rows created.

SQL> commit;

Commit complete.

SQL> col partition_name for a20
SQL> select partition_name,compression,compress_for from user_tab_partitions
  2  where table_name='T1_COMPRESSION';

PARTITION_NAME       COMPRESS COMPRESS_FOR
-------------------- -------- ------------------------------
P1                   ENABLED  BASIC
SYS_P658             ENABLED  BASIC
SYS_P662             ENABLED  BASIC
SYS_P663             ENABLED  BASIC
SYS_P665             ENABLED  BASIC
SYS_P657             ENABLED  BASIC
SYS_P661             ENABLED  BASIC
SYS_P664             ENABLED  BASIC
SYS_P660             ENABLED  BASIC
SYS_P659             ENABLED  BASIC

10 rows selected.
SQL> alter table t1_compression drop column name;
alter table t1_compression drop column name
                                       *
ERROR at line 1:
ORA-39726: unsupported add/drop column operation on compressed tables

SQL> alter table t1_compression set unused column name;

Table altered.

SQL> desc t1_compression;
 Name                                      Null?    Type
 ----------------------------------------- -------- ----------------------------
 ID                                                 NUMBER


SQL> alter table t1_compression drop unused columns;
alter table t1_compression drop unused columns
*
ERROR at line 1:
ORA-39726: unsupported add/drop column operation on compressed tables


SQL>  alter table t1_compression move partition p1 nocompress;

Table altered.

SQL>  alter table t1_compression move partition SYS_P657  nocompress;

Table altered.
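
Only two of the ten partition moves are shown above; the statements for the remaining partitions can be generated from the dictionary (a sketch):

select 'alter table t1_compression move partition ' || partition_name || ' nocompress;'
  from user_tab_partitions
 where table_name = 'T1_COMPRESSION';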

SQL> select partition_name,compression,compress_for from user_tab_partitions
  2  where table_name='T1_COMPRESSION';

PARTITION_NAME       COMPRESS COMPRESS_FOR
-------------------- -------- ------------------------------
P1                   DISABLED
SYS_P658             DISABLED
SYS_P662             DISABLED
SYS_P663             DISABLED
SYS_P665             DISABLED
SYS_P657             DISABLED
SYS_P661             DISABLED
SYS_P664             DISABLED
SYS_P660             DISABLED
SYS_P659             DISABLED

10 rows selected.

SQL> alter table t1_compression drop unused columns;

Table altered.

Testing 2: With compression for OLTP, the column can be dropped without error.

SQL> drop table t1_compression purge;

Table dropped.

SQL> create table t1_compression (id number, name varchar2(30))
  2  partition by range (id) interval (10000) (partition p1 values less than (10000) ) compress for oltp;

Table created.

SQL> insert into t1_compression select object_id,object_name from dba_objects;

91483 rows created.

SQL> commit;

Commit complete.

SQL> select partition_name,compression,compress_for from user_tab_partitions
  2  where table_name='T1_COMPRESSION';

PARTITION_NAME       COMPRESS COMPRESS_FOR
-------------------- -------- ------------------------------
SYS_P673             ENABLED  ADVANCED
SYS_P671             ENABLED  ADVANCED
SYS_P670             ENABLED  ADVANCED
SYS_P667             ENABLED  ADVANCED
SYS_P674             ENABLED  ADVANCED
SYS_P672             ENABLED  ADVANCED
SYS_P669             ENABLED  ADVANCED
SYS_P666             ENABLED  ADVANCED
P1                   ENABLED  ADVANCED
SYS_P668             ENABLED  ADVANCED

10 rows selected.

SQL> alter table t1_compression drop column name;

Table altered.

Sunday, November 17, 2013

Change your monitoring script to use v$sqlstats

"The column definitions for columns in V$SQLSTATS are identical to those in the V$SQL and V$SQLAREA views. However, the V$SQLSTATS view differs from V$SQL and V$SQLAREA in that it is faster, more scalable, and has a greater data retention (the statistics may still appear in this view, even after the cursor has been aged out of the shared pool)." 

Saturday, November 16, 2013

Oracle 12c New Features: truncate table ... cascade


SQL> create table departments as select * from hr.departments;

Table created.

SQL> create table employees as select * from hr.employees;

Table created.

SQL> alter table departments add constraint departments_pk primary key (department_id);

Table altered.

SQL> alter table employees add constraint emp_dept_fk foreign key (department_id) references departments(department_id);

Table altered.

SQL> truncate table departments;
truncate table departments
               *
ERROR at line 1:
ORA-02266: unique/primary keys in table referenced by enabled foreign keys


SQL> truncate table departments cascade;
truncate table departments cascade
               *
ERROR at line 1:
ORA-14705: unique or primary keys referenced by enabled foreign keys in table
"DONGHUA"."EMPLOYEES"

SQL> alter table employees drop constraint emp_dept_fk;

Table altered.

SQL> alter table employees add constraint emp_dept_fk foreign key (department_id) references departments(department_id) on delete cascade;

Table altered.

SQL> truncate table departments;
truncate table departments
               *
ERROR at line 1:
ORA-02266: unique/primary keys in table referenced by enabled foreign keys


SQL> truncate table departments cascade;

Table truncated.

SQL> select count(*) from departments;

  COUNT(*)
----------
         0

SQL>  select count(*) from employees;

  COUNT(*)
----------
         0

SQL> drop table departments purge;
drop table departments purge
           *
ERROR at line 1:
ORA-02449: unique/primary keys in table referenced by foreign keys


SQL> drop table employees purge;

Table dropped.

SQL> drop table departments purge;

Table dropped.

SQL>

12c New Features: Session Scope Sequence

SQL> create sequence s1 start with 1 increment by 1;

Sequence created.

SQL> create sequence s2 start with 1 increment by 1 session;

Sequence created.

SQL> select sequence_name,session_flag,keep_value from user_sequences
  2  where sequence_name in ('S1','S2');

SEQUENCE_NAME        S K
-------------------- - -
S1                   N N
S2                   Y N


SQL> select s1.nextval from dual;

   NEXTVAL
----------
         1

SQL>  select s2.nextval from dual;

   NEXTVAL
----------
         1

SQL> conn donghua/donghua
Connected.
SQL> select s1.nextval from dual;

   NEXTVAL
----------
         2

SQL> select s2.nextval from dual;

   NEXTVAL
----------
         1

SQL>
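
Session sequences are designed to be used with global temporary tables, where a session-private counter avoids sharing state across sessions. A minimal sketch of that pairing, using the S2 sequence created above (the table and column names are made up for illustration):

create global temporary table gtt_orders (
  id   number,
  val  varchar2(10)
) on commit preserve rows;

insert into gtt_orders values (s2.nextval, 'demo');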