rem
stringlengths
0
477k
add
stringlengths
0
313k
context
stringlengths
6
599k
if( deadlockTrace ) { currentTime = System.currentTimeMillis(); timeoutLockTable = factory.makeVirtualLockTable(); } } } } if (willQuitWait) { if (SanityManager.DEBUG) { if (lockDebug != null)
if (lockDebug != null) { String type = ((deadlockData != null) ? "deadlock:" : "timeout:"); SanityManager.DEBUG_PRINT( type, "wait on lockitem caused " + type + lockDebug); } } if (deadlockData == null)
public Lock lockObject(Object compatabilitySpace, Lockable ref, Object qualifier, int timeout, Latch latch) throws StandardException { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("memoryLeakTrace")) { if (size() > 1000) System.out.println("memoryLeakTrace:LockSet: " + size()); } } Control gc; LockControl control; Lock lockItem; String lockDebug = null; synchronized (this) { gc = getControl(ref); if (gc == null) { // object is not locked, can be granted Lock gl = new Lock(compatabilitySpace, ref, qualifier); gl.grant(); put(ref, gl); return gl; } control = gc.getLockControl(); if (control != gc) { put(ref, control); } if (SanityManager.DEBUG) { SanityManager.ASSERT(ref.equals(control.getLockable())); // ASSERT item is in the list if (getControl(control.getLockable()) != control) { SanityManager.THROWASSERT( "lockObject mismatched lock items " + getControl(control.getLockable()) + " " + control); } } lockItem = control.addLock(this, compatabilitySpace, qualifier); if (lockItem.getCount() != 0) { return lockItem; } if (timeout == C_LockFactory.NO_WAIT) { // remove all trace of lock control.giveUpWait(lockItem, this); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. 
lockDebug = DiagnosticUtil.toDiagString(lockItem) + "\nCould not grant lock with zero timeout, here's the table" + this.toDebugString(); } } return null; } // this is where we need to release the latch if (latch != null) unlock(latch, 1); } // synchronized block boolean deadlockWait = false; int actualTimeout; if (timeout == C_LockFactory.WAIT_FOREVER) { // always check for deadlocks as there should not be any deadlockWait = true; if ((actualTimeout = deadlockTimeout) == C_LockFactory.WAIT_FOREVER) actualTimeout = Property.DEADLOCK_TIMEOUT_DEFAULT * 1000; } else { if (timeout == C_LockFactory.TIMED_WAIT) timeout = actualTimeout = waitTimeout; else actualTimeout = timeout; // five posible cases // i) timeout -1, deadlock -1 -> just wait forever, no deadlock check // ii) timeout >= 0, deadlock -1 -> just wait for timeout, no deadlock check // iii) timeout -1, deadlock >= 0 -> wait for deadlock, then deadlock check, then infinite timeout // iv) timeout >=0, deadlock < timeout -> wait for deadlock, then deadlock check, then wait for (timeout - deadlock) // v) timeout >=0, deadlock >= timeout -> just wait for timeout, no deadlock check if (deadlockTimeout >= 0) { if (actualTimeout < 0) { // infinite wait but perform a deadlock check first deadlockWait = true; actualTimeout = deadlockTimeout; } else if (deadlockTimeout < actualTimeout) { // deadlock wait followed by a timeout wait deadlockWait = true; actualTimeout = deadlockTimeout; // leave timeout as the remaining time timeout -= deadlockTimeout; } } } ActiveLock waitingLock = (ActiveLock) lockItem; lockItem = null; if (deadlockTrace) { // we want to keep a stack trace of this thread just before it goes // into wait state, no need to synchronized because Hashtable.put // is synchronized and the new throwable is local to this thread. 
lockTraces.put(waitingLock, new Throwable()); } int earlyWakeupCount = 0; long startWaitTime = 0; try {forever: for (;;) { byte wakeupReason = waitingLock.waitForGrant(actualTimeout); ActiveLock nextWaitingLock = null; Object[] deadlockData = null; try { boolean willQuitWait; Enumeration timeoutLockTable = null; long currentTime = 0; synchronized (this) { if (control.isGrantable(control.firstWaiter() == waitingLock, compatabilitySpace, qualifier)) { // Yes, we are granted, put us on the granted queue. control.grant(waitingLock); // Remove from the waiting queue & get next waiter nextWaitingLock = control.getNextWaiter(waitingLock, true, this); // this is where we need to re-obtain the latch, it's // safe to call this lockObject() which will get the // synchronization we already hold, because java allows // nested synchronization and it will be released // automatically if we have to wait if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } return waitingLock; } waitingLock.clearPotentiallyGranted(); // try again later willQuitWait = (wakeupReason != Constants.WAITING_LOCK_GRANT); StandardException deadlockException = null; if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) && deadlockWait) || (wakeupReason == Constants.WAITING_LOCK_DEADLOCK)) { // check for a deadlock, even if we were woken up to because // we were selected as a victim we still check because the situation // may have changed. deadlockData = Deadlock.look(factory, this, control, waitingLock, wakeupReason); if (deadlockData == null) { // we don't have a deadlock deadlockWait = false; actualTimeout = timeout; startWaitTime = 0; willQuitWait = false; } else { willQuitWait = true; } } nextWaitingLock = control.getNextWaiter(waitingLock, willQuitWait, this); // If we were not woken by another then we have // timed out. 
Either deadlock out or timeout if (willQuitWait) { // Even if we deadlocked trying to get the lock, still // reget the latch so that client's need not know // latch was released. if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(waitingLock) + "\nGot deadlock/timeout, here's the table" + this.toDebugString(); } } if(!deadlockWait) { if( deadlockTrace ) { // want a copy of the LockTable and the time currentTime = System.currentTimeMillis(); timeoutLockTable = factory.makeVirtualLockTable(); } } } } // synchronized block // need to do this outside of the synchronized block as the // message text building (timeouts and deadlocks) may involve // getting locks to look up table names from identifiers. if (willQuitWait) { if (SanityManager.DEBUG) { if (lockDebug != null) { String type = (deadlockWait ? "deadlock:" : "timeout:"); SanityManager.DEBUG_PRINT( type, "wait on lockitem caused " + type + lockDebug); } } if(!deadlockWait) { if( deadlockTrace ) { //Turn ON derby.locks.deadlockTrace to build the lockTable. 
throw Timeout.buildException(waitingLock, timeoutLockTable, currentTime); } else { StandardException se = StandardException.newException( SQLState.LOCK_TIMEOUT); throw se; } } if (deadlockData != null) { throw Deadlock.buildException(factory, deadlockData); } } } finally { if (nextWaitingLock != null) { nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT); nextWaitingLock = null; } } if (actualTimeout != C_LockFactory.WAIT_FOREVER) { if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT) earlyWakeupCount++; if (earlyWakeupCount > 5) { long now = System.currentTimeMillis(); if (startWaitTime != 0) { long sleepTime = now - startWaitTime; actualTimeout -= sleepTime; } startWaitTime = now; } } } // for(;;) } finally { if (deadlockTrace) { // I am out of the wait state, either I got my lock or I am the // one who is going to detect the deadlock, don't need the // stack trace anymore. lockTraces.remove(waitingLock); } } }
String type = (deadlockWait ? "deadlock:" : "timeout:"); SanityManager.DEBUG_PRINT( type, "wait on lockitem caused " + type + lockDebug); }
if (deadlockTrace) { throw Timeout.buildException( waitingLock, timeoutLockTable, currentTime); } else { StandardException se = StandardException.newException( SQLState.LOCK_TIMEOUT); throw se; } } else { throw Deadlock.buildException( factory, deadlockData); }
public Lock lockObject(Object compatabilitySpace, Lockable ref, Object qualifier, int timeout, Latch latch) throws StandardException { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("memoryLeakTrace")) { if (size() > 1000) System.out.println("memoryLeakTrace:LockSet: " + size()); } } Control gc; LockControl control; Lock lockItem; String lockDebug = null; synchronized (this) { gc = getControl(ref); if (gc == null) { // object is not locked, can be granted Lock gl = new Lock(compatabilitySpace, ref, qualifier); gl.grant(); put(ref, gl); return gl; } control = gc.getLockControl(); if (control != gc) { put(ref, control); } if (SanityManager.DEBUG) { SanityManager.ASSERT(ref.equals(control.getLockable())); // ASSERT item is in the list if (getControl(control.getLockable()) != control) { SanityManager.THROWASSERT( "lockObject mismatched lock items " + getControl(control.getLockable()) + " " + control); } } lockItem = control.addLock(this, compatabilitySpace, qualifier); if (lockItem.getCount() != 0) { return lockItem; } if (timeout == C_LockFactory.NO_WAIT) { // remove all trace of lock control.giveUpWait(lockItem, this); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. 
lockDebug = DiagnosticUtil.toDiagString(lockItem) + "\nCould not grant lock with zero timeout, here's the table" + this.toDebugString(); } } return null; } // this is where we need to release the latch if (latch != null) unlock(latch, 1); } // synchronized block boolean deadlockWait = false; int actualTimeout; if (timeout == C_LockFactory.WAIT_FOREVER) { // always check for deadlocks as there should not be any deadlockWait = true; if ((actualTimeout = deadlockTimeout) == C_LockFactory.WAIT_FOREVER) actualTimeout = Property.DEADLOCK_TIMEOUT_DEFAULT * 1000; } else { if (timeout == C_LockFactory.TIMED_WAIT) timeout = actualTimeout = waitTimeout; else actualTimeout = timeout; // five posible cases // i) timeout -1, deadlock -1 -> just wait forever, no deadlock check // ii) timeout >= 0, deadlock -1 -> just wait for timeout, no deadlock check // iii) timeout -1, deadlock >= 0 -> wait for deadlock, then deadlock check, then infinite timeout // iv) timeout >=0, deadlock < timeout -> wait for deadlock, then deadlock check, then wait for (timeout - deadlock) // v) timeout >=0, deadlock >= timeout -> just wait for timeout, no deadlock check if (deadlockTimeout >= 0) { if (actualTimeout < 0) { // infinite wait but perform a deadlock check first deadlockWait = true; actualTimeout = deadlockTimeout; } else if (deadlockTimeout < actualTimeout) { // deadlock wait followed by a timeout wait deadlockWait = true; actualTimeout = deadlockTimeout; // leave timeout as the remaining time timeout -= deadlockTimeout; } } } ActiveLock waitingLock = (ActiveLock) lockItem; lockItem = null; if (deadlockTrace) { // we want to keep a stack trace of this thread just before it goes // into wait state, no need to synchronized because Hashtable.put // is synchronized and the new throwable is local to this thread. 
lockTraces.put(waitingLock, new Throwable()); } int earlyWakeupCount = 0; long startWaitTime = 0; try {forever: for (;;) { byte wakeupReason = waitingLock.waitForGrant(actualTimeout); ActiveLock nextWaitingLock = null; Object[] deadlockData = null; try { boolean willQuitWait; Enumeration timeoutLockTable = null; long currentTime = 0; synchronized (this) { if (control.isGrantable(control.firstWaiter() == waitingLock, compatabilitySpace, qualifier)) { // Yes, we are granted, put us on the granted queue. control.grant(waitingLock); // Remove from the waiting queue & get next waiter nextWaitingLock = control.getNextWaiter(waitingLock, true, this); // this is where we need to re-obtain the latch, it's // safe to call this lockObject() which will get the // synchronization we already hold, because java allows // nested synchronization and it will be released // automatically if we have to wait if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } return waitingLock; } waitingLock.clearPotentiallyGranted(); // try again later willQuitWait = (wakeupReason != Constants.WAITING_LOCK_GRANT); StandardException deadlockException = null; if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) && deadlockWait) || (wakeupReason == Constants.WAITING_LOCK_DEADLOCK)) { // check for a deadlock, even if we were woken up to because // we were selected as a victim we still check because the situation // may have changed. deadlockData = Deadlock.look(factory, this, control, waitingLock, wakeupReason); if (deadlockData == null) { // we don't have a deadlock deadlockWait = false; actualTimeout = timeout; startWaitTime = 0; willQuitWait = false; } else { willQuitWait = true; } } nextWaitingLock = control.getNextWaiter(waitingLock, willQuitWait, this); // If we were not woken by another then we have // timed out. 
Either deadlock out or timeout if (willQuitWait) { // Even if we deadlocked trying to get the lock, still // reget the latch so that client's need not know // latch was released. if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(waitingLock) + "\nGot deadlock/timeout, here's the table" + this.toDebugString(); } } if(!deadlockWait) { if( deadlockTrace ) { // want a copy of the LockTable and the time currentTime = System.currentTimeMillis(); timeoutLockTable = factory.makeVirtualLockTable(); } } } } // synchronized block // need to do this outside of the synchronized block as the // message text building (timeouts and deadlocks) may involve // getting locks to look up table names from identifiers. if (willQuitWait) { if (SanityManager.DEBUG) { if (lockDebug != null) { String type = (deadlockWait ? "deadlock:" : "timeout:"); SanityManager.DEBUG_PRINT( type, "wait on lockitem caused " + type + lockDebug); } } if(!deadlockWait) { if( deadlockTrace ) { //Turn ON derby.locks.deadlockTrace to build the lockTable. 
throw Timeout.buildException(waitingLock, timeoutLockTable, currentTime); } else { StandardException se = StandardException.newException( SQLState.LOCK_TIMEOUT); throw se; } } if (deadlockData != null) { throw Deadlock.buildException(factory, deadlockData); } } } finally { if (nextWaitingLock != null) { nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT); nextWaitingLock = null; } } if (actualTimeout != C_LockFactory.WAIT_FOREVER) { if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT) earlyWakeupCount++; if (earlyWakeupCount > 5) { long now = System.currentTimeMillis(); if (startWaitTime != 0) { long sleepTime = now - startWaitTime; actualTimeout -= sleepTime; } startWaitTime = now; } } } // for(;;) } finally { if (deadlockTrace) { // I am out of the wait state, either I got my lock or I am the // one who is going to detect the deadlock, don't need the // stack trace anymore. lockTraces.remove(waitingLock); } } }
if(!deadlockWait) { if( deadlockTrace ) { throw Timeout.buildException(waitingLock, timeoutLockTable, currentTime); } else { StandardException se = StandardException.newException( SQLState.LOCK_TIMEOUT); throw se; }
} finally { if (nextWaitingLock != null) { nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT); nextWaitingLock = null;
public Lock lockObject(Object compatabilitySpace, Lockable ref, Object qualifier, int timeout, Latch latch) throws StandardException { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("memoryLeakTrace")) { if (size() > 1000) System.out.println("memoryLeakTrace:LockSet: " + size()); } } Control gc; LockControl control; Lock lockItem; String lockDebug = null; synchronized (this) { gc = getControl(ref); if (gc == null) { // object is not locked, can be granted Lock gl = new Lock(compatabilitySpace, ref, qualifier); gl.grant(); put(ref, gl); return gl; } control = gc.getLockControl(); if (control != gc) { put(ref, control); } if (SanityManager.DEBUG) { SanityManager.ASSERT(ref.equals(control.getLockable())); // ASSERT item is in the list if (getControl(control.getLockable()) != control) { SanityManager.THROWASSERT( "lockObject mismatched lock items " + getControl(control.getLockable()) + " " + control); } } lockItem = control.addLock(this, compatabilitySpace, qualifier); if (lockItem.getCount() != 0) { return lockItem; } if (timeout == C_LockFactory.NO_WAIT) { // remove all trace of lock control.giveUpWait(lockItem, this); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. 
lockDebug = DiagnosticUtil.toDiagString(lockItem) + "\nCould not grant lock with zero timeout, here's the table" + this.toDebugString(); } } return null; } // this is where we need to release the latch if (latch != null) unlock(latch, 1); } // synchronized block boolean deadlockWait = false; int actualTimeout; if (timeout == C_LockFactory.WAIT_FOREVER) { // always check for deadlocks as there should not be any deadlockWait = true; if ((actualTimeout = deadlockTimeout) == C_LockFactory.WAIT_FOREVER) actualTimeout = Property.DEADLOCK_TIMEOUT_DEFAULT * 1000; } else { if (timeout == C_LockFactory.TIMED_WAIT) timeout = actualTimeout = waitTimeout; else actualTimeout = timeout; // five posible cases // i) timeout -1, deadlock -1 -> just wait forever, no deadlock check // ii) timeout >= 0, deadlock -1 -> just wait for timeout, no deadlock check // iii) timeout -1, deadlock >= 0 -> wait for deadlock, then deadlock check, then infinite timeout // iv) timeout >=0, deadlock < timeout -> wait for deadlock, then deadlock check, then wait for (timeout - deadlock) // v) timeout >=0, deadlock >= timeout -> just wait for timeout, no deadlock check if (deadlockTimeout >= 0) { if (actualTimeout < 0) { // infinite wait but perform a deadlock check first deadlockWait = true; actualTimeout = deadlockTimeout; } else if (deadlockTimeout < actualTimeout) { // deadlock wait followed by a timeout wait deadlockWait = true; actualTimeout = deadlockTimeout; // leave timeout as the remaining time timeout -= deadlockTimeout; } } } ActiveLock waitingLock = (ActiveLock) lockItem; lockItem = null; if (deadlockTrace) { // we want to keep a stack trace of this thread just before it goes // into wait state, no need to synchronized because Hashtable.put // is synchronized and the new throwable is local to this thread. 
lockTraces.put(waitingLock, new Throwable()); } int earlyWakeupCount = 0; long startWaitTime = 0; try {forever: for (;;) { byte wakeupReason = waitingLock.waitForGrant(actualTimeout); ActiveLock nextWaitingLock = null; Object[] deadlockData = null; try { boolean willQuitWait; Enumeration timeoutLockTable = null; long currentTime = 0; synchronized (this) { if (control.isGrantable(control.firstWaiter() == waitingLock, compatabilitySpace, qualifier)) { // Yes, we are granted, put us on the granted queue. control.grant(waitingLock); // Remove from the waiting queue & get next waiter nextWaitingLock = control.getNextWaiter(waitingLock, true, this); // this is where we need to re-obtain the latch, it's // safe to call this lockObject() which will get the // synchronization we already hold, because java allows // nested synchronization and it will be released // automatically if we have to wait if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } return waitingLock; } waitingLock.clearPotentiallyGranted(); // try again later willQuitWait = (wakeupReason != Constants.WAITING_LOCK_GRANT); StandardException deadlockException = null; if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) && deadlockWait) || (wakeupReason == Constants.WAITING_LOCK_DEADLOCK)) { // check for a deadlock, even if we were woken up to because // we were selected as a victim we still check because the situation // may have changed. deadlockData = Deadlock.look(factory, this, control, waitingLock, wakeupReason); if (deadlockData == null) { // we don't have a deadlock deadlockWait = false; actualTimeout = timeout; startWaitTime = 0; willQuitWait = false; } else { willQuitWait = true; } } nextWaitingLock = control.getNextWaiter(waitingLock, willQuitWait, this); // If we were not woken by another then we have // timed out. 
Either deadlock out or timeout if (willQuitWait) { // Even if we deadlocked trying to get the lock, still // reget the latch so that client's need not know // latch was released. if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(waitingLock) + "\nGot deadlock/timeout, here's the table" + this.toDebugString(); } } if(!deadlockWait) { if( deadlockTrace ) { // want a copy of the LockTable and the time currentTime = System.currentTimeMillis(); timeoutLockTable = factory.makeVirtualLockTable(); } } } } // synchronized block // need to do this outside of the synchronized block as the // message text building (timeouts and deadlocks) may involve // getting locks to look up table names from identifiers. if (willQuitWait) { if (SanityManager.DEBUG) { if (lockDebug != null) { String type = (deadlockWait ? "deadlock:" : "timeout:"); SanityManager.DEBUG_PRINT( type, "wait on lockitem caused " + type + lockDebug); } } if(!deadlockWait) { if( deadlockTrace ) { //Turn ON derby.locks.deadlockTrace to build the lockTable. 
throw Timeout.buildException(waitingLock, timeoutLockTable, currentTime); } else { StandardException se = StandardException.newException( SQLState.LOCK_TIMEOUT); throw se; } } if (deadlockData != null) { throw Deadlock.buildException(factory, deadlockData); } } } finally { if (nextWaitingLock != null) { nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT); nextWaitingLock = null; } } if (actualTimeout != C_LockFactory.WAIT_FOREVER) { if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT) earlyWakeupCount++; if (earlyWakeupCount > 5) { long now = System.currentTimeMillis(); if (startWaitTime != 0) { long sleepTime = now - startWaitTime; actualTimeout -= sleepTime; } startWaitTime = now; } } } // for(;;) } finally { if (deadlockTrace) { // I am out of the wait state, either I got my lock or I am the // one who is going to detect the deadlock, don't need the // stack trace anymore. lockTraces.remove(waitingLock); } } }
if (deadlockData != null) { throw Deadlock.buildException(factory, deadlockData); }
public Lock lockObject(Object compatabilitySpace, Lockable ref, Object qualifier, int timeout, Latch latch) throws StandardException { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("memoryLeakTrace")) { if (size() > 1000) System.out.println("memoryLeakTrace:LockSet: " + size()); } } Control gc; LockControl control; Lock lockItem; String lockDebug = null; synchronized (this) { gc = getControl(ref); if (gc == null) { // object is not locked, can be granted Lock gl = new Lock(compatabilitySpace, ref, qualifier); gl.grant(); put(ref, gl); return gl; } control = gc.getLockControl(); if (control != gc) { put(ref, control); } if (SanityManager.DEBUG) { SanityManager.ASSERT(ref.equals(control.getLockable())); // ASSERT item is in the list if (getControl(control.getLockable()) != control) { SanityManager.THROWASSERT( "lockObject mismatched lock items " + getControl(control.getLockable()) + " " + control); } } lockItem = control.addLock(this, compatabilitySpace, qualifier); if (lockItem.getCount() != 0) { return lockItem; } if (timeout == C_LockFactory.NO_WAIT) { // remove all trace of lock control.giveUpWait(lockItem, this); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. 
lockDebug = DiagnosticUtil.toDiagString(lockItem) + "\nCould not grant lock with zero timeout, here's the table" + this.toDebugString(); } } return null; } // this is where we need to release the latch if (latch != null) unlock(latch, 1); } // synchronized block boolean deadlockWait = false; int actualTimeout; if (timeout == C_LockFactory.WAIT_FOREVER) { // always check for deadlocks as there should not be any deadlockWait = true; if ((actualTimeout = deadlockTimeout) == C_LockFactory.WAIT_FOREVER) actualTimeout = Property.DEADLOCK_TIMEOUT_DEFAULT * 1000; } else { if (timeout == C_LockFactory.TIMED_WAIT) timeout = actualTimeout = waitTimeout; else actualTimeout = timeout; // five posible cases // i) timeout -1, deadlock -1 -> just wait forever, no deadlock check // ii) timeout >= 0, deadlock -1 -> just wait for timeout, no deadlock check // iii) timeout -1, deadlock >= 0 -> wait for deadlock, then deadlock check, then infinite timeout // iv) timeout >=0, deadlock < timeout -> wait for deadlock, then deadlock check, then wait for (timeout - deadlock) // v) timeout >=0, deadlock >= timeout -> just wait for timeout, no deadlock check if (deadlockTimeout >= 0) { if (actualTimeout < 0) { // infinite wait but perform a deadlock check first deadlockWait = true; actualTimeout = deadlockTimeout; } else if (deadlockTimeout < actualTimeout) { // deadlock wait followed by a timeout wait deadlockWait = true; actualTimeout = deadlockTimeout; // leave timeout as the remaining time timeout -= deadlockTimeout; } } } ActiveLock waitingLock = (ActiveLock) lockItem; lockItem = null; if (deadlockTrace) { // we want to keep a stack trace of this thread just before it goes // into wait state, no need to synchronized because Hashtable.put // is synchronized and the new throwable is local to this thread. 
lockTraces.put(waitingLock, new Throwable()); } int earlyWakeupCount = 0; long startWaitTime = 0; try {forever: for (;;) { byte wakeupReason = waitingLock.waitForGrant(actualTimeout); ActiveLock nextWaitingLock = null; Object[] deadlockData = null; try { boolean willQuitWait; Enumeration timeoutLockTable = null; long currentTime = 0; synchronized (this) { if (control.isGrantable(control.firstWaiter() == waitingLock, compatabilitySpace, qualifier)) { // Yes, we are granted, put us on the granted queue. control.grant(waitingLock); // Remove from the waiting queue & get next waiter nextWaitingLock = control.getNextWaiter(waitingLock, true, this); // this is where we need to re-obtain the latch, it's // safe to call this lockObject() which will get the // synchronization we already hold, because java allows // nested synchronization and it will be released // automatically if we have to wait if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } return waitingLock; } waitingLock.clearPotentiallyGranted(); // try again later willQuitWait = (wakeupReason != Constants.WAITING_LOCK_GRANT); StandardException deadlockException = null; if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) && deadlockWait) || (wakeupReason == Constants.WAITING_LOCK_DEADLOCK)) { // check for a deadlock, even if we were woken up to because // we were selected as a victim we still check because the situation // may have changed. deadlockData = Deadlock.look(factory, this, control, waitingLock, wakeupReason); if (deadlockData == null) { // we don't have a deadlock deadlockWait = false; actualTimeout = timeout; startWaitTime = 0; willQuitWait = false; } else { willQuitWait = true; } } nextWaitingLock = control.getNextWaiter(waitingLock, willQuitWait, this); // If we were not woken by another then we have // timed out. 
Either deadlock out or timeout if (willQuitWait) { // Even if we deadlocked trying to get the lock, still // reget the latch so that client's need not know // latch was released. if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(waitingLock) + "\nGot deadlock/timeout, here's the table" + this.toDebugString(); } } if(!deadlockWait) { if( deadlockTrace ) { // want a copy of the LockTable and the time currentTime = System.currentTimeMillis(); timeoutLockTable = factory.makeVirtualLockTable(); } } } } // synchronized block // need to do this outside of the synchronized block as the // message text building (timeouts and deadlocks) may involve // getting locks to look up table names from identifiers. if (willQuitWait) { if (SanityManager.DEBUG) { if (lockDebug != null) { String type = (deadlockWait ? "deadlock:" : "timeout:"); SanityManager.DEBUG_PRINT( type, "wait on lockitem caused " + type + lockDebug); } } if(!deadlockWait) { if( deadlockTrace ) { //Turn ON derby.locks.deadlockTrace to build the lockTable. 
throw Timeout.buildException(waitingLock, timeoutLockTable, currentTime); } else { StandardException se = StandardException.newException( SQLState.LOCK_TIMEOUT); throw se; } } if (deadlockData != null) { throw Deadlock.buildException(factory, deadlockData); } } } finally { if (nextWaitingLock != null) { nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT); nextWaitingLock = null; } } if (actualTimeout != C_LockFactory.WAIT_FOREVER) { if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT) earlyWakeupCount++; if (earlyWakeupCount > 5) { long now = System.currentTimeMillis(); if (startWaitTime != 0) { long sleepTime = now - startWaitTime; actualTimeout -= sleepTime; } startWaitTime = now; } } } // for(;;) } finally { if (deadlockTrace) { // I am out of the wait state, either I got my lock or I am the // one who is going to detect the deadlock, don't need the // stack trace anymore. lockTraces.remove(waitingLock); } } }
} finally { if (nextWaitingLock != null) { nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT); nextWaitingLock = null; } } if (actualTimeout != C_LockFactory.WAIT_FOREVER) { if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT) earlyWakeupCount++; if (earlyWakeupCount > 5) { long now = System.currentTimeMillis(); if (startWaitTime != 0) { long sleepTime = now - startWaitTime; actualTimeout -= sleepTime; } startWaitTime = now; } } } } finally { if (deadlockTrace) { lockTraces.remove(waitingLock); }
if (actualTimeout != C_LockFactory.WAIT_FOREVER) { if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT) earlyWakeupCount++; if (earlyWakeupCount > 5) { long now = System.currentTimeMillis(); if (startWaitTime != 0) { long sleepTime = now - startWaitTime; actualTimeout -= sleepTime; } startWaitTime = now; } } } } finally { if (deadlockTrace) { lockTraces.remove(waitingLock); } }
public Lock lockObject(Object compatabilitySpace, Lockable ref, Object qualifier, int timeout, Latch latch) throws StandardException { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("memoryLeakTrace")) { if (size() > 1000) System.out.println("memoryLeakTrace:LockSet: " + size()); } } Control gc; LockControl control; Lock lockItem; String lockDebug = null; synchronized (this) { gc = getControl(ref); if (gc == null) { // object is not locked, can be granted Lock gl = new Lock(compatabilitySpace, ref, qualifier); gl.grant(); put(ref, gl); return gl; } control = gc.getLockControl(); if (control != gc) { put(ref, control); } if (SanityManager.DEBUG) { SanityManager.ASSERT(ref.equals(control.getLockable())); // ASSERT item is in the list if (getControl(control.getLockable()) != control) { SanityManager.THROWASSERT( "lockObject mismatched lock items " + getControl(control.getLockable()) + " " + control); } } lockItem = control.addLock(this, compatabilitySpace, qualifier); if (lockItem.getCount() != 0) { return lockItem; } if (timeout == C_LockFactory.NO_WAIT) { // remove all trace of lock control.giveUpWait(lockItem, this); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. 
lockDebug = DiagnosticUtil.toDiagString(lockItem) + "\nCould not grant lock with zero timeout, here's the table" + this.toDebugString(); } } return null; } // this is where we need to release the latch if (latch != null) unlock(latch, 1); } // synchronized block boolean deadlockWait = false; int actualTimeout; if (timeout == C_LockFactory.WAIT_FOREVER) { // always check for deadlocks as there should not be any deadlockWait = true; if ((actualTimeout = deadlockTimeout) == C_LockFactory.WAIT_FOREVER) actualTimeout = Property.DEADLOCK_TIMEOUT_DEFAULT * 1000; } else { if (timeout == C_LockFactory.TIMED_WAIT) timeout = actualTimeout = waitTimeout; else actualTimeout = timeout; // five posible cases // i) timeout -1, deadlock -1 -> just wait forever, no deadlock check // ii) timeout >= 0, deadlock -1 -> just wait for timeout, no deadlock check // iii) timeout -1, deadlock >= 0 -> wait for deadlock, then deadlock check, then infinite timeout // iv) timeout >=0, deadlock < timeout -> wait for deadlock, then deadlock check, then wait for (timeout - deadlock) // v) timeout >=0, deadlock >= timeout -> just wait for timeout, no deadlock check if (deadlockTimeout >= 0) { if (actualTimeout < 0) { // infinite wait but perform a deadlock check first deadlockWait = true; actualTimeout = deadlockTimeout; } else if (deadlockTimeout < actualTimeout) { // deadlock wait followed by a timeout wait deadlockWait = true; actualTimeout = deadlockTimeout; // leave timeout as the remaining time timeout -= deadlockTimeout; } } } ActiveLock waitingLock = (ActiveLock) lockItem; lockItem = null; if (deadlockTrace) { // we want to keep a stack trace of this thread just before it goes // into wait state, no need to synchronized because Hashtable.put // is synchronized and the new throwable is local to this thread. 
lockTraces.put(waitingLock, new Throwable()); } int earlyWakeupCount = 0; long startWaitTime = 0; try {forever: for (;;) { byte wakeupReason = waitingLock.waitForGrant(actualTimeout); ActiveLock nextWaitingLock = null; Object[] deadlockData = null; try { boolean willQuitWait; Enumeration timeoutLockTable = null; long currentTime = 0; synchronized (this) { if (control.isGrantable(control.firstWaiter() == waitingLock, compatabilitySpace, qualifier)) { // Yes, we are granted, put us on the granted queue. control.grant(waitingLock); // Remove from the waiting queue & get next waiter nextWaitingLock = control.getNextWaiter(waitingLock, true, this); // this is where we need to re-obtain the latch, it's // safe to call this lockObject() which will get the // synchronization we already hold, because java allows // nested synchronization and it will be released // automatically if we have to wait if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } return waitingLock; } waitingLock.clearPotentiallyGranted(); // try again later willQuitWait = (wakeupReason != Constants.WAITING_LOCK_GRANT); StandardException deadlockException = null; if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) && deadlockWait) || (wakeupReason == Constants.WAITING_LOCK_DEADLOCK)) { // check for a deadlock, even if we were woken up to because // we were selected as a victim we still check because the situation // may have changed. deadlockData = Deadlock.look(factory, this, control, waitingLock, wakeupReason); if (deadlockData == null) { // we don't have a deadlock deadlockWait = false; actualTimeout = timeout; startWaitTime = 0; willQuitWait = false; } else { willQuitWait = true; } } nextWaitingLock = control.getNextWaiter(waitingLock, willQuitWait, this); // If we were not woken by another then we have // timed out. 
Either deadlock out or timeout if (willQuitWait) { // Even if we deadlocked trying to get the lock, still // reget the latch so that client's need not know // latch was released. if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(waitingLock) + "\nGot deadlock/timeout, here's the table" + this.toDebugString(); } } if(!deadlockWait) { if( deadlockTrace ) { // want a copy of the LockTable and the time currentTime = System.currentTimeMillis(); timeoutLockTable = factory.makeVirtualLockTable(); } } } } // synchronized block // need to do this outside of the synchronized block as the // message text building (timeouts and deadlocks) may involve // getting locks to look up table names from identifiers. if (willQuitWait) { if (SanityManager.DEBUG) { if (lockDebug != null) { String type = (deadlockWait ? "deadlock:" : "timeout:"); SanityManager.DEBUG_PRINT( type, "wait on lockitem caused " + type + lockDebug); } } if(!deadlockWait) { if( deadlockTrace ) { //Turn ON derby.locks.deadlockTrace to build the lockTable. 
throw Timeout.buildException(waitingLock, timeoutLockTable, currentTime); } else { StandardException se = StandardException.newException( SQLState.LOCK_TIMEOUT); throw se; } } if (deadlockData != null) { throw Deadlock.buildException(factory, deadlockData); } } } finally { if (nextWaitingLock != null) { nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT); nextWaitingLock = null; } } if (actualTimeout != C_LockFactory.WAIT_FOREVER) { if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT) earlyWakeupCount++; if (earlyWakeupCount > 5) { long now = System.currentTimeMillis(); if (startWaitTime != 0) { long sleepTime = now - startWaitTime; actualTimeout -= sleepTime; } startWaitTime = now; } } } // for(;;) } finally { if (deadlockTrace) { // I am out of the wait state, either I got my lock or I am the // one who is going to detect the deadlock, don't need the // stack trace anymore. lockTraces.remove(waitingLock); } } }
}
public Lock lockObject(Object compatabilitySpace, Lockable ref, Object qualifier, int timeout, Latch latch) throws StandardException { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("memoryLeakTrace")) { if (size() > 1000) System.out.println("memoryLeakTrace:LockSet: " + size()); } } Control gc; LockControl control; Lock lockItem; String lockDebug = null; synchronized (this) { gc = getControl(ref); if (gc == null) { // object is not locked, can be granted Lock gl = new Lock(compatabilitySpace, ref, qualifier); gl.grant(); put(ref, gl); return gl; } control = gc.getLockControl(); if (control != gc) { put(ref, control); } if (SanityManager.DEBUG) { SanityManager.ASSERT(ref.equals(control.getLockable())); // ASSERT item is in the list if (getControl(control.getLockable()) != control) { SanityManager.THROWASSERT( "lockObject mismatched lock items " + getControl(control.getLockable()) + " " + control); } } lockItem = control.addLock(this, compatabilitySpace, qualifier); if (lockItem.getCount() != 0) { return lockItem; } if (timeout == C_LockFactory.NO_WAIT) { // remove all trace of lock control.giveUpWait(lockItem, this); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. 
lockDebug = DiagnosticUtil.toDiagString(lockItem) + "\nCould not grant lock with zero timeout, here's the table" + this.toDebugString(); } } return null; } // this is where we need to release the latch if (latch != null) unlock(latch, 1); } // synchronized block boolean deadlockWait = false; int actualTimeout; if (timeout == C_LockFactory.WAIT_FOREVER) { // always check for deadlocks as there should not be any deadlockWait = true; if ((actualTimeout = deadlockTimeout) == C_LockFactory.WAIT_FOREVER) actualTimeout = Property.DEADLOCK_TIMEOUT_DEFAULT * 1000; } else { if (timeout == C_LockFactory.TIMED_WAIT) timeout = actualTimeout = waitTimeout; else actualTimeout = timeout; // five posible cases // i) timeout -1, deadlock -1 -> just wait forever, no deadlock check // ii) timeout >= 0, deadlock -1 -> just wait for timeout, no deadlock check // iii) timeout -1, deadlock >= 0 -> wait for deadlock, then deadlock check, then infinite timeout // iv) timeout >=0, deadlock < timeout -> wait for deadlock, then deadlock check, then wait for (timeout - deadlock) // v) timeout >=0, deadlock >= timeout -> just wait for timeout, no deadlock check if (deadlockTimeout >= 0) { if (actualTimeout < 0) { // infinite wait but perform a deadlock check first deadlockWait = true; actualTimeout = deadlockTimeout; } else if (deadlockTimeout < actualTimeout) { // deadlock wait followed by a timeout wait deadlockWait = true; actualTimeout = deadlockTimeout; // leave timeout as the remaining time timeout -= deadlockTimeout; } } } ActiveLock waitingLock = (ActiveLock) lockItem; lockItem = null; if (deadlockTrace) { // we want to keep a stack trace of this thread just before it goes // into wait state, no need to synchronized because Hashtable.put // is synchronized and the new throwable is local to this thread. 
lockTraces.put(waitingLock, new Throwable()); } int earlyWakeupCount = 0; long startWaitTime = 0; try {forever: for (;;) { byte wakeupReason = waitingLock.waitForGrant(actualTimeout); ActiveLock nextWaitingLock = null; Object[] deadlockData = null; try { boolean willQuitWait; Enumeration timeoutLockTable = null; long currentTime = 0; synchronized (this) { if (control.isGrantable(control.firstWaiter() == waitingLock, compatabilitySpace, qualifier)) { // Yes, we are granted, put us on the granted queue. control.grant(waitingLock); // Remove from the waiting queue & get next waiter nextWaitingLock = control.getNextWaiter(waitingLock, true, this); // this is where we need to re-obtain the latch, it's // safe to call this lockObject() which will get the // synchronization we already hold, because java allows // nested synchronization and it will be released // automatically if we have to wait if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } return waitingLock; } waitingLock.clearPotentiallyGranted(); // try again later willQuitWait = (wakeupReason != Constants.WAITING_LOCK_GRANT); StandardException deadlockException = null; if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) && deadlockWait) || (wakeupReason == Constants.WAITING_LOCK_DEADLOCK)) { // check for a deadlock, even if we were woken up to because // we were selected as a victim we still check because the situation // may have changed. deadlockData = Deadlock.look(factory, this, control, waitingLock, wakeupReason); if (deadlockData == null) { // we don't have a deadlock deadlockWait = false; actualTimeout = timeout; startWaitTime = 0; willQuitWait = false; } else { willQuitWait = true; } } nextWaitingLock = control.getNextWaiter(waitingLock, willQuitWait, this); // If we were not woken by another then we have // timed out. 
Either deadlock out or timeout if (willQuitWait) { // Even if we deadlocked trying to get the lock, still // reget the latch so that client's need not know // latch was released. if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(waitingLock) + "\nGot deadlock/timeout, here's the table" + this.toDebugString(); } } if(!deadlockWait) { if( deadlockTrace ) { // want a copy of the LockTable and the time currentTime = System.currentTimeMillis(); timeoutLockTable = factory.makeVirtualLockTable(); } } } } // synchronized block // need to do this outside of the synchronized block as the // message text building (timeouts and deadlocks) may involve // getting locks to look up table names from identifiers. if (willQuitWait) { if (SanityManager.DEBUG) { if (lockDebug != null) { String type = (deadlockWait ? "deadlock:" : "timeout:"); SanityManager.DEBUG_PRINT( type, "wait on lockitem caused " + type + lockDebug); } } if(!deadlockWait) { if( deadlockTrace ) { //Turn ON derby.locks.deadlockTrace to build the lockTable. 
throw Timeout.buildException(waitingLock, timeoutLockTable, currentTime); } else { StandardException se = StandardException.newException( SQLState.LOCK_TIMEOUT); throw se; } } if (deadlockData != null) { throw Deadlock.buildException(factory, deadlockData); } } } finally { if (nextWaitingLock != null) { nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT); nextWaitingLock = null; } } if (actualTimeout != C_LockFactory.WAIT_FOREVER) { if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT) earlyWakeupCount++; if (earlyWakeupCount > 5) { long now = System.currentTimeMillis(); if (startWaitTime != 0) { long sleepTime = now - startWaitTime; actualTimeout -= sleepTime; } startWaitTime = now; } } } // for(;;) } finally { if (deadlockTrace) { // I am out of the wait state, either I got my lock or I am the // one who is going to detect the deadlock, don't need the // stack trace anymore. lockTraces.remove(waitingLock); } } }
}
void setDeadlockTrace(boolean val) { // set this without synchronization deadlockTrace = val; if (val && lockTraces == null) lockTraces = new Hashtable(); else if (!val && lockTraces != null) { lockTraces = null; } }
fileData.seek(pageOffset);
backupRaf.seek(pageOffset);
private void writeToBackup(RandomAccessFile backupRaf, long pageNumber, byte[] pageData) throws StandardException { byte[] dataToWrite; try { if (pageNumber == FIRST_ALLOC_PAGE_NUMBER) { // write header into the alloc page array regardless of dirty // bit because the alloc page have zero'ed out the borrowed // space writeHeader(pageData); if (SanityManager.DEBUG) { if (FormatIdUtil.readFormatIdInteger(pageData) != AllocPage.FORMAT_NUMBER) SanityManager.THROWASSERT( "expect " + AllocPage.FORMAT_NUMBER + "got " + FormatIdUtil.readFormatIdInteger(pageData)); } } if (dataFactory.databaseEncrypted() && pageNumber != FIRST_ALLOC_PAGE_NUMBER) { // We cannot encrypt the page in place because pageData is // still being accessed as clear text. The encryption // buffer is shared by all who access this container and can // only be used within the synchronized block. dataToWrite = encryptPage(pageData, pageSize); } else { dataToWrite = pageData; } long pageOffset = pageNumber * pageSize; fileData.seek(pageOffset); backupRaf.write(dataToWrite, 0, pageSize); } catch (IOException ioe) { // page cannot be written to the backup throw StandardException.newException( SQLState.FILE_WRITE_PAGE_EXCEPTION, ioe, getIdentity() + ":" + pageNumber); } }
xar.start(xid, XAResource.TMJOIN); printState("1st global(existing)", conn); xar.end(xid, XAResource.TMSUCCESS); printState("local", conn);
/**
 * Verify that isolation-level changes made via SQL are tracked correctly
 * across local and global (XA) transaction boundaries.
 *
 * @param s    statement on conn used to issue the isolation changes
 * @param xar  XAResource controlling the global transactions
 * @param conn the connection whose isolation state is printed
 */
private void testSetIsolationWithStatement(Statement s, XAResource xar,
                                           Connection conn)
    throws SQLException, XAException {
    Xid xid;
    System.out.println("Issue SQL to change isolation in local transaction");
    s.executeUpdate("set current isolation = RR");
    printState("SQL to change isolation in local", conn);

    xid = new cdsXid(1, (byte) 35, (byte) 47);
    xar.start(xid, XAResource.TMNOFLAGS);
    printState("1st global(new)", conn);
    xar.end(xid, XAResource.TMSUCCESS);
    printState("local", conn);

    System.out.println("Issue SQL to change isolation in local transaction");
    s.executeUpdate("set current isolation = RS");
    printState("SQL to change isolation in local", conn);

    Xid xid2 = new cdsXid(1, (byte) 93, (byte) 103);
    xar.start(xid2, XAResource.TMNOFLAGS);
    printState("2nd global(new)", conn);
    xar.end(xid2, XAResource.TMSUCCESS);

    // FIX: removed a duplicated join/end of xid that immediately preceded
    // this block — the same "1st global(existing)" sequence ran twice in a
    // row, adding no coverage.
    xar.start(xid, XAResource.TMJOIN);
    printState("1st global(existing)", conn);
    System.out.println("Issue SQL to change isolation in 1st global transaction");
    s.executeUpdate("set current isolation = UR");
    printState("change isolation of existing 1st global transaction", conn);
    xar.end(xid, XAResource.TMSUCCESS);
    printState("local", conn);

    xar.start(xid2, XAResource.TMJOIN);
    printState("2nd global(existing)", conn);
    xar.end(xid2, XAResource.TMSUCCESS);

    xar.rollback(xid2);
    printState("(After 2nd global rollback) local", conn);
    xar.rollback(xid);
    printState("(After 1st global rollback) local", conn);
}
conn.setAutoCommit(false); testHoldability(conn,ResultSet.HOLD_CURSORS_OVER_COMMIT); testHoldability(conn,ResultSet.CLOSE_CURSORS_AT_COMMIT);
conn.setAutoCommit(false); if(HAVE_DRIVER_MANAGER_CLASS){ testHoldability(conn,ResultSet.HOLD_CURSORS_OVER_COMMIT); testHoldability(conn,ResultSet.CLOSE_CURSORS_AT_COMMIT); }
public static void main (String args[]) { try { /* Load the JDBC Driver class */ // use the ij utility to read the property file and // make the initial connection. ij.getPropertyArg(args); Connection conn = ij.startJBMS(); createAndPopulateTable(conn); //set autocommit to off after creating table and inserting data conn.setAutoCommit(false); testHoldability(conn,ResultSet.HOLD_CURSORS_OVER_COMMIT); testHoldability(conn,ResultSet.CLOSE_CURSORS_AT_COMMIT); testHoldCursorOnMultiTableQuery(conn); testIsolationLevelChange(conn); conn.rollback(); conn.setAutoCommit(true); Statement stmt = conn.createStatement(); TestUtil.cleanUpTest(stmt, databaseObjects); conn.close(); } catch (Exception e) { System.out.println("FAIL -- unexpected exception "+e); JDBCDisplayUtil.ShowException(System.out, e); e.printStackTrace(); } }
Connection conn_main=null; PreparedStatement ps_main=null;
Connection conn_main = null; PreparedStatement ps_main = null; stmtIsClosed = false;
/**
 * Run the JDBC 4.0 PreparedStatement method tests against the client
 * (network) driver. Publishes the connection/statement into the shared
 * {@code conn}/{@code ps} fields used by the t_* helpers.
 */
void startClientTestMethods() {
    Connection conn_main = null;
    PreparedStatement ps_main = null;

    try {
        Class.forName("org.apache.derby.jdbc.ClientDriver");
        conn_main = DriverManager.getConnection("jdbc:derby:" +
                        "//localhost:1527/mydb;" + "create=true");
        ps_main = conn_main.prepareStatement("select count(*) from " +
                        "sys.systables");
        conn = conn_main;
        ps = ps_main;

        t_setRowId();
        t_setNString();
        t_setNCharacterStream();
        t_setNClob1();
        t_setClob();
        t_setBlob();
        t_setNClob2();
        t_setSQLXML();
        t_setPoolable();
        t_isPoolable();
    } catch(SQLException sqle) {
        sqle.printStackTrace();
    } catch(ClassNotFoundException cnfe) {
        cnfe.printStackTrace();
    } finally {
        try {
            // FIX: guard against NPE when getConnection/forName threw before
            // conn_main was assigned; also close the statement (was leaked).
            if (ps_main != null)
                ps_main.close();
            if (conn_main != null)
                conn_main.close();
        } catch(SQLException sqle) {
            sqle.printStackTrace();
        }
    }
}
t_setPoolable();
/**
 * Run the JDBC 4.0 PreparedStatement method tests against the client
 * (network) driver. Publishes the connection/statement into the shared
 * {@code conn}/{@code ps} fields used by the t_* helpers.
 */
void startClientTestMethods() {
    Connection conn_main = null;
    PreparedStatement ps_main = null;

    try {
        Class.forName("org.apache.derby.jdbc.ClientDriver");
        conn_main = DriverManager.getConnection("jdbc:derby:" +
                        "//localhost:1527/mydb;" + "create=true");
        ps_main = conn_main.prepareStatement("select count(*) from " +
                        "sys.systables");
        conn = conn_main;
        ps = ps_main;

        t_setRowId();
        t_setNString();
        t_setNCharacterStream();
        t_setNClob1();
        t_setClob();
        t_setBlob();
        t_setNClob2();
        t_setSQLXML();
        t_setPoolable();
        t_isPoolable();
    } catch(SQLException sqle) {
        sqle.printStackTrace();
    } catch(ClassNotFoundException cnfe) {
        cnfe.printStackTrace();
    } finally {
        try {
            // FIX: guard against NPE when getConnection/forName threw before
            // conn_main was assigned; also close the statement (was leaked).
            if (ps_main != null)
                ps_main.close();
            if (conn_main != null)
                conn_main.close();
        } catch(SQLException sqle) {
            sqle.printStackTrace();
        }
    }
}
Connection conn_main=null; PreparedStatement ps_main=null;
Connection conn_main = null; PreparedStatement ps_main = null; stmtIsClosed = false;
/**
 * Run the JDBC 4.0 PreparedStatement method tests against the embedded
 * driver. Creates CLOB/BLOB fixture tables, then publishes the
 * connection/statement into the shared {@code conn}/{@code ps} fields used
 * by the t_* helpers.
 */
void startEmbeddedTestMethods() {
    Connection conn_main = null;
    PreparedStatement ps_main = null;

    try {
        Class.forName("org.apache.derby.jdbc.EmbeddedDriver");
        conn_main = DriverManager.getConnection("jdbc:derby:mydb1;" +
                        "create=true");

        Statement s = conn_main.createStatement();
        s.execute("create table clobtable3 (n int,clobcol CLOB)");
        File file = new File("extin/short.txt");
        int fileLength = (int) file.length();
        InputStream fin = new FileInputStream(file);
        // FIX: assign the shared 'ps' field instead of declaring a local
        // PreparedStatement that shadowed it.
        ps = conn_main.prepareStatement("INSERT INTO " +
                        "clobtable3 " + "VALUES (?, ?)");
        ps.setInt(1, 1000);
        ps.setAsciiStream(2, fin, fileLength);
        ps.execute();
        // FIX: close the input stream (was leaked)
        fin.close();

        Statement s1 = conn_main.createStatement();
        s1.execute("create table blobtable3 (n int,blobcol BLOB)");
        File file1 = new File("extin/short.txt");
        int fileLength1 = (int) file1.length();
        InputStream fin1 = new FileInputStream(file1);
        PreparedStatement ps1 = conn_main.prepareStatement("INSERT INTO " +
                        "blobtable3 " + "VALUES (?, ?)");
        ps1.setInt(1, 1000);
        ps1.setBinaryStream(2, fin1, fileLength1);
        ps1.execute();
        // FIX: close the input stream (was leaked)
        fin1.close();

        conn_main.commit();
        t_Clob_setMethods_Embedded(conn_main);
        t_Blob_setMethods_Embedded(conn_main);

        ps_main = conn_main.prepareStatement("select count(*) from " +
                        "sys.systables");
        conn = conn_main;
        ps = ps_main;

        t_setRowId();
        t_setNString();
        t_setNCharacterStream();
        t_setNClob1();
        t_setNClob2();
        t_setSQLXML();
        t_setPoolable();
        t_isPoolable();
    } catch(SQLException sqle) {
        sqle.printStackTrace();
    } catch(ClassNotFoundException cnfe) {
        cnfe.printStackTrace();
    } catch(FileNotFoundException fnfe) {
        fnfe.printStackTrace();
    } catch(IOException ioe) {
        ioe.printStackTrace();
    } finally {
        try {
            // FIX: guard against NPE when the connection was never opened
            if (conn_main != null)
                conn_main.close();
        } catch(SQLException sqle) {
            sqle.printStackTrace();
        }
    }
}
PreparedStatement ps = conn_main.prepareStatement("INSERT INTO " +
ps = conn_main.prepareStatement("INSERT INTO " +
/**
 * Run the JDBC 4.0 PreparedStatement method tests against the embedded
 * driver. Creates CLOB/BLOB fixture tables, then publishes the
 * connection/statement into the shared {@code conn}/{@code ps} fields used
 * by the t_* helpers.
 */
void startEmbeddedTestMethods() {
    Connection conn_main = null;
    PreparedStatement ps_main = null;

    try {
        Class.forName("org.apache.derby.jdbc.EmbeddedDriver");
        conn_main = DriverManager.getConnection("jdbc:derby:mydb1;" +
                        "create=true");

        Statement s = conn_main.createStatement();
        s.execute("create table clobtable3 (n int,clobcol CLOB)");
        File file = new File("extin/short.txt");
        int fileLength = (int) file.length();
        InputStream fin = new FileInputStream(file);
        // FIX: assign the shared 'ps' field instead of declaring a local
        // PreparedStatement that shadowed it.
        ps = conn_main.prepareStatement("INSERT INTO " +
                        "clobtable3 " + "VALUES (?, ?)");
        ps.setInt(1, 1000);
        ps.setAsciiStream(2, fin, fileLength);
        ps.execute();
        // FIX: close the input stream (was leaked)
        fin.close();

        Statement s1 = conn_main.createStatement();
        s1.execute("create table blobtable3 (n int,blobcol BLOB)");
        File file1 = new File("extin/short.txt");
        int fileLength1 = (int) file1.length();
        InputStream fin1 = new FileInputStream(file1);
        PreparedStatement ps1 = conn_main.prepareStatement("INSERT INTO " +
                        "blobtable3 " + "VALUES (?, ?)");
        ps1.setInt(1, 1000);
        ps1.setBinaryStream(2, fin1, fileLength1);
        ps1.execute();
        // FIX: close the input stream (was leaked)
        fin1.close();

        conn_main.commit();
        t_Clob_setMethods_Embedded(conn_main);
        t_Blob_setMethods_Embedded(conn_main);

        ps_main = conn_main.prepareStatement("select count(*) from " +
                        "sys.systables");
        conn = conn_main;
        ps = ps_main;

        t_setRowId();
        t_setNString();
        t_setNCharacterStream();
        t_setNClob1();
        t_setNClob2();
        t_setSQLXML();
        t_setPoolable();
        t_isPoolable();
    } catch(SQLException sqle) {
        sqle.printStackTrace();
    } catch(ClassNotFoundException cnfe) {
        cnfe.printStackTrace();
    } catch(FileNotFoundException fnfe) {
        fnfe.printStackTrace();
    } catch(IOException ioe) {
        ioe.printStackTrace();
    } finally {
        try {
            // FIX: guard against NPE when the connection was never opened
            if (conn_main != null)
                conn_main.close();
        } catch(SQLException sqle) {
            sqle.printStackTrace();
        }
    }
}
t_setPoolable();
/**
 * Run the JDBC 4.0 PreparedStatement method tests against the embedded
 * driver. Creates CLOB/BLOB fixture tables, then publishes the
 * connection/statement into the shared {@code conn}/{@code ps} fields used
 * by the t_* helpers.
 */
void startEmbeddedTestMethods() {
    Connection conn_main = null;
    PreparedStatement ps_main = null;

    try {
        Class.forName("org.apache.derby.jdbc.EmbeddedDriver");
        conn_main = DriverManager.getConnection("jdbc:derby:mydb1;" +
                        "create=true");

        Statement s = conn_main.createStatement();
        s.execute("create table clobtable3 (n int,clobcol CLOB)");
        File file = new File("extin/short.txt");
        int fileLength = (int) file.length();
        InputStream fin = new FileInputStream(file);
        // FIX: assign the shared 'ps' field instead of declaring a local
        // PreparedStatement that shadowed it.
        ps = conn_main.prepareStatement("INSERT INTO " +
                        "clobtable3 " + "VALUES (?, ?)");
        ps.setInt(1, 1000);
        ps.setAsciiStream(2, fin, fileLength);
        ps.execute();
        // FIX: close the input stream (was leaked)
        fin.close();

        Statement s1 = conn_main.createStatement();
        s1.execute("create table blobtable3 (n int,blobcol BLOB)");
        File file1 = new File("extin/short.txt");
        int fileLength1 = (int) file1.length();
        InputStream fin1 = new FileInputStream(file1);
        PreparedStatement ps1 = conn_main.prepareStatement("INSERT INTO " +
                        "blobtable3 " + "VALUES (?, ?)");
        ps1.setInt(1, 1000);
        ps1.setBinaryStream(2, fin1, fileLength1);
        ps1.execute();
        // FIX: close the input stream (was leaked)
        fin1.close();

        conn_main.commit();
        t_Clob_setMethods_Embedded(conn_main);
        t_Blob_setMethods_Embedded(conn_main);

        ps_main = conn_main.prepareStatement("select count(*) from " +
                        "sys.systables");
        conn = conn_main;
        ps = ps_main;

        t_setRowId();
        t_setNString();
        t_setNCharacterStream();
        t_setNClob1();
        t_setNClob2();
        t_setSQLXML();
        t_setPoolable();
        t_isPoolable();
    } catch(SQLException sqle) {
        sqle.printStackTrace();
    } catch(ClassNotFoundException cnfe) {
        cnfe.printStackTrace();
    } catch(FileNotFoundException fnfe) {
        fnfe.printStackTrace();
    } catch(IOException ioe) {
        ioe.printStackTrace();
    } finally {
        try {
            // FIX: guard against NPE when the connection was never opened
            if (conn_main != null)
                conn_main.close();
        } catch(SQLException sqle) {
            sqle.printStackTrace();
        }
    }
}
boolean b; b = ps.isPoolable(); System.out.println("UnImplemented Exception not thrown in code"); } catch(SQLException e) { if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) { System.out.println("Unexpected SQLException"+e);
if (!ps.isPoolable()) System.out.println("Expected a poolable statement"); } catch(SQLException sqle) { if (ExceptionUtil.getSQLStateFromIdentifier( SQLState.ALREADY_CLOSED).equals(sqle.getSQLState()) || stmtIsClosed) { } else { System.out.println("Unexpected SQLException " + sqle); sqle.printStackTrace();
/**
 * Test PreparedStatement.isPoolable(). The method is implemented in
 * JDBC 4.0 and prepared statements are poolable by default, so the old
 * expectation of a NOT_IMPLEMENTED SQLException is obsolete (and its catch
 * logic was inverted — it printed "Unexpected" exactly when the state
 * matched NOT_IMPLEMENTED).
 */
void t_isPoolable() {
    try {
        // Prepared statements are poolable by default (JDBC 4.0 spec).
        if (!ps.isPoolable())
            System.out.println("Expected a poolable statement");
    } catch(SQLException e) {
        System.out.println("Unexpected SQLException" + e);
        e.printStackTrace();
    } catch(Exception e) {
        System.out.println("Unexpected exception thrown in method" + e);
        e.printStackTrace();
    }
}
System.out.println("UnImplemented Exception not thrown in code"); } catch(SQLException e) { if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) { System.out.println("Unexpected SQLException"+e);
if (ps.isPoolable()) System.out.println("Expected a non-poolable statement"); ps.setPoolable(true); if (!ps.isPoolable()) System.out.println("Expected a poolable statement"); } catch(SQLException sqle) { if (ExceptionUtil.getSQLStateFromIdentifier( SQLState.ALREADY_CLOSED).equals(sqle.getSQLState()) || stmtIsClosed) { } else { System.out.println("Unexpected SQLException " + sqle); sqle.printStackTrace();
/**
 * Test PreparedStatement.setPoolable(). The method is implemented in
 * JDBC 4.0, so instead of expecting a NOT_IMPLEMENTED SQLException (whose
 * old catch logic was also inverted), round-trip the poolable hint through
 * isPoolable().
 */
void t_setPoolable() {
    try {
        ps.setPoolable(false);
        if (ps.isPoolable())
            System.out.println("Expected a non-poolable statement");
        // restore the default so later tests see a poolable statement
        ps.setPoolable(true);
        if (!ps.isPoolable())
            System.out.println("Expected a poolable statement");
    } catch(SQLException e) {
        System.out.println("Unexpected SQLException" + e);
        e.printStackTrace();
    } catch(Exception e) {
        System.out.println("Unexpected exception thrown in method" + e);
        e.printStackTrace();
    }
}
if (realConnection != null) SanityManager.ASSERT(realConnection.transactionIsIdle(),
if (con.realConnection != null) SanityManager.ASSERT(con.realConnection.transactionIsIdle(),
/**
 * XAResource.prepare: first phase of two-phase commit for the global
 * transaction identified by xid.
 *
 * Throws XAER_NOTA if neither this resource nor the resource manager knows
 * the xid; XAER_PROTO if the transaction is still associated with an
 * XAResource, has suspended associations, or is already prepared; and the
 * transaction's rollback-only code if it failed earlier.
 *
 * @return XAResource.XA_OK if the transaction prepared, XA_RDONLY if it was
 *         read-only (in which case it is completed and its connection is
 *         returned to the resource).
 */
public final synchronized int prepare(Xid xid) throws XAException {
    checkXAActive();

    // ensure immtable and correct equals method.
    XAXactId xid_im = new XAXactId(xid);

    XATransactionState tranState = getTransactionState(xid_im);

    if (tranState == null) {
        // Not known locally; ask the resource manager about in-doubt
        // transactions before deciding the xid is unknown.
        XAResourceManager rm = ra.getXAResourceManager();

        ContextManager inDoubtCM = rm.find(xid);

        // RM also does not know about this xid.
        if (inDoubtCM == null)
            throw new XAException(XAException.XAER_NOTA);

        // cannot prepare in doubt transactions
        throw new XAException(XAException.XAER_PROTO);
    }

    synchronized (tranState) {
        checkUserCredentials(tranState.creatingResource);

        // Check the transaction is not associated with
        // any XAResource.
        switch (tranState.associationState) {
        case XATransactionState.T0_NOT_ASSOCIATED:
            break;

        case XATransactionState.TRO_FAIL:
            // transaction marked rollback-only by an earlier failure
            throw new XAException(tranState.rollbackOnlyCode);

        default:
            throw new XAException(XAException.XAER_PROTO);
        }

        // suspended associations also forbid prepare
        if (tranState.suspendedList != null && tranState.suspendedList.size() != 0)
            throw new XAException(XAException.XAER_PROTO);

        if (tranState.isPrepared)
            throw new XAException(XAException.XAER_PROTO);

        EmbedConnection conn = tranState.conn;

        try {
            int ret = conn.xa_prepare();

            if (ret == XATransactionController.XA_OK) {
                tranState.isPrepared = true;

                return XAResource.XA_OK;
            } else {
                // read-only: the transaction is complete; give its
                // connection back to the owning resource.
                returnConnectionToResource(tranState, xid_im);

                if (SanityManager.DEBUG) {
                    // NOTE(review): 'realConnection' is referenced
                    // unqualified here; related code elsewhere qualifies it
                    // as con.realConnection — verify this asserts on the
                    // connection that was just returned, not some other
                    // field. TODO confirm.
                    if (realConnection != null)
                        SanityManager.ASSERT(realConnection.transactionIsIdle(),
                            "real connection should have been idle at this point");
                }
                return XAResource.XA_RDONLY;
            }
        } catch (SQLException sqle) {
            throw wrapInXAException(sqle);
        }
    }
}
conn.rollback();
/**
 * Entry point for the prepStmt test: exercises PreparedStatement
 * executeUpdate/execute/executeQuery with and without parameters,
 * setXXX/setObject/setNull across many JDBC types, a very wide (1000
 * column) table with large parameter counts, several expected-failure
 * cases, and a batch of numbered regression tests, then cleans up.
 *
 * @param args ij/harness arguments used to open the connection
 */
public static void main (String args[])
{
    try {
        System.out.println("prepStmt Test Starts");
        ij.getPropertyArg(args);
        conn = ij.startJBMS();
        if (conn == null)
        {
            System.out.println("conn didn't work");
            return;
        }
        Statement cleanstmt = conn.createStatement();
        TestUtil.cleanUpTest(cleanstmt, testObjects);
        PreparedStatement ps;
        ResultSet rs;
        boolean hasResultSet;
        int uc;

        // executeUpdate() without parameters
        System.out.println("executeUpdate() without parameters");
        ps = conn.prepareStatement("create table t1(c1 int, c2 int, c3 int)");
        uc = ps.executeUpdate();
        System.out.println("Update count is: " + uc);

        // executeUpdate() with parameters
        System.out.println("executeUpdate() with parameters");
        ps = conn.prepareStatement("insert into t1 values (?, 5, ?)");
        ps.setInt(1, 99);
        ps.setInt(2, 9);
        uc = ps.executeUpdate();
        System.out.println("Update count is: " + uc);

        // execute() with parameters, no result set returned
        System.out.println("execute() with parameters, no result set returned");
        ps = conn.prepareStatement("insert into t1 values (2, 6, ?), (?, 5, 8)");
        ps.setInt(1, 10);
        ps.setInt(2, 7);
        hasResultSet = ps.execute();
        while (hasResultSet)
        {
            rs = ps.getResultSet();
            while (rs.next())
                System.out.println("ERROR: should not get here!");
            hasResultSet = ps.getMoreResults();
        }
        uc = ps.getUpdateCount();
        if (uc != -1)
            System.out.println("Update count is: " + uc);

        // executeQuery() without parameters
        System.out.println("executQuery() without parameters");
        ps = conn.prepareStatement("select * from t1");
        rs = ps.executeQuery();
        while (rs.next())
            System.out.println("got row: "+" "+rs.getInt(1)+" "+rs.getInt(2)+" "+rs.getInt(3));
        System.out.println("end of rows");

        // executeQuery() with parameters
        System.out.println("executQuery() with parameters");
        ps = conn.prepareStatement("select * from t1 where c2 = ?");
        ps.setInt(1, 5);
        rs = ps.executeQuery();
        while (rs.next())
            System.out.println("got row: "+" "+rs.getInt(1)+" "+rs.getInt(2)+" "+rs.getInt(3));
        System.out.println("end of rows");

        // execute() with parameters, with result set returned
        System.out.println("execute() with parameters with result set returned");
        ps = conn.prepareStatement("select * from t1 where c2 = ?");
        ps.setInt(1, 5);
        hasResultSet = ps.execute();
        while (hasResultSet)
        {
            rs = ps.getResultSet();
            while (rs.next())
                System.out.println("got row: "+" "+rs.getInt(1)+" "+rs.getInt(2)+" "+rs.getInt(3));
            hasResultSet = ps.getMoreResults();
        }
        System.out.println("end of rows");
        uc = ps.getUpdateCount();
        if (uc != -1)
            System.out.println("Update count is: " + uc);

        // test different data types for input parameters of a PreparedStatement
        System.out.println("test different data types for input parameters of a Prepared Statement");
        ps = conn.prepareStatement("create table t2(si smallint,i int, bi bigint, r real, f float, d double precision, n5_2 numeric(5,2), dec10_3 decimal(10,3), ch20 char(20),vc varchar(20), lvc long varchar,b20 char(23) for bit data, vb varchar(23) for bit data, lvb long varchar for bit data, dt date, tm time, ts timestamp not null)");
        uc = ps.executeUpdate();
        System.out.println("Update count is: " + uc);

        // byte array for binary values.
        byte[] ba = new byte[] {0x00,0x1,0x2,0x3,0x4,0x5,0x6,0x7,0x8,0x9,0xa,0xb,0xc,
                                0xd,0xe,0xf,0x10,0x11,0x12,0x13 };

        ps = conn.prepareStatement("insert into t2 values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ,? , ?)");
        ps.setShort(1, (short) 1);
        ps.setInt(2, 2);
        ps.setLong(3, 3);
        ps.setFloat(4, (float) 4.0);
        ps.setDouble(5, 5.0);
        ps.setDouble(6, 6.0);
        ps.setBigDecimal(7, new BigDecimal("77.77"));
        ps.setBigDecimal(8, new BigDecimal("8.1"));
        ps.setString(9, "column9string");
        byte[] c10ba = new String("column10vcstring").getBytes("UTF-8");
        int len = c10ba.length;
        ps.setAsciiStream(10, new ByteArrayInputStream(c10ba), len);
        byte[] c11ba = new String("column11lvcstring").getBytes("UTF-8");
        len = c11ba.length;
        ps.setCharacterStream(11, new InputStreamReader(new ByteArrayInputStream(c11ba),"UTF-8"),len);
        ps.setBytes(12,ba);
        // Calling setBytes on the varchar for bit data type because it
        // Appears DB2 UDB accepts this only for the BLOB data type...
        // ps.setBinaryStream(13, new ByteArrayInputStream(ba), ba.length);
        ps.setBytes(13,ba);
        ps.setBytes(14,ba);
        ps.setDate(15, Date.valueOf("2002-04-12"));
        ps.setTime(16, Time.valueOf("11:44:30"));
        ps.setTimestamp(17, Timestamp.valueOf("2002-04-12 11:44:30.000000000"));
        uc = ps.executeUpdate();
        System.out.println("Update count is: " + uc);

        // test setObject on different datatypes of the input parameters of
        // PreparedStatement
        System.out.println("test setObject on different data types for input parameters of a Prepared Statement");
        ps = conn.prepareStatement("insert into t2 values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ,? , ?)");
        ps.setObject(1, new Integer(1));
        ps.setObject(2, new Integer(2));
        ps.setObject(3, new Long(3));
        ps.setObject(4, new Float(4.0));
        ps.setObject(5, new Double(5.0));
        ps.setObject(6, new Double(6.0));
        ps.setObject(7, new BigDecimal("77.77"));
        ps.setObject(8, new BigDecimal("8.1"));
        ps.setObject(9, "column11string");
        ps.setObject(10, "column10vcstring");
        ps.setObject(11, "column11lvcstring");
        ps.setObject(12,ba);
        ps.setObject(13,ba);
        ps.setObject(14,ba);
        ps.setObject(15, Date.valueOf("2002-04-12"));
        ps.setObject(16, Time.valueOf("11:44:30"));
        ps.setObject(17, Timestamp.valueOf("2002-04-12 11:44:30.000000000"));
        uc = ps.executeUpdate();
        System.out.println("Update count is: " + uc);

        // test setNull on different datatypes of the input parameters of PreparedStatement
        System.out.println("test setNull on different data types for input parameters of a Prepared Statement");
        ps = conn.prepareStatement("insert into t2 values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ,? , ?)");
        ps.setNull(1, java.sql.Types.SMALLINT);
        ps.setNull(2, java.sql.Types.INTEGER);
        ps.setNull(3, java.sql.Types.BIGINT);
        ps.setNull(4, java.sql.Types.REAL);
        ps.setNull(5, java.sql.Types.FLOAT);
        ps.setNull(6, java.sql.Types.DOUBLE);
        ps.setNull(7, java.sql.Types.NUMERIC);
        ps.setNull(8, java.sql.Types.DECIMAL);
        ps.setNull(9, java.sql.Types.CHAR);
        ps.setNull(10, java.sql.Types.VARCHAR);
        ps.setNull(11, java.sql.Types.LONGVARCHAR);
        ps.setNull(12, java.sql.Types.BINARY);
        ps.setNull(13, java.sql.Types.VARBINARY);
        ps.setNull(14, java.sql.Types.LONGVARBINARY);
        ps.setNull(15, java.sql.Types.DATE);
        ps.setNull(16, java.sql.Types.TIME);

        // ts column is declared NOT NULL, so give it a real value.
        ps.setTimestamp(17, Timestamp.valueOf("2002-04-12 11:44:31.000000000")); //slightly after
        hasResultSet = ps.execute();
        uc = ps.getUpdateCount();
        if (uc != -1)
            System.out.println("Update count is: " + uc);

        ps = conn.prepareStatement("select * from t2");
        rs = ps.executeQuery();
        while (rs.next())
        {
            System.out.println("got row: "+" "+rs.getShort(1)+
                               " "+rs.getInt(2)+" "+rs.getLong(3)+
                               " "+rs.getFloat(4)+" "+rs.getDouble(5)+
                               " "+rs.getDouble(6)+" "+rs.getBigDecimal(7)+
                               " "+rs.getBigDecimal(8)+" "+rs.getString(9)+
                               " "+rs.getString(10)+" "+rs.getString(11)+
                               " "+bytesToString(rs.getBytes(12)) +
                               " "+bytesToString(rs.getBytes(13)) +
                               " "+bytesToString(rs.getBytes(14)) +
                               " "+rs.getDate(15)+
                               " "+rs.getTime(16)+" "+rs.getTimestamp(17));
            Timestamp ts = rs.getTimestamp(17);
            Timestamp temp = Timestamp.valueOf("2002-04-12 11:44:30.000000000");
            if (ts.after(temp))
                System.out.println("After first Timestamp!");
            else if (ts.before(temp))
                System.out.println("Before first Timestamp!");
            else
                System.out.println("Timestamp match!");
        }
        System.out.println("end of rows");

        // Expected failure: executing with an unset parameter.
        try
        {
            ps = conn.prepareStatement("select * from t2 where i = ?");
            rs = ps.executeQuery();
        }
        catch (SQLException e)
        {
            System.out.println("SQLState: " + e.getSQLState() + " message: " + e.getMessage());
        }
        try
        {
            ps = conn.prepareStatement("insert into t2 values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
            ps.executeUpdate();
        }
        catch (SQLException e)
        {
            System.out.println("SQLState: " + e.getSQLState() + " message: " + e.getMessage());
        }

        // Very wide table with large parameter counts (bug 4863).
        try
        {
            int tabSize = 1000;
            String createBigTabSql = "create table bigtab (";
            for (int i = 1; i <= tabSize; i++)
            {
                createBigTabSql += "c"+ i + " int";
                if (i != tabSize)
                    createBigTabSql += ", ";
                else
                    createBigTabSql += " )";
            }
            //System.out.println(createBigTabSql);
            ps = conn.prepareStatement(createBigTabSql);
            uc = ps.executeUpdate();

            insertTab(conn, "bigtab",50);
            insertTab(conn, "bigtab",200);
            insertTab(conn, "bigtab", 300);
            insertTab(conn, "bigtab",500);
            // prepared Statement with many params (bug 4863)
            insertTab(conn, "bigtab", 1000);
            selectFromBigTab(conn);
            // Negative Cases
            System.out.println("Insert wrong column name");
            insertTab(conn, "bigtab", 1001);
            // this one will give a sytax error
            System.out.println("Expected Syntax error ");
            insertTab(conn, "bigtab", 0);
            // table doesn't exist
            System.out.println("Expected Table does not exist ");
            insertTab(conn, "wrongtab",1000);
        }
        catch (SQLException e)
        {
            System.out.println("SQLState: " + e.getSQLState() + " message: " + e.getMessage());
        }
        rs.close();
        ps.close();

        testBigDecimalSetObject(conn);
        testBigDecimalSetObjectWithScale(conn);

        if (!TestUtil.isJCCFramework())
        {
            testVaryingClientParameterTypeBatch(conn);
        }

        test4975(conn);
        test5130(conn);
        test5172(conn);
        jira614Test(conn);
        jira614Test_a(conn);
        jira170Test(conn);
        jira125Test(conn);
        jira428Test(conn);
        jira1454Test(conn);
        jira1533Test_a(conn);
        jira1533Test_b(conn);
        conn.close();
        // refresh conn before cleaning up
        conn = ij.startJBMS();
        cleanstmt = conn.createStatement();
        TestUtil.cleanUpTest(cleanstmt, testObjects);
        cleanstmt.close();
        conn.close();
        System.out.println("prepStmt Test Ends");
    }
    catch (Exception e)
    {
        e.printStackTrace();
    }
}
if( storageFactory.useContextLoader) { ClassLoader cl = Thread.currentThread().getContextClassLoader(); if( cl != null && cl.getResource( path) != null) return true; } if( getClass().getResource( path) != null) { if( storageFactory.useContextLoader) storageFactory.useContextLoader = false; return true; } return false;
ClassLoader cl = Thread.currentThread().getContextClassLoader(); if (cl != null) if (cl.getResource(path) != null) return true; cl = getClass().getClassLoader(); if (cl != null) { return (cl.getResource(path) != null); } else { return ClassLoader.getSystemResource(path) != null; }
/**
 * Tests whether the resource denoted by this path exists on the
 * classpath.
 * <p>
 * Checks the thread context class loader first (it may see application
 * resources the defining loader cannot), then the loader of this class.
 * When this class was loaded by the bootstrap loader,
 * {@code getClass().getClassLoader()} returns null, so fall back to
 * {@link ClassLoader#getSystemResource}. This replaces the original
 * logic, which mutated the shared {@code storageFactory.useContextLoader}
 * flag as a side effect and used the package-relative
 * {@code Class.getResource} lookup.
 *
 * @return true if the resource exists on the classpath, false otherwise
 */
public boolean exists()
{
    // Try the context class loader first, guarding against it being null.
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    if( cl != null && cl.getResource( path) != null)
        return true;

    // Fall back to the loader that defined this class; null means the
    // bootstrap loader, so use the system class loader instead.
    cl = getClass().getClassLoader();
    if( cl != null)
        return cl.getResource( path) != null;

    return ClassLoader.getSystemResource( path) != null;
} // end of exists
InputStream is = null; if( storageFactory.useContextLoader) { ClassLoader cl = Thread.currentThread().getContextClassLoader(); is = cl.getResourceAsStream( path); if( is != null) return is; } is = getClass().getResourceAsStream( path); if( is != null && storageFactory.useContextLoader) storageFactory.useContextLoader = false; if( is == null) throw new FileNotFoundException( "Not in class path: " + path); return is;
InputStream is = null; ClassLoader cl = Thread.currentThread().getContextClassLoader(); if (cl != null) is = cl.getResourceAsStream(path); if (is == null) { cl = getClass().getClassLoader(); if (cl != null) is = cl.getResourceAsStream(path); else is = ClassLoader.getSystemResourceAsStream(path); } if (is == null) throw new FileNotFoundException(toString()); return is;
/**
 * Opens an InputStream on the classpath resource denoted by this path.
 * <p>
 * Checks the thread context class loader first, then the loader of this
 * class, falling back to the system class loader when this class was
 * loaded by the bootstrap loader. Fixes two defects in the original:
 * {@code cl.getResourceAsStream} was called without a null check on the
 * context class loader (NPE when none is set), and the shared
 * {@code storageFactory.useContextLoader} flag was mutated as a side
 * effect.
 *
 * @return an open stream on the resource; caller is responsible for
 *         closing it
 * @throws FileNotFoundException if the resource is not on the classpath
 */
public InputStream getInputStream( ) throws FileNotFoundException
{
    InputStream is = null;

    // Try the context class loader first, guarding against null.
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    if( cl != null)
        is = cl.getResourceAsStream( path);

    if( is == null)
    {
        // Fall back to this class's loader; null means the bootstrap
        // loader, so use the system class loader instead.
        cl = getClass().getClassLoader();
        is = (cl != null) ? cl.getResourceAsStream( path)
                          : ClassLoader.getSystemResourceAsStream( path);
    }

    if( is == null)
        throw new FileNotFoundException( "Not in class path: " + path);
    return is;
} // end of getInputStream
java.util.Enumeration enum = getChildren().elements(); while (enum.hasMoreElements()){ childrenTime = childrenTime + ((RealBasicNoPutResultSetStatistics)enum.nextElement()).getTotalTime();
java.util.Enumeration e = getChildren().elements(); while (e.hasMoreElements()){ childrenTime = childrenTime + ((RealBasicNoPutResultSetStatistics)e.nextElement()).getTotalTime();
public long getChildrenTime(){ long childrenTime = 0; java.util.Enumeration enum = getChildren().elements(); while (enum.hasMoreElements()){ childrenTime = childrenTime + ((RealBasicNoPutResultSetStatistics)enum.nextElement()).getTotalTime(); }
return childrenTime; }
public long getChildrenTime(){ long childrenTime = 0; java.util.Enumeration enum = getChildren().elements(); while (enum.hasMoreElements()){ childrenTime = childrenTime + ((RealBasicNoPutResultSetStatistics)enum.nextElement()).getTotalTime(); }
this.colRef = (ColumnReference) colRef;
this.columnExpression = (ValueNode)colRef;
/**
 * Initializer (QueryTreeNode init protocol): stores the column reference
 * this node wraps.
 *
 * @param colRef the column reference this node operates on; must be a
 *               ColumnReference or the cast fails with a
 *               ClassCastException
 */
public void init(Object colRef) { this.colRef = (ColumnReference) colRef; }
if (colRef != null)
if (columnExpression != null)
/**
 * Prints this node's sub-nodes as part of a query tree dump.
 * Active only in debug (sane) builds.
 *
 * @param depth indentation depth for the printed subtree
 */
public void printSubNodes(int depth)
{
    if (!SanityManager.DEBUG)
    {
        return;
    }

    super.printSubNodes(depth);

    if (colRef == null)
    {
        return;
    }

    printLabel(depth, "colRef: ");
    colRef.treePrint(depth + 1);
}
colRef.treePrint(depth + 1);
columnExpression.treePrint(depth + 1);
/**
 * Prints this node's sub-nodes as part of a query tree dump; debug
 * (sane) builds only.
 *
 * @param depth indentation depth for the printed subtree
 */
public void printSubNodes(int depth)
{
    if (SanityManager.DEBUG)
    {
        super.printSubNodes(depth);
        if (colRef != null)
        {
            printLabel(depth, "colRef: ");
            colRef.treePrint(depth + 1);
        }
    }
}
SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE); System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES, Integer.toString(numcol*20));
if(!checksumTest) { SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE); System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES, Integer.toString(numcol*20)); } logFactory.flushAll();
/**
 * STest1: fills a page to ~80% with one very large row, performs log
 * switches (checkpoints) around a large update, then — via the
 * TEST_LOG_INCOMPLETE_LOG_WRITE debug flag — writes only about half of
 * the update's log record so recovery tests can verify the partial
 * record is discarded. Registers the container id under key(1,1) and the
 * surviving column count under key(1,2) for the recovery phase.
 */
protected void STest1() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();

    ///////////////////////////////////////////
    //// log switch without checkpoint here ///
    ///////////////////////////////////////////
    factory.checkpoint();

    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                bigrow.setColumn(--numcol, (String) null);
            }
        }

        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        Page p2 = t_util.t_addPage(c);
        // do something so we get the beginXact log
        // record out of the way
        t_util.t_insert(p2, new T_RawStoreRow(REC_001));

        ///////////////////////////////////////////
        //// log switch without checkpoint here ///
        ///////////////////////////////////////////
        factory.checkpoint();

        //////////////////////////////////////////////////////////
        // writing approx 1/2 log record to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                                   Integer.toString(numcol*20));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test1: cid = " + cid + " numcol " + numcol);

        register(key(1,1), cid);
        register(key(1,2), numcol);
    }
    finally
    {
        // Always clear the debug flag so later tests log normally.
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
if(checksumTest) simulateLogFileCorruption();
/**
 * STest1 (duplicate hunk of the same method): fills a page to ~80% with
 * one very large row, performs log switches (checkpoints) around a large
 * update, then — via the TEST_LOG_INCOMPLETE_LOG_WRITE debug flag —
 * writes only about half of the update's log record so recovery tests
 * can verify the partial record is discarded. Registers the container id
 * under key(1,1) and the column count under key(1,2).
 */
protected void STest1() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();

    ///////////////////////////////////////////
    //// log switch without checkpoint here ///
    ///////////////////////////////////////////
    factory.checkpoint();

    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                bigrow.setColumn(--numcol, (String) null);
            }
        }

        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        Page p2 = t_util.t_addPage(c);
        // do something so we get the beginXact log
        // record out of the way
        t_util.t_insert(p2, new T_RawStoreRow(REC_001));

        ///////////////////////////////////////////
        //// log switch without checkpoint here ///
        ///////////////////////////////////////////
        factory.checkpoint();

        //////////////////////////////////////////////////////////
        // writing approx 1/2 log record to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                                   Integer.toString(numcol*20));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test1: cid = " + cid + " numcol " + numcol);

        register(key(1,1), cid);
        register(key(1,2), numcol);
    }
    finally
    {
        // Always clear the debug flag so later tests log normally.
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE); System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,Integer.toString(numcol*20));
if(!checksumTest) { SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE); System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,Integer.toString(numcol*20)); }
/**
 * STest2: like STest1 but without the log switches — inserts a large
 * row, then simulates a half-written log record for a large update of
 * it via the TEST_LOG_INCOMPLETE_LOG_WRITE debug flag. Registers the
 * container id under key(2,1) and the column count under key(2,2) for
 * the recovery phase.
 */
protected void STest2() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();

    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page with 20 bytes row
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                bigrow.setColumn(--numcol, (String) null);
            }
        }

        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        rh = t_util.t_insert(page, bigrow);
        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        //////////////////////////////////////////////////////////
        // writing approx 1/2 log record to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,Integer.toString(numcol*20));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test2: cid = " + cid + " numcol " + numcol);

        register(key(2,1), cid);
        register(key(2,2), numcol);
    }
    finally
    {
        // Always clear the debug flag so later tests log normally.
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
if(checksumTest) simulateLogFileCorruption();
/**
 * STest2 (duplicate hunk of the same method): inserts a large row, then
 * simulates a half-written log record for a large update of it via the
 * TEST_LOG_INCOMPLETE_LOG_WRITE debug flag. Registers the container id
 * under key(2,1) and the column count under key(2,2).
 */
protected void STest2() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();

    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page with 20 bytes row
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                bigrow.setColumn(--numcol, (String) null);
            }
        }

        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        rh = t_util.t_insert(page, bigrow);
        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        //////////////////////////////////////////////////////////
        // writing approx 1/2 log record to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,Integer.toString(numcol*20));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test2: cid = " + cid + " numcol " + numcol);

        register(key(2,1), cid);
        register(key(2,2), numcol);
    }
    finally
    {
        // Always clear the debug flag so later tests log normally.
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE); System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES, "200");
if(!checksumTest) { SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE); System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES, "200"); } logFactory.flushAll();
/**
 * STest3: multi-transaction savepoint/rollback scenario (based on
 * T_Recovery S203) interleaved with log switches, leaving transactions
 * t0/t3 (and any beyond t4) incomplete, t1 aborted, t2/t4 committed, and
 * finishing by writing only the first 200 bytes of a copyAndPurge log
 * record so that recovery tests can verify the half record is discarded.
 * Registers container ids, page numbers, counts, and the two "bad" page
 * numbers under key(3,...) for the recovery phase.
 */
protected void STest3() throws T_Fail, StandardException
{
    int numtrans = 7;
    int numpages = 7;
    int i,j;

    // this is basically T_Recovery S203 with a couple of log switches
    try
    {
        T_TWC[] t = new T_TWC[numtrans];
        for (i = 0; i < numtrans; i++)
            t[i] = t_util.t_startTransactionWithContext();

        long[] cid = new long[numtrans];
        ContainerHandle[] c = new ContainerHandle[numtrans];

        for (i = 0; i < numtrans; i++)
        {
            cid[i] = t_util.t_addContainer(t[i], 0);
            t_util.t_commit(t[i]);
            c[i] = t_util.t_openContainer(t[i], 0, cid[i], true);
        }

        Page page[][] = new Page[numtrans][numpages];
        long pagenum[][] = new long[numtrans][numpages];

        for (i = 0; i < numtrans; i++)
        {
            for (j = 0; j < numpages; j++)
            {
                t[i].switchTransactionContext();
                page[i][j] = t_util.t_addPage(c[i]);
                pagenum[i][j] = page[i][j].getPageNumber();
                t[i].resetContext();
            }
        }

        // set up numtrans (at least 5) transactions, each with one
        // container and numpages pages. Do the following test:
        //
        // 1) insert 1 row onto each page
        //    set savepoint SP1 on first transaction (t0)
        //
        // 2) update every rows
        //    set savepoint SP1 on all other transactions
        //
        // 3) update every rows
        //    set savepoint SP2 on all transactions
        //
        // 4) update every rows
        //
        // 5) rollback t0 to SP1
        //
        //    check that only page[0][x] have been rolled back
        //    past SP2
        //
        // 6) update every row
        // 7) rollback SP2 on all transaction except the first
        //
        // 8) update every rows
        // 9) rollback t0 to SP1
        //
        // 10) leave transactions in the following state
        //     t0 - incomplete
        //     t1 - abort
        //     t2 - commit
        //     t3 - incomplete
        //     t4 - commit
        //     any other transactions - incomplete

        //////////////////////// step 1 ////////////////////////
        RecordHandle[][] rh = new RecordHandle[numtrans][numpages];
        T_RawStoreRow row1 = new T_RawStoreRow(REC_001);
        for (i = 0; i < numtrans; i++)
            for (j = 0; j < numpages; j++)
            {
                t[i].switchTransactionContext();
                rh[i][j] = t_util.t_insert(page[i][j], row1);
                t[i].resetContext();
            }

        t[0].setSavePoint(SP1, null); // sp1

        //////////////////////// step 2 ////////////////////////
        T_RawStoreRow row2 = new T_RawStoreRow(REC_002);
        for (i = 0; i < numtrans; i++)
            for (j = 0; j < numpages; j++)
            {
                t[i].switchTransactionContext();
                page[i][j].update(rh[i][j], row2.getRow(), (FormatableBitSet) null);
                t[i].resetContext();
            }

        for (i = 1; i < numtrans; i++) // sp1
        {
            t[i].setSavePoint(SP1, null);
        }

        ///////////////////////////////////////////
        //// log switch without checkpoint here ///
        ///////////////////////////////////////////
        factory.checkpoint();

        //////////////////////// step 3 ////////////////////////
        T_RawStoreRow row3 = new T_RawStoreRow(REC_003);
        for (i = 0; i < numtrans; i++)
            for (j = 0; j < numpages; j++)
                page[i][j].update(rh[i][j], row3.getRow(), (FormatableBitSet) null);

        for (i = 0; i < numtrans; i++)
            t[i].setSavePoint(SP2, null); // sp2

        //////////////////////// step 4 ////////////////////////
        T_RawStoreRow row4 = new T_RawStoreRow(REC_004);
        for (i = 0; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                page[i][j].update(rh[i][j], row4.getRow(), (FormatableBitSet) null);
            t[i].resetContext();
        }

        //////////////////////// step 5 ////////////////////////
        // unlatch relavante pages
        t[0].switchTransactionContext();
        for (j = 0; j < numpages; j++)
            page[0][j].unlatch();

        t[0].rollbackToSavePoint(SP1, null); // step 5

        // relatch relavante pages
        for (j = 0; j < numpages; j++)
            page[0][j] = t_util.t_getPage(c[0], pagenum[0][j]);

        t[0].resetContext();

        ///////////////////////////////////////////
        //// log switch without checkpoint here ///
        ///////////////////////////////////////////
        factory.checkpoint();

        //////////////////////// check ////////////////////////
        for (i = 1; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                t_util.t_checkFetch(page[i][j], rh[i][j], REC_004);
            t[i].resetContext();
        }

        t[0].switchTransactionContext();
        for (j = 0; j < numpages; j++)
            t_util.t_checkFetch(page[0][j], rh[0][j], REC_001);
        t[0].resetContext();

        //////////////////////// step 6 ////////////////////////
        T_RawStoreRow row5 = new T_RawStoreRow(REC_005);
        for (i = 0; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                page[i][j].update(rh[i][j], row5.getRow(), (FormatableBitSet) null);
            t[i].resetContext();
        }

        //////////////////////// step 7 ////////////////////////
        for (i = 1; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                page[i][j].unlatch();

            t[i].rollbackToSavePoint(SP2, null);

            for (j = 0; j < numpages; j++)
                page[i][j] = t_util.t_getPage(c[i],pagenum[i][j]);
            t[i].resetContext();
        }

        //////////////////////// check ////////////////////////
        for (i = 1; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                t_util.t_checkFetch(page[i][j], rh[i][j], REC_003);
            t[i].resetContext();
        }

        t[0].switchTransactionContext();
        for (j = 0; j < numpages; j++)
            t_util.t_checkFetch(page[0][j], rh[0][j], REC_005);
        t[0].resetContext();

        ///////////////////////////////////////////
        //// log switch without checkpoint here ///
        ///////////////////////////////////////////
        factory.checkpoint();

        //////////////////////// step 8 ////////////////////////
        T_RawStoreRow row6 = new T_RawStoreRow(REC_006);
        for (i = 0; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                page[i][j].update(rh[i][j], row6.getRow(), (FormatableBitSet) null); // step 8
            t[i].resetContext();
        }

        //////////////////////// step 9 ////////////////////////
        // unlatch relavante pages
        t[0].switchTransactionContext();
        for (j = 0; j < numpages; j++)
            page[0][j].unlatch();

        t[0].rollbackToSavePoint(SP1, null);

        // relatch relevant pages
        for (j = 0; j < numpages; j++)
            page[0][j] = t_util.t_getPage(c[0], pagenum[0][j]);
        t[0].resetContext();

        //////////////////////// check ////////////////////////
        for (i = 1; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
            {
                t_util.t_checkFetch(page[i][j], rh[i][j], REC_006);
                t_util.t_checkRecordCount(page[i][j], 1, 1);
            }
            t[i].resetContext();
        }

        t[0].switchTransactionContext();
        for (j = 0; j < numpages; j++)
        {
            t_util.t_checkFetch(page[0][j], rh[0][j], REC_001);
            t_util.t_checkRecordCount(page[0][j], 1, 1);
        }
        t[0].resetContext();

        //////////////////////// step 10 ////////////////////////
        // unlatch all pages
        for (i = 0; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                page[i][j].unlatch();
            t[i].resetContext();
        }

        // t[0] incomplete
        t_util.t_abort(t[1]);
        t_util.t_commit(t[2]);
        // t[3] incomplete
        t_util.t_commit(t[4]);

        // reopen containers 1, 2, and 4, where were closed when the
        // transaction terminated.
        c[1] = t_util.t_openContainer(t[1], 0, cid[1], false);
        c[2] = t_util.t_openContainer(t[2], 0, cid[2], false);
        c[4] = t_util.t_openContainer(t[4], 0, cid[4], false);

        //////////////////////// check ////////////////////////
        for (j = 0; j < numpages; j++)
        {
            t[0].switchTransactionContext();
            t_util.t_checkFetch(c[0], rh[0][j], REC_001);
            t[0].resetContext();

            // t[1] has been aborted
            // rh[1][j] (REC_001) is deleted
            t[1].switchTransactionContext();
            page[1][j] = t_util.t_getPage(c[1], pagenum[1][j]);
            t_util.t_checkRecordCount(page[1][j], 1, 0);
            t_util.t_checkFetchBySlot(page[1][j], Page.FIRST_SLOT_NUMBER,
                                      REC_001, true, false);
            page[1][j].unlatch();
            t[1].resetContext();

            t[2].switchTransactionContext();
            t_util.t_checkFetch(c[2], rh[2][j], REC_006);
            t[2].resetContext();

            t[3].switchTransactionContext();
            t_util.t_checkFetch(c[3], rh[3][j], REC_006);
            t[3].resetContext();

            t[4].switchTransactionContext();
            t_util.t_checkFetch(c[4], rh[4][j], REC_006);
            t[4].resetContext();
        }

        ///////////////////////////////////////////////////////////
        //// now write a 1/2 log record to the end of the log
        //////////////////////////////////////////////////////////
        t[3].switchTransactionContext();// this is going to be an
                                        // incomplete transaction

        // make a full page and then copy and purge it to another page
        Page badPage1 = t_util.t_addPage(c[3]);
        Page badPage2 = t_util.t_addPage(c[3]);
        T_RawStoreRow row;
        for (i = 0, row = new T_RawStoreRow("row at slot " + i);
             badPage1.spaceForInsert();
             i++, row = new T_RawStoreRow("row at slot " + i))
        {
            if (t_util.t_insertAtSlot(badPage1, i, row, Page.INSERT_UNDO_WITH_PURGE) == null)
                break;
        }

        //////////////////////////////////////////////////////////
        // writing 200 bytes of the log record to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES, "200");

        // RESOLVE:
        // copy and purge actually generates 2 log records, this is
        // actually not a good operation to use for this test. Just make
        // sure the first log record is > 400 or else the log will be hosed
        //
        badPage1.copyAndPurge(badPage2, 0, i, 0);
        t[3].resetContext();
        ////////////////////////////////////////////////////////

        REPORT("badlog test3: numtrans " + numtrans + " numpages " + numpages);

        for (i = 0; i < numtrans; i++)
        {
            register(key(3, i+10), cid[i]);
            String str = "container " + i + ":" + find(key(3,i+10)) + " pages: ";
            for (j = 0; j < numpages; j++)
            {
                str += pagenum[i][j] + " ";
                register(key(3, (i+1)*1000+j), pagenum[i][j]);
            }
            REPORT("\t" + str);
        }

        register(key(3,1), numtrans);
        register(key(3,2), numpages);
        register(key(3,3), badPage1.getPageNumber());
        register(key(3,4), badPage2.getPageNumber());
    }
    finally
    {
        // Always clear the debug flag so later tests log normally.
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
if(checksumTest) simulateLogFileCorruption();
/**
 * Simulated-crash setup test 3: exercise multiple transactions with
 * savepoints and log switches, leave some transactions incomplete, then
 * write a partial (200-byte) log record at the end of the log so that
 * recovery must cope with a torn tail record.
 * This is basically T_Recovery S203 with a couple of log switches.
 *
 * @exception T_Fail test failure
 * @exception StandardException store error
 */
protected void STest3() throws T_Fail, StandardException
{
    int numtrans = 7;
    int numpages = 7;
    int i,j;

    // this is basically T_Recovery S203 with a couple of log switches
    try
    {
        T_TWC[] t = new T_TWC[numtrans];
        for (i = 0; i < numtrans; i++)
            t[i] = t_util.t_startTransactionWithContext();

        long[] cid = new long[numtrans];
        ContainerHandle[] c = new ContainerHandle[numtrans];

        for (i = 0; i < numtrans; i++)
        {
            cid[i] = t_util.t_addContainer(t[i], 0);
            t_util.t_commit(t[i]);
            c[i] = t_util.t_openContainer(t[i], 0, cid[i], true);
        }

        Page page[][] = new Page[numtrans][numpages];
        long pagenum[][] = new long[numtrans][numpages];

        for (i = 0; i < numtrans; i++)
        {
            for (j = 0; j < numpages; j++)
            {
                t[i].switchTransactionContext();
                page[i][j] = t_util.t_addPage(c[i]);
                pagenum[i][j] = page[i][j].getPageNumber();
                t[i].resetContext();
            }
        }

        // set up numtrans (at least 5) transactions, each with one
        // container and numpages pages. Do the following test:
        //
        // 1) insert 1 row onto each page
        //    set savepoint SP1 on first transaction (t0)
        //
        // 2) update every rows
        //    set savepoint SP1 on all other transactions
        //
        // 3) update every rows
        //    set savepoint SP2 on all transactions
        //
        // 4) update every rows
        //
        // 5) rollback t0 to SP1
        //
        //    check that only page[0][x] have been rolled back
        //    past SP2
        //
        // 6) update every row
        // 7) rollback SP2 on all transaction except the first
        //
        // 8) update every rows
        // 9) rollback t0 to SP1
        //
        // 10) leave transactions in the following state
        //     t0 - incomplete
        //     t1 - abort
        //     t2 - commit
        //     t3 - incomplete
        //     t4 - commit
        //     any other transactions - incomplete

        //////////////////////// step 1 ////////////////////////
        RecordHandle[][] rh = new RecordHandle[numtrans][numpages];
        T_RawStoreRow row1 = new T_RawStoreRow(REC_001);
        for (i = 0; i < numtrans; i++)
            for (j = 0; j < numpages; j++)
            {
                t[i].switchTransactionContext();
                rh[i][j] = t_util.t_insert(page[i][j], row1);
                t[i].resetContext();
            }

        t[0].setSavePoint(SP1, null);       // sp1

        //////////////////////// step 2 ////////////////////////
        T_RawStoreRow row2 = new T_RawStoreRow(REC_002);
        for (i = 0; i < numtrans; i++)
            for (j = 0; j < numpages; j++)
            {
                t[i].switchTransactionContext();
                page[i][j].update(rh[i][j], row2.getRow(), (FormatableBitSet) null);
                t[i].resetContext();
            }

        for (i = 1; i < numtrans; i++)      // sp1
        {
            t[i].setSavePoint(SP1, null);
        }

        ///////////////////////////////////////////
        //// log switch without checkpoint here ///
        ///////////////////////////////////////////
        // NOTE(review): with LogToFile.TEST_LOG_SWITCH_LOG set, checkpoint()
        // presumably switches the log file without writing a checkpoint
        // record — confirm against LogToFile.
        factory.checkpoint();

        //////////////////////// step 3 ////////////////////////
        T_RawStoreRow row3 = new T_RawStoreRow(REC_003);
        for (i = 0; i < numtrans; i++)
            for (j = 0; j < numpages; j++)
                page[i][j].update(rh[i][j], row3.getRow(), (FormatableBitSet) null);

        for (i = 0; i < numtrans; i++)
            t[i].setSavePoint(SP2, null);   // sp2

        //////////////////////// step 4 ////////////////////////
        T_RawStoreRow row4 = new T_RawStoreRow(REC_004);
        for (i = 0; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                page[i][j].update(rh[i][j], row4.getRow(), (FormatableBitSet) null);
            t[i].resetContext();
        }

        //////////////////////// step 5 ////////////////////////
        // unlatch relavante pages
        t[0].switchTransactionContext();
        for (j = 0; j < numpages; j++)
            page[0][j].unlatch();

        t[0].rollbackToSavePoint(SP1, null); // step 5

        // relatch relavante pages
        for (j = 0; j < numpages; j++)
            page[0][j] = t_util.t_getPage(c[0], pagenum[0][j]);
        t[0].resetContext();

        ///////////////////////////////////////////
        //// log switch without checkpoint here ///
        ///////////////////////////////////////////
        factory.checkpoint();

        //////////////////////// check ////////////////////////
        // t0 rolled back to SP1 (REC_001); everyone else still at REC_004
        for (i = 1; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                t_util.t_checkFetch(page[i][j], rh[i][j], REC_004);
            t[i].resetContext();
        }

        t[0].switchTransactionContext();
        for (j = 0; j < numpages; j++)
            t_util.t_checkFetch(page[0][j], rh[0][j], REC_001);
        t[0].resetContext();

        //////////////////////// step 6 ////////////////////////
        T_RawStoreRow row5 = new T_RawStoreRow(REC_005);
        for (i = 0; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                page[i][j].update(rh[i][j], row5.getRow(), (FormatableBitSet) null);
            t[i].resetContext();
        }

        //////////////////////// step 7 ////////////////////////
        // roll back all transactions except t0 to SP2; pages must be
        // unlatched across the rollback and re-fetched afterwards
        for (i = 1; i < numtrans; i++)
        {
            t[i].switchTransactionContext();

            for (j = 0; j < numpages; j++)
                page[i][j].unlatch();

            t[i].rollbackToSavePoint(SP2, null);

            for (j = 0; j < numpages; j++)
                page[i][j] = t_util.t_getPage(c[i],pagenum[i][j]);
            t[i].resetContext();
        }

        //////////////////////// check ////////////////////////
        for (i = 1; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                t_util.t_checkFetch(page[i][j], rh[i][j], REC_003);
            t[i].resetContext();
        }

        t[0].switchTransactionContext();
        for (j = 0; j < numpages; j++)
            t_util.t_checkFetch(page[0][j], rh[0][j], REC_005);
        t[0].resetContext();

        ///////////////////////////////////////////
        //// log switch without checkpoint here ///
        ///////////////////////////////////////////
        factory.checkpoint();

        //////////////////////// step 8 ////////////////////////
        T_RawStoreRow row6 = new T_RawStoreRow(REC_006);
        for (i = 0; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                page[i][j].update(rh[i][j], row6.getRow(), (FormatableBitSet) null); // step 8
            t[i].resetContext();
        }

        //////////////////////// step 9 ////////////////////////
        // unlatch relavante pages
        t[0].switchTransactionContext();
        for (j = 0; j < numpages; j++)
            page[0][j].unlatch();

        t[0].rollbackToSavePoint(SP1, null);

        // relatch relevant pages
        for (j = 0; j < numpages; j++)
            page[0][j] = t_util.t_getPage(c[0], pagenum[0][j]);

        t[0].resetContext();

        //////////////////////// check ////////////////////////
        for (i = 1; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
            {
                t_util.t_checkFetch(page[i][j], rh[i][j], REC_006);
                t_util.t_checkRecordCount(page[i][j], 1, 1);
            }
            t[i].resetContext();
        }

        t[0].switchTransactionContext();
        for (j = 0; j < numpages; j++)
        {
            t_util.t_checkFetch(page[0][j], rh[0][j], REC_001);
            t_util.t_checkRecordCount(page[0][j], 1, 1);
        }
        t[0].resetContext();

        //////////////////////// step 10 ////////////////////////
        // unlatch all pages
        for (i = 0; i < numtrans; i++)
        {
            t[i].switchTransactionContext();
            for (j = 0; j < numpages; j++)
                page[i][j].unlatch();
            t[i].resetContext();
        }

        // t[0] incomplete
        t_util.t_abort(t[1]);
        t_util.t_commit(t[2]);
        // t[3] incomplete
        t_util.t_commit(t[4]);

        // reopen containers 1, 2, and 4, where were closed when the
        // transaction terminated.
        c[1] = t_util.t_openContainer(t[1], 0, cid[1], false);
        c[2] = t_util.t_openContainer(t[2], 0, cid[2], false);
        c[4] = t_util.t_openContainer(t[4], 0, cid[4], false);

        //////////////////////// check ////////////////////////
        for (j = 0; j < numpages; j++)
        {
            t[0].switchTransactionContext();
            t_util.t_checkFetch(c[0], rh[0][j], REC_001);
            t[0].resetContext();

            // t[1] has been aborted
            // rh[1][j] (REC_001) is deleted
            t[1].switchTransactionContext();
            page[1][j] = t_util.t_getPage(c[1], pagenum[1][j]);
            t_util.t_checkRecordCount(page[1][j], 1, 0);
            t_util.t_checkFetchBySlot(page[1][j], Page.FIRST_SLOT_NUMBER,
                                      REC_001, true, false);
            page[1][j].unlatch();
            t[1].resetContext();

            t[2].switchTransactionContext();
            t_util.t_checkFetch(c[2], rh[2][j], REC_006);
            t[2].resetContext();

            t[3].switchTransactionContext();
            t_util.t_checkFetch(c[3], rh[3][j], REC_006);
            t[3].resetContext();

            t[4].switchTransactionContext();
            t_util.t_checkFetch(c[4], rh[4][j], REC_006);
            t[4].resetContext();
        }

        ///////////////////////////////////////////////////////////
        //// now write a 1/2 log record to the end of the log
        //////////////////////////////////////////////////////////
        t[3].switchTransactionContext(); // this is going to be an
                                         // incomplete transaction

        // make a full page and then copy and purge it to another page
        Page badPage1 = t_util.t_addPage(c[3]);
        Page badPage2 = t_util.t_addPage(c[3]);
        T_RawStoreRow row;
        for (i = 0, row = new T_RawStoreRow("row at slot " + i);
             badPage1.spaceForInsert();
             i++, row = new T_RawStoreRow("row at slot " + i))
        {
            if (t_util.t_insertAtSlot(badPage1, i, row,
                                      Page.INSERT_UNDO_WITH_PURGE) == null)
                break;
        }

        //////////////////////////////////////////////////////////
        // writing 200 bytes of the log record to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES, "200");

        // RESOLVE:
        // copy and purge actually generates 2 log records, this is
        // actually not a good operation to use for this test. Just make
        // sure the first log record is > 400 or else the log will be hosed
        //
        badPage1.copyAndPurge(badPage2, 0, i, 0);
        t[3].resetContext();

        ////////////////////////////////////////////////////////

        REPORT("badlog test3: numtrans " + numtrans + " numpages " + numpages);
        // record container ids and page numbers so the recovery-side
        // RTest can find and verify them after restart
        for (i = 0; i < numtrans; i++)
        {
            register(key(3, i+10), cid[i]);
            String str = "container " + i + ":" + find(key(3,i+10)) + " pages: ";
            for (j = 0; j < numpages; j++)
            {
                str += pagenum[i][j] + " ";
                register(key(3, (i+1)*1000+j), pagenum[i][j]);
            }
            REPORT("\t" + str);
        }

        register(key(3,1), numtrans);
        register(key(3,2), numpages);
        register(key(3,3), badPage1.getPageNumber());
        register(key(3,4), badPage2.getPageNumber());
    }
    finally
    {
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
// Arrange for only the first 11 bytes of the next log record to be
// written (4-byte length + 7(8) bytes of the log instant).
// NOTE(review): fragment of a larger test method — confirm placement in context.
SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                           Integer.toString(11));
// Only force the partial (11-byte) log write for the plain bad-log
// variant; the checksum variant corrupts the log file directly instead.
// NOTE(review): fragment of a larger test method — confirm placement in context.
if(!checksumTest)
{
    SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                               Integer.toString(11));
}
/**
 * Simulated-crash setup test 4: build a row filling ~80% of a page,
 * then update it while only 11 bytes of the resulting (large) log
 * record reach disk — 4 bytes of length plus 7(8) bytes of log
 * instant — so recovery sees a torn record at the end of the log.
 *
 * @exception T_Fail test failure
 * @exception StandardException store error
 */
protected void STest4() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();
    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                // row too big for the page: drop the last column and retry
                bigrow.setColumn(--numcol, (String) null);
            }
        }
        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        Page p2 = t_util.t_addPage(c); // do something so we get the beginXact log
                                       // record out of the way
        t_util.t_insert(p2, new T_RawStoreRow(REC_001));

        //////////////////////////////////////////////////////////
        // writing approx 1/2 of log record instance to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        // Length 4 bytes + 7(8) bytes of log record instance
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                                   Integer.toString(11));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test4: cid = " + cid + " numcol " + numcol);

        // record what the recovery-side check needs to verify
        register(key(4,1), cid);
        register(key(4,2), numcol);
    }
    finally
    {
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
// Checksum variant: corrupt the log file directly instead of relying on a
// simulated partial log write.
// NOTE(review): fragment of a larger test method — confirm placement in context.
if(checksumTest)
    simulateLogFileCorruption();
/**
 * Simulated-crash setup test 4: build a row filling ~80% of a page,
 * then update it while only 11 bytes of the resulting (large) log
 * record reach disk — 4 bytes of length plus 7(8) bytes of log
 * instant — so recovery sees a torn record at the end of the log.
 *
 * @exception T_Fail test failure
 * @exception StandardException store error
 */
protected void STest4() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();
    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                // row too big for the page: drop the last column and retry
                bigrow.setColumn(--numcol, (String) null);
            }
        }
        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        Page p2 = t_util.t_addPage(c); // do something so we get the beginXact log
                                       // record out of the way
        t_util.t_insert(p2, new T_RawStoreRow(REC_001));

        //////////////////////////////////////////////////////////
        // writing approx 1/2 of log record instance to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        // Length 4 bytes + 7(8) bytes of log record instance
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                                   Integer.toString(11));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test4: cid = " + cid + " numcol " + numcol);

        // record what the recovery-side check needs to verify
        register(key(4,1), cid);
        register(key(4,2), numcol);
    }
    finally
    {
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
// Arrange for only the first 3 bytes of the next log record (part of its
// 4-byte length field) to be written.
// NOTE(review): fragment of a larger test method — confirm placement in context.
SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                           Integer.toString(3));
// Non-checksum variant: allow only 3 bytes of the next log record to
// reach disk; then flush outstanding log so the partial write happens now.
// NOTE(review): fragment of a larger test method — confirm placement in context.
if(!checksumTest)
{
    SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                               Integer.toString(3));
}
logFactory.flushAll();
/**
 * Simulated-crash setup test 5: same setup as test 4, but only the
 * first 3 bytes of the update's log record (part of the 4-byte length
 * field) reach disk, leaving an even shorter torn tail for recovery.
 *
 * @exception T_Fail test failure
 * @exception StandardException store error
 */
protected void STest5() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();
    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                // row too big for the page: drop the last column and retry
                bigrow.setColumn(--numcol, (String) null);
            }
        }
        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        Page p2 = t_util.t_addPage(c); // do something so we get the beginXact log
                                       // record out of the way
        t_util.t_insert(p2, new T_RawStoreRow(REC_001));

        //////////////////////////////////////////////////////////
        // writing approx 3 bytes of log record to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        // Length 3 bytes (4) of log record length
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                                   Integer.toString(3));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test5: cid = " + cid + " numcol " + numcol);

        // record what the recovery-side check needs to verify
        register(key(5,1), cid);
        register(key(5,2), numcol);
    }
    finally
    {
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
// Checksum variant: corrupt the log file directly instead of relying on a
// simulated partial log write.
// NOTE(review): fragment of a larger test method — confirm placement in context.
if(checksumTest)
    simulateLogFileCorruption();
/**
 * Simulated-crash setup test 5: same setup as test 4, but only the
 * first 3 bytes of the update's log record (part of the 4-byte length
 * field) reach disk, leaving an even shorter torn tail for recovery.
 *
 * @exception T_Fail test failure
 * @exception StandardException store error
 */
protected void STest5() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();
    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                // row too big for the page: drop the last column and retry
                bigrow.setColumn(--numcol, (String) null);
            }
        }
        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        Page p2 = t_util.t_addPage(c); // do something so we get the beginXact log
                                       // record out of the way
        t_util.t_insert(p2, new T_RawStoreRow(REC_001));

        //////////////////////////////////////////////////////////
        // writing approx 3 bytes of log record to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        // Length 3 bytes (4) of log record length
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                                   Integer.toString(3));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test5: cid = " + cid + " numcol " + numcol);

        // record what the recovery-side check needs to verify
        register(key(5,1), cid);
        register(key(5,2), numcol);
    }
    finally
    {
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
// Arrange for roughly half the log record data plus the 16 bytes of
// per-record overhead to be written before the simulated crash.
// NOTE(review): fragment of a larger test method — confirm placement in context.
SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                           Integer.toString((1997/2) + 16));
// Non-checksum variant: allow half the record data plus the 16-byte
// record overhead to reach disk, flush the log, then perform the update
// whose log record will be torn.
// NOTE(review): fragment of a larger test method — confirm placement in context.
if(!checksumTest)
{
    SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                               Integer.toString((1997/2) + 16));
}
logFactory.flushAll();
page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
/**
 * Simulated-crash setup test 6: same setup as test 4, but roughly half
 * of the log record data (1997/2 bytes) plus the 16 bytes of log-record
 * overhead reach disk before the simulated crash.
 *
 * @exception T_Fail test failure
 * @exception StandardException store error
 */
protected void STest6() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();
    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                // row too big for the page: drop the last column and retry
                bigrow.setColumn(--numcol, (String) null);
            }
        }
        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        Page p2 = t_util.t_addPage(c); // do something so we get the beginXact log
                                       // record out of the way
        t_util.t_insert(p2, new T_RawStoreRow(REC_001));

        //////////////////////////////////////////////////////////
        // writing (1997/2 (data)+ 16(log records ov)) bytes of log record to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                                   Integer.toString((1997/2) + 16));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test6: cid = " + cid + " numcol " + numcol);

        // record what the recovery-side check needs to verify
        register(key(6,1), cid);
        register(key(6,2), numcol);
    }
    finally
    {
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
// Perform the big-row update whose log record is (intentionally) torn.
// NOTE(review): fragment of a larger test method — confirm placement in context.
page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
// Checksum variant: corrupt the log file directly instead of relying on a
// simulated partial log write.
// NOTE(review): fragment of a larger test method — confirm placement in context.
if(checksumTest)
    simulateLogFileCorruption();
/**
 * Simulated-crash setup test 6: same setup as test 4, but roughly half
 * of the log record data (1997/2 bytes) plus the 16 bytes of log-record
 * overhead reach disk before the simulated crash.
 *
 * @exception T_Fail test failure
 * @exception StandardException store error
 */
protected void STest6() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();
    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                // row too big for the page: drop the last column and retry
                bigrow.setColumn(--numcol, (String) null);
            }
        }
        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        Page p2 = t_util.t_addPage(c); // do something so we get the beginXact log
                                       // record out of the way
        t_util.t_insert(p2, new T_RawStoreRow(REC_001));

        //////////////////////////////////////////////////////////
        // writing (1997/2 (data)+ 16(log records ov)) bytes of log record to the end of the log -
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                                   Integer.toString((1997/2) + 16));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test6: cid = " + cid + " numcol " + numcol);

        // record what the recovery-side check needs to verify
        register(key(6,1), cid);
        register(key(6,2), numcol);
    }
    finally
    {
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
// Arrange for all but the last byte of the log record (1997 data bytes +
// 15 of the 16 overhead bytes) to be written — the trailing length field
// is truncated.
// NOTE(review): fragment of a larger test method — confirm placement in context.
SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                           Integer.toString(1997+15));
// Non-checksum variant: truncate the trailing length field of the next
// log record (write 1997+15 of 1997+16 bytes), flush the log, then
// perform the update whose record will be torn.
// NOTE(review): fragment of a larger test method — confirm placement in context.
if(!checksumTest)
{
    SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                               Integer.toString(1997+15));
}
logFactory.flushAll();
page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
/**
 * Simulated-crash setup test 7: same setup as test 4, but the log record
 * is written almost completely — (1997 + 15) of the (1997 + 16) bytes —
 * so only part of the trailing length field is missing.
 *
 * @exception T_Fail test failure
 * @exception StandardException store error
 */
protected void STest7() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();
    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                // row too big for the page: drop the last column and retry
                bigrow.setColumn(--numcol, (String) null);
            }
        }
        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        Page p2 = t_util.t_addPage(c); // do something so we get the beginXact log
                                       // record out of the way
        t_util.t_insert(p2, new T_RawStoreRow(REC_001));

        //////////////////////////////////////////////////////////
        // writing only 3 bytes of end length of the log record to the end of the log -
        //i.e: instead of (1997(data) + 16 (log records overhead)) write (1997 + 15)
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                                   Integer.toString(1997+15));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test7: cid = " + cid + " numcol " + numcol);

        // record what the recovery-side check needs to verify
        register(key(7,1), cid);
        register(key(7,2), numcol);
    }
    finally
    {
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
// Perform the big-row update whose log record is (intentionally) torn.
// NOTE(review): fragment of a larger test method — confirm placement in context.
page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
// Checksum variant: corrupt the log file directly instead of relying on a
// simulated partial log write.
// NOTE(review): fragment of a larger test method — confirm placement in context.
if(checksumTest)
    simulateLogFileCorruption();
/**
 * Simulated-crash setup test 7: same setup as test 4, but the log record
 * is written almost completely — (1997 + 15) of the (1997 + 16) bytes —
 * so only part of the trailing length field is missing.
 *
 * @exception T_Fail test failure
 * @exception StandardException store error
 */
protected void STest7() throws T_Fail, StandardException
{
    Transaction t = t_util.t_startTransaction();
    try
    {
        long cid = t_util.t_addContainer(t, 0);
        ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
        Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        // make a really big record - fill 80% of the page
        int numcol = (int)((RawStoreFactory.PAGE_SIZE_MINIMUM*8)/(10*20));
        T_RawStoreRow bigrow = new T_RawStoreRow(numcol);
        String string1 = "01234567890123456789"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string1);

        // if overhead is > 80%, then reduce the row size until it fits
        RecordHandle rh = null;
        while(numcol > 0)
        {
            try
            {
                rh = t_util.t_insert(page, bigrow);
                break;
            }
            catch (StandardException se)
            {
                // row too big for the page: drop the last column and retry
                bigrow.setColumn(--numcol, (String) null);
            }
        }
        if (numcol == 0)
            throw T_Fail.testFailMsg("cannot fit any column into the page");

        t_util.t_commit(t);

        // make a big log record - update row
        String string2 = "abcdefghijklmnopqrst"; // 20 char string
        for (int i = 0; i < numcol; i++)
            bigrow.setColumn(i, string2);

        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);

        Page p2 = t_util.t_addPage(c); // do something so we get the beginXact log
                                       // record out of the way
        t_util.t_insert(p2, new T_RawStoreRow(REC_001));

        //////////////////////////////////////////////////////////
        // writing only 3 bytes of end length of the log record to the end of the log -
        //i.e: instead of (1997(data) + 16 (log records overhead)) write (1997 + 15)
        // NO MORE LOG RECORD SHOULD BE WRITTEN,
        //////////////////////////////////////////////////////////
        SanityManager.DEBUG_SET(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
        System.getProperties().put(LogToFile.TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES,
                                   Integer.toString(1997+15));

        page.update(rh, bigrow.getRow(), (FormatableBitSet) null);
        ////////////////////////////////////////////////////////

        REPORT("badlog test7: cid = " + cid + " numcol " + numcol);

        // record what the recovery-side check needs to verify
        register(key(7,1), cid);
        register(key(7,2), numcol);
    }
    finally
    {
        SanityManager.DEBUG_CLEAR(LogToFile.TEST_LOG_INCOMPLETE_LOG_WRITE);
    }
}
// Read the checksum-test flag; when set, switch the info file and the
// service name so the checksum variant runs against its own database.
// NOTE(review): fragment of getConfig() — confirm placement in context.
param = PropertyUtil.getSystemProperty(TEST_BAD_CHECKSUM_LOG);
checksumTest = Boolean.valueOf(param).booleanValue();
if(checksumTest)
{
    infoPath = "extinout/T_RecoverBadChecksumLog.info";
    testService = "BadChecksumLogTest";
}
/**
 * Read the test-selection flags from system properties.
 * Each flag defaults to false when its property is unset or is not the
 * string "true" (Boolean.valueOf semantics).
 */
private void getConfig()
{
    setup = getBooleanSystemProperty(TEST_BADLOG_SETUP);
    test1 = getBooleanSystemProperty(TEST_BADLOG1);
    test2 = getBooleanSystemProperty(TEST_BADLOG2);
    test3 = getBooleanSystemProperty(TEST_BADLOG3);
    test4 = getBooleanSystemProperty(TEST_BADLOG4);
    test5 = getBooleanSystemProperty(TEST_BADLOG5);
    test6 = getBooleanSystemProperty(TEST_BADLOG6);
    test7 = getBooleanSystemProperty(TEST_BADLOG7);
}

/**
 * Read a boolean-valued system property.
 * A null or non-"true" value yields false, matching the original
 * Boolean.valueOf(param).booleanValue() behavior.
 *
 * @param name the system property name
 * @return true iff the property value is (case-insensitively) "true"
 */
private static boolean getBooleanSystemProperty(String name)
{
    return Boolean.valueOf(PropertyUtil.getSystemProperty(name)).booleanValue();
}
// Announce which bad-log scenario is being recovered.
// NOTE(review): fragment — confirm placement in context.
REPORT("\n\t\tRunning bad log test 1");
// Announce which scenario is being recovered; `message` is presumably a
// variant-specific prefix built by the caller — TODO confirm in context.
REPORT(message + " 1");
/**
 * Driver for the bad-log recovery test suite.
 *
 * Validates that exactly one phase was selected (setup or one of the seven
 * corruption tests), then either creates a fresh test database (setup) or
 * boots the previously-corrupted one so that recovery runs.  After boot it
 * executes the recovery checks RTest1..RTest7 (each is a no-op unless its
 * phase ran earlier) and then the single selected setup test STestN, which
 * corrupts the log for the next run.
 *
 * @exception T_Fail  configuration error, service-boot failure, or a
 *                    wrapped StandardException/IOException from a test
 */
public void runTests() throws T_Fail {

	getConfig();

	// Exactly one phase per JVM run: each run either creates the database
	// or recovers it once; mixing phases would invalidate the corruption.
	int tests = 0;
	if (setup) tests++;
	if (test1) tests++;
	if (test2) tests++;
	if (test3) tests++;
	if (test4) tests++;
	if (test5) tests++;
	if (test6) tests++;
	if (test7) tests++;
	if (tests != 1)
		throw T_Fail.testFailMsg("One & only one of the bad log recovery test should be run");

	if (!SanityManager.DEBUG)
	{
		// the log-corruption hooks are debug-build flags; nothing can be
		// tested on an insane server
		REPORT("recoverBadLog cannot be run on an insane server");
		return;
	}

	try {
		contextService = ContextService.getFactory();

		File ifile = new File(infoPath);

		//
		// no checkpoint log record in any of the log files - unless this value
		// is reset.  LogToFile.TEST_LOG_SWITCH_LOG
		// this will cause recovery to switch log without checkpointing
		//
		SanityManager.DEBUG_SET(LogToFile.TEST_LOG_SWITCH_LOG);

		// don't want background checkpoint process to be running
		SanityManager.DEBUG_SET(DaemonService.DaemonOff);

		// see if we are testing encryption
		startParams = T_Util.setEncryptionParam(startParams);

		if (setup)				// the first test cleans up and start from fresh
		{
			// remove the service directory to ensure a clean run
			REPORT("_______________________________________________________");
			REPORT("\n\t\tcleaning up database for recovering from bad logs");
			REPORT("_______________________________________________________");

			// don't automatic boot this service if it gets left around
			if (startParams == null)
				startParams = new Properties();
			startParams.put(Property.NO_AUTO_BOOT, Boolean.TRUE.toString());

			// remove the service directory to ensure a clean run
			startParams.put(Property.DELETE_ON_CREATE, Boolean.TRUE.toString());

			factory = (RawStoreFactory) Monitor.createPersistentService(getModuleToTestProtocolName(), testService, startParams);
			// create a database with nothing

			// delete the info file so stale expectations from a previous
			// suite run cannot leak into this one
			if (ifile.exists())
				ifile.delete();

			return;				// don't run anything now
		}
		else					// not setup, recover it
		{
			REPORT("_______________________________________________________");

			if (test1)
				REPORT("\n\t\tRunning bad log test 1");
			if (test2)
				REPORT("\n\t\tRunning bad log test 2");
			if (test3)
				REPORT("\n\t\tRunning bad log test 3");
			if (test4)
				REPORT("\n\t\tRunning bad log test 4");
			if (test5)
				REPORT("\n\t\tRunning bad log test 5");
			if (test6)
				REPORT("\n\t\tRunning bad log test 6");
			if (test7)
				REPORT("\n\t\tRunning bad log test 7");

			REPORT("_______________________________________________________");

			//if external input output files does not exist ,create one
			File ifdir = new File("extinout");
			if(!ifdir.exists())
				ifdir.mkdirs();

			try
			{
				// make sure it does exist
				infofile = new RandomAccessFile(ifile, "rw");
			}
			catch (IOException ioe)
			{
				System.out.println("Cannot write to temporary file " +
								   infoPath +
								   ". Please make sure it is correct, if not, please set the property " +
								   "TestBadLogInfo=<where temp files should go>");
				throw T_Fail.exceptionFail(ioe);
			}

			// booting the corrupted service is what exercises recovery
			if (!Monitor.startPersistentService(testService, startParams))
				throw T_Fail.testFailMsg("Monitor didn't know how to restart service: " + testService);
			factory = (RawStoreFactory) Monitor.findService(getModuleToTestProtocolName(), testService);
		}
	} catch (StandardException mse) {
		throw T_Fail.exceptionFail(mse);
	}

	if (factory == null)
	{
		throw T_Fail.testFailMsg(getModuleToTestProtocolName() + " service not started.");
	}

	lf = factory.getLockFactory();
	if (lf == null)
	{
		throw T_Fail.testFailMsg("LockFactory.MODULE not found");
	}

	// get a utility helper
	t_util = new T_Util(factory, lf, contextService);

	try {

		// these tests can be run in any order: each RTestN checks whether
		// its phase's info-file entries exist and is a no-op otherwise
		RTest1();
		RTest2();
		RTest3();
		RTest4();
		RTest5();
		RTest6();
		RTest7();

		// the selected STestN corrupts the log for the NEXT run
		if (test1)
			STest1();
		if (test2)
			STest2();
		if (test3)
			STest3();
		if (test4)
			STest4();
		if(test5)
			STest5();
		if(test6)
			STest6();
		if(test7)
			STest7();

		if (infofile != null)
			infofile.close();

	} catch (StandardException se) {
		throw T_Fail.exceptionFail(se);
	} catch (IOException ioe) {
		throw T_Fail.exceptionFail(ioe);
	}
}
REPORT("\n\t\tRunning bad log test 2");
REPORT(message + " 2");
public void runTests() throws T_Fail { getConfig(); int tests = 0; if (setup) tests++; if (test1) tests++; if (test2) tests++; if (test3) tests++; if (test4) tests++; if (test5) tests++; if (test6) tests++; if (test7) tests++; if (tests != 1) throw T_Fail.testFailMsg("One & only one of the bad log recovery test should be run"); if (!SanityManager.DEBUG) { REPORT("recoverBadLog cannot be run on an insane server"); return; } try { contextService = ContextService.getFactory(); File ifile = new File(infoPath); // // no checkpoint log record in any of the log files - unless this value // is reset. LogToFile.TEST_LOG_SWITCH_LOG // this will cause recovery to switch log without checkpointing // SanityManager.DEBUG_SET(LogToFile.TEST_LOG_SWITCH_LOG); // don't want background checkpoint process to be running SanityManager.DEBUG_SET(DaemonService.DaemonOff); // see if we are testing encryption startParams = T_Util.setEncryptionParam(startParams); if (setup) // the first test cleans up and start from fresh { // remove the service directory to ensure a clean run REPORT("_______________________________________________________"); REPORT("\n\t\tcleaning up database for recovering from bad logs"); REPORT("_______________________________________________________"); // don't automatic boot this service if it gets left around if (startParams == null) startParams = new Properties(); startParams.put(Property.NO_AUTO_BOOT, Boolean.TRUE.toString()); // remove the service directory to ensure a clean run startParams.put(Property.DELETE_ON_CREATE, Boolean.TRUE.toString()); factory = (RawStoreFactory) Monitor.createPersistentService(getModuleToTestProtocolName(), testService, startParams); // create a database with nothing // delete the info file if (ifile.exists()) ifile.delete(); return; // don't run anything now } else // not setup, recover it { REPORT("_______________________________________________________"); if (test1) REPORT("\n\t\tRunning bad log test 1"); if (test2) 
REPORT("\n\t\tRunning bad log test 2"); if (test3) REPORT("\n\t\tRunning bad log test 3"); if (test4) REPORT("\n\t\tRunning bad log test 4"); if (test5) REPORT("\n\t\tRunning bad log test 5"); if (test6) REPORT("\n\t\tRunning bad log test 6"); if (test7) REPORT("\n\t\tRunning bad log test 7"); REPORT("_______________________________________________________"); //if external input output files does not exist ,create one File ifdir = new File("extinout"); if(!ifdir.exists()) ifdir.mkdirs(); try { // make sure it does exist infofile = new RandomAccessFile(ifile, "rw"); } catch (IOException ioe) { System.out.println("Cannot write to temporary file " + infoPath + ". Please make sure it is correct, if not, please set the property " + "TestBadLogInfo=<where temp files should go>"); throw T_Fail.exceptionFail(ioe); } if (!Monitor.startPersistentService(testService, startParams)) throw T_Fail.testFailMsg("Monitor didn't know how to restart service: " + testService); factory = (RawStoreFactory) Monitor.findService(getModuleToTestProtocolName(), testService); } } catch (StandardException mse) { throw T_Fail.exceptionFail(mse); } if (factory == null) { throw T_Fail.testFailMsg(getModuleToTestProtocolName() + " service not started."); } lf = factory.getLockFactory(); if (lf == null) { throw T_Fail.testFailMsg("LockFactory.MODULE not found"); } // get a utility helper t_util = new T_Util(factory, lf, contextService); try { // these tests can be run in any order RTest1(); RTest2(); RTest3(); RTest4(); RTest5(); RTest6(); RTest7(); if (test1) STest1(); if (test2) STest2(); if (test3) STest3(); if (test4) STest4(); if(test5) STest5(); if(test6) STest6(); if(test7) STest7(); if (infofile != null) infofile.close(); } catch (StandardException se) { throw T_Fail.exceptionFail(se); } catch (IOException ioe) { throw T_Fail.exceptionFail(ioe); } }
REPORT("\n\t\tRunning bad log test 3");
REPORT(message + " 3");
public void runTests() throws T_Fail { getConfig(); int tests = 0; if (setup) tests++; if (test1) tests++; if (test2) tests++; if (test3) tests++; if (test4) tests++; if (test5) tests++; if (test6) tests++; if (test7) tests++; if (tests != 1) throw T_Fail.testFailMsg("One & only one of the bad log recovery test should be run"); if (!SanityManager.DEBUG) { REPORT("recoverBadLog cannot be run on an insane server"); return; } try { contextService = ContextService.getFactory(); File ifile = new File(infoPath); // // no checkpoint log record in any of the log files - unless this value // is reset. LogToFile.TEST_LOG_SWITCH_LOG // this will cause recovery to switch log without checkpointing // SanityManager.DEBUG_SET(LogToFile.TEST_LOG_SWITCH_LOG); // don't want background checkpoint process to be running SanityManager.DEBUG_SET(DaemonService.DaemonOff); // see if we are testing encryption startParams = T_Util.setEncryptionParam(startParams); if (setup) // the first test cleans up and start from fresh { // remove the service directory to ensure a clean run REPORT("_______________________________________________________"); REPORT("\n\t\tcleaning up database for recovering from bad logs"); REPORT("_______________________________________________________"); // don't automatic boot this service if it gets left around if (startParams == null) startParams = new Properties(); startParams.put(Property.NO_AUTO_BOOT, Boolean.TRUE.toString()); // remove the service directory to ensure a clean run startParams.put(Property.DELETE_ON_CREATE, Boolean.TRUE.toString()); factory = (RawStoreFactory) Monitor.createPersistentService(getModuleToTestProtocolName(), testService, startParams); // create a database with nothing // delete the info file if (ifile.exists()) ifile.delete(); return; // don't run anything now } else // not setup, recover it { REPORT("_______________________________________________________"); if (test1) REPORT("\n\t\tRunning bad log test 1"); if (test2) 
REPORT("\n\t\tRunning bad log test 2"); if (test3) REPORT("\n\t\tRunning bad log test 3"); if (test4) REPORT("\n\t\tRunning bad log test 4"); if (test5) REPORT("\n\t\tRunning bad log test 5"); if (test6) REPORT("\n\t\tRunning bad log test 6"); if (test7) REPORT("\n\t\tRunning bad log test 7"); REPORT("_______________________________________________________"); //if external input output files does not exist ,create one File ifdir = new File("extinout"); if(!ifdir.exists()) ifdir.mkdirs(); try { // make sure it does exist infofile = new RandomAccessFile(ifile, "rw"); } catch (IOException ioe) { System.out.println("Cannot write to temporary file " + infoPath + ". Please make sure it is correct, if not, please set the property " + "TestBadLogInfo=<where temp files should go>"); throw T_Fail.exceptionFail(ioe); } if (!Monitor.startPersistentService(testService, startParams)) throw T_Fail.testFailMsg("Monitor didn't know how to restart service: " + testService); factory = (RawStoreFactory) Monitor.findService(getModuleToTestProtocolName(), testService); } } catch (StandardException mse) { throw T_Fail.exceptionFail(mse); } if (factory == null) { throw T_Fail.testFailMsg(getModuleToTestProtocolName() + " service not started."); } lf = factory.getLockFactory(); if (lf == null) { throw T_Fail.testFailMsg("LockFactory.MODULE not found"); } // get a utility helper t_util = new T_Util(factory, lf, contextService); try { // these tests can be run in any order RTest1(); RTest2(); RTest3(); RTest4(); RTest5(); RTest6(); RTest7(); if (test1) STest1(); if (test2) STest2(); if (test3) STest3(); if (test4) STest4(); if(test5) STest5(); if(test6) STest6(); if(test7) STest7(); if (infofile != null) infofile.close(); } catch (StandardException se) { throw T_Fail.exceptionFail(se); } catch (IOException ioe) { throw T_Fail.exceptionFail(ioe); } }
REPORT("\n\t\tRunning bad log test 4");
REPORT(message + " 4");
public void runTests() throws T_Fail { getConfig(); int tests = 0; if (setup) tests++; if (test1) tests++; if (test2) tests++; if (test3) tests++; if (test4) tests++; if (test5) tests++; if (test6) tests++; if (test7) tests++; if (tests != 1) throw T_Fail.testFailMsg("One & only one of the bad log recovery test should be run"); if (!SanityManager.DEBUG) { REPORT("recoverBadLog cannot be run on an insane server"); return; } try { contextService = ContextService.getFactory(); File ifile = new File(infoPath); // // no checkpoint log record in any of the log files - unless this value // is reset. LogToFile.TEST_LOG_SWITCH_LOG // this will cause recovery to switch log without checkpointing // SanityManager.DEBUG_SET(LogToFile.TEST_LOG_SWITCH_LOG); // don't want background checkpoint process to be running SanityManager.DEBUG_SET(DaemonService.DaemonOff); // see if we are testing encryption startParams = T_Util.setEncryptionParam(startParams); if (setup) // the first test cleans up and start from fresh { // remove the service directory to ensure a clean run REPORT("_______________________________________________________"); REPORT("\n\t\tcleaning up database for recovering from bad logs"); REPORT("_______________________________________________________"); // don't automatic boot this service if it gets left around if (startParams == null) startParams = new Properties(); startParams.put(Property.NO_AUTO_BOOT, Boolean.TRUE.toString()); // remove the service directory to ensure a clean run startParams.put(Property.DELETE_ON_CREATE, Boolean.TRUE.toString()); factory = (RawStoreFactory) Monitor.createPersistentService(getModuleToTestProtocolName(), testService, startParams); // create a database with nothing // delete the info file if (ifile.exists()) ifile.delete(); return; // don't run anything now } else // not setup, recover it { REPORT("_______________________________________________________"); if (test1) REPORT("\n\t\tRunning bad log test 1"); if (test2) 
REPORT("\n\t\tRunning bad log test 2"); if (test3) REPORT("\n\t\tRunning bad log test 3"); if (test4) REPORT("\n\t\tRunning bad log test 4"); if (test5) REPORT("\n\t\tRunning bad log test 5"); if (test6) REPORT("\n\t\tRunning bad log test 6"); if (test7) REPORT("\n\t\tRunning bad log test 7"); REPORT("_______________________________________________________"); //if external input output files does not exist ,create one File ifdir = new File("extinout"); if(!ifdir.exists()) ifdir.mkdirs(); try { // make sure it does exist infofile = new RandomAccessFile(ifile, "rw"); } catch (IOException ioe) { System.out.println("Cannot write to temporary file " + infoPath + ". Please make sure it is correct, if not, please set the property " + "TestBadLogInfo=<where temp files should go>"); throw T_Fail.exceptionFail(ioe); } if (!Monitor.startPersistentService(testService, startParams)) throw T_Fail.testFailMsg("Monitor didn't know how to restart service: " + testService); factory = (RawStoreFactory) Monitor.findService(getModuleToTestProtocolName(), testService); } } catch (StandardException mse) { throw T_Fail.exceptionFail(mse); } if (factory == null) { throw T_Fail.testFailMsg(getModuleToTestProtocolName() + " service not started."); } lf = factory.getLockFactory(); if (lf == null) { throw T_Fail.testFailMsg("LockFactory.MODULE not found"); } // get a utility helper t_util = new T_Util(factory, lf, contextService); try { // these tests can be run in any order RTest1(); RTest2(); RTest3(); RTest4(); RTest5(); RTest6(); RTest7(); if (test1) STest1(); if (test2) STest2(); if (test3) STest3(); if (test4) STest4(); if(test5) STest5(); if(test6) STest6(); if(test7) STest7(); if (infofile != null) infofile.close(); } catch (StandardException se) { throw T_Fail.exceptionFail(se); } catch (IOException ioe) { throw T_Fail.exceptionFail(ioe); } }
REPORT("\n\t\tRunning bad log test 5");
REPORT(message + " 5");
public void runTests() throws T_Fail { getConfig(); int tests = 0; if (setup) tests++; if (test1) tests++; if (test2) tests++; if (test3) tests++; if (test4) tests++; if (test5) tests++; if (test6) tests++; if (test7) tests++; if (tests != 1) throw T_Fail.testFailMsg("One & only one of the bad log recovery test should be run"); if (!SanityManager.DEBUG) { REPORT("recoverBadLog cannot be run on an insane server"); return; } try { contextService = ContextService.getFactory(); File ifile = new File(infoPath); // // no checkpoint log record in any of the log files - unless this value // is reset. LogToFile.TEST_LOG_SWITCH_LOG // this will cause recovery to switch log without checkpointing // SanityManager.DEBUG_SET(LogToFile.TEST_LOG_SWITCH_LOG); // don't want background checkpoint process to be running SanityManager.DEBUG_SET(DaemonService.DaemonOff); // see if we are testing encryption startParams = T_Util.setEncryptionParam(startParams); if (setup) // the first test cleans up and start from fresh { // remove the service directory to ensure a clean run REPORT("_______________________________________________________"); REPORT("\n\t\tcleaning up database for recovering from bad logs"); REPORT("_______________________________________________________"); // don't automatic boot this service if it gets left around if (startParams == null) startParams = new Properties(); startParams.put(Property.NO_AUTO_BOOT, Boolean.TRUE.toString()); // remove the service directory to ensure a clean run startParams.put(Property.DELETE_ON_CREATE, Boolean.TRUE.toString()); factory = (RawStoreFactory) Monitor.createPersistentService(getModuleToTestProtocolName(), testService, startParams); // create a database with nothing // delete the info file if (ifile.exists()) ifile.delete(); return; // don't run anything now } else // not setup, recover it { REPORT("_______________________________________________________"); if (test1) REPORT("\n\t\tRunning bad log test 1"); if (test2) 
REPORT("\n\t\tRunning bad log test 2"); if (test3) REPORT("\n\t\tRunning bad log test 3"); if (test4) REPORT("\n\t\tRunning bad log test 4"); if (test5) REPORT("\n\t\tRunning bad log test 5"); if (test6) REPORT("\n\t\tRunning bad log test 6"); if (test7) REPORT("\n\t\tRunning bad log test 7"); REPORT("_______________________________________________________"); //if external input output files does not exist ,create one File ifdir = new File("extinout"); if(!ifdir.exists()) ifdir.mkdirs(); try { // make sure it does exist infofile = new RandomAccessFile(ifile, "rw"); } catch (IOException ioe) { System.out.println("Cannot write to temporary file " + infoPath + ". Please make sure it is correct, if not, please set the property " + "TestBadLogInfo=<where temp files should go>"); throw T_Fail.exceptionFail(ioe); } if (!Monitor.startPersistentService(testService, startParams)) throw T_Fail.testFailMsg("Monitor didn't know how to restart service: " + testService); factory = (RawStoreFactory) Monitor.findService(getModuleToTestProtocolName(), testService); } } catch (StandardException mse) { throw T_Fail.exceptionFail(mse); } if (factory == null) { throw T_Fail.testFailMsg(getModuleToTestProtocolName() + " service not started."); } lf = factory.getLockFactory(); if (lf == null) { throw T_Fail.testFailMsg("LockFactory.MODULE not found"); } // get a utility helper t_util = new T_Util(factory, lf, contextService); try { // these tests can be run in any order RTest1(); RTest2(); RTest3(); RTest4(); RTest5(); RTest6(); RTest7(); if (test1) STest1(); if (test2) STest2(); if (test3) STest3(); if (test4) STest4(); if(test5) STest5(); if(test6) STest6(); if(test7) STest7(); if (infofile != null) infofile.close(); } catch (StandardException se) { throw T_Fail.exceptionFail(se); } catch (IOException ioe) { throw T_Fail.exceptionFail(ioe); } }
REPORT("\n\t\tRunning bad log test 6");
REPORT(message + " 6");
public void runTests() throws T_Fail { getConfig(); int tests = 0; if (setup) tests++; if (test1) tests++; if (test2) tests++; if (test3) tests++; if (test4) tests++; if (test5) tests++; if (test6) tests++; if (test7) tests++; if (tests != 1) throw T_Fail.testFailMsg("One & only one of the bad log recovery test should be run"); if (!SanityManager.DEBUG) { REPORT("recoverBadLog cannot be run on an insane server"); return; } try { contextService = ContextService.getFactory(); File ifile = new File(infoPath); // // no checkpoint log record in any of the log files - unless this value // is reset. LogToFile.TEST_LOG_SWITCH_LOG // this will cause recovery to switch log without checkpointing // SanityManager.DEBUG_SET(LogToFile.TEST_LOG_SWITCH_LOG); // don't want background checkpoint process to be running SanityManager.DEBUG_SET(DaemonService.DaemonOff); // see if we are testing encryption startParams = T_Util.setEncryptionParam(startParams); if (setup) // the first test cleans up and start from fresh { // remove the service directory to ensure a clean run REPORT("_______________________________________________________"); REPORT("\n\t\tcleaning up database for recovering from bad logs"); REPORT("_______________________________________________________"); // don't automatic boot this service if it gets left around if (startParams == null) startParams = new Properties(); startParams.put(Property.NO_AUTO_BOOT, Boolean.TRUE.toString()); // remove the service directory to ensure a clean run startParams.put(Property.DELETE_ON_CREATE, Boolean.TRUE.toString()); factory = (RawStoreFactory) Monitor.createPersistentService(getModuleToTestProtocolName(), testService, startParams); // create a database with nothing // delete the info file if (ifile.exists()) ifile.delete(); return; // don't run anything now } else // not setup, recover it { REPORT("_______________________________________________________"); if (test1) REPORT("\n\t\tRunning bad log test 1"); if (test2) 
REPORT("\n\t\tRunning bad log test 2"); if (test3) REPORT("\n\t\tRunning bad log test 3"); if (test4) REPORT("\n\t\tRunning bad log test 4"); if (test5) REPORT("\n\t\tRunning bad log test 5"); if (test6) REPORT("\n\t\tRunning bad log test 6"); if (test7) REPORT("\n\t\tRunning bad log test 7"); REPORT("_______________________________________________________"); //if external input output files does not exist ,create one File ifdir = new File("extinout"); if(!ifdir.exists()) ifdir.mkdirs(); try { // make sure it does exist infofile = new RandomAccessFile(ifile, "rw"); } catch (IOException ioe) { System.out.println("Cannot write to temporary file " + infoPath + ". Please make sure it is correct, if not, please set the property " + "TestBadLogInfo=<where temp files should go>"); throw T_Fail.exceptionFail(ioe); } if (!Monitor.startPersistentService(testService, startParams)) throw T_Fail.testFailMsg("Monitor didn't know how to restart service: " + testService); factory = (RawStoreFactory) Monitor.findService(getModuleToTestProtocolName(), testService); } } catch (StandardException mse) { throw T_Fail.exceptionFail(mse); } if (factory == null) { throw T_Fail.testFailMsg(getModuleToTestProtocolName() + " service not started."); } lf = factory.getLockFactory(); if (lf == null) { throw T_Fail.testFailMsg("LockFactory.MODULE not found"); } // get a utility helper t_util = new T_Util(factory, lf, contextService); try { // these tests can be run in any order RTest1(); RTest2(); RTest3(); RTest4(); RTest5(); RTest6(); RTest7(); if (test1) STest1(); if (test2) STest2(); if (test3) STest3(); if (test4) STest4(); if(test5) STest5(); if(test6) STest6(); if(test7) STest7(); if (infofile != null) infofile.close(); } catch (StandardException se) { throw T_Fail.exceptionFail(se); } catch (IOException ioe) { throw T_Fail.exceptionFail(ioe); } }
REPORT("\n\t\tRunning bad log test 7");
REPORT(message + " 7");
public void runTests() throws T_Fail { getConfig(); int tests = 0; if (setup) tests++; if (test1) tests++; if (test2) tests++; if (test3) tests++; if (test4) tests++; if (test5) tests++; if (test6) tests++; if (test7) tests++; if (tests != 1) throw T_Fail.testFailMsg("One & only one of the bad log recovery test should be run"); if (!SanityManager.DEBUG) { REPORT("recoverBadLog cannot be run on an insane server"); return; } try { contextService = ContextService.getFactory(); File ifile = new File(infoPath); // // no checkpoint log record in any of the log files - unless this value // is reset. LogToFile.TEST_LOG_SWITCH_LOG // this will cause recovery to switch log without checkpointing // SanityManager.DEBUG_SET(LogToFile.TEST_LOG_SWITCH_LOG); // don't want background checkpoint process to be running SanityManager.DEBUG_SET(DaemonService.DaemonOff); // see if we are testing encryption startParams = T_Util.setEncryptionParam(startParams); if (setup) // the first test cleans up and start from fresh { // remove the service directory to ensure a clean run REPORT("_______________________________________________________"); REPORT("\n\t\tcleaning up database for recovering from bad logs"); REPORT("_______________________________________________________"); // don't automatic boot this service if it gets left around if (startParams == null) startParams = new Properties(); startParams.put(Property.NO_AUTO_BOOT, Boolean.TRUE.toString()); // remove the service directory to ensure a clean run startParams.put(Property.DELETE_ON_CREATE, Boolean.TRUE.toString()); factory = (RawStoreFactory) Monitor.createPersistentService(getModuleToTestProtocolName(), testService, startParams); // create a database with nothing // delete the info file if (ifile.exists()) ifile.delete(); return; // don't run anything now } else // not setup, recover it { REPORT("_______________________________________________________"); if (test1) REPORT("\n\t\tRunning bad log test 1"); if (test2) 
REPORT("\n\t\tRunning bad log test 2"); if (test3) REPORT("\n\t\tRunning bad log test 3"); if (test4) REPORT("\n\t\tRunning bad log test 4"); if (test5) REPORT("\n\t\tRunning bad log test 5"); if (test6) REPORT("\n\t\tRunning bad log test 6"); if (test7) REPORT("\n\t\tRunning bad log test 7"); REPORT("_______________________________________________________"); //if external input output files does not exist ,create one File ifdir = new File("extinout"); if(!ifdir.exists()) ifdir.mkdirs(); try { // make sure it does exist infofile = new RandomAccessFile(ifile, "rw"); } catch (IOException ioe) { System.out.println("Cannot write to temporary file " + infoPath + ". Please make sure it is correct, if not, please set the property " + "TestBadLogInfo=<where temp files should go>"); throw T_Fail.exceptionFail(ioe); } if (!Monitor.startPersistentService(testService, startParams)) throw T_Fail.testFailMsg("Monitor didn't know how to restart service: " + testService); factory = (RawStoreFactory) Monitor.findService(getModuleToTestProtocolName(), testService); } } catch (StandardException mse) { throw T_Fail.exceptionFail(mse); } if (factory == null) { throw T_Fail.testFailMsg(getModuleToTestProtocolName() + " service not started."); } lf = factory.getLockFactory(); if (lf == null) { throw T_Fail.testFailMsg("LockFactory.MODULE not found"); } // get a utility helper t_util = new T_Util(factory, lf, contextService); try { // these tests can be run in any order RTest1(); RTest2(); RTest3(); RTest4(); RTest5(); RTest6(); RTest7(); if (test1) STest1(); if (test2) STest2(); if (test3) STest3(); if (test4) STest4(); if(test5) STest5(); if(test6) STest6(); if(test7) STest7(); if (infofile != null) infofile.close(); } catch (StandardException se) { throw T_Fail.exceptionFail(se); } catch (IOException ioe) { throw T_Fail.exceptionFail(ioe); } }
logFactory =(LogToFile) Monitor.findServiceModule(factory, factory.getLogFactoryModule());
public void runTests() throws T_Fail { getConfig(); int tests = 0; if (setup) tests++; if (test1) tests++; if (test2) tests++; if (test3) tests++; if (test4) tests++; if (test5) tests++; if (test6) tests++; if (test7) tests++; if (tests != 1) throw T_Fail.testFailMsg("One & only one of the bad log recovery test should be run"); if (!SanityManager.DEBUG) { REPORT("recoverBadLog cannot be run on an insane server"); return; } try { contextService = ContextService.getFactory(); File ifile = new File(infoPath); // // no checkpoint log record in any of the log files - unless this value // is reset. LogToFile.TEST_LOG_SWITCH_LOG // this will cause recovery to switch log without checkpointing // SanityManager.DEBUG_SET(LogToFile.TEST_LOG_SWITCH_LOG); // don't want background checkpoint process to be running SanityManager.DEBUG_SET(DaemonService.DaemonOff); // see if we are testing encryption startParams = T_Util.setEncryptionParam(startParams); if (setup) // the first test cleans up and start from fresh { // remove the service directory to ensure a clean run REPORT("_______________________________________________________"); REPORT("\n\t\tcleaning up database for recovering from bad logs"); REPORT("_______________________________________________________"); // don't automatic boot this service if it gets left around if (startParams == null) startParams = new Properties(); startParams.put(Property.NO_AUTO_BOOT, Boolean.TRUE.toString()); // remove the service directory to ensure a clean run startParams.put(Property.DELETE_ON_CREATE, Boolean.TRUE.toString()); factory = (RawStoreFactory) Monitor.createPersistentService(getModuleToTestProtocolName(), testService, startParams); // create a database with nothing // delete the info file if (ifile.exists()) ifile.delete(); return; // don't run anything now } else // not setup, recover it { REPORT("_______________________________________________________"); if (test1) REPORT("\n\t\tRunning bad log test 1"); if (test2) 
REPORT("\n\t\tRunning bad log test 2"); if (test3) REPORT("\n\t\tRunning bad log test 3"); if (test4) REPORT("\n\t\tRunning bad log test 4"); if (test5) REPORT("\n\t\tRunning bad log test 5"); if (test6) REPORT("\n\t\tRunning bad log test 6"); if (test7) REPORT("\n\t\tRunning bad log test 7"); REPORT("_______________________________________________________"); //if external input output files does not exist ,create one File ifdir = new File("extinout"); if(!ifdir.exists()) ifdir.mkdirs(); try { // make sure it does exist infofile = new RandomAccessFile(ifile, "rw"); } catch (IOException ioe) { System.out.println("Cannot write to temporary file " + infoPath + ". Please make sure it is correct, if not, please set the property " + "TestBadLogInfo=<where temp files should go>"); throw T_Fail.exceptionFail(ioe); } if (!Monitor.startPersistentService(testService, startParams)) throw T_Fail.testFailMsg("Monitor didn't know how to restart service: " + testService); factory = (RawStoreFactory) Monitor.findService(getModuleToTestProtocolName(), testService); } } catch (StandardException mse) { throw T_Fail.exceptionFail(mse); } if (factory == null) { throw T_Fail.testFailMsg(getModuleToTestProtocolName() + " service not started."); } lf = factory.getLockFactory(); if (lf == null) { throw T_Fail.testFailMsg("LockFactory.MODULE not found"); } // get a utility helper t_util = new T_Util(factory, lf, contextService); try { // these tests can be run in any order RTest1(); RTest2(); RTest3(); RTest4(); RTest5(); RTest6(); RTest7(); if (test1) STest1(); if (test2) STest2(); if (test3) STest3(); if (test4) STest4(); if(test5) STest5(); if(test6) STest6(); if(test7) STest7(); if (infofile != null) infofile.close(); } catch (StandardException se) { throw T_Fail.exceptionFail(se); } catch (IOException ioe) { throw T_Fail.exceptionFail(ioe); } }
if ( ! (rc.getExpression().isParameterNode()))
if ( ! (rc.getExpression().requiresTypeFromContext()))
int getParamColumnTypes(DataTypeDescriptor[] types, RowResultSetNode rrsn) { int numTypes = 0; /* Look for columns where we have not found a non-? yet. */ for (int i = 0; i < types.length; i++) { if (types[i] == null) { ResultColumn rc = (ResultColumn) rrsn.getResultColumns().elementAt(i); if ( ! (rc.getExpression().isParameterNode())) { types[i] = rc.getExpressionType(); numTypes++; } } } return numTypes; }
if (rc.getExpression().isParameterNode())
if (rc.getExpression().requiresTypeFromContext())
/**
 * Stamp every ? parameter in this row's result column list with the
 * type previously resolved for its column position.
 *
 * @param types one resolved type per result column position
 * @param rrsn  the row whose ? parameters receive the types
 * @throws StandardException on error
 */
void setParamColumnTypes(DataTypeDescriptor[] types, RowResultSetNode rrsn)
    throws StandardException
{
    ResultColumnList columns = rrsn.getResultColumns();
    int columnCount = columns.size();

    for (int index = 0; index < columnCount; index++)
    {
        ResultColumn rc = (ResultColumn) columns.elementAt(index);

        // Non-parameter expressions already carry their own type.
        if (!rc.getExpression().isParameterNode())
            continue;

        // Found a ? - give it the type resolved from the other rows.
        ((ParameterNode) rc.getExpression()).setDescriptor( types[index]);
    }
}
((ParameterNode) rc.getExpression()).setDescriptor( types[index]);
rc.getExpression().setType(types[index]);
/**
 * Set the type of each ? parameter in the result column list of the
 * given RowResultSetNode to the corresponding entry of {@code types}.
 *
 * @param types one resolved type per result column position
 * @param rrsn  the row whose ? parameters receive the types
 * @throws StandardException on error
 */
void setParamColumnTypes(DataTypeDescriptor[] types, RowResultSetNode rrsn)
    throws StandardException
{
    /*
    ** Look for ? parameters in the result column list
    ** of each RowResultSetNode
    */
    ResultColumnList rrcl = rrsn.getResultColumns();
    int rrclSize = rrcl.size();
    for (int index = 0; index < rrclSize; index++)
    {
        ResultColumn rc = (ResultColumn) rrcl.elementAt(index);

        if (rc.getExpression().isParameterNode())
        {
            /*
            ** We found a ? - set its type to the type from the
            ** type array.
            */
            ((ParameterNode) rc.getExpression()).setDescriptor( types[index]);
        }
    }
}
if (methodParms != null) optimizeDomainValueConversion();
/**
 * Bind this method call expression: bind its parameters, resolve an
 * alias (routine) to a real method/class name when necessary, verify
 * the class exists, and resolve the Java method.  If the routine's
 * return type has variable length, the call is wrapped in a
 * Java-&gt;SQL CAST-&gt;SQL-&gt;Java chain so the result is cast to the
 * declared size.
 *
 * @param fromList        from list for binding column references
 * @param subqueryList    subqueries found during binding are added here
 * @param aggregateVector aggregates found during binding are added here
 * @return this node, or the CAST wrapper node for variable-length returns
 * @throws StandardException if no matching routine alias can be found
 */
public JavaValueNode bindExpression(
    FromList fromList, SubqueryList subqueryList, Vector aggregateVector)
    throws StandardException
{
    // for a function we can get called recursively
    if (alreadyBound)
        return this;

    bindParameters(fromList, subqueryList, aggregateVector);

    /* If javaClassName is null then we assume that the current methodName
     * is an alias and we must go to sysmethods to
     * get the real method and java class names for this alias.
     */
    if (javaClassName == null)
    {
        CompilerContext cc = getCompilerContext();

        // look for a routine
        if (ad == null) {
            String schemaName = procedureName != null ?
                procedureName.getSchemaName() : null;

            // Remember whether the call was schema-qualified; only
            // unqualified function calls may fall back to SYSFUN below.
            boolean noSchema = schemaName == null;

            SchemaDescriptor sd = getSchemaDescriptor(schemaName, schemaName != null);

            resolveRoutine(fromList, subqueryList, aggregateVector, sd);

            if (ad == null && noSchema && !forCallStatement)
            {
                // Resolve to a built-in SYSFUN function but only
                // if this is a function call and the call
                // was not qualified. E.g. COS(angle). The
                // SYSFUN functions are not in SYSALIASES but
                // an in-memory table, set up in DataDictionaryImpl.
                sd = getSchemaDescriptor("SYSFUN", true);

                resolveRoutine(fromList, subqueryList, aggregateVector, sd);
            }
        }

        /* Throw exception if no alias found */
        if (ad == null)
        {
            Object errName;
            if (procedureName == null)
                errName = methodName;
            else
                errName = procedureName;

            throw StandardException.newException(SQLState.LANG_NO_SUCH_METHOD_ALIAS, errName);
        }

        /* Query is dependent on the AliasDescriptor */
        cc.createDependency(ad);

        methodName = ad.getAliasInfo().getMethodName();
        javaClassName = ad.getJavaClassName();
    }

    javaClassName = verifyClassExist(javaClassName, true);

    /* Resolve the method call */
    resolveMethodCall(javaClassName, true);

    // Mark bound before any recursive re-entry (see guard at top).
    alreadyBound = true;

    // If this is a function call with a variable length
    // return type, then we need to push a CAST node.
    if (routineInfo != null)
    {
        TypeDescriptor returnType = routineInfo.getReturnType();
        if (returnType != null)
        {
            TypeId returnTypeId = TypeId.getBuiltInTypeId(returnType.getJDBCTypeId());

            if (returnTypeId.variableLength()) {
                // Cast the return using a cast node, but have to go
                // into the SQL domain, and back to the Java domain.

                DataTypeDescriptor returnValueDtd = new DataTypeDescriptor(
                        returnTypeId,
                        returnType.getPrecision(),
                        returnType.getScale(),
                        returnType.isNullable(),
                        returnType.getMaximumWidth()
                    );

                // Java result -> SQL value so the CAST can apply.
                ValueNode returnValueToSQL = (ValueNode) getNodeFactory().getNode(
                        C_NodeTypes.JAVA_TO_SQL_VALUE_NODE,
                        this,
                        getContextManager());

                ValueNode returnValueCastNode = (ValueNode) getNodeFactory().getNode(
                        C_NodeTypes.CAST_NODE,
                        returnValueToSQL,
                        returnValueDtd,
                        getContextManager());

                // Back into the Java domain for the caller.
                JavaValueNode returnValueToJava = (JavaValueNode) getNodeFactory().getNode(
                        C_NodeTypes.SQL_TO_JAVA_VALUE_NODE,
                        returnValueCastNode,
                        getContextManager());

                // Recursive bind returns immediately via alreadyBound.
                return returnValueToJava.bindExpression(fromList, subqueryList, aggregateVector);
            }
        }
    }

    getCompilerContext().addRequiredRoutinePriv(ad);

    return this;
}
return new TestSuite(RowIdNotImplementedTest.class, "RowIdNotImplementedTest suite");
return TestConfiguration.defaultSuite(RowIdNotImplementedTest.class);
/**
 * Build the suite of all test cases in this class.
 *
 * @return a named suite containing every RowIdNotImplementedTest case
 */
public static Test suite()
{
    // Collect every test method of this class into one named suite.
    TestSuite rowIdSuite = new TestSuite(
            RowIdNotImplementedTest.class,
            "RowIdNotImplementedTest suite");
    return rowIdSuite;
}
TransactionController.MODE_RECORD,
TransactionController.MODE_TABLE,
public void compressConglomerate( TransactionManager xact_manager, Transaction rawtran) throws StandardException { OpenConglomerate open_conglom = null; HeapController heapcontroller = null; try { open_conglom = new OpenHeap(); // Open table in intended exclusive mode in the top level // transaction, this will stop any ddl from happening until // purge of whole table is finished. if (open_conglom.init( (ContainerHandle) null, this, this.format_ids, xact_manager, rawtran, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, rawtran.newLockingPolicy( LockingPolicy.MODE_RECORD, TransactionController.ISOLATION_REPEATABLE_READ, true), null) == null) { throw StandardException.newException( SQLState.HEAP_CONTAINER_NOT_FOUND, new Long(id.getContainerId())); } heapcontroller = new HeapController(); heapcontroller.init(open_conglom); open_conglom.getContainer().compressContainer(); } finally { if (open_conglom != null) open_conglom.close(); } return; }
LockingPolicy.MODE_RECORD,
LockingPolicy.MODE_CONTAINER,
public void compressConglomerate( TransactionManager xact_manager, Transaction rawtran) throws StandardException { OpenConglomerate open_conglom = null; HeapController heapcontroller = null; try { open_conglom = new OpenHeap(); // Open table in intended exclusive mode in the top level // transaction, this will stop any ddl from happening until // purge of whole table is finished. if (open_conglom.init( (ContainerHandle) null, this, this.format_ids, xact_manager, rawtran, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, rawtran.newLockingPolicy( LockingPolicy.MODE_RECORD, TransactionController.ISOLATION_REPEATABLE_READ, true), null) == null) { throw StandardException.newException( SQLState.HEAP_CONTAINER_NOT_FOUND, new Long(id.getContainerId())); } heapcontroller = new HeapController(); heapcontroller.init(open_conglom); open_conglom.getContainer().compressContainer(); } finally { if (open_conglom != null) open_conglom.close(); } return; }
public void backup(String backupDir) throws SQLException {
public void backup(String backupDir, boolean wait) throws SQLException {
/**
 * Backup the database to the given directory.
 *
 * @param backupDir target directory for the backup copy
 * @throws SQLException wrapping any StandardException raised by the
 *         underlying access factory
 */
public void backup(String backupDir) throws SQLException
{
    try
    {
        // Delegate to the store's access factory.  NOTE(review):
        // presumably blocks until the backup completes -- confirm
        // wait semantics against the AccessFactory contract.
        af.backup(backupDir);
    }
    catch (StandardException se)
    {
        // Translate the internal exception into the public API form.
        throw PublicAPI.wrapStandardException(se);
    }
}
af.backup(backupDir);
af.backup(backupDir, wait);
/**
 * Backup the database to the given directory.
 *
 * @param backupDir target directory for the backup copy
 * @throws SQLException wrapping any StandardException raised by the
 *         underlying access factory
 */
public void backup(String backupDir) throws SQLException
{
    try
    {
        // Delegate to the store's access factory.  NOTE(review):
        // presumably blocks until the backup completes -- confirm
        // wait semantics against the AccessFactory contract.
        af.backup(backupDir);
    }
    catch (StandardException se)
    {
        // Translate the internal exception into the public API form.
        throw PublicAPI.wrapStandardException(se);
    }
}
public void backupAndEnableLogArchiveMode(String backupDir, boolean deleteOnlineArchivedLogFiles) throws SQLException
public void backupAndEnableLogArchiveMode( String backupDir, boolean deleteOnlineArchivedLogFiles, boolean wait) throws SQLException
/**
 * Backup the database and switch on log archive mode, enabling
 * roll-forward recovery from this backup.
 *
 * @param backupDir target directory for the backup copy
 * @param deleteOnlineArchivedLogFiles if true, already-archived online
 *        log files are removed as part of the operation
 * @throws SQLException wrapping any StandardException raised by the
 *         underlying access factory
 */
public void backupAndEnableLogArchiveMode(String backupDir, boolean deleteOnlineArchivedLogFiles) throws SQLException
{
    try
    {
        // Delegate to the store's access factory.
        af.backupAndEnableLogArchiveMode(backupDir, deleteOnlineArchivedLogFiles);
    }
    catch (StandardException se)
    {
        // Translate the internal exception into the public API form.
        throw PublicAPI.wrapStandardException(se);
    }
}
af.backupAndEnableLogArchiveMode(backupDir, deleteOnlineArchivedLogFiles);
af.backupAndEnableLogArchiveMode(backupDir, deleteOnlineArchivedLogFiles, wait);
/**
 * Backup the database and switch on log archive mode, enabling
 * roll-forward recovery from this backup.
 *
 * @param backupDir target directory for the backup copy
 * @param deleteOnlineArchivedLogFiles if true, already-archived online
 *        log files are removed as part of the operation
 * @throws SQLException wrapping any StandardException raised by the
 *         underlying access factory
 */
public void backupAndEnableLogArchiveMode(String backupDir, boolean deleteOnlineArchivedLogFiles) throws SQLException
{
    try
    {
        // Delegate to the store's access factory.
        af.backupAndEnableLogArchiveMode(backupDir, deleteOnlineArchivedLogFiles);
    }
    catch (StandardException se)
    {
        // Translate the internal exception into the public API form.
        throw PublicAPI.wrapStandardException(se);
    }
}
rs = s_i_r.executeQuery("select * from t where 1=0"); rs.afterLast(); if (rs.isAfterLast()) { System.out.println("afterLast() on empty RS should be no-op"); } rs.beforeFirst(); if (rs.isBeforeFirst()) { System.out.println("beforeFirst() on empty RS should be no-op"); } rs.close();
/**
 * Positive tests for scroll-insensitive cursors: verifies cursor type
 * and concurrency, positioning (first/last/beforeFirst/afterLast/
 * absolute/next/previous), fetch direction and fetch size, warning
 * behavior when CONCUR_UPDATABLE is downgraded, and setMaxRows().
 * Failures are reported to System.out rather than thrown.
 *
 * Assumes table "t" exists with first-column values starting at 2 and
 * ending at 6 -- TODO confirm against the test's setup code.
 *
 * @param conn open connection to run the statements against
 * @return true if every check passed
 * @throws SQLException if an unexpected JDBC error occurs
 */
static boolean scrollInsensitivePositive( Connection conn)
    throws SQLException
{
    boolean passed = true;
    PreparedStatement ps_i_r = null;
    PreparedStatement ps_i_u = null;
    ResultSet rs;
    SQLWarning warning;
    Statement s_i_r = null; // insensitive, read only
    Statement s_i_u = null; // insensitive, updatable

    s_i_r = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                                 ResultSet.CONCUR_READ_ONLY);

    // We should not have gotten any warnings
    // and should have gotten a scroll insensitive cursor
    warning = conn.getWarnings();
    while (warning != null)
    {
        System.out.println("unexpected warning = " + warning);
        warning = warning.getNextWarning();
        passed = false;
    }
    conn.clearWarnings();

    // run a query
    rs = s_i_r.executeQuery("select * from t");

    // verify scroll insensitive and read only
    if (rs.getType() != ResultSet.TYPE_SCROLL_INSENSITIVE)
    {
        System.out.println(
            "rs.getType() expected to return TYPE_SCROLL_INSENSITIVE, not " +
            rs.getType());
        passed = false;
    }
    if (rs.getConcurrency() != ResultSet.CONCUR_READ_ONLY)
    {
        System.out.println(
            "rs.getConcurrency() expected to return CONCUR_READ_ONLY, not " +
            rs.getConcurrency());
        passed = false;
    }

    // We should be positioned before the 1st row
    if (! rs.isBeforeFirst())
    {
        System.out.println("expected to be before the 1st row");
        passed = false;
    }
    // absolute(0) is defined to leave the cursor before the first row.
    if (rs.absolute(0))
    {
        System.out.println("absolute(0) expected to return false");
        passed = false;
    }
    if (! rs.isBeforeFirst())
    {
        System.out.println("still expected to be before the 1st row");
        passed = false;
    }
    // go to first row
    if (! rs.first())
    {
        System.out.println("expected first() to succeed");
        passed = false;
    }
    if (rs.getInt(1) != 2)
    {
        System.out.println(
            "rs.getInt(1) expected to return 2, not " + rs.getInt(1));
        passed = false;
    }
    if (! rs.isFirst())
    {
        System.out.println("expected to be on the 1st row");
        passed = false;
    }
    // move to before first
    rs.beforeFirst();
    if (! rs.isBeforeFirst())
    {
        System.out.println("expected to be before the 1st row");
        passed = false;
    }
    // move to last row
    if (! rs.last())
    {
        System.out.println("expected last() to succeed");
        passed = false;
    }
    if (! rs.isLast())
    {
        System.out.println("expected to be on the last row");
        passed = false;
    }
    if (rs.isAfterLast())
    {
        System.out.println("not expected to be after the last row");
        passed = false;
    }
    if (rs.getInt(1) != 6)
    {
        System.out.println(
            "rs.getInt(1) expected to return 6, not " + rs.getInt(1));
        passed = false;
    }
    // next() off the last row moves to after-last and returns false.
    if (rs.next())
    {
        System.out.println("not expected to find another row");
        passed = false;
    }
    if (! rs.isAfterLast())
    {
        System.out.println("expected to be after the last row");
        passed = false;
    }
    // We're after the last row, verify that only isAfterLast()
    // returns true
    if (rs.isLast())
    {
        System.out.println("not expected to be on the last row");
        passed = false;
    }
    if (rs.isFirst())
    {
        System.out.println("not expected to be on the first row");
        passed = false;
    }
    if (rs.isBeforeFirst())
    {
        System.out.println("not expected to be before the first row");
        passed = false;
    }

    // get/setFetchDirection()
    if (rs.getFetchDirection() != ResultSet.FETCH_FORWARD)
    {
        System.out.println(
            "getFetchDirection() expected to return FETCH_FORWARD, not " +
            rs.getFetchDirection());
        passed = false;
    }
    rs.setFetchDirection(ResultSet.FETCH_UNKNOWN);
    if (rs.getFetchDirection() != ResultSet.FETCH_UNKNOWN)
    {
        System.out.println(
            "getFetchDirection() expected to return FETCH_UNKNOWN, not " +
            rs.getFetchDirection());
        passed = false;
    }

    // get/setFetchSize() -- embedded defaults to 1, the network client
    // to 64.
    if ( (rs.getFetchSize() != 1 && !isDerbyNetClient) ||
         (rs.getFetchSize() != 64 && isDerbyNetClient))
    {
        if (!isDerbyNetClient)
        {
            System.out.println(
                "getFetchSize() expected to return 1, not " +
                rs.getFetchSize());
        }
        else
        {
            System.out.println(
                "getFetchSize() expected to return 64, not " +
                rs.getFetchSize());
        }
        passed = false;
    }
    rs.setFetchSize(5);
    if (rs.getFetchSize() != 5)
    {
        System.out.println(
            "getFetchSize() expected to return 5, not " +
            rs.getFetchSize());
        passed = false;
    }

    // setFetchSize() to 0 should have no effect.
    // for client server, fetchSize should stay 64
    rs.setFetchSize(0);
    if ( (rs.getFetchSize() != 5 && !isDerbyNetClient) ||
         (rs.getFetchSize() != 64 && isDerbyNetClient))
    {
        if (!isDerbyNetClient)
        {
            System.out.println(
                "getFetchSize() expected to return 5, not " +
                rs.getFetchSize());
        }
        else
        {
            System.out.println(
                "getFetchSize() expected to return 64, not " +
                rs.getFetchSize());
        }
        passed = false;
    }

    // done
    rs.close();

    // Scroll insensitive and updatable
    s_i_u = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                                 ResultSet.CONCUR_UPDATABLE);

    // We should have gotten 1 warning
    // and a read only scroll insensitive cursor
    warning = conn.getWarnings();
    while (warning != null)
    {
        System.out.println("warning = " + warning);
        warning = warning.getNextWarning();
    }
    conn.clearWarnings();
    s_i_u.close();

    ps_i_r = conn.prepareStatement(
        "select * from t",
        ResultSet.TYPE_SCROLL_INSENSITIVE,
        ResultSet.CONCUR_READ_ONLY);

    // We should not have gotten any warnings
    // and should have gotten a prepared scroll insensitive cursor
    warning = conn.getWarnings();
    while (warning != null)
    {
        System.out.println("unexpected warning = " + warning);
        warning = warning.getNextWarning();
        passed = false;
    }
    conn.clearWarnings();

    rs = ps_i_r.executeQuery();
    // make sure it's scrollable
    rs.last();
    rs.close();
    ps_i_r.close();

    ps_i_u = conn.prepareStatement(
        "select * from t",
        ResultSet.TYPE_SCROLL_INSENSITIVE,
        ResultSet.CONCUR_UPDATABLE);

    // We should have gotten 1 warning
    // and a read only scroll insensitive cursor
    warning = conn.getWarnings();
    while (warning != null)
    {
        System.out.println("warning = " + warning);
        warning = warning.getNextWarning();
    }
    conn.clearWarnings();
    ps_i_u.close();

    // Check setMaxRows()/getMaxRows()
    if (s_i_r.getMaxRows() != 0)
    {
        System.out.println("getMaxRows() expected to return 0");
        passed = false;
    }
    s_i_r.setMaxRows(5);
    if (s_i_r.getMaxRows() != 5)
    {
        System.out.println("getMaxRows() expected to return 5");
        passed = false;
    }

    rs = s_i_r.executeQuery("values 1, 2, 3, 4, 5, 6");
    if (rs == null)
    {
        System.out.println("rs expected to be non-null.");
        passed = false;
    }
    // Iterate straight thru RS, expect only 5 rows.
    for (int index = 1; index < 6; index++)
    {
        if (! rs.next())
        {
            System.out.println("rs.next() failed, index = " + index);
            passed = false;
            break;
        }
    }
    // We should not see another row (only 5, not 6)
    if (rs.next())
    {
        System.out.println("rs.next() failed, should not have seen 6th row.");
        passed = false;
    }
    rs.close();

    // Jump around and verify setMaxRows() works.
    rs = s_i_r.executeQuery("values 1, 2, 3, 4, 5, 6");
    if (rs == null)
    {
        System.out.println("rs expected to be non-null.");
        passed = false;
    }
    if (!rs.last())
    {
        System.out.println("rs.last() failed.");
        passed = false;
    }
    // Iterate backwards thru RS, expect only 4 more (5 total) rows.
    for (int index = 1; index < 5; index++)
    {
        if (! rs.previous())
        {
            System.out.println("rs.previous() failed, index = " + index);
            passed = false;
            break;
        }
    }
    // We should not see another row (only 5, not 6)
    if (rs.previous())
    {
        System.out.println("rs.previous() failed, should not have seen 6th row.");
        passed = false;
    }
    rs.close();

    rs = s_i_r.executeQuery("values 1, 2, 3, 4, 5, 6");
    if (rs == null)
    {
        System.out.println("rs expected to be non-null.");
        passed = false;
    }
    rs.afterLast();
    // Iterate backwards thru RS, expect only 5 rows.
    for (int index = 1; index < 6; index++)
    {
        if (! rs.previous())
        {
            System.out.println("rs.previous() failed, index = " + index);
            passed = false;
            break;
        }
    }
    // We should not see another row (only 5, not 6)
    if (rs.previous())
    {
        System.out.println("rs.previous() failed, should not have seen 6th row.");
        passed = false;
    }
    rs.close();

    // Verify setting maxRows back to 0 works.
    s_i_r.setMaxRows(0);
    rs = s_i_r.executeQuery("values 1, 2, 3, 4, 5, 6");
    if (rs == null)
    {
        System.out.println("rs expected to be non-null.");
        passed = false;
    }
    // Iterate straight thru RS, expect 6 rows.
    for (int index = 1; index < 7; index++)
    {
        if (! rs.next())
        {
            System.out.println("rs.next() failed, index = " + index);
            passed = false;
            break;
        }
    }
    // We should not see another row
    if (rs.next())
    {
        System.out.println("rs.next() failed, should not have seen another row.");
        passed = false;
    }
    rs.close();

    return passed;
}
testStoredProcEscapeSyntax(con); testAutoCommitFailure(con); con.close();
/**
 * Entry point for the tests run against the given connection.
 * Currently only the DatabaseMetaData checks are exercised; the
 * called method closes {@code con} when it finishes.
 *
 * @param con an open connection to test against
 * @throws Exception if any test step fails unexpectedly
 */
private static void runTests(Connection con) throws Exception {
    testDatabaseMetaDataMethods(con);
}
try { if (!met.supportsStoredFunctionsUsingCallSyntax()) { System.out.println ("FAIL: supportsStoredFunctionsUsingCallSyntax() " + "should return true"); } } catch (SQLException e) { System.out.println("supportsStoredFunctionsUsingCallSyntax():"); dumpSQLExceptions(e);
if (!met.supportsStoredFunctionsUsingCallSyntax()) { System.out.println ("FAIL: supportsStoredFunctionsUsingCallSyntax() " + "should return true");
private static void testDatabaseMetaDataMethods(Connection con) throws Exception { con.setAutoCommit(true); // make sure it is true Statement s = con.createStatement(); DatabaseMetaData met = con.getMetaData(); try { if (!met.supportsStoredFunctionsUsingCallSyntax()) { System.out.println ("FAIL: supportsStoredFunctionsUsingCallSyntax() " + "should return true"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("supportsStoredFunctionsUsingCallSyntax():"); dumpSQLExceptions(e); } try { if (met.autoCommitFailureClosesAllResultSets()) { System.out.println ("FAIL: autoCommitFailureClosesAllResultSets() " + "should return false"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("autoCommitFailureClosesAllResultSets():"); dumpSQLExceptions(e); } try { if (met.providesQueryObjectGenerator()) { System.out.println ("FAIL: providesQueryObjectGenerator() should " + "return false"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("providesQueryObjectGenerator():"); dumpSQLExceptions(e); } try { RowIdLifetime lifetime = met.getRowIdLifetime(); if (lifetime != RowIdLifetime.ROWID_UNSUPPORTED) { System.out.println("FAIL: getRowIdLifetime() should return " + "ROWID_UNSUPPORTED, but got " + lifetime); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getRowIdLifetime():"); dumpSQLExceptions(e); } try { checkEmptyRS(met.getClientInfoProperties()); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! 
System.out.println("getClientInfoProperties():"); dumpSQLExceptions(e); } // Create some functions in the default schema (app) to make // the output from getFunctions() and getFunctionParameters // more interesting s.execute("CREATE FUNCTION DUMMY1 ( X SMALLINT ) RETURNS SMALLINT "+ "PARAMETER STYLE JAVA NO SQL LANGUAGE JAVA EXTERNAL "+ "NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY2 ( X INTEGER, Y SMALLINT ) RETURNS"+ " INTEGER PARAMETER STYLE JAVA NO SQL LANGUAGE JAVA "+ "EXTERNAL NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY3 ( X VARCHAR(16), Y INTEGER ) "+ "RETURNS VARCHAR(16) PARAMETER STYLE JAVA NO SQL LANGUAGE"+ " JAVA EXTERNAL NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY4 ( X VARCHAR(128), Y INTEGER ) "+ "RETURNS INTEGER PARAMETER STYLE JAVA NO SQL LANGUAGE "+ "JAVA EXTERNAL NAME 'java.some.func'"); try { checkEmptyRS(met.getFunctionParameters(null,null,null,null)); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getFunctionParameters():"); dumpSQLExceptions(e); } catch (AbstractMethodError ame) { // TODO: No implementation on client yet, so catch // AbstractMethodError for now. Remove when implemented. System.out.println("getFunctionParameters():"); ame.printStackTrace(System.out); } try { // Any function in any schema in any catalog dumpRS(met.getFunctions(null, null, null)); // Any function in any schema in "Dummy // Catalog". Same as above since the catalog // argument is ignored (is always null) dumpRS(met.getFunctions("Dummy Catalog", null, null)); // Any function in a schema starting with "SYS" dumpRS(met.getFunctions(null, "SYS%", null)); // All functions containing "GET" in any schema // (and any catalog) dumpRS(met.getFunctions(null, null, "%GET%")); // Any function that belongs to NO schema and // NO catalog (none) checkEmptyRS(met.getFunctions("", "", null)); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! 
System.out.println("getFunctions():"); dumpSQLExceptions(e); } catch (AbstractMethodError ame) { // TODO: No implementation on client yet, so catch // AbstractMethodError for now. Remove when implemented. System.out.println("getClientInfoProperties():"); ame.printStackTrace(System.out); } try { // // Test the new getSchemas() with no schema qualifiers // dumpRS(met.getSchemas(null, null)); // // Test the new getSchemas() with a schema wildcard qualifier // dumpRS(met.getSchemas(null, "SYS%")); // // Test the new getSchemas() with an exact match // dumpRS(met.getSchemas(null, "APP")); // // Make sure that getSchemas() returns an empty result // set when a schema is passed with no match // checkEmptyRS(met.getSchemas(null, "BLAH")); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getSchemas():"); dumpSQLExceptions(e); } s.close(); con.close(); }
try { if (met.autoCommitFailureClosesAllResultSets()) { System.out.println ("FAIL: autoCommitFailureClosesAllResultSets() " + "should return false"); } } catch (SQLException e) { System.out.println("autoCommitFailureClosesAllResultSets():"); dumpSQLExceptions(e);
if (met.autoCommitFailureClosesAllResultSets()) { System.out.println ("FAIL: autoCommitFailureClosesAllResultSets() " + "should return false");
private static void testDatabaseMetaDataMethods(Connection con) throws Exception { con.setAutoCommit(true); // make sure it is true Statement s = con.createStatement(); DatabaseMetaData met = con.getMetaData(); try { if (!met.supportsStoredFunctionsUsingCallSyntax()) { System.out.println ("FAIL: supportsStoredFunctionsUsingCallSyntax() " + "should return true"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("supportsStoredFunctionsUsingCallSyntax():"); dumpSQLExceptions(e); } try { if (met.autoCommitFailureClosesAllResultSets()) { System.out.println ("FAIL: autoCommitFailureClosesAllResultSets() " + "should return false"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("autoCommitFailureClosesAllResultSets():"); dumpSQLExceptions(e); } try { if (met.providesQueryObjectGenerator()) { System.out.println ("FAIL: providesQueryObjectGenerator() should " + "return false"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("providesQueryObjectGenerator():"); dumpSQLExceptions(e); } try { RowIdLifetime lifetime = met.getRowIdLifetime(); if (lifetime != RowIdLifetime.ROWID_UNSUPPORTED) { System.out.println("FAIL: getRowIdLifetime() should return " + "ROWID_UNSUPPORTED, but got " + lifetime); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getRowIdLifetime():"); dumpSQLExceptions(e); } try { checkEmptyRS(met.getClientInfoProperties()); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! 
System.out.println("getClientInfoProperties():"); dumpSQLExceptions(e); } // Create some functions in the default schema (app) to make // the output from getFunctions() and getFunctionParameters // more interesting s.execute("CREATE FUNCTION DUMMY1 ( X SMALLINT ) RETURNS SMALLINT "+ "PARAMETER STYLE JAVA NO SQL LANGUAGE JAVA EXTERNAL "+ "NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY2 ( X INTEGER, Y SMALLINT ) RETURNS"+ " INTEGER PARAMETER STYLE JAVA NO SQL LANGUAGE JAVA "+ "EXTERNAL NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY3 ( X VARCHAR(16), Y INTEGER ) "+ "RETURNS VARCHAR(16) PARAMETER STYLE JAVA NO SQL LANGUAGE"+ " JAVA EXTERNAL NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY4 ( X VARCHAR(128), Y INTEGER ) "+ "RETURNS INTEGER PARAMETER STYLE JAVA NO SQL LANGUAGE "+ "JAVA EXTERNAL NAME 'java.some.func'"); try { checkEmptyRS(met.getFunctionParameters(null,null,null,null)); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getFunctionParameters():"); dumpSQLExceptions(e); } catch (AbstractMethodError ame) { // TODO: No implementation on client yet, so catch // AbstractMethodError for now. Remove when implemented. System.out.println("getFunctionParameters():"); ame.printStackTrace(System.out); } try { // Any function in any schema in any catalog dumpRS(met.getFunctions(null, null, null)); // Any function in any schema in "Dummy // Catalog". Same as above since the catalog // argument is ignored (is always null) dumpRS(met.getFunctions("Dummy Catalog", null, null)); // Any function in a schema starting with "SYS" dumpRS(met.getFunctions(null, "SYS%", null)); // All functions containing "GET" in any schema // (and any catalog) dumpRS(met.getFunctions(null, null, "%GET%")); // Any function that belongs to NO schema and // NO catalog (none) checkEmptyRS(met.getFunctions("", "", null)); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! 
System.out.println("getFunctions():"); dumpSQLExceptions(e); } catch (AbstractMethodError ame) { // TODO: No implementation on client yet, so catch // AbstractMethodError for now. Remove when implemented. System.out.println("getClientInfoProperties():"); ame.printStackTrace(System.out); } try { // // Test the new getSchemas() with no schema qualifiers // dumpRS(met.getSchemas(null, null)); // // Test the new getSchemas() with a schema wildcard qualifier // dumpRS(met.getSchemas(null, "SYS%")); // // Test the new getSchemas() with an exact match // dumpRS(met.getSchemas(null, "APP")); // // Make sure that getSchemas() returns an empty result // set when a schema is passed with no match // checkEmptyRS(met.getSchemas(null, "BLAH")); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getSchemas():"); dumpSQLExceptions(e); } s.close(); con.close(); }
try { if (met.providesQueryObjectGenerator()) { System.out.println ("FAIL: providesQueryObjectGenerator() should " + "return false"); } } catch (SQLException e) { System.out.println("providesQueryObjectGenerator():"); dumpSQLExceptions(e);
if (met.providesQueryObjectGenerator()) { System.out.println ("FAIL: providesQueryObjectGenerator() should " + "return false");
private static void testDatabaseMetaDataMethods(Connection con) throws Exception { con.setAutoCommit(true); // make sure it is true Statement s = con.createStatement(); DatabaseMetaData met = con.getMetaData(); try { if (!met.supportsStoredFunctionsUsingCallSyntax()) { System.out.println ("FAIL: supportsStoredFunctionsUsingCallSyntax() " + "should return true"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("supportsStoredFunctionsUsingCallSyntax():"); dumpSQLExceptions(e); } try { if (met.autoCommitFailureClosesAllResultSets()) { System.out.println ("FAIL: autoCommitFailureClosesAllResultSets() " + "should return false"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("autoCommitFailureClosesAllResultSets():"); dumpSQLExceptions(e); } try { if (met.providesQueryObjectGenerator()) { System.out.println ("FAIL: providesQueryObjectGenerator() should " + "return false"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("providesQueryObjectGenerator():"); dumpSQLExceptions(e); } try { RowIdLifetime lifetime = met.getRowIdLifetime(); if (lifetime != RowIdLifetime.ROWID_UNSUPPORTED) { System.out.println("FAIL: getRowIdLifetime() should return " + "ROWID_UNSUPPORTED, but got " + lifetime); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getRowIdLifetime():"); dumpSQLExceptions(e); } try { checkEmptyRS(met.getClientInfoProperties()); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! 
System.out.println("getClientInfoProperties():"); dumpSQLExceptions(e); } // Create some functions in the default schema (app) to make // the output from getFunctions() and getFunctionParameters // more interesting s.execute("CREATE FUNCTION DUMMY1 ( X SMALLINT ) RETURNS SMALLINT "+ "PARAMETER STYLE JAVA NO SQL LANGUAGE JAVA EXTERNAL "+ "NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY2 ( X INTEGER, Y SMALLINT ) RETURNS"+ " INTEGER PARAMETER STYLE JAVA NO SQL LANGUAGE JAVA "+ "EXTERNAL NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY3 ( X VARCHAR(16), Y INTEGER ) "+ "RETURNS VARCHAR(16) PARAMETER STYLE JAVA NO SQL LANGUAGE"+ " JAVA EXTERNAL NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY4 ( X VARCHAR(128), Y INTEGER ) "+ "RETURNS INTEGER PARAMETER STYLE JAVA NO SQL LANGUAGE "+ "JAVA EXTERNAL NAME 'java.some.func'"); try { checkEmptyRS(met.getFunctionParameters(null,null,null,null)); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getFunctionParameters():"); dumpSQLExceptions(e); } catch (AbstractMethodError ame) { // TODO: No implementation on client yet, so catch // AbstractMethodError for now. Remove when implemented. System.out.println("getFunctionParameters():"); ame.printStackTrace(System.out); } try { // Any function in any schema in any catalog dumpRS(met.getFunctions(null, null, null)); // Any function in any schema in "Dummy // Catalog". Same as above since the catalog // argument is ignored (is always null) dumpRS(met.getFunctions("Dummy Catalog", null, null)); // Any function in a schema starting with "SYS" dumpRS(met.getFunctions(null, "SYS%", null)); // All functions containing "GET" in any schema // (and any catalog) dumpRS(met.getFunctions(null, null, "%GET%")); // Any function that belongs to NO schema and // NO catalog (none) checkEmptyRS(met.getFunctions("", "", null)); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! 
System.out.println("getFunctions():"); dumpSQLExceptions(e); } catch (AbstractMethodError ame) { // TODO: No implementation on client yet, so catch // AbstractMethodError for now. Remove when implemented. System.out.println("getClientInfoProperties():"); ame.printStackTrace(System.out); } try { // // Test the new getSchemas() with no schema qualifiers // dumpRS(met.getSchemas(null, null)); // // Test the new getSchemas() with a schema wildcard qualifier // dumpRS(met.getSchemas(null, "SYS%")); // // Test the new getSchemas() with an exact match // dumpRS(met.getSchemas(null, "APP")); // // Make sure that getSchemas() returns an empty result // set when a schema is passed with no match // checkEmptyRS(met.getSchemas(null, "BLAH")); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getSchemas():"); dumpSQLExceptions(e); } s.close(); con.close(); }
try { RowIdLifetime lifetime = met.getRowIdLifetime(); if (lifetime != RowIdLifetime.ROWID_UNSUPPORTED) { System.out.println("FAIL: getRowIdLifetime() should return " + "ROWID_UNSUPPORTED, but got " + lifetime); } } catch (SQLException e) { System.out.println("getRowIdLifetime():"); dumpSQLExceptions(e);
RowIdLifetime lifetime = met.getRowIdLifetime(); if (lifetime != RowIdLifetime.ROWID_UNSUPPORTED) { System.out.println("FAIL: getRowIdLifetime() should return " + "ROWID_UNSUPPORTED, but got " + lifetime);
private static void testDatabaseMetaDataMethods(Connection con) throws Exception { con.setAutoCommit(true); // make sure it is true Statement s = con.createStatement(); DatabaseMetaData met = con.getMetaData(); try { if (!met.supportsStoredFunctionsUsingCallSyntax()) { System.out.println ("FAIL: supportsStoredFunctionsUsingCallSyntax() " + "should return true"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("supportsStoredFunctionsUsingCallSyntax():"); dumpSQLExceptions(e); } try { if (met.autoCommitFailureClosesAllResultSets()) { System.out.println ("FAIL: autoCommitFailureClosesAllResultSets() " + "should return false"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("autoCommitFailureClosesAllResultSets():"); dumpSQLExceptions(e); } try { if (met.providesQueryObjectGenerator()) { System.out.println ("FAIL: providesQueryObjectGenerator() should " + "return false"); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("providesQueryObjectGenerator():"); dumpSQLExceptions(e); } try { RowIdLifetime lifetime = met.getRowIdLifetime(); if (lifetime != RowIdLifetime.ROWID_UNSUPPORTED) { System.out.println("FAIL: getRowIdLifetime() should return " + "ROWID_UNSUPPORTED, but got " + lifetime); } } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getRowIdLifetime():"); dumpSQLExceptions(e); } try { checkEmptyRS(met.getClientInfoProperties()); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! 
System.out.println("getClientInfoProperties():"); dumpSQLExceptions(e); } // Create some functions in the default schema (app) to make // the output from getFunctions() and getFunctionParameters // more interesting s.execute("CREATE FUNCTION DUMMY1 ( X SMALLINT ) RETURNS SMALLINT "+ "PARAMETER STYLE JAVA NO SQL LANGUAGE JAVA EXTERNAL "+ "NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY2 ( X INTEGER, Y SMALLINT ) RETURNS"+ " INTEGER PARAMETER STYLE JAVA NO SQL LANGUAGE JAVA "+ "EXTERNAL NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY3 ( X VARCHAR(16), Y INTEGER ) "+ "RETURNS VARCHAR(16) PARAMETER STYLE JAVA NO SQL LANGUAGE"+ " JAVA EXTERNAL NAME 'java.some.func'"); s.execute("CREATE FUNCTION DUMMY4 ( X VARCHAR(128), Y INTEGER ) "+ "RETURNS INTEGER PARAMETER STYLE JAVA NO SQL LANGUAGE "+ "JAVA EXTERNAL NAME 'java.some.func'"); try { checkEmptyRS(met.getFunctionParameters(null,null,null,null)); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getFunctionParameters():"); dumpSQLExceptions(e); } catch (AbstractMethodError ame) { // TODO: No implementation on client yet, so catch // AbstractMethodError for now. Remove when implemented. System.out.println("getFunctionParameters():"); ame.printStackTrace(System.out); } try { // Any function in any schema in any catalog dumpRS(met.getFunctions(null, null, null)); // Any function in any schema in "Dummy // Catalog". Same as above since the catalog // argument is ignored (is always null) dumpRS(met.getFunctions("Dummy Catalog", null, null)); // Any function in a schema starting with "SYS" dumpRS(met.getFunctions(null, "SYS%", null)); // All functions containing "GET" in any schema // (and any catalog) dumpRS(met.getFunctions(null, null, "%GET%")); // Any function that belongs to NO schema and // NO catalog (none) checkEmptyRS(met.getFunctions("", "", null)); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! 
System.out.println("getFunctions():"); dumpSQLExceptions(e); } catch (AbstractMethodError ame) { // TODO: No implementation on client yet, so catch // AbstractMethodError for now. Remove when implemented. System.out.println("getClientInfoProperties():"); ame.printStackTrace(System.out); } try { // // Test the new getSchemas() with no schema qualifiers // dumpRS(met.getSchemas(null, null)); // // Test the new getSchemas() with a schema wildcard qualifier // dumpRS(met.getSchemas(null, "SYS%")); // // Test the new getSchemas() with an exact match // dumpRS(met.getSchemas(null, "APP")); // // Make sure that getSchemas() returns an empty result // set when a schema is passed with no match // checkEmptyRS(met.getSchemas(null, "BLAH")); } catch (SQLException e) { // TODO: remove try/catch once method is implemented! System.out.println("getSchemas():"); dumpSQLExceptions(e); } s.close(); con.close(); }
con.close();
    /**
     * Exercises the JDBC 4.0 additions to DatabaseMetaData, printing
     * results (and FAIL lines) for comparison against the canon.
     * Several of the tested methods are not yet implemented on all
     * drivers, hence the per-call try/catch blocks that merely log.
     * Creates functions DUMMY1..DUMMY4 in the default schema and
     * closes both the statement and the connection before returning.
     *
     * @param con open connection; closed before this method returns
     * @throws Exception if an unexpected database error occurs
     */
    private static void testDatabaseMetaDataMethods(Connection con)
        throws Exception
    {
        con.setAutoCommit(true); // make sure it is true

        Statement s = con.createStatement();
        DatabaseMetaData met = con.getMetaData();

        try {
            if (!met.supportsStoredFunctionsUsingCallSyntax()) {
                System.out.println
                    ("FAIL: supportsStoredFunctionsUsingCallSyntax() " +
                     "should return true");
            }
        } catch (SQLException e) {
            // TODO: remove try/catch once method is implemented!
            System.out.println("supportsStoredFunctionsUsingCallSyntax():");
            dumpSQLExceptions(e);
        }

        try {
            if (met.autoCommitFailureClosesAllResultSets()) {
                System.out.println
                    ("FAIL: autoCommitFailureClosesAllResultSets() " +
                     "should return false");
            }
        } catch (SQLException e) {
            // TODO: remove try/catch once method is implemented!
            System.out.println("autoCommitFailureClosesAllResultSets():");
            dumpSQLExceptions(e);
        }

        try {
            if (met.providesQueryObjectGenerator()) {
                System.out.println
                    ("FAIL: providesQueryObjectGenerator() should " +
                     "return false");
            }
        } catch (SQLException e) {
            // TODO: remove try/catch once method is implemented!
            System.out.println("providesQueryObjectGenerator():");
            dumpSQLExceptions(e);
        }

        try {
            // Derby has no ROWID support, so the lifetime must say so.
            RowIdLifetime lifetime = met.getRowIdLifetime();
            if (lifetime != RowIdLifetime.ROWID_UNSUPPORTED) {
                System.out.println("FAIL: getRowIdLifetime() should return " +
                                   "ROWID_UNSUPPORTED, but got " + lifetime);
            }
        } catch (SQLException e) {
            // TODO: remove try/catch once method is implemented!
            System.out.println("getRowIdLifetime():");
            dumpSQLExceptions(e);
        }

        try {
            checkEmptyRS(met.getClientInfoProperties());
        } catch (SQLException e) {
            // TODO: remove try/catch once method is implemented!
            System.out.println("getClientInfoProperties():");
            dumpSQLExceptions(e);
        }

        // Create some functions in the default schema (app) to make
        // the output from getFunctions() and getFunctionParameters
        // more interesting
        s.execute("CREATE FUNCTION DUMMY1 ( X SMALLINT ) RETURNS SMALLINT "+
                  "PARAMETER STYLE JAVA NO SQL LANGUAGE JAVA EXTERNAL "+
                  "NAME 'java.some.func'");
        s.execute("CREATE FUNCTION DUMMY2 ( X INTEGER, Y SMALLINT ) RETURNS"+
                  " INTEGER PARAMETER STYLE JAVA NO SQL LANGUAGE JAVA "+
                  "EXTERNAL NAME 'java.some.func'");
        s.execute("CREATE FUNCTION DUMMY3 ( X VARCHAR(16), Y INTEGER ) "+
                  "RETURNS VARCHAR(16) PARAMETER STYLE JAVA NO SQL LANGUAGE"+
                  " JAVA EXTERNAL NAME 'java.some.func'");
        s.execute("CREATE FUNCTION DUMMY4 ( X VARCHAR(128), Y INTEGER ) "+
                  "RETURNS INTEGER PARAMETER STYLE JAVA NO SQL LANGUAGE "+
                  "JAVA EXTERNAL NAME 'java.some.func'");

        try {
            checkEmptyRS(met.getFunctionParameters(null,null,null,null));
        } catch (SQLException e) {
            // TODO: remove try/catch once method is implemented!
            System.out.println("getFunctionParameters():");
            dumpSQLExceptions(e);
        } catch (AbstractMethodError ame) {
            // TODO: No implementation on client yet, so catch
            // AbstractMethodError for now. Remove when implemented.
            System.out.println("getFunctionParameters():");
            ame.printStackTrace(System.out);
        }

        try {
            // Any function in any schema in any catalog
            dumpRS(met.getFunctions(null, null, null));
            // Any function in any schema in "Dummy
            // Catalog". Same as above since the catalog
            // argument is ignored (is always null)
            dumpRS(met.getFunctions("Dummy Catalog", null, null));
            // Any function in a schema starting with "SYS"
            dumpRS(met.getFunctions(null, "SYS%", null));
            // All functions containing "GET" in any schema
            // (and any catalog)
            dumpRS(met.getFunctions(null, null, "%GET%"));
            // Any function that belongs to NO schema and
            // NO catalog (none)
            checkEmptyRS(met.getFunctions("", "", null));
        } catch (SQLException e) {
            // TODO: remove try/catch once method is implemented!
            System.out.println("getFunctions():");
            dumpSQLExceptions(e);
        } catch (AbstractMethodError ame) {
            // TODO: No implementation on client yet, so catch
            // AbstractMethodError for now. Remove when implemented.
            // NOTE(review): this label looks like a copy-paste slip — it
            // reports "getClientInfoProperties():" inside the
            // getFunctions() block; presumably it should say
            // "getFunctions():". Confirm against the canon before fixing.
            System.out.println("getClientInfoProperties():");
            ame.printStackTrace(System.out);
        }

        try {
            //
            // Test the new getSchemas() with no schema qualifiers
            //
            dumpRS(met.getSchemas(null, null));
            //
            // Test the new getSchemas() with a schema wildcard qualifier
            //
            dumpRS(met.getSchemas(null, "SYS%"));
            //
            // Test the new getSchemas() with an exact match
            //
            dumpRS(met.getSchemas(null, "APP"));
            //
            // Make sure that getSchemas() returns an empty result
            // set when a schema is passed with no match
            //
            checkEmptyRS(met.getSchemas(null, "BLAH"));
        } catch (SQLException e) {
            // TODO: remove try/catch once method is implemented!
            System.out.println("getSchemas():");
            dumpSQLExceptions(e);
        }

        s.close();
        con.close();
    }
checkRoutinePermissions(conn);
private void runPhase(int version, int phase) throws Exception{ System.out.println("\n\nSTART - phase " + PHASES[phase]); URLClassLoader classLoader = null; switch(version) { case OLD_RELEASE: classLoader = oldClassLoader; break; case NEW_RELEASE: classLoader = newClassLoader; break; default: System.out.println("ERROR: Specified an invalid release type"); return; } boolean passed = true; Connection conn = null; setClassLoader(classLoader); conn = getConnection(classLoader, phase); if(conn != null) { passed = caseVersionCheck(version, conn); passed = caseReusableRecordIdSequenceNumber(conn, phase, oldMajorVersion, oldMinorVersion) && passed; passed = caseInitialize(conn, phase) && passed; passed = caseProcedures(conn, phase, oldMajorVersion, oldMinorVersion) && passed; passed = caseTriggerVTI(conn, phase, oldMajorVersion, oldMinorVersion) && passed; passed = caseGrantRevoke(conn, phase, classLoader, false) && passed; // Test grant/revoke feature with sql authorization if(phase == PH_HARD_UPGRADE) { setSQLAuthorization(conn, true); conn = restartDatabase(classLoader); passed = caseGrantRevoke(conn, phase, classLoader, true) && passed; checkSysSchemas(conn); } runMetadataTest(classLoader, conn); conn.close(); shutdownDatabase(classLoader); } setNullClassLoader(); System.out.println("END - " + (passed ? "PASS" : "FAIL") + " - phase " + PHASES[phase]); }
throw StandardException.newException(SQLState.LANG_INVALID_COL_REF_NON_GROUPED_SELECT_LIST, cr.getFullColumnName());
throw StandardException.newException(SQLState.LANG_INVALID_COL_REF_NON_GROUPED_SELECT_LIST, cr.getSQLColumnName());
public Visitable visit(Visitable node) throws StandardException { if (node instanceof ColumnReference) { ColumnReference cr = (ColumnReference)node; if (groupByList == null) { throw StandardException.newException(SQLState.LANG_INVALID_COL_REF_NON_GROUPED_SELECT_LIST, cr.getFullColumnName()); } if (groupByList.containsColumnReference(cr) == null) { throw StandardException.newException(SQLState.LANG_INVALID_COL_REF_GROUPED_SELECT_LIST, cr.getFullColumnName()); } } /* ** Subqueries are only valid if they do not have ** correlations and are expression subqueries. RESOLVE: ** this permits VARIANT expressions in the subquery -- ** should this be allowed? may be confusing to ** users to complain about: ** ** select max(x), (select sum(y).toString() from y) from x */ else if (node instanceof SubqueryNode) { SubqueryNode subq = (SubqueryNode)node; if ((subq.getSubqueryType() != SubqueryNode.EXPRESSION_SUBQUERY) || subq.hasCorrelatedCRs()) { throw StandardException.newException( (groupByList == null) ? SQLState.LANG_INVALID_NON_GROUPED_SELECT_LIST : SQLState.LANG_INVALID_GROUPED_SELECT_LIST); } /* ** TEMPORARY RESTRICTION: we cannot handle an aggregate ** in the subquery */ HasNodeVisitor visitor = new HasNodeVisitor(AggregateNode.class); subq.accept(visitor); if (visitor.hasNode()) { throw StandardException.newException( (groupByList == null) ? SQLState.LANG_INVALID_NON_GROUPED_SELECT_LIST : SQLState.LANG_INVALID_GROUPED_SELECT_LIST); } } return node; }
throw StandardException.newException(SQLState.LANG_INVALID_COL_REF_GROUPED_SELECT_LIST, cr.getFullColumnName());
throw StandardException.newException(SQLState.LANG_INVALID_COL_REF_GROUPED_SELECT_LIST, cr.getSQLColumnName());
public Visitable visit(Visitable node) throws StandardException { if (node instanceof ColumnReference) { ColumnReference cr = (ColumnReference)node; if (groupByList == null) { throw StandardException.newException(SQLState.LANG_INVALID_COL_REF_NON_GROUPED_SELECT_LIST, cr.getFullColumnName()); } if (groupByList.containsColumnReference(cr) == null) { throw StandardException.newException(SQLState.LANG_INVALID_COL_REF_GROUPED_SELECT_LIST, cr.getFullColumnName()); } } /* ** Subqueries are only valid if they do not have ** correlations and are expression subqueries. RESOLVE: ** this permits VARIANT expressions in the subquery -- ** should this be allowed? may be confusing to ** users to complain about: ** ** select max(x), (select sum(y).toString() from y) from x */ else if (node instanceof SubqueryNode) { SubqueryNode subq = (SubqueryNode)node; if ((subq.getSubqueryType() != SubqueryNode.EXPRESSION_SUBQUERY) || subq.hasCorrelatedCRs()) { throw StandardException.newException( (groupByList == null) ? SQLState.LANG_INVALID_NON_GROUPED_SELECT_LIST : SQLState.LANG_INVALID_GROUPED_SELECT_LIST); } /* ** TEMPORARY RESTRICTION: we cannot handle an aggregate ** in the subquery */ HasNodeVisitor visitor = new HasNodeVisitor(AggregateNode.class); subq.accept(visitor); if (visitor.hasNode()) { throw StandardException.newException( (groupByList == null) ? SQLState.LANG_INVALID_NON_GROUPED_SELECT_LIST : SQLState.LANG_INVALID_GROUPED_SELECT_LIST); } } return node; }
public static String fileExists(String fileName) throws Exception
public static String fileExists(String fileName) throws PrivilegedActionException
public static String fileExists(String fileName) throws Exception { File fl = new File(fileName); if(fl.exists()) { return "true"; }else { return "false"; } }
File fl = new File(fileName); if(fl.exists()) { return "true"; }else { return "false"; }
final File fl = new File(fileName); return (String) AccessController.doPrivileged(new PrivilegedExceptionAction() { public Object run() { if(fl.exists()) { return "true"; }else { return "false"; } } });
public static String fileExists(String fileName) throws Exception { File fl = new File(fileName); if(fl.exists()) { return "true"; }else { return "false"; } }
File src = new File(location, name); File dst = new File(location, newName); if(!src.renameTo(dst)) { throw new Exception("unable to rename File: " + src.getAbsolutePath() + " To: " + dst.getAbsolutePath()); } }
final File src = new File(location, name); final File dst = new File(location, newName); AccessController.doPrivileged(new PrivilegedExceptionAction() { public Object run() throws Exception { if(!src.renameTo(dst)) { throw new Exception("unable to rename File: " + src.getAbsolutePath() + " To: " + dst.getAbsolutePath()); } return null; } }); }
public static void renameFile(String location, String name , String newName) throws Exception { File src = new File(location, name); File dst = new File(location, newName); if(!src.renameTo(dst)) { throw new Exception("unable to rename File: " + src.getAbsolutePath() + " To: " + dst.getAbsolutePath()); } }
((ParameterNode) operand).setDescriptor( DataTypeDescriptor.getBuiltInDataTypeDescriptor(parameterType, true,
operand.setType(DataTypeDescriptor.getBuiltInDataTypeDescriptor(parameterType, true,
void bindParameter() throws StandardException { /* ** According to the SQL standard, if XXX_length has a ? operand, ** its type is varchar with the implementation-defined maximum length ** for a varchar. */ ((ParameterNode) operand).setDescriptor( DataTypeDescriptor.getBuiltInDataTypeDescriptor(parameterType, true, parameterWidth)); }
public <T> T createQueryObject(Class<T> ifc) throws SQLException { throw new java.lang.UnsupportedOperationException();
public <T extends BaseQuery> T createQueryObject(Class<T> ifc) throws SQLException { return QueryObjectFactory.createDefaultQueryObject (ifc, this);
public <T> T createQueryObject(Class<T> ifc) throws SQLException { throw new java.lang.UnsupportedOperationException(); }
if (generatedKey == null) { throw StandardException.newException( externalKey.length() % 2 == 0 ? SQLState.ENCRYPTION_ILLEGAL_EXKEY_CHARS : SQLState.ENCRYPTION_INVALID_EXKEY_LENGTH); }
    /**
     * Boots the cipher factory: resolves the JCE provider and algorithm,
     * validates the algorithm format (algorithm/feedback/padding),
     * determines the encryption key length, obtains the raw key (from an
     * external key or from the boot password), and derives the secret
     * key and IV.  At database-create time the chosen settings are
     * stored back into {@code properties} (ending up in
     * service.properties).
     *
     * @param create     true when the database is being created
     * @param properties connection/service properties; may be updated
     * @throws StandardException for a bad algorithm spec, bad JCE
     *         environment, wrong boot password, or a missing provider
     */
    public void boot(boolean create, Properties properties)
        throws StandardException
    {
        boolean provider_or_algo_specified = false;
        // Persist crypto settings only when creating the database.
        boolean storeProperties = create;
        String externalKey = properties.getProperty(Attribute.CRYPTO_EXTERNAL_KEY);
        if (externalKey != null) {
            // The external key itself must never be written to disk.
            storeProperties = false;
        }

        cryptoProvider = properties.getProperty(Attribute.CRYPTO_PROVIDER);

        if (cryptoProvider == null)
        {
            // JDK 1.3 does not create providers by itself.
            if (JVMInfo.JDK_ID == JVMInfo.J2SE_13)
            {
                String vendor;
                try {
                    vendor = System.getProperty("java.vendor", "");
                } catch (SecurityException se) {
                    // Can't read system properties; fall through with "".
                    vendor = "";
                }

                vendor = StringUtil.SQLToUpperCase(vendor);

                if (vendor.startsWith("IBM "))
                    cryptoProvider = "com.ibm.crypto.provider.IBMJCE";
                else if (vendor.startsWith("SUN "))
                    cryptoProvider = "com.sun.crypto.provider.SunJCE";
            }
        }
        else
        {
            provider_or_algo_specified = true;

            // explictly putting the properties back into the properties
            // saves then in service.properties at create time.
            // if (storeProperties)
            //	properties.put(Attribute.CRYPTO_PROVIDER, cryptoProvider);

            // Remember just the class name of the provider; used later to
            // ask java.security.Security whether it is already installed.
            int dotPos = cryptoProvider.lastIndexOf('.');
            if (dotPos == -1)
                cryptoProviderShort = cryptoProvider;
            else
                cryptoProviderShort = cryptoProvider.substring(dotPos+1);
        }

        cryptoAlgorithm = properties.getProperty(Attribute.CRYPTO_ALGORITHM);
        if (cryptoAlgorithm == null)
            cryptoAlgorithm = DEFAULT_ALGORITHM;
        else {
            provider_or_algo_specified = true;
        }

        // explictly putting the properties back into the properties
        // saves then in service.properties at create time.
        if (storeProperties)
            properties.put(Attribute.CRYPTO_ALGORITHM, cryptoAlgorithm);

        // The algorithm must look like "cipher/feedbackMode/padding".
        int firstSlashPos = cryptoAlgorithm.indexOf('/');
        int lastSlashPos = cryptoAlgorithm.lastIndexOf('/');

        if (firstSlashPos < 0 || lastSlashPos < 0 || firstSlashPos == lastSlashPos)
            throw StandardException.newException(SQLState.ENCRYPTION_BAD_ALG_FORMAT, cryptoAlgorithm);

        cryptoAlgorithmShort = cryptoAlgorithm.substring(0,firstSlashPos);

        if (provider_or_algo_specified)
        {
            // Track 3715 - disable use of provider/aglo specification if
            // jce environment is not 1.2.1. The ExemptionMechanism class
            // exists in jce1.2.1 and not in jce1.2, so try and load the
            // class and if you can't find it don't allow the encryption.
            // This is a requirement from the government to give cloudscape
            // export clearance for 3.6.  Note that the check is not needed
            // if no provider/algo is specified, in that case we default to
            // a DES weak encryption algorithm which also is allowed for
            // export (this is how 3.5 got it's clearance).
            try
            {
                Class c = Class.forName("javax.crypto.ExemptionMechanism");
            }
            catch (Throwable t)
            {
                throw StandardException.newException(
                        SQLState.ENCRYPTION_BAD_JCE);
            }
        }

        // If connecting to an existing database and Attribute.CRYPTO_KEY_LENGTH is set
        // then obtain the encoded key length values without padding bytes and retrieve
        // the keylength in bits if boot password mechanism is used
        // note: Attribute.CRYPTO_KEY_LENGTH is set during creation time to a supported
        // key length in the connection url. Internally , two values are stored in this property
        // if encryptionKey is used, this property will have only the encoded key length
        // if boot password mechanism is used, this property will have the following
        // keylengthBits-EncodedKeyLength
        if(!create)
        {
            // if available, parse the keylengths stored in Attribute.CRYPTO_KEY_LENGTH
            if(properties.getProperty(Attribute.CRYPTO_KEY_LENGTH) != null)
            {
                String keyLengths = properties.getProperty(Attribute.CRYPTO_KEY_LENGTH);
                int pos = keyLengths.lastIndexOf('-');
                // When there is no '-', the whole string is the encoded
                // length (substring(0) returns the full string).
                encodedKeyLength = Integer.parseInt(keyLengths.substring(pos+1));
                if(pos != -1)
                    keyLengthBits = Integer.parseInt(keyLengths.substring(0,pos));
            }
        }

        // case 1 - if 'encryptionKey' is not set and 'encryptionKeyLength' is set, then use
        // the 'encryptionKeyLength' property value as the keyLength in bits.
        // case 2 - 'encryptionKey' property is not set and 'encryptionKeyLength' is not set, then
        // use the defaults keylength:  56bits for DES, 168 for DESede and 128 for any other encryption
        // algorithm
        if (externalKey == null && create)
        {
            if(properties.getProperty(Attribute.CRYPTO_KEY_LENGTH) != null)
            {
                keyLengthBits = Integer.parseInt(properties.getProperty(Attribute.CRYPTO_KEY_LENGTH));
            }
            else if (cryptoAlgorithmShort.equals(DES))
            {
                keyLengthBits = 56;
            }
            else if (cryptoAlgorithmShort.equals(DESede) || cryptoAlgorithmShort.equals(TripleDES))
            {
                keyLengthBits = 168;
            }
            else
            {
                keyLengthBits = 128;
            }
        }

        // check the feedback mode
        String feedbackMode = cryptoAlgorithm.substring(firstSlashPos+1,lastSlashPos);

        if (!feedbackMode.equals("CBC") && !feedbackMode.equals("CFB") &&
            !feedbackMode.equals("ECB") && !feedbackMode.equals("OFB"))
            throw StandardException.newException(SQLState.ENCRYPTION_BAD_FEEDBACKMODE, feedbackMode);

        // check the NoPadding mode is used
        String padding = cryptoAlgorithm.substring(lastSlashPos+1,cryptoAlgorithm.length());
        if (!padding.equals("NoPadding"))
            throw StandardException.newException(SQLState.ENCRYPTION_BAD_PADDING, padding);

        Throwable t;
        try
        {
            if (cryptoProvider != null) {
                // provider package should be set by property
                if (Security.getProvider(cryptoProviderShort) == null)
                {
                    action = 1;
                    // add provider through privileged block.
                    java.security.AccessController.doPrivileged(this);
                }
            }

            // need this to check the boot password
            messageDigest = MessageDigest.getInstance(MESSAGE_DIGEST);

            byte[] generatedKey;
            if (externalKey != null) {

                // incorrect to specify external key and boot password
                if (properties.getProperty(Attribute.BOOT_PASSWORD) != null)
                    throw StandardException.newException(SQLState.SERVICE_WRONG_BOOT_PASSWORD);

                // NOTE(review): fromHexString presumably returns null for
                // an odd-length or non-hex external key; that case is not
                // rejected here and would surface as an NPE in
                // generateKey() below — confirm and validate the key.
                generatedKey = org.apache.derby.iapi.util.StringUtil.fromHexString(externalKey, 0, externalKey.length());
            } else {

                generatedKey = handleBootPassword(create, properties);
                if(create)
                   properties.put(Attribute.CRYPTO_KEY_LENGTH,keyLengthBits+"-"+generatedKey.length);
            }

            // Make a key and IV object out of the generated key
            mainSecretKey = generateKey(generatedKey);
            mainIV = generateIV(generatedKey);

            if (create)
            {
                properties.put(Attribute.DATA_ENCRYPTION, "true");

                // Set two new properties to allow for future changes to the log and data encryption
                // schemes. This property is introduced in version 10 , value starts at 1.
                properties.put(RawStoreFactory.DATA_ENCRYPT_ALGORITHM_VERSION,String.valueOf(1));
                properties.put(RawStoreFactory.LOG_ENCRYPT_ALGORITHM_VERSION,String.valueOf(1));
            }

            return;
        }
        catch (java.security.PrivilegedActionException pae)
        {
            // Unwrap to report the real cause from the privileged block.
            t = pae.getException();
        }
        catch (NoSuchAlgorithmException nsae)
        {
            t = nsae;
        }
        catch (SecurityException se)
        {
            t = se;
        }
        catch (LinkageError le)
        {
            t = le;
        }
        catch (ClassCastException cce)
        {
            t = cce;
        }

        throw StandardException.newException(SQLState.MISSING_ENCRYPTION_PROVIDER, t);
    }
Integer.valueOf(beginLogFileNumber).intValue();
Long.valueOf(beginLogFileNumber).longValue();
public void recover( RawStoreFactory rsf, DataFactory df, TransactionFactory tf) throws StandardException { if (SanityManager.DEBUG) { SanityManager.ASSERT(rsf != null, "raw store factory == null"); SanityManager.ASSERT(df != null, "data factory == null"); } checkCorrupt(); rawStoreFactory = rsf; dataFactory = df; // initialize the log writer only after the rawstorefactory is available, // log writer requires encryption block size info from rawstore factory // to encrypt checksum log records. if (firstLog != null) logOut = new LogAccessFile(this, firstLog, logBufferSize); // we don't want to set ReadOnlyDB before recovery has a chance to look // at the latest checkpoint and determine that the database is shutdown // cleanly. If the medium is read only but there are logs that need // to be redone or in flight transactions, we are hosed. The logs that // are redone will leave dirty pages in the cache. if (recoveryNeeded) { try { ///////////////////////////////////////////////////////////// // // During boot time, the log control file is accessed and // logFileNumber is determined. LogOut is not set up. // LogFileNumber is the log file the latest checkpoint lives in, // or 1. It may not be the latest log file (the system may have // crashed between the time a new log was generated and the // checkpoint log written), that can only be determined at the // end of recovery redo. 
// ///////////////////////////////////////////////////////////// FileLogger logger = (FileLogger)getLogger(); ///////////////////////////////////////////////////////////// // // try to find the most recent checkpoint // ///////////////////////////////////////////////////////////// if (checkpointInstant != LogCounter.INVALID_LOG_INSTANT) { currentCheckpoint = findCheckpoint(checkpointInstant, logger); } // if we are only interested in dumping the log, start from the // beginning of the first log file if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(DUMP_LOG_ONLY)) { currentCheckpoint = null; System.out.println("Dump log only"); // unless otherwise specified, 1st log file starts at 1 String beginLogFileNumber = PropertyUtil.getSystemProperty( DUMP_LOG_FROM_LOG_FILE); if (beginLogFileNumber != null) { logFileNumber = Integer.valueOf(beginLogFileNumber).intValue(); } else { logFileNumber = 1; } } } if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("setCheckpoint")) { currentCheckpoint = null; System.out.println("Set Checkpoint."); // unless otherwise specified, 1st log file starts at 1 String checkpointStartLogStr = PropertyUtil.getSystemProperty( "derby.storage.checkpointStartLog"); String checkpointStartOffsetStr = PropertyUtil.getSystemProperty( "derby.storage.checkpointStartOffset"); if ((checkpointStartLogStr != null) && (checkpointStartOffsetStr != null)) { checkpointInstant = LogCounter.makeLogInstantAsLong( Long.valueOf(checkpointStartLogStr).longValue(), Long.valueOf(checkpointStartOffsetStr).longValue()); } else { SanityManager.THROWASSERT( "must set derby.storage.checkpointStartLog and derby.storage.checkpointStartOffset, if setting setCheckpoint."); } currentCheckpoint = findCheckpoint(checkpointInstant, logger); } } long redoLWM = LogCounter.INVALID_LOG_INSTANT; long undoLWM = LogCounter.INVALID_LOG_INSTANT; long ttabInstant = LogCounter.INVALID_LOG_INSTANT; StreamLogScan redoScan = null; if (currentCheckpoint != null) { Formatable 
transactionTable = null; // RESOLVE: sku // currentCheckpoint.getTransactionTable(); // need to set the transaction table before the undo tf.useTransactionTable(transactionTable); redoLWM = currentCheckpoint.redoLWM(); undoLWM = currentCheckpoint.undoLWM(); if (transactionTable != null) ttabInstant = checkpointInstant; if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(DBG_FLAG)) { SanityManager.DEBUG(DBG_FLAG, "Found checkpoint at " + LogCounter.toDebugString(checkpointInstant) + " " + currentCheckpoint.toString()); } } firstLogFileNumber = LogCounter.getLogFileNumber(redoLWM); // figure out where the first interesting log file is. if (LogCounter.getLogFileNumber(undoLWM) < firstLogFileNumber) { firstLogFileNumber = LogCounter.getLogFileNumber(undoLWM); } // if the checkpoint record doesn't have a transaction // table, we need to rebuild it by scanning the log from // the undoLWM. If it does have a transaction table, we // only need to scan the log from the redoLWM redoScan = (StreamLogScan) openForwardsScan(undoLWM, (LogInstant)null); } else { // no checkpoint tf.useTransactionTable((Formatable)null); long start = LogCounter.makeLogInstantAsLong( logFileNumber, LOG_FILE_HEADER_SIZE); // no checkpoint, start redo from the beginning of the // file - assume this is the first log file firstLogFileNumber = logFileNumber; redoScan = (StreamLogScan) openForwardsScan(start, (LogInstant)null); } // open a transaction that is used for redo and rollback RawTransaction recoveryTransaction = tf.startTransaction( rsf, ContextService.getFactory().getCurrentContextManager(), AccessFactoryGlobals.USER_TRANS_NAME); // make this transaction aware that it is a recovery transaction // and don't spew forth post commit work while replaying the log recoveryTransaction.recoveryTransaction(); ///////////////////////////////////////////////////////////// // // Redo loop - in FileLogger // ///////////////////////////////////////////////////////////// // // set log factory state to inRedo 
so that if redo caused any // dirty page to be written from the cache, it won't flush the // log since the end of the log has not been determined and we // know the log record that caused the page to change has // already been written to the log. We need the page write to // go thru the log factory because if the redo has a problem, // the log factory is corrupt and the only way we know not to // write out the page in a checkpoint is if it check with the // log factory, and that is done via a flush - we use the WAL // protocol to stop corrupt pages from writing to the disk. // inRedo = true; long logEnd = logger.redo( recoveryTransaction, tf, redoScan, redoLWM, ttabInstant); inRedo = false; // if we are only interested in dumping the log, don't alter // the database and prevent anyone from using the log if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY)) { Monitor.logMessage("_____________________________________________________"); Monitor.logMessage("\n\t\t Log dump finished"); Monitor.logMessage("_____________________________________________________"); // just in case, it has not been set anyway logOut = null; return; } } ///////////////////////////////////////////////////////////// // // determine where the log ends // ///////////////////////////////////////////////////////////// StorageRandomAccessFile theLog = null; // if logend == LogCounter.INVALID_LOG_SCAN, that means there // is no log record in the log - most likely it is corrupted in // some way ... 
if (logEnd == LogCounter.INVALID_LOG_INSTANT) { Monitor.logTextMessage(MessageId.LOG_LOG_NOT_FOUND); StorageFile logFile = getLogFileName(logFileNumber); if (privExists(logFile)) { // if we can delete this strange corrupted file, do so, // otherwise, skip it if (!privDelete(logFile)) { logFile = getLogFileName(++logFileNumber); } } try { theLog = privRandomAccessFile(logFile, "rw"); } catch (IOException ioe) { theLog = null; } if (theLog == null || !privCanWrite(logFile)) { if (theLog != null) theLog.close(); theLog = null; ReadOnlyDB = true; } else { try { // no previous log file or previous log position if (!initLogFile( theLog, logFileNumber, LogCounter.INVALID_LOG_INSTANT)) { throw markCorrupt( StandardException.newException( SQLState.LOG_SEGMENT_NOT_EXIST, logFile.getPath())); } } catch (IOException ioe) { throw markCorrupt( StandardException.newException( SQLState.LOG_IO_ERROR, ioe)); } // successfully init'd the log file - set up markers, // and position at the end of the log. endPosition = theLog.getFilePointer(); lastFlush = endPosition; //if write sync is true , prellocate the log file //and reopen the file in rws mode. if(isWriteSynced) { //extend the file by wring zeros to it preAllocateNewLogFile(theLog); theLog.close(); theLog= privRandomAccessFile(logFile, "rws"); //postion the log at the current end postion theLog.seek(endPosition); } if (SanityManager.DEBUG) { SanityManager.ASSERT( endPosition == LOG_FILE_HEADER_SIZE, "empty log file has wrong size"); } //because we already incrementing the log number //here, no special log switch required for //backup recoveries. 
logSwitchRequired = false; } } else { // logEnd is the instant of the next log record in the log // it is used to determine the last known good position of // the log logFileNumber = LogCounter.getLogFileNumber(logEnd); ReadOnlyDB = df.isReadOnly(); StorageFile logFile = getLogFileName(logFileNumber); if (!ReadOnlyDB) { // if datafactory doesn't think it is readonly, we can // do some futher test of our own try { if(isWriteSynced) theLog = privRandomAccessFile(logFile, "rws"); else theLog = privRandomAccessFile(logFile, "rw"); } catch (IOException ioe) { theLog = null; } if (theLog == null || !privCanWrite(logFile)) { if (theLog != null) theLog.close(); theLog = null; ReadOnlyDB = true; } } if (!ReadOnlyDB) { endPosition = LogCounter.getLogFilePosition(logEnd); // // The end of the log is at endPosition. Which is where // the next log should be appending. // // if the last log record ends before the end of the // log file, then this log file has a fuzzy end. // Zap all the bytes to between endPosition to EOF to 0. // // the end log marker is 4 bytes (of zeros) // // if endPosition + 4 == logOut.length, we have a // properly terminated log file // // if endPosition + 4 is > logOut.length, there are 0, // 1, 2, or 3 bytes of 'fuzz' at the end of the log. We // can ignore that because it is guaranteed to be // overwritten by the next log record. // // if endPosition + 4 is < logOut.length, we have a // partial log record at the end of the log. // // We need to overwrite all of the incomplete log // record, because if we start logging but cannot // 'consume' all the bad log, then the log will truly // be corrupted if the next 4 bytes (the length of the // log record) after that is small enough that the next // time the database is recovered, it will be // interpreted that the whole log record is in the log // and will try to objectify, only to get classNotFound // error or worse. // //find out if log had incomplete log records at the end. 
if (redoScan.isLogEndFuzzy()) { theLog.seek(endPosition); long eof = theLog.length(); Monitor.logTextMessage(MessageId.LOG_INCOMPLETE_LOG_RECORD, logFile, new Long(endPosition), new Long(eof)); /* Write zeros from incomplete log record to end of file */ long nWrites = (eof - endPosition)/logBufferSize; int rBytes = (int)((eof - endPosition) % logBufferSize); byte zeroBuf[]= new byte[logBufferSize]; //write the zeros to file while(nWrites-- > 0) theLog.write(zeroBuf); if(rBytes !=0) theLog.write(zeroBuf, 0, rBytes); if(!isWriteSynced) syncFile(theLog); } if (SanityManager.DEBUG) { if (theLog.length() != endPosition) { SanityManager.ASSERT( theLog.length() > endPosition, "log end > log file length, bad scan"); } } // set the log to the true end position, // and not the end of the file lastFlush = endPosition; theLog.seek(endPosition); } } if (theLog != null) logOut = new LogAccessFile(this, theLog, logBufferSize); if(logSwitchRequired) switchLogFile(); boolean noInFlightTransactions = tf.noActiveUpdateTransaction(); if (ReadOnlyDB) { // in the unlikely event that someone detects we are // dealing with a read only db, check to make sure the // database is quiesce when it was copied with no unflushed // dirty buffer if (!noInFlightTransactions) { throw StandardException.newException( SQLState.LOG_READ_ONLY_DB_NEEDS_UNDO); } } ///////////////////////////////////////////////////////////// // // Undo loop - in transaction factory. It just gets one // transaction at a time from the transaction table and calls // undo, no different from runtime. 
// ///////////////////////////////////////////////////////////// if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) SanityManager.DEBUG(LogToFile.DBG_FLAG, "About to call undo(), transaction table =" + tf.getTransactionTable()); } if (!noInFlightTransactions) { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) SanityManager.DEBUG(LogToFile.DBG_FLAG, "In recovery undo, rollback inflight transactions"); } tf.rollbackAllTransactions(recoveryTransaction, rsf); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) SanityManager.DEBUG( LogToFile.DBG_FLAG, "finish recovery undo,"); } } else { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) SanityManager.DEBUG(LogToFile.DBG_FLAG, "No in flight transaction, no recovery undo work"); } } ///////////////////////////////////////////////////////////// // // XA prepared xact loop - in transaction factory. At this // point only prepared transactions should be left in the // transaction table, all others should have been aborted or // committed and removed from the transaction table. It just // gets one transaction at a time from the transaction table, // creates a real context and transaction, reclaims locks, // and leaves the new xact in the transaction table. // ///////////////////////////////////////////////////////////// if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) SanityManager.DEBUG(LogToFile.DBG_FLAG, "About to call rePrepare(), transaction table =" + tf.getTransactionTable()); } tf.handlePreparedXacts(rsf); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) SanityManager.DEBUG(LogToFile.DBG_FLAG, "Finished rePrepare(), transaction table =" + tf.getTransactionTable()); } ///////////////////////////////////////////////////////////// // // End of recovery. // ///////////////////////////////////////////////////////////// // recovery is finished. 
Close the transaction recoveryTransaction.close(); // notify the dataFactory that recovery is completed, // but before the checkpoint is written. dataFactory.postRecovery(); ////////////////////////////////////////////////////////////// // set the transaction factory short id, we have seen all the // trasactions in the log, and at the minimum, the checkpoint // transaction will be there. Set the shortId to the next // value. ////////////////////////////////////////////////////////////// tf.resetTranId(); // do a checkpoint (will flush the log) if there is any rollback // if can't checkpoint for some reasons, flush log and carry on if (!ReadOnlyDB) { boolean needCheckpoint = true; // if we can figure out there there is very little in the // log (less than 1000 bytes), we haven't done any // rollbacks, then don't checkpoint. Otherwise checkpoint. if (currentCheckpoint != null && noInFlightTransactions && redoLWM != LogCounter.INVALID_LOG_INSTANT && undoLWM != LogCounter.INVALID_LOG_INSTANT) { if ((logFileNumber == LogCounter.getLogFileNumber(redoLWM)) && (logFileNumber == LogCounter.getLogFileNumber(undoLWM)) && (endPosition < (LogCounter.getLogFilePosition(redoLWM) + 1000))) needCheckpoint = false; } if (needCheckpoint && !checkpoint(rsf, df, tf, false)) flush(logFileNumber, endPosition); } logger.close(); recoveryNeeded = false; } catch (IOException ioe) { if (SanityManager.DEBUG) ioe.printStackTrace(); throw markCorrupt( StandardException.newException(SQLState.LOG_IO_ERROR, ioe)); } catch (ClassNotFoundException cnfe) { throw markCorrupt( StandardException.newException( SQLState.LOG_CORRUPTED, cnfe)); } catch (StandardException se) { throw markCorrupt(se); } catch (Throwable th) { if (SanityManager.DEBUG) { SanityManager.showTrace(th); th.printStackTrace(); } throw markCorrupt( StandardException.newException( SQLState.LOG_RECOVERY_FAILED, th)); } } else { tf.useTransactionTable((Formatable)null); // set the transaction factory short id tf.resetTranId(); } // done 
with recovery ///////////////////////////////////////////////////////////// // setup checktpoint daemon ///////////////////////////////////////////////////////////// checkpointDaemon = rawStoreFactory.getDaemon(); if (checkpointDaemon != null) { myClientNumber = checkpointDaemon.subscribe(this, true /*onDemandOnly */); } }
clientMessageIds.add("J104");
/**
 * Registers the SQLStates of server messages that the client driver
 * must be able to render (i.e. that ship in the client message bundle).
 * <p>
 * Ids beginning with "XJ" (and, by convention, "J") are treated as
 * client messages unconditionally by {@code isClientMessage} and need
 * not be listed here.
 */
static void initClientMessageIds()
{
    // Add message ids that don't start with XJ here
    clientMessageIds.add(SQLState.NO_CURRENT_CONNECTION);
    clientMessageIds.add(SQLState.NOT_IMPLEMENTED);
    clientMessageIds.add(SQLState.CANNOT_CLOSE_ACTIVE_CONNECTION);
    clientMessageIds.add(SQLState.XACT_SAVEPOINT_RELEASE_ROLLBACK_FAIL);
    clientMessageIds.add(SQLState.UNSUPPORTED_ENCODING);
    clientMessageIds.add(SQLState.LANG_FORMAT_EXCEPTION);
    clientMessageIds.add(SQLState.LANG_DATA_TYPE_GET_MISMATCH);
    clientMessageIds.add(SQLState.LANG_DATA_TYPE_SET_MISMATCH);
    clientMessageIds.add(SQLState.LANG_DATE_SYNTAX_EXCEPTION);
    clientMessageIds.add(SQLState.CHARACTER_CONVERTER_NOT_AVAILABLE);
    clientMessageIds.add(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE);
    clientMessageIds.add(SQLState.LANG_STATEMENT_CLOSED_NO_REASON);
    clientMessageIds.add(SQLState.LANG_INVALID_COLUMN_POSITION);
    clientMessageIds.add(SQLState.INVALID_COLUMN_NAME);
    // NOTE(review): raw message id with no SQLState constant —
    // confirm it stays in sync with the client-side definition.
    clientMessageIds.add("J104");
    clientMessageIds.add(SQLState.HOLDABLE_RESULT_SET_NOT_AVAILABLE);
    clientMessageIds.add(SQLState.LANG_RETURN_OUTPUT_PARAM_CANNOT_BE_SET);
    clientMessageIds.add(SQLState.LANG_NULL_INTO_NON_NULL);
    clientMessageIds.add(SQLState.JDBC_METHOD_NOT_IMPLEMENTED);
    clientMessageIds.add(SQLState.JDBC_METHOD_NOT_SUPPORTED_BY_SERVER);
    clientMessageIds.add(SQLState.DRDA_NO_AUTOCOMMIT_UNDER_XA);
    clientMessageIds.add(SQLState.DRDA_INVALID_XA_STATE_ON_COMMIT_OR_ROLLBACK);
    // (removed a second, redundant add of HOLDABLE_RESULT_SET_NOT_AVAILABLE
    // that used to appear here — it was already registered above)
    clientMessageIds.add(SQLState.INVALID_RESULTSET_TYPE);
    clientMessageIds.add(SQLState.INVALID_RESULTSET_CONCURRENCY);
    clientMessageIds.add(SQLState.SCROLL_SENSITIVE_NOT_SUPPORTED);
    clientMessageIds.add(SQLState.INSENSITIVE_UPDATABLE_NOT_SUPPORTED);
    clientMessageIds.add(SQLState.UNABLE_TO_OBTAIN_MESSAGE_TEXT_FROM_SERVER);
    clientMessageIds.add(SQLState.NUMBER_OF_ROWS_TOO_LARGE_FOR_INT);
    clientMessageIds.add(SQLState.NOGETCONN_ON_CLOSED_POOLED_CONNECTION);
    clientMessageIds.add(SQLState.LOB_METHOD_ON_CLOSED_CONNECTION);
    clientMessageIds.add(SQLState.LANG_INVALID_PARAM_POSITION);
    clientMessageIds.add(SQLState.LANG_MISSING_PARMS);
    clientMessageIds.add(SQLState.LANG_NO_CURRENT_ROW);
    clientMessageIds.add(SQLState.LANG_STREAM_RETRIEVED_ALREADY);
    clientMessageIds.add(SQLState.CLIENT_RESULT_SET_NOT_OPEN);
}
clientMessageIds.add(SQLState.CONNECTION_FAILED_ON_RESET); clientMessageIds.add(SQLState.DECIMAL_TOO_MANY_DIGITS); clientMessageIds.add(SQLState.NUMERIC_OVERFLOW); clientMessageIds.add(SQLState.UNSUPPORTED_HOLDABILITY_PROPERTY); clientMessageIds.add(SQLState.CANCEL_NOT_SUPPORTED_BY_SERVER); clientMessageIds.add(SQLState.LANG_INVALID_CALL_STATEMENT); clientMessageIds.add(SQLState.LOSS_OF_PRECISION_EXCEPTION); clientMessageIds.add(SQLState.LANG_INVALID_SQL_IN_BATCH);
/**
 * Registers the SQLStates of server messages that the client driver
 * must be able to render (i.e. that ship in the client message bundle).
 * Ids beginning with "XJ" are treated as client messages without being
 * listed here (see isClientMessage).
 */
static void initClientMessageIds()
{
    // Add message ids that don't start with XJ here
    clientMessageIds.add(SQLState.NO_CURRENT_CONNECTION);
    clientMessageIds.add(SQLState.NOT_IMPLEMENTED);
    clientMessageIds.add(SQLState.CANNOT_CLOSE_ACTIVE_CONNECTION);
    clientMessageIds.add(SQLState.XACT_SAVEPOINT_RELEASE_ROLLBACK_FAIL);
    clientMessageIds.add(SQLState.UNSUPPORTED_ENCODING);
    clientMessageIds.add(SQLState.LANG_FORMAT_EXCEPTION);
    clientMessageIds.add(SQLState.LANG_DATA_TYPE_GET_MISMATCH);
    clientMessageIds.add(SQLState.LANG_DATA_TYPE_SET_MISMATCH);
    clientMessageIds.add(SQLState.LANG_DATE_SYNTAX_EXCEPTION);
    clientMessageIds.add(SQLState.CHARACTER_CONVERTER_NOT_AVAILABLE);
    clientMessageIds.add(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE);
    clientMessageIds.add(SQLState.LANG_STATEMENT_CLOSED_NO_REASON);
    clientMessageIds.add(SQLState.LANG_INVALID_COLUMN_POSITION);
    clientMessageIds.add(SQLState.INVALID_COLUMN_NAME);
    // NOTE(review): raw message id with no SQLState constant —
    // confirm it stays in sync with the client-side definition.
    clientMessageIds.add("J104");
    clientMessageIds.add(SQLState.HOLDABLE_RESULT_SET_NOT_AVAILABLE);
    clientMessageIds.add(SQLState.LANG_RETURN_OUTPUT_PARAM_CANNOT_BE_SET);
    clientMessageIds.add(SQLState.LANG_NULL_INTO_NON_NULL);
    clientMessageIds.add(SQLState.JDBC_METHOD_NOT_IMPLEMENTED);
    clientMessageIds.add(SQLState.JDBC_METHOD_NOT_SUPPORTED_BY_SERVER);
    clientMessageIds.add(SQLState.DRDA_NO_AUTOCOMMIT_UNDER_XA);
    clientMessageIds.add(SQLState.DRDA_INVALID_XA_STATE_ON_COMMIT_OR_ROLLBACK);
    // NOTE(review): HOLDABLE_RESULT_SET_NOT_AVAILABLE was already added
    // above — this second add is redundant (harmless for a set).
    clientMessageIds.add(SQLState.HOLDABLE_RESULT_SET_NOT_AVAILABLE);
    clientMessageIds.add(SQLState.INVALID_RESULTSET_TYPE);
    clientMessageIds.add(SQLState.INVALID_RESULTSET_CONCURRENCY);
    clientMessageIds.add(SQLState.SCROLL_SENSITIVE_NOT_SUPPORTED);
    clientMessageIds.add(SQLState.INSENSITIVE_UPDATABLE_NOT_SUPPORTED);
    clientMessageIds.add(SQLState.UNABLE_TO_OBTAIN_MESSAGE_TEXT_FROM_SERVER );
    clientMessageIds.add(SQLState.NUMBER_OF_ROWS_TOO_LARGE_FOR_INT);
    clientMessageIds.add(SQLState.NOGETCONN_ON_CLOSED_POOLED_CONNECTION);
    clientMessageIds.add(SQLState.LOB_METHOD_ON_CLOSED_CONNECTION);
    clientMessageIds.add(SQLState.LANG_INVALID_PARAM_POSITION);
    clientMessageIds.add(SQLState.LANG_MISSING_PARMS);
    clientMessageIds.add(SQLState.LANG_NO_CURRENT_ROW);
    clientMessageIds.add(SQLState.LANG_STREAM_RETRIEVED_ALREADY);
    clientMessageIds.add(SQLState.CLIENT_RESULT_SET_NOT_OPEN);
}
if ( messageId.startsWith("XJ") )
if ( messageId.startsWith("XJ") || messageId.startsWith("J") )
static boolean isClientMessage(String messageId) { if ( messageId.startsWith("XJ") ) { return true; } if ( clientMessageIds.contains(messageId)) { return true; } return false; }
if (!RowUtil.isRowEmpty(startKeyValue, (FormatableBitSet) null) || !RowUtil.isRowEmpty(stopKeyValue, (FormatableBitSet) null))
if (!RowUtil.isRowEmpty(startKeyValue) || !RowUtil.isRowEmpty(stopKeyValue))
public ScanManager openScan( TransactionManager xact_manager, Transaction rawtran, boolean hold, int open_mode, int lock_level, LockingPolicy locking_policy, int isolation_level, FormatableBitSet scanColumnList, DataValueDescriptor[] startKeyValue, int startSearchOperator, Qualifier qualifier[][], DataValueDescriptor[] stopKeyValue, int stopSearchOperator, StaticCompiledOpenConglomInfo static_info, DynamicCompiledOpenConglomInfo dynamic_info) throws StandardException { // Heap scans do not suppport start and stop scan positions (these // only make sense for ordered storage structures). if (!RowUtil.isRowEmpty(startKeyValue, (FormatableBitSet) null) || !RowUtil.isRowEmpty(stopKeyValue, (FormatableBitSet) null)) { throw StandardException.newException( SQLState.HEAP_UNIMPLEMENTED_FEATURE); } OpenConglomerate open_conglom = new OpenHeap(); if (open_conglom.init( (ContainerHandle) null, this, this.format_ids, xact_manager, rawtran, hold, open_mode, lock_level, locking_policy, dynamic_info) == null) { throw StandardException.newException( SQLState.HEAP_CONTAINER_NOT_FOUND, new Long(id.getContainerId())); } HeapScan heapscan = new HeapScan(); heapscan.init( open_conglom, scanColumnList, startKeyValue, startSearchOperator, qualifier, stopKeyValue, stopSearchOperator); return(heapscan); }
ResultSet rs = stmt.executeQuery("select * from tab1");
ResultSet rs = stmt.executeQuery("select " + "c1," + "c2," + "c3," + "c4," + "c5," + "c6," + "c1 as c1_spare," + "c2 as c2_spare," + "c3 as c3_spare " + "from tab1");
/**
 * Driver for the connection20 test: creates a table with binary and
 * character columns, reads each column through every applicable
 * getter, and verifies that getConnection()/getStatement() round-trip
 * the same objects from Statement, ResultSet, DatabaseMetaData and
 * CallableStatement.
 * <p>
 * Output is compared against a canonical master file, so the println
 * text must not change.
 *
 * @param args ij property-file arguments used to open the connection
 */
public static void main(String[] args) {
    Connection conn, connreturn;
    Statement stmt, stmtreturn;

    System.out.println("Test connection20 starting");
    try
    {
        // use the ij utility to read the property file and
        // make the initial connection.
        ij.getPropertyArg(args);
        conn = ij.startJBMS();
        isDerbyNet = TestUtil.isNetFramework();
        stmt = conn.createStatement();

        //create a table, insert a row, do a select from the table,
        stmt.execute("create table tab1("+
                     "c1 char(100) for bit data,"+
                     "c2 varchar(100) for bit data," +
                     "c3 long varchar for bit data,"+
                     "c4 char(100),"+
                     "c5 varchar(100),"+
                     "c6 long varchar)");

        // the statement should hand back the connection that made it
        connreturn = stmt.getConnection();
        if (conn.equals(connreturn))
            System.out.println("Got Same Connection Object");
        else
            System.out.println("Got Different Connection Object");

        // load some data into this table ..
        load_data(connreturn);

        // read the data of each type with all the possible functions
        ResultSet rs = stmt.executeQuery("select * from tab1");
        int loop = 0;
        while(loop < 2 )
        {
            while (rs.next())
            {
                for(int i=1 ; i < 7 ; i++)
                {
                    get_using_object(rs, i);
                    get_using_string(rs, i);
                    get_using_ascii_stream(rs, i);

                    if(i < 4 ) // only c1 , c2, c3
                    {
                        // binary getters only apply to the bit-data
                        // columns
                        get_using_binary_stream(rs, i);
                        get_using_bytes(rs, i);
                    }
                }
            }
            // get the statment back from the result set
            stmtreturn = rs.getStatement();
            if (stmt.equals(stmtreturn))
                System.out.println("Got Same Statement Object");
            else
                System.out.println("Got Different Statement Object");
            rs.close();

            // second pass re-runs the same query on a fresh result set
            rs = stmt.executeQuery("select * from tab1");
            loop++;
        }
        stmt.close();

        // Try to get the connection object thro database meta data
        DatabaseMetaData dbmeta = conn.getMetaData();
        rs = dbmeta.getTypeInfo();
        while (rs.next())
        {
            System.out.println(rs.getString(1));
        }
        // try to get a statemet from a meta data result set
        stmt = rs.getStatement();

        // Try to get the Connection back from a Metadata
        System.out.println("Try to Get the connection back from metadata");
        connreturn = dbmeta.getConnection();
        if (conn.equals(connreturn))
            System.out.println("Got Same Connection Object");
        else
            System.out.println("Got Different Connection Object");

        // Try to get the connection thru callable statement
        CallableStatement cs = conn.prepareCall("select * from tab1");
        System.out.println(" Try to get the connection back from a callable stmt");
        connreturn = cs.getConnection();
        if (conn.equals(connreturn))
            System.out.println("Got Same Connection Object");
        else
            System.out.println("Got Different Connection Object");

        cs.close();
        conn.close();
    }
    catch (SQLException e) {
        dumpSQLExceptions(e);
        e.printStackTrace();
    }
    catch (Throwable e) {
        System.out.println("FAIL -- unexpected exception: "+e);
        e.printStackTrace();
    }
    System.out.println("Test getConnection finished");
}
get_using_binary_stream(rs, i); get_using_bytes(rs, i);
get_using_binary_stream(rs, i + 6); get_using_bytes(rs, i + 6);
/**
 * Driver for the connection20 test: exercises column getters on a
 * table of binary/character columns and checks that
 * getConnection()/getStatement() return the originating objects.
 * Output is diffed against a canonical master, so println text is
 * load-bearing.
 *
 * @param args ij property-file arguments used to open the connection
 */
public static void main(String[] args) {
    Connection conn, connreturn;
    Statement stmt, stmtreturn;

    System.out.println("Test connection20 starting");
    try
    {
        // use the ij utility to read the property file and
        // make the initial connection.
        ij.getPropertyArg(args);
        conn = ij.startJBMS();
        isDerbyNet = TestUtil.isNetFramework();
        stmt = conn.createStatement();

        //create a table, insert a row, do a select from the table,
        stmt.execute("create table tab1("+
                     "c1 char(100) for bit data,"+
                     "c2 varchar(100) for bit data," +
                     "c3 long varchar for bit data,"+
                     "c4 char(100),"+
                     "c5 varchar(100),"+
                     "c6 long varchar)");

        connreturn = stmt.getConnection();
        if (conn.equals(connreturn))
            System.out.println("Got Same Connection Object");
        else
            System.out.println("Got Different Connection Object");

        // load some data into this table ..
        load_data(connreturn);

        // read the data of each type with all the possible functions
        ResultSet rs = stmt.executeQuery("select * from tab1");
        int loop = 0;
        while(loop < 2 )
        {
            while (rs.next())
            {
                for(int i=1 ; i < 7 ; i++)
                {
                    get_using_object(rs, i);
                    get_using_string(rs, i);
                    get_using_ascii_stream(rs, i);

                    if(i < 4 ) // only c1 , c2, c3
                    {
                        get_using_binary_stream(rs, i);
                        get_using_bytes(rs, i);
                    }
                }
            }
            // get the statment back from the result set
            stmtreturn = rs.getStatement();
            if (stmt.equals(stmtreturn))
                System.out.println("Got Same Statement Object");
            else
                System.out.println("Got Different Statement Object");
            rs.close();
            rs = stmt.executeQuery("select * from tab1");
            loop++;
        }
        stmt.close();

        // Try to get the connection object thro database meta data
        DatabaseMetaData dbmeta = conn.getMetaData();
        rs = dbmeta.getTypeInfo();
        while (rs.next())
        {
            System.out.println(rs.getString(1));
        }
        // try to get a statemet from a meta data result set
        stmt = rs.getStatement();

        // Try to get the Connection back from a Metadata
        System.out.println("Try to Get the connection back from metadata");
        connreturn = dbmeta.getConnection();
        if (conn.equals(connreturn))
            System.out.println("Got Same Connection Object");
        else
            System.out.println("Got Different Connection Object");

        // Try to get the connection thru callable statement
        CallableStatement cs = conn.prepareCall("select * from tab1");
        System.out.println(" Try to get the connection back from a callable stmt");
        connreturn = cs.getConnection();
        if (conn.equals(connreturn))
            System.out.println("Got Same Connection Object");
        else
            System.out.println("Got Different Connection Object");

        cs.close();
        conn.close();
    }
    catch (SQLException e) {
        dumpSQLExceptions(e);
        e.printStackTrace();
    }
    catch (Throwable e) {
        System.out.println("FAIL -- unexpected exception: "+e);
        e.printStackTrace();
    }
    System.out.println("Test getConnection finished");
}
rs = stmt.executeQuery("select * from tab1"); loop++;
rs = stmt.executeQuery("select " + "c1," + "c2," + "c3," + "c4," + "c5," + "c6," + "c1 as c1_spare," + "c2 as c2_spare," + "c3 as c3_spare " + "from tab1"); loop++;
/**
 * Driver for the connection20 test: exercises column getters on a
 * table of binary/character columns and checks that
 * getConnection()/getStatement() return the originating objects.
 * Output is diffed against a canonical master, so println text is
 * load-bearing.
 *
 * @param args ij property-file arguments used to open the connection
 */
public static void main(String[] args) {
    Connection conn, connreturn;
    Statement stmt, stmtreturn;

    System.out.println("Test connection20 starting");
    try
    {
        // use the ij utility to read the property file and
        // make the initial connection.
        ij.getPropertyArg(args);
        conn = ij.startJBMS();
        isDerbyNet = TestUtil.isNetFramework();
        stmt = conn.createStatement();

        //create a table, insert a row, do a select from the table,
        stmt.execute("create table tab1("+
                     "c1 char(100) for bit data,"+
                     "c2 varchar(100) for bit data," +
                     "c3 long varchar for bit data,"+
                     "c4 char(100),"+
                     "c5 varchar(100),"+
                     "c6 long varchar)");

        connreturn = stmt.getConnection();
        if (conn.equals(connreturn))
            System.out.println("Got Same Connection Object");
        else
            System.out.println("Got Different Connection Object");

        // load some data into this table ..
        load_data(connreturn);

        // read the data of each type with all the possible functions
        ResultSet rs = stmt.executeQuery("select * from tab1");
        int loop = 0;
        while(loop < 2 )
        {
            while (rs.next())
            {
                for(int i=1 ; i < 7 ; i++)
                {
                    get_using_object(rs, i);
                    get_using_string(rs, i);
                    get_using_ascii_stream(rs, i);

                    if(i < 4 ) // only c1 , c2, c3
                    {
                        get_using_binary_stream(rs, i);
                        get_using_bytes(rs, i);
                    }
                }
            }
            // get the statment back from the result set
            stmtreturn = rs.getStatement();
            if (stmt.equals(stmtreturn))
                System.out.println("Got Same Statement Object");
            else
                System.out.println("Got Different Statement Object");
            rs.close();
            rs = stmt.executeQuery("select * from tab1");
            loop++;
        }
        stmt.close();

        // Try to get the connection object thro database meta data
        DatabaseMetaData dbmeta = conn.getMetaData();
        rs = dbmeta.getTypeInfo();
        while (rs.next())
        {
            System.out.println(rs.getString(1));
        }
        // try to get a statemet from a meta data result set
        stmt = rs.getStatement();

        // Try to get the Connection back from a Metadata
        System.out.println("Try to Get the connection back from metadata");
        connreturn = dbmeta.getConnection();
        if (conn.equals(connreturn))
            System.out.println("Got Same Connection Object");
        else
            System.out.println("Got Different Connection Object");

        // Try to get the connection thru callable statement
        CallableStatement cs = conn.prepareCall("select * from tab1");
        System.out.println(" Try to get the connection back from a callable stmt");
        connreturn = cs.getConnection();
        if (conn.equals(connreturn))
            System.out.println("Got Same Connection Object");
        else
            System.out.println("Got Different Connection Object");

        cs.close();
        conn.close();
    }
    catch (SQLException e) {
        dumpSQLExceptions(e);
        e.printStackTrace();
    }
    catch (Throwable e) {
        System.out.println("FAIL -- unexpected exception: "+e);
        e.printStackTrace();
    }
    System.out.println("Test getConnection finished");
}
String strResult = sqlxUtil.serializeToString(itemRefs);
public XMLDataValue XMLQuery(XMLDataValue result, SqlXmlUtil sqlxUtil) throws StandardException { if (this.isNull()) { // if the context is null, we return null, // per SQL/XML[2006] 6.17:GR.1.a.ii.1. if (result == null) result = (XMLDataValue)getNewNull(); else result.setToNull(); return result; } try { // Return an XML data value whose contents are the // serialized version of the query results. int [] xType = new int[1]; ArrayList itemRefs = sqlxUtil.evalXQExpression( this, true, xType); String strResult = sqlxUtil.serializeToString(itemRefs); if (result == null) result = new XML(new SQLChar(strResult)); else result.setValue(new SQLChar(strResult)); // Now that we've set the result value, make sure // to indicate what kind of XML value we have. result.setXType(xType[0]); // And finally we return the query result as an XML value. return result; } catch (StandardException se) { // Just re-throw it. throw se; } catch (Throwable xe) { /* Failed somewhere during evaluation of the XML query expression; * turn error into a StandardException and throw it. Note: we * catch "Throwable" here to catch as many Xalan-produced errors * as possible in order to minimize the chance of an uncaught Xalan * error (such as a NullPointerException) causing Derby to fail in * a more serious way. In particular, an uncaught Java exception * like NPE can result in Derby throwing "ERROR 40XT0: An internal * error was identified by RawStore module" for all statements on * the connection after the failure--which we clearly don't want. * If we catch the error and wrap it, though, the statement will * fail but Derby will continue to run as normal. */ throw StandardException.newException( SQLState.LANG_XML_QUERY_ERROR, xe, "XMLQUERY"); } }
result = new XML(new SQLChar(strResult)); else result.setValue(new SQLChar(strResult));
result = new XML(); String strResult = sqlxUtil.serializeToString(itemRefs, result); result.setValue(new SQLChar(strResult));
public XMLDataValue XMLQuery(XMLDataValue result, SqlXmlUtil sqlxUtil) throws StandardException { if (this.isNull()) { // if the context is null, we return null, // per SQL/XML[2006] 6.17:GR.1.a.ii.1. if (result == null) result = (XMLDataValue)getNewNull(); else result.setToNull(); return result; } try { // Return an XML data value whose contents are the // serialized version of the query results. int [] xType = new int[1]; ArrayList itemRefs = sqlxUtil.evalXQExpression( this, true, xType); String strResult = sqlxUtil.serializeToString(itemRefs); if (result == null) result = new XML(new SQLChar(strResult)); else result.setValue(new SQLChar(strResult)); // Now that we've set the result value, make sure // to indicate what kind of XML value we have. result.setXType(xType[0]); // And finally we return the query result as an XML value. return result; } catch (StandardException se) { // Just re-throw it. throw se; } catch (Throwable xe) { /* Failed somewhere during evaluation of the XML query expression; * turn error into a StandardException and throw it. Note: we * catch "Throwable" here to catch as many Xalan-produced errors * as possible in order to minimize the chance of an uncaught Xalan * error (such as a NullPointerException) causing Derby to fail in * a more serious way. In particular, an uncaught Java exception * like NPE can result in Derby throwing "ERROR 40XT0: An internal * error was identified by RawStore module" for all statements on * the connection after the failure--which we clearly don't want. * If we catch the error and wrap it, though, the statement will * fail but Derby will continue to run as normal. */ throw StandardException.newException( SQLState.LANG_XML_QUERY_ERROR, xe, "XMLQUERY"); } }
if (this.hasTopLevelAttr()) { throw StandardException.newException( SQLState.LANG_XQUERY_SERIALIZATION_ERROR); }
public StringDataValue XMLSerialize(StringDataValue result, int targetType, int targetWidth) throws StandardException { if (result == null) { switch (targetType) { case Types.CHAR: result = new SQLChar(); break; case Types.VARCHAR: result = new SQLVarchar(); break; case Types.LONGVARCHAR: result = new SQLLongvarchar(); break; case Types.CLOB: result = new SQLClob(); break; default: // Shouldn't ever get here, as this check was performed // at bind time. if (SanityManager.DEBUG) { SanityManager.THROWASSERT( "Should NOT have made it to XMLSerialize " + "with a non-string target type: " + targetType); } return null; } } // Else we're reusing a StringDataValue. We only reuse // the result if we're executing the _same_ XMLSERIALIZE // call on multiple rows. That means that all rows // must have the same result type (targetType) and thus // we know that the StringDataValue already has the // correct type. So we're set. if (this.isNull()) { // Attempts to serialize a null XML value lead to a null // result (SQL/XML[2003] section 10.13). result.setToNull(); return result; } // Get the XML value as a string. For this UTF-8 impl, // we already have it as a UTF-8 string, so just use // that. result.setValue(getString()); // Seems wrong to trunc an XML document, as it then becomes non- // well-formed and thus useless. So we throw an error (that's // what the "true" in the next line says). result.setWidth(targetWidth, 0, true); return result; }
return new XML(xmlStringValue, getXType());
return new XML(xmlStringValue, getXType(), hasTopLevelAttr());
public DataValueDescriptor getClone() { return new XML(xmlStringValue, getXType()); }
if (((XMLDataValue)theValue).hasTopLevelAttr()) markAsHavingTopLevelAttr(); }
protected void setFrom(DataValueDescriptor theValue) throws StandardException { String strVal = theValue.getString(); if (strVal == null) { xmlStringValue = null; // Null is a valid value for DOCUMENT(ANY) setXType(XML_DOC_ANY); return; } // Here we just store the received value locally. if (xmlStringValue == null) xmlStringValue = new SQLChar(); xmlStringValue.setValue(strVal); /* * Assumption is that if theValue is not an XML * value then the caller is aware of whether or * not theValue constitutes a valid XML(DOCUMENT(ANY)) * and will behave accordingly (see in particular the * XMLQuery method of this class, which calls the * setValue() method of XMLDataValue which in turn * brings us to this method). */ if (theValue instanceof XMLDataValue) setXType(((XMLDataValue)theValue).getXType()); }
if (xtype == XML_DOC_ANY) containsTopLevelAttr = false;
public void setXType(int xtype) { this.xType = xtype; }
if ((duplicate_value = hash_table.put(key, row)) != null)
if ((duplicate_value = hash_table.put(key, row)) == null) doSpaceAccounting( row, false); else
private void add_row_to_hash_table( Hashtable hash_table, Object key, Object[] row) throws StandardException { Object duplicate_value = null; if ((duplicate_value = hash_table.put(key, row)) != null) { if (!remove_duplicates) { Vector row_vec; // inserted a duplicate if ((duplicate_value instanceof Vector)) { row_vec = (Vector) duplicate_value; } else { // allocate vector to hold duplicates row_vec = new Vector(2); // insert original row into vector row_vec.addElement(duplicate_value); } // insert new row into vector row_vec.addElement(row); // store vector of rows back into hash table, // overwriting the duplicate key that was // inserted. hash_table.put(key, row_vec); } } row = null; }
doSpaceAccounting( row, true);
private void add_row_to_hash_table( Hashtable hash_table, Object key, Object[] row) throws StandardException { Object duplicate_value = null; if ((duplicate_value = hash_table.put(key, row)) != null) { if (!remove_duplicates) { Vector row_vec; // inserted a duplicate if ((duplicate_value instanceof Vector)) { row_vec = (Vector) duplicate_value; } else { // allocate vector to hold duplicates row_vec = new Vector(2); // insert original row into vector row_vec.addElement(duplicate_value); } // insert new row into vector row_vec.addElement(row); // store vector of rows back into hash table, // overwriting the duplicate key that was // inserted. hash_table.put(key, row_vec); } } row = null; }
private Object[] cloneRow(Object[] old_row)
static Object[] cloneRow(Object[] old_row)
private Object[] cloneRow(Object[] old_row) throws StandardException { Object[] new_row = new DataValueDescriptor[old_row.length]; // the only difference between getClone and cloneObject is cloneObject does // not objectify a stream. We use getClone here. Beetle 4896. for (int i = 0; i < old_row.length; i++) new_row[i] = ((DataValueDescriptor) old_row[i]).getClone(); return(new_row); }
if( diskHashtable != null) { diskHashtable.close(); diskHashtable = null; }
public void close() throws StandardException { hash_table = null; return; }
return(hash_table.elements());
if( diskHashtable == null) return(hash_table.elements()); return new BackingStoreHashtableEnumeration();
public Enumeration elements() throws StandardException { return(hash_table.elements()); }
return(hash_table.get(key));
Object obj = hash_table.get(key); if( diskHashtable == null || obj != null) return obj; return diskHashtable.get( key);
public Object get(Object key) throws StandardException { return(hash_table.get(key)); }
return(hash_table.remove(key));
Object obj = hash_table.remove(key); if( obj != null || diskHashtable == null) return obj; return diskHashtable.remove(key);
public Object remove( Object key) throws StandardException { return(hash_table.remove(key)); }
return(hash_table.size());
if( diskHashtable == null) return(hash_table.size()); return hash_table.size() + diskHashtable.size();
public int size() throws StandardException { return(hash_table.size()); }
classname = checkForJDBC40Implementation(classname);
public static javax.sql.ConnectionPoolDataSource getConnectionPoolDataSource(Properties attrs) { String classname = getDataSourcePrefix() + CONNECTION_POOL_DATASOURCE_STRING + "DataSource"; return (javax.sql.ConnectionPoolDataSource) getDataSourceWithReflection(classname, attrs); }
if (JVMInfo.JDK_ID >= JVMInfo.J2SE_16) { String classname40 = classname + "40"; try { Class.forName(classname40); classname = classname40; } catch (ClassNotFoundException e) {} }
classname = checkForJDBC40Implementation(classname);
public static javax.sql.DataSource getDataSource(Properties attrs) { String classname; if(HAVE_DRIVER_CLASS) { classname = getDataSourcePrefix() + REGULAR_DATASOURCE_STRING + "DataSource"; // The JDBC 4.0 implementation of the DataSource interface // is suffixed with "40". Use it if it is available and // the JVM version is at least 1.6. if (JVMInfo.JDK_ID >= JVMInfo.J2SE_16) { String classname40 = classname + "40"; try { Class.forName(classname40); classname = classname40; } catch (ClassNotFoundException e) {} } return (javax.sql.DataSource) getDataSourceWithReflection(classname, attrs); } else return getSimpleDataSource(attrs); }