rem
stringlengths
0
477k
add
stringlengths
0
313k
context
stringlengths
6
599k
Properties p = new Properties(); p.setProperty(name, value);
public void setClientInfo(String name, String value) throws SQLException{ checkIfClosed(); // Allow null to simplify compliance testing through // reflection, (test all methods in an interface with null // arguments) if (name == null && value == null) { return; } Properties p = new Properties(); p.setProperty(name, value); setClientInfo(p); }
Connection bootDatabase(String password) throws SQLException {
Connection bootDatabase(int passwordKey) throws SQLException { String connAttrs = ""; if (encryptionType == USING_PASSWORD) { if (passwordKey == NEW) connAttrs = "bootPassword=" + NEW_PASSWORD; else if (passwordKey == OLD) connAttrs = "bootPassword=" + OLD_PASSWORD; }
/**
 * Boots the test database with the given boot password.
 *
 * @param password value for the bootPassword connection attribute
 * @return a connection to the booted database
 * @throws SQLException if the boot fails
 */
Connection bootDatabase(String password) throws SQLException {
    String attrs = "bootPassword=" + password;
    return TestUtil.getConnection(TEST_DATABASE_NAME, attrs);
}
return TestUtil.getConnection(TEST_DATABASE_NAME, "bootPassword=" + password);
if (encryptionType == USING_KEY) { if (passwordKey == NEW) connAttrs = "encryptionKey=" + NEW_KEY; else if (passwordKey == OLD) connAttrs = "encryptionKey=" + OLD_KEY; } if (verbose) logMessage("booting " + currentTestDatabase + " with " + connAttrs); return TestUtil.getConnection(currentTestDatabase, connAttrs);
/**
 * Boots the test database with the given boot password.
 *
 * @param password value for the bootPassword connection attribute
 * @return a connection to the booted database
 * @throws SQLException if the boot fails
 */
Connection bootDatabase(String password) throws SQLException { return TestUtil.getConnection(TEST_DATABASE_NAME, "bootPassword=" + password); }
private void createEncryptedDatabase() throws SQLException
private Connection createEncryptedDatabase() throws SQLException
/**
 * Creates the test database with data encryption enabled, using
 * OLD_PASSWORD as the boot password.
 *
 * @throws SQLException if creating the encrypted database fails
 */
private void createEncryptedDatabase() throws SQLException {
    String attrs = "create=true;dataEncryption=true;bootPassword=" + OLD_PASSWORD;
    TestUtil.getConnection(TEST_DATABASE_NAME, attrs);
}
TestUtil.getConnection(TEST_DATABASE_NAME, "create=true;dataEncryption=true;bootPassword=" + OLD_PASSWORD);
String connAttrs = ""; if (encryptionType == USING_PASSWORD) { connAttrs = "create=true;dataEncryption=true;bootPassword=" + OLD_PASSWORD; } if (encryptionType == USING_KEY) { connAttrs = "create=true;dataEncryption=true;encryptionKey=" + OLD_KEY; } return TestUtil.getConnection(currentTestDatabase, connAttrs);
/**
 * Creates the test database with data encryption enabled, using
 * OLD_PASSWORD as the boot password.
 *
 * @throws SQLException if creating the encrypted database fails
 */
private void createEncryptedDatabase() throws SQLException { TestUtil.getConnection(TEST_DATABASE_NAME, "create=true;dataEncryption=true;bootPassword=" + OLD_PASSWORD); }
private void encryptDatabase(String password)
private Connection encryptDatabase()
private void encryptDatabase(String password) throws SQLException { //encrypt an existing database. String connAttrs = "dataEncryption=true;bootPassword=" + password ; TestUtil.getConnection(TEST_DATABASE_NAME, connAttrs); }
String connAttrs = "dataEncryption=true;bootPassword=" + password ;
String connAttrs = ""; if (encryptionType == USING_PASSWORD) { connAttrs = "dataEncryption=true;bootPassword=" + OLD_PASSWORD; } if (encryptionType == USING_KEY) { connAttrs = "dataEncryption=true;encryptionKey=" + OLD_KEY; }
/**
 * Encrypts an existing (currently unencrypted) test database using the
 * given boot password.
 *
 * @param password boot password for the newly encrypted database
 * @throws SQLException if the encryption boot fails
 */
private void encryptDatabase(String password) throws SQLException { //encrypt an existing database. String connAttrs = "dataEncryption=true;bootPassword=" + password ; TestUtil.getConnection(TEST_DATABASE_NAME, connAttrs); }
TestUtil.getConnection(TEST_DATABASE_NAME, connAttrs);
if (verbose) logMessage("encrypting " + currentTestDatabase + " with " + connAttrs); return TestUtil.getConnection(currentTestDatabase, connAttrs);
/**
 * Encrypts an existing (currently unencrypted) test database using the
 * given boot password.
 *
 * @param password boot password for the newly encrypted database
 * @throws SQLException if the encryption boot fails
 */
private void encryptDatabase(String password) throws SQLException { //encrypt an existing database. String connAttrs = "dataEncryption=true;bootPassword=" + password ; TestUtil.getConnection(TEST_DATABASE_NAME, connAttrs); }
return rs.getInt(1);
int max = rs.getInt(1); rs.close(); s.close(); return max;
/**
 * Returns the current maximum value in the ID column of the given table.
 *
 * @param conn      connection to query on (not closed here)
 * @param tableName table whose max(ID) is fetched; interpolated directly
 *                  into the SQL, so callers must pass trusted names only
 * @return max(ID), or 0 when the table is empty (getInt on SQL NULL)
 * @throws SQLException if the query fails
 */
private int findMax(Connection conn, String tableName) throws SQLException {
    Statement s = conn.createStatement();
    try {
        ResultSet rs = s.executeQuery("SELECT max(ID) from " + tableName);
        rs.next();
        int max = rs.getInt(1);
        rs.close();
        return max;
    } finally {
        // Fix: the original returned without closing the Statement or
        // ResultSet, leaking them on every call.
        s.close();
    }
}
ps.close();
void insert(Connection conn, String tableName, int rowCount) throws SQLException { PreparedStatement ps = conn.prepareStatement("INSERT INTO " + tableName + " VALUES(?,?)"); int startId = findMax(conn, tableName); for (int i = startId; i < rowCount; i++) { ps.setInt(1, i); // ID ps.setString(2 , "skywalker" + i); ps.executeUpdate(); } conn.commit(); ps.close(); }
private void reEncryptDatabase(String currentPassword, String newPassword) throws SQLException
private Connection reEncryptDatabase() throws SQLException
private void reEncryptDatabase(String currentPassword, String newPassword) throws SQLException { // re-encrypt the database. String connAttrs = "bootPassword=" + currentPassword + ";newBootPassword=" + newPassword; TestUtil.getConnection(TEST_DATABASE_NAME, connAttrs); }
String connAttrs = "bootPassword=" + currentPassword + ";newBootPassword=" + newPassword; TestUtil.getConnection(TEST_DATABASE_NAME, connAttrs);
String connAttrs = ""; if (encryptionType == USING_PASSWORD) { connAttrs = "bootPassword=" + OLD_PASSWORD + ";newBootPassword=" + NEW_PASSWORD; } if (encryptionType == USING_KEY) { connAttrs = "encryptionKey=" + OLD_KEY + ";newEncryptionKey=" + NEW_KEY; } return TestUtil.getConnection(currentTestDatabase, connAttrs);
/**
 * Re-encrypts the test database, changing the boot password from
 * currentPassword to newPassword.
 *
 * @param currentPassword boot password currently in effect
 * @param newPassword     replacement boot password
 * @throws SQLException if the re-encryption boot fails
 */
private void reEncryptDatabase(String currentPassword, String newPassword) throws SQLException { // re-encrypt the database. String connAttrs = "bootPassword=" + currentPassword + ";newBootPassword=" + newPassword; TestUtil.getConnection(TEST_DATABASE_NAME, connAttrs); }
createEncryptedDatabase(); Connection conn = TestUtil.getConnection(TEST_DATABASE_NAME, null); createTable(conn, TEST_TABLE_NAME); insert(conn, TEST_TABLE_NAME, 100); conn.commit(); shutdown(TEST_DATABASE_NAME);
if (SanityManager.DEBUG) { if (verbose) logMessage("Start testing re-encryption with Password"); currentTestDatabase = TEST_REENCRYPT_PWD_DATABASE; encryptionType = USING_PASSWORD; runCrashRecoveryTestCases(true);
/**
 * Main test sequence: creates an encrypted database, loads 100 rows and
 * commits, shuts the database down, then runs the crash-before-commit
 * re-encryption case and its recovery counterpart, shutting down again
 * at the end.
 *
 * @throws Exception on any unexpected failure during the test
 */
private void runTest() throws Exception { logMessage("Begin ReEncryptCrashRecovery Test"); createEncryptedDatabase(); Connection conn = TestUtil.getConnection(TEST_DATABASE_NAME, null); createTable(conn, TEST_TABLE_NAME); //load some rows insert(conn, TEST_TABLE_NAME, 100); conn.commit(); //shutdown the test db shutdown(TEST_DATABASE_NAME); // re-enryption crash/recovery test cases. crashBeforeCommit(); recover_crashBeforeCommit(); //shutdown the test db shutdown(TEST_DATABASE_NAME); logMessage("End ReEncryptCrashRecovery Test"); }
crashBeforeCommit(); recover_crashBeforeCommit(); shutdown(TEST_DATABASE_NAME);
if (verbose) logMessage("Start Testing encryption with Password");
/**
 * Main test sequence: creates an encrypted database, loads 100 rows and
 * commits, shuts the database down, then runs the crash-before-commit
 * re-encryption case and its recovery counterpart, shutting down again
 * at the end.
 *
 * @throws Exception on any unexpected failure during the test
 */
private void runTest() throws Exception { logMessage("Begin ReEncryptCrashRecovery Test"); createEncryptedDatabase(); Connection conn = TestUtil.getConnection(TEST_DATABASE_NAME, null); createTable(conn, TEST_TABLE_NAME); //load some rows insert(conn, TEST_TABLE_NAME, 100); conn.commit(); //shutdown the test db shutdown(TEST_DATABASE_NAME); // re-enryption crash/recovery test cases. crashBeforeCommit(); recover_crashBeforeCommit(); //shutdown the test db shutdown(TEST_DATABASE_NAME); logMessage("End ReEncryptCrashRecovery Test"); }
currentTestDatabase = TEST_ENCRYPT_PWD_DATABASE; encryptionType = USING_PASSWORD; runCrashRecoveryTestCases(false); if (verbose) { logMessage("Start Testing Encryption with external Key"); } currentTestDatabase = TEST_ENCRYPT_KEY_DATABASE; encryptionType = USING_KEY; runCrashRecoveryTestCases(false); if (verbose) logMessage("Start Testing re-encryption with external Key"); currentTestDatabase = TEST_REENCRYPT_KEY_DATABASE; encryptionType = USING_KEY; runCrashRecoveryTestCases(true); }
/**
 * Main test sequence: creates an encrypted database, loads 100 rows and
 * commits, shuts the database down, then runs the crash-before-commit
 * re-encryption case and its recovery counterpart, shutting down again
 * at the end.
 *
 * @throws Exception on any unexpected failure during the test
 */
private void runTest() throws Exception { logMessage("Begin ReEncryptCrashRecovery Test"); createEncryptedDatabase(); Connection conn = TestUtil.getConnection(TEST_DATABASE_NAME, null); createTable(conn, TEST_TABLE_NAME); //load some rows insert(conn, TEST_TABLE_NAME, 100); conn.commit(); //shutdown the test db shutdown(TEST_DATABASE_NAME); // re-enryption crash/recovery test cases. crashBeforeCommit(); recover_crashBeforeCommit(); //shutdown the test db shutdown(TEST_DATABASE_NAME); logMessage("End ReEncryptCrashRecovery Test"); }
void shutdown(String dbName) {
void shutdown() {
void shutdown(String dbName) { try{ //shutdown TestUtil.getConnection(dbName, "shutdown=true"); }catch(SQLException se){ if (se.getSQLState() == null || !(se.getSQLState().equals("08006"))) { // database was not shutdown properly dumpSQLException(se); } } }
TestUtil.getConnection(dbName, "shutdown=true");
TestUtil.getConnection(currentTestDatabase, "shutdown=true");
/**
 * Shuts down the named database. SQL state 08006 signals a successful
 * shutdown and is swallowed; any other SQLException is dumped.
 *
 * @param dbName name of the database to shut down
 */
void shutdown(String dbName) { try{ //shutdown TestUtil.getConnection(dbName, "shutdown=true"); }catch(SQLException se){ if (se.getSQLState() == null || !(se.getSQLState().equals("08006"))) { // database was not shutdown properly dumpSQLException(se); } } }
}else {
SQLException ne = sqle.getNextException(); if (ne != null) { String message = ne.getMessage(); if (message.indexOf(debugFlag) != -1) { expectedExcepion = true; } } } if (!expectedExcepion)
private void verifyException(SQLException sqle, String debugFlag) { if (sqle != null) { if (sqle.getSQLState() != null && sqle.getSQLState().equals("XJ040")) { // boot failed as expected with the debug flag }else { dumpSQLException(sqle); } } else { if (SanityManager.DEBUG) { logMessage("Did not crash at " + debugFlag); } } }
} } else {
} else {
/**
 * Verifies that a boot attempt crashed as expected: SQL state XJ040
 * (boot failure) is the expected outcome and is ignored; any other
 * exception is dumped, and a null exception means the crash point set
 * by debugFlag was never reached.
 *
 * @param sqle      exception from the boot attempt, or null if none
 * @param debugFlag debug flag naming the expected crash point
 */
private void verifyException(SQLException sqle, String debugFlag) { if (sqle != null) { if (sqle.getSQLState() != null && sqle.getSQLState().equals("XJ040")) { // boot failed as expected with the debug flag }else { dumpSQLException(sqle); } } else { if (SanityManager.DEBUG) { logMessage("Did not crash at " + debugFlag); } } }
unblockBackup();
/**
 * Post-completion work for a transaction: optionally runs doComplete,
 * releases all locks unless KEEP_LOCKS was requested (asserting that
 * locks are only kept on COMMIT), moves the transaction to the idle
 * state, unblocks backup (any backup-blocking unlogged operations are
 * finished by this point), and clears the in-complete marker.
 *
 * @param commitflag    flag bits; only Transaction.KEEP_LOCKS is examined
 * @param commitOrAbort COMMIT or ABORT marker passed to doComplete
 * @throws StandardException propagated from completion/lock release
 */
protected void postComplete(int commitflag, Integer commitOrAbort) throws StandardException { if (postCompleteMode) doComplete(commitOrAbort); // if we are want to commitNoSync with KEEP_LOCKS flag set, don't // release any locks if ((commitflag & Transaction.KEEP_LOCKS) == 0) { releaseAllLocks(); } else { if (SanityManager.DEBUG) { SanityManager.ASSERT(commitOrAbort.equals(COMMIT), "cannot keep locks around after an ABORT"); } } setIdleState(); // any backup blocking operations (like unlogged ops) in this // transaction are done with post commit/abort work by now, // unblock the backup. unblockBackup(); inComplete = null; }
unblockBackup();
/**
 * Transfers pending post-termination work to the post-commit queue and
 * then dispatches all post-commit work: either executed inline on this
 * transaction (when doPostCommitWorkInTran() says so, or during database
 * creation / serviceImmediately work), or handed to the post-commit
 * daemon via xactFactory.submitPostCommitWork. Errors from inline work
 * are routed through xc.cleanupOnError; REQUEUE results are re-submitted
 * to the daemon.
 * NOTE(review): the formatting below is collapsed as found in this dump;
 * the //-comment text appears to have lost its original line breaks.
 *
 * @throws StandardException propagated from inline post-commit work
 */
private final void postTermination() throws StandardException { // move all the postTermination work to the postCommit queue int count = (postTerminationWorks == null) ? 0 : postTerminationWorks.size(); for (int i = 0; i < count; i++) addPostCommitWork((Serviceable)postTerminationWorks.get(i)); if (count > 0) postTerminationWorks.clear(); // if there are post commit work to be done, transfer them to the // daemon. The log is flushed, all locks released and the // transaction has ended at this point. if (postCommitWorks != null && !postCommitWorks.isEmpty()) { int pcsize = postCommitWorks.size(); // do we want to do post commit work with this transaction object? if (doPostCommitWorkInTran()) { try { inPostCommitProcessing = true; // to avoid confusion, copy the post commit work to an array if this // is going to do some work now Serviceable[] work = new Serviceable[pcsize]; work = (Serviceable[])postCommitWorks.toArray(work); // clear this for post commit processing to queue its own post // commit works - when it commits, it will send all its post // commit request to the daemon instead of dealing with it here. postCommitWorks.clear(); //All the post commit work that is part of the database creation //should be done on this thread immediately. boolean doWorkInThisThread = xactFactory.inDatabaseCreation(); for (int i = 0; i < pcsize; i++) { //process work that should be done immediately or //when we are in still in database creattion. //All the other work should be submitted //to the post commit thread to be processed asynchronously if (doWorkInThisThread || work[i].serviceImmediately()) { try { // this may cause other post commit work to be // added. 
when that transaction commits, those // work will be transfered to the daemon if (work[i].performWork(xc.getContextManager()) == Serviceable.DONE) work[i] = null; // if REQUEUE, leave it on for the postcommit // daemon to handle } catch (StandardException se) { // don't try to service this again work[i] = null; // try to handle it here. If we fail, then let the error percolate. xc.cleanupOnError(se); } } // either it need not be serviedASAP or it needs // requeueing, send it off. Note that this is one case // where a REQUEUE ends up in the high priority queue. // Unfortunately, there is no easy way to tell. If the // Servicable is well mannered, it can change itself from // serviceASAP to not serviceASAP if it returns REQUEUE. if (work[i] != null) { boolean needHelp = xactFactory.submitPostCommitWork(work[i]); work[i] = null; if (needHelp) doWorkInThisThread = true; } } } finally { inPostCommitProcessing = false; // if something untoward happends, clear the queue. if (postCommitWorks != null) postCommitWorks.clear(); } } else { // this is for non-user transaction or post commit work that is // submitted in PostCommitProcessing. (i.e., a post commit // work submitting other post commit work) for (int i = 0; i < pcsize; i++) { // SanityManager.DEBUG_PRINT("PostTermination",postCommitWorks.elementAt((i)).toString()); xactFactory.submitPostCommitWork((Serviceable)postCommitWorks.get((i))); } } postCommitWorks.clear(); } }
return getRegisteredDriver().acceptsURL(url);
return getDriverModule().acceptsURL(url);
/**
 * Delegates URL acceptance to the registered driver.
 *
 * @param url JDBC URL to test
 * @return whatever the underlying driver's acceptsURL returns
 * @throws SQLException propagated from obtaining or querying the driver
 */
public boolean acceptsURL(String url) throws SQLException { return getRegisteredDriver().acceptsURL(url); }
private static void boot() {
static void boot() {
/**
 * Boots the embedded JDBC driver, logging to the DriverManager log
 * stream when one is set and to System.err otherwise.
 */
private static void boot() {
    PrintStream log = DriverManager.getLogStream();
    new JDBCBoot().boot(Attribute.PROTOCOL, (log != null) ? log : System.err);
}
return getRegisteredDriver().connect(url, info);
return getDriverModule().connect(url, info);
/**
 * Delegates connection establishment to the registered driver.
 *
 * @param url  JDBC URL to connect to
 * @param info connection properties passed through unchanged
 * @return the connection produced by the underlying driver
 * @throws SQLException propagated from obtaining or using the driver
 */
public Connection connect(String url, Properties info) throws SQLException { return getRegisteredDriver().connect(url, info); }
return (getRegisteredDriver().getMajorVersion());
return (getDriverModule().getMajorVersion());
/**
 * Returns the major version of the registered driver, or 0 when the
 * driver cannot be obtained (SQLException is swallowed by design).
 *
 * @return major version number, or 0 on failure
 */
public int getMajorVersion() { try { return (getRegisteredDriver().getMajorVersion()); } catch (SQLException se) { return 0; } }
return (getRegisteredDriver().getMinorVersion());
return (getDriverModule().getMinorVersion());
/**
 * Returns the minor version of the registered driver, or 0 when the
 * driver cannot be obtained (SQLException is swallowed by design).
 *
 * @return minor version number, or 0 on failure
 */
public int getMinorVersion() { try { return (getRegisteredDriver().getMinorVersion()); } catch (SQLException se) { return 0; } }
return getRegisteredDriver().getPropertyInfo(url, info);
return getDriverModule().getPropertyInfo(url, info);
/**
 * Delegates property-info discovery to the registered driver.
 *
 * @param url  JDBC URL being probed
 * @param info proposed connection properties
 * @return property info array from the underlying driver
 * @throws SQLException propagated from obtaining or querying the driver
 */
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { return getRegisteredDriver().getPropertyInfo(url, info); }
return (getRegisteredDriver().jdbcCompliant());
return (getDriverModule().jdbcCompliant());
/**
 * Reports JDBC compliance of the registered driver, returning false
 * when the driver cannot be obtained (SQLException is swallowed by
 * design).
 *
 * @return the underlying driver's jdbcCompliant value, or false on failure
 */
public boolean jdbcCompliant() { try { return (getRegisteredDriver().jdbcCompliant()); } catch (SQLException se) { return false; } }
sqle.initCause(getCause());
sqle.initCause(this);
/**
 * Converts this internal exception into a java.sql.SQLException,
 * returning the wrapped SQLException directly when one exists.
 * On JVMs supporting chained exceptions, this exception itself is set
 * as the cause so its stack trace and message are preserved (using
 * getCause() instead would drop this exception from the chain).
 * The nextException chain is rebuilt recursively.
 *
 * @return an equivalent SQLException with cause and next-exception chain
 */
public SQLException getSQLException() { if ( wrappedException_ != null ) { return wrappedException_; } // When we have support for JDBC 4 SQLException subclasses, this is // where we decide which exception to create SQLException sqle = new SQLException(getMessage(), getSQLState(), getErrorCode()); // If we're in a runtime that supports chained exceptions, set the cause // of the SQLException. Chain this exception itself so its own stack // trace is retained, rather than skipping it via getCause(). if (JVMInfo.JDK_ID >= JVMInfo.J2SE_14 ) { sqle.initCause(this); } // Set up the nextException chain if ( nextException_ != null ) { // The exception chain gets constructed automatically through // the beautiful power of recursion sqle.setNextException(nextException_.getSQLException()); } return sqle; }
if(create) properties.put(Attribute.CRYPTO_KEY_LENGTH,String.valueOf(generatedKey.length)); else if(generatedKey.length != encodedKeyLength && encodedKeyLength > 0) throw StandardException.newException(SQLState.ENCRYPTION_BAD_EXTERNAL_KEY);
/**
 * Boots the JCE-based cipher factory. Selects a crypto provider
 * (defaulting per JVM vendor on JDK 1.3) and algorithm (DEFAULT_ALGORITHM
 * when unset), validates the algorithm format (algorithm/feedback/padding
 * with CBC|CFB|ECB|OFB feedback and NoPadding only), resolves key lengths
 * for create vs. reconnect from Attribute.CRYPTO_KEY_LENGTH, derives the
 * key material from either an external encryption key or the boot
 * password, builds the main secret key and IV, and on create stores the
 * encryption-related service properties. Failures in provider/digest
 * setup are rethrown as MISSING_ENCRYPTION_PROVIDER.
 * NOTE(review): the formatting below is collapsed as found in this dump;
 * several //-comments appear to have lost their original line breaks.
 *
 * @param create     true when the database is being created
 * @param properties service/connection properties, read and updated here
 * @throws StandardException on bad algorithm, bad key, wrong boot
 *         password usage, or missing encryption provider
 */
public void boot(boolean create, Properties properties) throws StandardException { if (SanityManager.DEBUG) { if (JVMInfo.JDK_ID < 2) SanityManager.THROWASSERT("expected JDK ID to be 2 - is " + JVMInfo.JDK_ID); } boolean provider_or_algo_specified = false; boolean storeProperties = create; String externalKey = properties.getProperty(Attribute.CRYPTO_EXTERNAL_KEY); if (externalKey != null) { storeProperties = false; } cryptoProvider = properties.getProperty(Attribute.CRYPTO_PROVIDER); if (cryptoProvider == null) { // JDK 1.3 does not create providers by itself. if (JVMInfo.JDK_ID == 2) { String vendor; try { vendor = System.getProperty("java.vendor", ""); } catch (SecurityException se) { vendor = ""; } vendor = StringUtil.SQLToUpperCase(vendor); if (vendor.startsWith("IBM ")) cryptoProvider = "com.ibm.crypto.provider.IBMJCE"; else if (vendor.startsWith("SUN ")) cryptoProvider = "com.sun.crypto.provider.SunJCE"; } } else { provider_or_algo_specified = true; // explictly putting the properties back into the properties // saves then in service.properties at create time. // if (storeProperties) // properties.put(Attribute.CRYPTO_PROVIDER, cryptoProvider); int dotPos = cryptoProvider.lastIndexOf('.'); if (dotPos == -1) cryptoProviderShort = cryptoProvider; else cryptoProviderShort = cryptoProvider.substring(dotPos+1); } cryptoAlgorithm = properties.getProperty(Attribute.CRYPTO_ALGORITHM); if (cryptoAlgorithm == null) cryptoAlgorithm = DEFAULT_ALGORITHM; else { provider_or_algo_specified = true; } // explictly putting the properties back into the properties // saves then in service.properties at create time. 
if (storeProperties) properties.put(Attribute.CRYPTO_ALGORITHM, cryptoAlgorithm); int firstSlashPos = cryptoAlgorithm.indexOf('/'); int lastSlashPos = cryptoAlgorithm.lastIndexOf('/'); if (firstSlashPos < 0 || lastSlashPos < 0 || firstSlashPos == lastSlashPos) throw StandardException.newException(SQLState.ENCRYPTION_BAD_ALG_FORMAT, cryptoAlgorithm); cryptoAlgorithmShort = cryptoAlgorithm.substring(0,firstSlashPos); if (provider_or_algo_specified) { // Track 3715 - disable use of provider/aglo specification if // jce environment is not 1.2.1. The ExemptionMechanism class // exists in jce1.2.1 and not in jce1.2, so try and load the // class and if you can't find it don't allow the encryption. // This is a requirement from the government to give cloudscape // export clearance for 3.6. Note that the check is not needed // if no provider/algo is specified, in that case we default to // a DES weak encryption algorithm which also is allowed for // export (this is how 3.5 got it's clearance). try { Class c = Class.forName("javax.crypto.ExemptionMechanism"); } catch (Throwable t) { throw StandardException.newException( SQLState.ENCRYPTION_BAD_JCE); } } // If connecting to an existing database and Attribute.CRYPTO_KEY_LENGTH is set // then obtain the encoded key length values without padding bytes and retrieve // the keylength in bits if boot password mechanism is used // note: Attribute.CRYPTO_KEY_LENGTH is set during creation time to a supported // key length in the connection url. 
Internally , two values are stored in this property // if encryptionKey is used, this property will have only the encoded key length // if boot password mechanism is used, this property will have the following // keylengthBits-EncodedKeyLength if(!create) { // if available, parse the keylengths stored in Attribute.CRYPTO_KEY_LENGTH if(properties.getProperty(Attribute.CRYPTO_KEY_LENGTH) != null) { String keyLengths = properties.getProperty(Attribute.CRYPTO_KEY_LENGTH); int pos = keyLengths.lastIndexOf('-'); encodedKeyLength = Integer.parseInt(keyLengths.substring(pos+1)); if(pos != -1) keyLengthBits = Integer.parseInt(keyLengths.substring(0,pos)); } } // case 1 - if 'encryptionKey' is not set and 'encryptionKeyLength' is set, then use // the 'encryptionKeyLength' property value as the keyLength in bits. // case 2 - 'encryptionKey' property is not set and 'encryptionKeyLength' is not set, then // use the defaults keylength: 56bits for DES, 168 for DESede and 128 for any other encryption // algorithm if (externalKey == null && create) { if(properties.getProperty(Attribute.CRYPTO_KEY_LENGTH) != null) { keyLengthBits = Integer.parseInt(properties.getProperty(Attribute.CRYPTO_KEY_LENGTH)); } else if (cryptoAlgorithmShort.equals(DES)) { keyLengthBits = 56; } else if (cryptoAlgorithmShort.equals(DESede) || cryptoAlgorithmShort.equals(TripleDES)) { keyLengthBits = 168; } else { keyLengthBits = 128; } } // check the feedback mode String feedbackMode = cryptoAlgorithm.substring(firstSlashPos+1,lastSlashPos); if (!feedbackMode.equals("CBC") && !feedbackMode.equals("CFB") && !feedbackMode.equals("ECB") && !feedbackMode.equals("OFB")) throw StandardException.newException(SQLState.ENCRYPTION_BAD_FEEDBACKMODE, feedbackMode); // check the NoPadding mode is used String padding = cryptoAlgorithm.substring(lastSlashPos+1,cryptoAlgorithm.length()); if (!padding.equals("NoPadding")) throw StandardException.newException(SQLState.ENCRYPTION_BAD_PADDING, padding); Throwable t; try { if 
(cryptoProvider != null) { // provider package should be set by property if (Security.getProvider(cryptoProviderShort) == null) { action = 1; // add provider through privileged block. java.security.AccessController.doPrivileged(this); } } // need this to check the boot password messageDigest = MessageDigest.getInstance(MESSAGE_DIGEST); byte[] generatedKey; if (externalKey != null) { // incorrect to specify external key and boot password if (properties.getProperty(Attribute.BOOT_PASSWORD) != null) throw StandardException.newException(SQLState.SERVICE_WRONG_BOOT_PASSWORD); generatedKey = org.apache.derby.iapi.util.StringUtil.fromHexString(externalKey, 0, externalKey.length()); if(create) properties.put(Attribute.CRYPTO_KEY_LENGTH,String.valueOf(generatedKey.length)); else // mismatch in encryptedKey used at creation versus at connecting again if(generatedKey.length != encodedKeyLength && encodedKeyLength > 0) throw StandardException.newException(SQLState.ENCRYPTION_BAD_EXTERNAL_KEY); } else { generatedKey = handleBootPassword(create, properties); if(create) properties.put(Attribute.CRYPTO_KEY_LENGTH,keyLengthBits+"-"+generatedKey.length); } // Make a key and IV object out of the generated key mainSecretKey = generateKey(generatedKey); mainIV = generateIV(generatedKey); if (create) { properties.put(Attribute.DATA_ENCRYPTION, "true"); // Set two new properties to allow for future changes to the log and data encryption // schemes. This property is introduced in version 10 , value starts at 1. 
properties.put(RawStoreFactory.DATA_ENCRYPT_ALGORITHM_VERSION,String.valueOf(1)); properties.put(RawStoreFactory.LOG_ENCRYPT_ALGORITHM_VERSION,String.valueOf(1)); } return; } catch (java.security.PrivilegedActionException pae) { t = pae.getException(); } catch (NoSuchAlgorithmException nsae) { t = nsae; } catch (SecurityException se) { t = se; } catch (LinkageError le) { t = le; } catch (ClassCastException cce) { t = cce; } throw StandardException.newException(SQLState.MISSING_ENCRYPTION_PROVIDER, t); }
optimizer.considerCost(this, restrictionList, getCostEstimate(), outerCost);
/**
 * Optimizes this ProjectRestrict node. When the child is Optimizable,
 * recursively optimizes it, copies the child's cost into this node's
 * cost estimate, and offers the result to the optimizer via
 * considerCost. Otherwise (child is a SelectNode/RowResultSetNode whose
 * access path has not been modified), optimizes the child as a new
 * query block, copies its cost into both the best access path and the
 * best sort-avoidance path, and again calls considerCost. Current plans
 * are saved first via addOrLoadBestPlanMapping so the optimizer can
 * revert if this path is later skipped.
 * NOTE(review): formatting below is collapsed as found in this dump.
 *
 * @param optimizer  optimizer driving this pass
 * @param predList   predicates pushed to this node (restrictionList used
 *                   for the child, see RESOLVE comments below)
 * @param outerCost  cost of the outer rows for this join position
 * @param rowOrdering current row ordering
 * @return this node's cost estimate
 * @throws StandardException propagated from child optimization
 */
public CostEstimate optimizeIt( Optimizer optimizer, OptimizablePredicateList predList, CostEstimate outerCost, RowOrdering rowOrdering) throws StandardException { /* ** RESOLVE: Most types of Optimizables only implement estimateCost(), ** and leave it up to optimizeIt() in FromTable to figure out the ** total cost of the join. A ProjectRestrict can have a non-Optimizable ** child, though, in which case we want to tell the child the ** number of outer rows - it could affect the join strategy ** significantly. So we implement optimizeIt() here, which overrides ** the optimizeIt() in FromTable. This assumes that the join strategy ** for which this join node is the inner table is a nested loop join, ** which will not be a valid assumption when we implement other ** strategies like materialization (hash join can work only on ** base tables). The join strategy for a base table under a ** ProjectRestrict is set in the base table itself. */ CostEstimate childCost; costEstimate = getCostEstimate(optimizer); /* ** Don't re-optimize a child result set that has already been fully ** optimized. For example, if the child result set is a SelectNode, ** it will be changed to a ProjectRestrictNode, which we don't want ** to re-optimized. */ // NOTE: TO GET THE RIGHT COST, THE CHILD RESULT MAY HAVE TO BE // OPTIMIZED MORE THAN ONCE, BECAUSE THE NUMBER OF OUTER ROWS // MAY BE DIFFERENT EACH TIME. // if (childResultOptimized) // return costEstimate; // It's possible that a call to optimize the left/right will cause // a new "truly the best" plan to be stored in the underlying base // tables. If that happens and then we decide to skip that plan // (which we might do if the call to "considerCost()" below decides // the current path is infeasible or not the best) we need to be // able to revert back to the "truly the best" plans that we had // saved before we got here. So with this next call we save the // current plans using "this" node as the key. 
If needed, we'll // then make the call to revert the plans in OptimizerImpl's // getNextDecoratedPermutation() method. addOrLoadBestPlanMapping(true, this); /* If the childResult is instanceof Optimizable, then we optimizeIt. * Otherwise, we are going into a new query block. If the new query * block has already had its access path modified, then there is * nothing to do. Otherwise, we must begin the optimization process * anew on the new query block. */ if (childResult instanceof Optimizable) { childCost = ((Optimizable) childResult).optimizeIt( optimizer, restrictionList, outerCost, rowOrdering); /* Copy child cost to this node's cost */ costEstimate.setCost( childCost.getEstimatedCost(), childCost.rowCount(), childCost.singleScanRowCount()); optimizer.considerCost(this, restrictionList, getCostEstimate(), outerCost); } else if ( ! accessPathModified) { if (SanityManager.DEBUG) { if (! ((childResult instanceof SelectNode) || (childResult instanceof RowResultSetNode))) { SanityManager.THROWASSERT( "childResult is expected to be instanceof " + "SelectNode or RowResultSetNode - it is a " + childResult.getClass().getName()); } } childResult = childResult.optimize(optimizer.getDataDictionary(), restrictionList, outerCost.rowCount()); /* Copy child cost to this node's cost */ childCost = childResult.costEstimate; costEstimate.setCost( childCost.getEstimatedCost(), childCost.rowCount(), childCost.singleScanRowCount()); getBestAccessPath().setCostEstimate(costEstimate); /* ** The current access path may not be part of a sort avoidance ** path, but set the cost estimate there anyway, just in case ** it is. */ getBestSortAvoidancePath().setCostEstimate(costEstimate); // childResultOptimized = true; /* RESOLVE - ARBITRARYHASHJOIN - Passing restriction list here, as above, is correct. 
* However, passing predList makes the following work: * select * from t1, (select * from t2) c properties joinStrategy = hash where t1.c1 = c.c1; * The following works with restrictionList: * select * from t1, (select c1 + 0 from t2) c(c1) properties joinStrategy = hash where t1.c1 = c.c1; */ optimizer.considerCost(this, restrictionList, getCostEstimate(), outerCost); } return costEstimate; }
double memusage = optimizable.memoryUsage( estimatedCost.rowCount() / outerCost.rowCount()); if (memusage > maxMemoryPerTable)
if( ! optimizable.memoryUsageOK( estimatedCost.rowCount() / outerCost.rowCount(), maxMemoryPerTable))
/**
 * Offers an access path's estimated cost to the optimizer for the given
 * optimizable. Skips infeasible join strategies and paths whose
 * estimated memory usage (rowCount / outer rowCount, as computed by
 * optimizable.memoryUsage) exceeds maxMemoryPerTable. Otherwise records
 * the path as the best access path when it is no more expensive than the
 * current best, and, when a required row ordering exists and the outer
 * optimizable also avoids a sort, records the cheapest sort-avoidance
 * path and remembers the current row ordering.
 * NOTE(review): formatting below is collapsed as found in this dump.
 *
 * @param optimizable   the optimizable whose access path is considered
 * @param predList      predicates used for the feasibility check
 * @param estimatedCost cost of the proposed access path
 * @param outerCost     cost of the outer rows (rowCount used as divisor
 *                      in the per-scan memory estimate)
 * @throws StandardException propagated from feasibility/ordering checks
 */
public void considerCost(Optimizable optimizable, OptimizablePredicateList predList, CostEstimate estimatedCost, CostEstimate outerCost) throws StandardException { /* ** Don't consider non-feasible join strategies. */ if ( ! optimizable.feasibleJoinStrategy(predList, this)) { return; } /* ** Skip this access path if it takes too much memory. ** ** NOTE: The default assumption here is that the number of rows in ** a single scan is the total number of rows divided by the number ** of outer rows. The optimizable may over-ride this assumption. ** ** NOTE: This is probably not necessary here, because we should ** get here only for nested loop joins, which don't use memory. */ double memusage = optimizable.memoryUsage( estimatedCost.rowCount() / outerCost.rowCount()); if (memusage > maxMemoryPerTable) { if (optimizerTrace) { trace(SKIPPING_DUE_TO_EXCESS_MEMORY, 0, 0, memusage, null); } return; } /* Pick the cheapest cost for this particular optimizable. * NOTE: Originally, the code only chose the new access path if * it was cheaper than the old access path. However, I (Jerry) * found that the new and old costs were the same for a derived * table and the old access path did not have a join strategy * associated with it in that case. So, we now choose the new * access path if it is the same cost or cheaper than the current * access path. */ AccessPath ap = optimizable.getBestAccessPath(); CostEstimate bestCostEstimate = ap.getCostEstimate(); if ((bestCostEstimate == null) || (estimatedCost.compare(bestCostEstimate) <= 0)) { ap.setCostEstimate(estimatedCost); optimizable.rememberJoinStrategyAsBest(ap); } /* ** Keep track of the best sort-avoidance path if there is a ** required row ordering. */ if (requiredRowOrdering != null) { /* ** The current optimizable can avoid a sort only if the ** outer one does, also (if there is an outer one). */ if (joinPosition == 0 || optimizableList.getOptimizable( proposedJoinOrder[joinPosition - 1]). 
considerSortAvoidancePath()) { /* ** There is a required row ordering - does the proposed access ** path avoid a sort? */ if (requiredRowOrdering.sortRequired(currentRowOrdering, assignedTableMap) == RequiredRowOrdering.NOTHING_REQUIRED) { ap = optimizable.getBestSortAvoidancePath(); bestCostEstimate = ap.getCostEstimate(); /* Is this the cheapest sort-avoidance path? */ if ((bestCostEstimate == null) || (estimatedCost.compare(bestCostEstimate) < 0)) { ap.setCostEstimate(estimatedCost); optimizable.rememberJoinStrategyAsBest(ap); optimizable.rememberSortAvoidancePath(); /* ** Remember the current row ordering as best */ currentRowOrdering.copy(bestRowOrdering); } } } } }
trace(SKIPPING_DUE_TO_EXCESS_MEMORY, 0, 0, memusage, null);
trace(SKIPPING_DUE_TO_EXCESS_MEMORY, 0, 0, 0.0, null);
public void considerCost(Optimizable optimizable, OptimizablePredicateList predList, CostEstimate estimatedCost, CostEstimate outerCost) throws StandardException { /* ** Don't consider non-feasible join strategies. */ if ( ! optimizable.feasibleJoinStrategy(predList, this)) { return; } /* ** Skip this access path if it takes too much memory. ** ** NOTE: The default assumption here is that the number of rows in ** a single scan is the total number of rows divided by the number ** of outer rows. The optimizable may over-ride this assumption. ** ** NOTE: This is probably not necessary here, because we should ** get here only for nested loop joins, which don't use memory. */ double memusage = optimizable.memoryUsage( estimatedCost.rowCount() / outerCost.rowCount()); if (memusage > maxMemoryPerTable) { if (optimizerTrace) { trace(SKIPPING_DUE_TO_EXCESS_MEMORY, 0, 0, memusage, null); } return; } /* Pick the cheapest cost for this particular optimizable. * NOTE: Originally, the code only chose the new access path if * it was cheaper than the old access path. However, I (Jerry) * found that the new and old costs were the same for a derived * table and the old access path did not have a join strategy * associated with it in that case. So, we now choose the new * access path if it is the same cost or cheaper than the current * access path. */ AccessPath ap = optimizable.getBestAccessPath(); CostEstimate bestCostEstimate = ap.getCostEstimate(); if ((bestCostEstimate == null) || (estimatedCost.compare(bestCostEstimate) <= 0)) { ap.setCostEstimate(estimatedCost); optimizable.rememberJoinStrategyAsBest(ap); } /* ** Keep track of the best sort-avoidance path if there is a ** required row ordering. */ if (requiredRowOrdering != null) { /* ** The current optimizable can avoid a sort only if the ** outer one does, also (if there is an outer one). */ if (joinPosition == 0 || optimizableList.getOptimizable( proposedJoinOrder[joinPosition - 1]). 
considerSortAvoidancePath()) { /* ** There is a required row ordering - does the proposed access ** path avoid a sort? */ if (requiredRowOrdering.sortRequired(currentRowOrdering, assignedTableMap) == RequiredRowOrdering.NOTHING_REQUIRED) { ap = optimizable.getBestSortAvoidancePath(); bestCostEstimate = ap.getCostEstimate(); /* Is this the cheapest sort-avoidance path? */ if ((bestCostEstimate == null) || (estimatedCost.compare(bestCostEstimate) < 0)) { ap.setCostEstimate(estimatedCost); optimizable.rememberJoinStrategyAsBest(ap); optimizable.rememberSortAvoidancePath(); /* ** Remember the current row ordering as best */ currentRowOrdering.copy(bestRowOrdering); } } } } }
double memusage = optimizable.memoryUsage( estimatedCost.rowCount() / outerCost.rowCount()); if (memusage > maxMemoryPerTable)
if( ! optimizable.memoryUsageOK( estimatedCost.rowCount() / outerCost.rowCount(), maxMemoryPerTable))
private void costBasedCostOptimizable(Optimizable optimizable, TableDescriptor td, ConglomerateDescriptor cd, OptimizablePredicateList predList, CostEstimate outerCost) throws StandardException { CostEstimate estimatedCost = estimateTotalCost(predList, cd, outerCost, optimizable); /* ** Skip this access path if it takes too much memory. ** ** NOTE: The default assumption here is that the number of rows in ** a single scan is the total number of rows divided by the number ** of outer rows. The optimizable may over-ride this assumption. */ double memusage = optimizable.memoryUsage( estimatedCost.rowCount() / outerCost.rowCount()); if (memusage > maxMemoryPerTable) { if (optimizerTrace) { trace(SKIPPING_DUE_TO_EXCESS_MEMORY, 0, 0, memusage, null); } return; } /* Pick the cheapest cost for this particular optimizable. */ AccessPath ap = optimizable.getBestAccessPath(); CostEstimate bestCostEstimate = ap.getCostEstimate(); if ((bestCostEstimate == null) || (estimatedCost.compare(bestCostEstimate) < 0)) { ap.setConglomerateDescriptor(cd); ap.setCostEstimate(estimatedCost); ap.setCoveringIndexScan(optimizable.isCoveringIndex(cd)); /* ** It's a non-matching index scan either if there is no ** predicate list, or nothing in the predicate list is useful ** for limiting the scan. */ ap.setNonMatchingIndexScan( (predList == null) || ( ! ( predList.useful(optimizable, cd) ) ) ); ap.setLockMode(optimizable.getCurrentAccessPath().getLockMode()); optimizable.rememberJoinStrategyAsBest(ap); } /* ** Keep track of the best sort-avoidance path if there is a ** required row ordering. */ if (requiredRowOrdering != null) { /* ** The current optimizable can avoid a sort only if the ** outer one does, also (if there is an outer one). */ if (joinPosition == 0 || optimizableList.getOptimizable( proposedJoinOrder[joinPosition - 1]). considerSortAvoidancePath()) { /* ** There is a required row ordering - does the proposed access ** path avoid a sort? 
*/ if (requiredRowOrdering.sortRequired(currentRowOrdering, assignedTableMap) == RequiredRowOrdering.NOTHING_REQUIRED) { ap = optimizable.getBestSortAvoidancePath(); bestCostEstimate = ap.getCostEstimate(); /* Is this the cheapest sort-avoidance path? */ if ((bestCostEstimate == null) || (estimatedCost.compare(bestCostEstimate) < 0)) { ap.setConglomerateDescriptor(cd); ap.setCostEstimate(estimatedCost); ap.setCoveringIndexScan( optimizable.isCoveringIndex(cd)); /* ** It's a non-matching index scan either if there is no ** predicate list, or nothing in the predicate list is ** useful for limiting the scan. */ ap.setNonMatchingIndexScan( (predList == null) || ( ! (predList.useful(optimizable, cd)) ) ); ap.setLockMode( optimizable.getCurrentAccessPath().getLockMode()); optimizable.rememberJoinStrategyAsBest(ap); optimizable.rememberSortAvoidancePath(); /* ** Remember the current row ordering as best */ currentRowOrdering.copy(bestRowOrdering); } } } } }
trace(SKIPPING_DUE_TO_EXCESS_MEMORY, 0, 0, memusage, null);
trace(SKIPPING_DUE_TO_EXCESS_MEMORY, 0, 0, 0.0, null);
private void costBasedCostOptimizable(Optimizable optimizable, TableDescriptor td, ConglomerateDescriptor cd, OptimizablePredicateList predList, CostEstimate outerCost) throws StandardException { CostEstimate estimatedCost = estimateTotalCost(predList, cd, outerCost, optimizable); /* ** Skip this access path if it takes too much memory. ** ** NOTE: The default assumption here is that the number of rows in ** a single scan is the total number of rows divided by the number ** of outer rows. The optimizable may over-ride this assumption. */ double memusage = optimizable.memoryUsage( estimatedCost.rowCount() / outerCost.rowCount()); if (memusage > maxMemoryPerTable) { if (optimizerTrace) { trace(SKIPPING_DUE_TO_EXCESS_MEMORY, 0, 0, memusage, null); } return; } /* Pick the cheapest cost for this particular optimizable. */ AccessPath ap = optimizable.getBestAccessPath(); CostEstimate bestCostEstimate = ap.getCostEstimate(); if ((bestCostEstimate == null) || (estimatedCost.compare(bestCostEstimate) < 0)) { ap.setConglomerateDescriptor(cd); ap.setCostEstimate(estimatedCost); ap.setCoveringIndexScan(optimizable.isCoveringIndex(cd)); /* ** It's a non-matching index scan either if there is no ** predicate list, or nothing in the predicate list is useful ** for limiting the scan. */ ap.setNonMatchingIndexScan( (predList == null) || ( ! ( predList.useful(optimizable, cd) ) ) ); ap.setLockMode(optimizable.getCurrentAccessPath().getLockMode()); optimizable.rememberJoinStrategyAsBest(ap); } /* ** Keep track of the best sort-avoidance path if there is a ** required row ordering. */ if (requiredRowOrdering != null) { /* ** The current optimizable can avoid a sort only if the ** outer one does, also (if there is an outer one). */ if (joinPosition == 0 || optimizableList.getOptimizable( proposedJoinOrder[joinPosition - 1]). considerSortAvoidancePath()) { /* ** There is a required row ordering - does the proposed access ** path avoid a sort? 
*/ if (requiredRowOrdering.sortRequired(currentRowOrdering, assignedTableMap) == RequiredRowOrdering.NOTHING_REQUIRED) { ap = optimizable.getBestSortAvoidancePath(); bestCostEstimate = ap.getCostEstimate(); /* Is this the cheapest sort-avoidance path? */ if ((bestCostEstimate == null) || (estimatedCost.compare(bestCostEstimate) < 0)) { ap.setConglomerateDescriptor(cd); ap.setCostEstimate(estimatedCost); ap.setCoveringIndexScan( optimizable.isCoveringIndex(cd)); /* ** It's a non-matching index scan either if there is no ** predicate list, or nothing in the predicate list is ** useful for limiting the scan. */ ap.setNonMatchingIndexScan( (predList == null) || ( ! (predList.useful(optimizable, cd)) ) ); ap.setLockMode( optimizable.getCurrentAccessPath().getLockMode()); optimizable.rememberJoinStrategyAsBest(ap); optimizable.rememberSortAvoidancePath(); /* ** Remember the current row ordering as best */ currentRowOrdering.copy(bestRowOrdering); } } } } }
boolean completeCompile(QueryTreeNode qt)
void completeCompile(QueryTreeNode qt)
boolean completeCompile(QueryTreeNode qt) throws StandardException { //if (finished) // throw StandardException.newException(SQLState.LANG_STATEMENT_CLOSED, "completeCompile()"); paramTypeDescriptors = qt.getParameterTypes(); //If the query references a SESSION schema table (temporary or permanent), then mark so in this statement //This information will be used by EXECUTE STATEMENT if it is executing a statement that was created with NOCOMPILE. Because //of NOCOMPILE, we could not catch SESSION schema table reference by the statement at CREATE STATEMENT time. Need to catch //such statements at EXECUTE STATEMENT time when the query is getting compiled. referencesSessionSchema = qt.referencesSessionSchema(); // erase cursor info in case statement text changed if (targetTable!=null) { targetTable = null; updateMode = 0; updateColumns = null; targetColumns = null; } // get the result description (null for non-cursor statements) // would we want to reuse an old resultDesc? // or do we need to always replace in case this was select *? resultDesc = qt.makeResultDescription(); // would look at resultDesc.getStatementType() but it // doesn't call out cursors as such, so we check // the root node type instead. if (resultDesc != null) { /* For cursors, we carry around some extra information. */ CursorInfo cursorInfo = (CursorInfo)qt.getCursorInfo(); if (cursorInfo != null) { targetTable = cursorInfo.targetTable; targetColumns = cursorInfo.targetColumns; updateColumns = cursorInfo.updateColumns; updateMode = cursorInfo.updateMode; } } isValid = true; //if this statement is referencing session schema tables, then we do not want cache it. return referencesSessionSchema; }
referencesSessionSchema = qt.referencesSessionSchema();
boolean completeCompile(QueryTreeNode qt) throws StandardException { //if (finished) // throw StandardException.newException(SQLState.LANG_STATEMENT_CLOSED, "completeCompile()"); paramTypeDescriptors = qt.getParameterTypes(); //If the query references a SESSION schema table (temporary or permanent), then mark so in this statement //This information will be used by EXECUTE STATEMENT if it is executing a statement that was created with NOCOMPILE. Because //of NOCOMPILE, we could not catch SESSION schema table reference by the statement at CREATE STATEMENT time. Need to catch //such statements at EXECUTE STATEMENT time when the query is getting compiled. referencesSessionSchema = qt.referencesSessionSchema(); // erase cursor info in case statement text changed if (targetTable!=null) { targetTable = null; updateMode = 0; updateColumns = null; targetColumns = null; } // get the result description (null for non-cursor statements) // would we want to reuse an old resultDesc? // or do we need to always replace in case this was select *? resultDesc = qt.makeResultDescription(); // would look at resultDesc.getStatementType() but it // doesn't call out cursors as such, so we check // the root node type instead. if (resultDesc != null) { /* For cursors, we carry around some extra information. */ CursorInfo cursorInfo = (CursorInfo)qt.getCursorInfo(); if (cursorInfo != null) { targetTable = cursorInfo.targetTable; targetColumns = cursorInfo.targetColumns; updateColumns = cursorInfo.updateColumns; updateMode = cursorInfo.updateMode; } } isValid = true; //if this statement is referencing session schema tables, then we do not want cache it. return referencesSessionSchema; }
return referencesSessionSchema;
return;
boolean completeCompile(QueryTreeNode qt) throws StandardException { //if (finished) // throw StandardException.newException(SQLState.LANG_STATEMENT_CLOSED, "completeCompile()"); paramTypeDescriptors = qt.getParameterTypes(); //If the query references a SESSION schema table (temporary or permanent), then mark so in this statement //This information will be used by EXECUTE STATEMENT if it is executing a statement that was created with NOCOMPILE. Because //of NOCOMPILE, we could not catch SESSION schema table reference by the statement at CREATE STATEMENT time. Need to catch //such statements at EXECUTE STATEMENT time when the query is getting compiled. referencesSessionSchema = qt.referencesSessionSchema(); // erase cursor info in case statement text changed if (targetTable!=null) { targetTable = null; updateMode = 0; updateColumns = null; targetColumns = null; } // get the result description (null for non-cursor statements) // would we want to reuse an old resultDesc? // or do we need to always replace in case this was select *? resultDesc = qt.makeResultDescription(); // would look at resultDesc.getStatementType() but it // doesn't call out cursors as such, so we check // the root node type instead. if (resultDesc != null) { /* For cursors, we carry around some extra information. */ CursorInfo cursorInfo = (CursorInfo)qt.getCursorInfo(); if (cursorInfo != null) { targetTable = cursorInfo.targetTable; targetColumns = cursorInfo.targetColumns; updateColumns = cursorInfo.updateColumns; updateMode = cursorInfo.updateMode; } } isValid = true; //if this statement is referencing session schema tables, then we do not want cache it. return referencesSessionSchema; }
daemonGroup.setDaemon(true);
private boolean PBinitialize(boolean lite) { if (!lite) { try { daemonGroup = new ThreadGroup("derby.daemons"); } catch (SecurityException se) { } } InputStream versionStream = getClass().getResourceAsStream(ProductGenusNames.DBMS_INFO); engineVersion = ProductVersionHolder.getProductVersionHolderFromMyEnv(versionStream); String systemHome; // create the system home directory if it doesn't exist try { // SECURITY PERMISSION - OP2 systemHome = System.getProperty(Property.SYSTEM_HOME_PROPERTY); } catch (SecurityException se) { // system home will be the current directory systemHome = null; } if (systemHome != null) { home = new File(systemHome); // SECURITY PERMISSION - OP2a if (home.exists()) { if (!home.isDirectory()) { report(Property.SYSTEM_HOME_PROPERTY + "=" + systemHome + " does not represent a directory"); return false; } } else if (!lite) { try { // SECURITY PERMISSION - OP2b home.mkdirs(); } catch (SecurityException se) { return false; } } } return true; }
System.out.println("SELECT with " + numUnions/100 * 100 + " unions");
private static void largeUnionSelect(Connection con, String viewName, int numUnions) throws Exception { StringBuffer selectSQLBuffer = new StringBuffer("select * from t0 ") ; for (int i = 1; i < numUnions/100;i++) { selectSQLBuffer.append(" UNION ALL (SELECT * FROM " + viewName + ")"); } try { // Ready to execute the problematic query String selectSQL = selectSQLBuffer.toString(); //System.out.println(selectSQL); System.out.println("SELECT with " + numUnions/100 * 100 + " unions"); PreparedStatement pstmt = con.prepareStatement(selectSQL); ResultSet rs = pstmt.executeQuery(); int numRowsExpected = (numUnions/100 * 100); int numRows = 0; while (rs.next()) { numRows++; if ((numRows % 100) == 0) checkRowData(rs); } System.out.println("PASS: Row data check ok"); con.commit(); pstmt.close(); con.close(); } catch (SQLException sqle) { System.out.println("FAILED QUERY"); do { System.out.println(sqle.getSQLState() + ":" + sqle.getMessage()); sqle = sqle.getNextException(); } while (sqle != null); } }
System.out.println("PASS: Row data check ok");
System.out.println("PASS: " + testName + " Row data check ok");
private static void largeUnionSelect(Connection con, String viewName, int numUnions) throws Exception { StringBuffer selectSQLBuffer = new StringBuffer("select * from t0 ") ; for (int i = 1; i < numUnions/100;i++) { selectSQLBuffer.append(" UNION ALL (SELECT * FROM " + viewName + ")"); } try { // Ready to execute the problematic query String selectSQL = selectSQLBuffer.toString(); //System.out.println(selectSQL); System.out.println("SELECT with " + numUnions/100 * 100 + " unions"); PreparedStatement pstmt = con.prepareStatement(selectSQL); ResultSet rs = pstmt.executeQuery(); int numRowsExpected = (numUnions/100 * 100); int numRows = 0; while (rs.next()) { numRows++; if ((numRows % 100) == 0) checkRowData(rs); } System.out.println("PASS: Row data check ok"); con.commit(); pstmt.close(); con.close(); } catch (SQLException sqle) { System.out.println("FAILED QUERY"); do { System.out.println(sqle.getSQLState() + ":" + sqle.getMessage()); sqle = sqle.getNextException(); } while (sqle != null); } }
con.close();
private static void largeUnionSelect(Connection con, String viewName, int numUnions) throws Exception { StringBuffer selectSQLBuffer = new StringBuffer("select * from t0 ") ; for (int i = 1; i < numUnions/100;i++) { selectSQLBuffer.append(" UNION ALL (SELECT * FROM " + viewName + ")"); } try { // Ready to execute the problematic query String selectSQL = selectSQLBuffer.toString(); //System.out.println(selectSQL); System.out.println("SELECT with " + numUnions/100 * 100 + " unions"); PreparedStatement pstmt = con.prepareStatement(selectSQL); ResultSet rs = pstmt.executeQuery(); int numRowsExpected = (numUnions/100 * 100); int numRows = 0; while (rs.next()) { numRows++; if ((numRows % 100) == 0) checkRowData(rs); } System.out.println("PASS: Row data check ok"); con.commit(); pstmt.close(); con.close(); } catch (SQLException sqle) { System.out.println("FAILED QUERY"); do { System.out.println(sqle.getSQLState() + ":" + sqle.getMessage()); sqle = sqle.getNextException(); } while (sqle != null); } }
System.out.println("FAILED QUERY"); do { System.out.println(sqle.getSQLState() + ":" + sqle.getMessage()); sqle = sqle.getNextException(); } while (sqle != null);
reportFailure(testName, sqle);
private static void largeUnionSelect(Connection con, String viewName, int numUnions) throws Exception { StringBuffer selectSQLBuffer = new StringBuffer("select * from t0 ") ; for (int i = 1; i < numUnions/100;i++) { selectSQLBuffer.append(" UNION ALL (SELECT * FROM " + viewName + ")"); } try { // Ready to execute the problematic query String selectSQL = selectSQLBuffer.toString(); //System.out.println(selectSQL); System.out.println("SELECT with " + numUnions/100 * 100 + " unions"); PreparedStatement pstmt = con.prepareStatement(selectSQL); ResultSet rs = pstmt.executeQuery(); int numRowsExpected = (numUnions/100 * 100); int numRows = 0; while (rs.next()) { numRows++; if ((numRows % 100) == 0) checkRowData(rs); } System.out.println("PASS: Row data check ok"); con.commit(); pstmt.close(); con.close(); } catch (SQLException sqle) { System.out.println("FAILED QUERY"); do { System.out.println(sqle.getSQLState() + ":" + sqle.getMessage()); sqle = sqle.getNextException(); } while (sqle != null); } }
Statement stmt = null; PreparedStatement pstmt = null; String tableName = "t0"; String viewName = "v0"; ij.getPropertyArg(argv); Connection con = ij.startJBMS(); con.setAutoCommit(false); stmt = con.createStatement(); System.out.println("connected"); try { stmt.executeUpdate("drop table " + tableName); }catch (SQLException se) { } try { stmt.executeUpdate("drop view " + viewName); }catch (SQLException se) { } String createSQL = "create table " + tableName + "(si smallint,i int, bi bigint, r real, f float, d double precision, n5_2 numeric(5,2), dec10_3 decimal(10,3), ch20 char(3),vc varchar(20), lvc long varchar)"; stmt.executeUpdate(createSQL); stmt.executeUpdate("insert into " + tableName + " values(2,3,4,5.3,5.3,5.3,31.13,123456.123, 'one','one','one')"); System.out.println("Building view 100 unions"); StringBuffer createView = new StringBuffer("create view " + viewName + " as select * from " + tableName); for (int i = 1; i < 100; i ++) { createView.append(" UNION ALL (SELECT * FROM " + tableName + ")"); } String createViewString = createView.toString(); stmt.executeUpdate(createView.toString()); largeUnionSelect(con, viewName, 2000); largeUnionSelect(con, viewName, 10000);
ij.getPropertyArg(argv); Connection con = ij.startJBMS(); con.setAutoCommit(false); testParamsInWhereClause(con); testUnions(con); con.commit(); con.close();
public static void main(String argv[]) throws Exception { Statement stmt = null; PreparedStatement pstmt = null; //int numUnions = 4000; //int numUnions = 2000; /* We still have problems with large queries. Passes at 4000. With size 5000 it gets "java.lang.VerifyError: (class: db2j/exe/ac601a400fx0102xc673xe3e9x000000163ac04, method: execute signature: ()Lcom/ibm/db2j/protocol/Database/Language/Interface/ResultSet;) Illegal target of jump or branch". My fix affects generated method "fillResultSet". With size 10000 largeCodeGen gets Java exception: 'java.io.IOException: constant_pool(70796 > 65535)'. */ String tableName = "t0"; String viewName = "v0"; ij.getPropertyArg(argv); Connection con = ij.startJBMS(); con.setAutoCommit(false); stmt = con.createStatement(); System.out.println("connected"); // Create table try { stmt.executeUpdate("drop table " + tableName); }catch (SQLException se) { // drop error ok. } try { stmt.executeUpdate("drop view " + viewName); }catch (SQLException se) { // drop error ok. } String createSQL = "create table " + tableName + "(si smallint,i int, bi bigint, r real, f float, d double precision, n5_2 numeric(5,2), dec10_3 decimal(10,3), ch20 char(3),vc varchar(20), lvc long varchar)"; stmt.executeUpdate(createSQL); stmt.executeUpdate("insert into " + tableName + " values(2,3,4,5.3,5.3,5.3,31.13,123456.123, 'one','one','one')"); System.out.println("Building view 100 unions"); StringBuffer createView = new StringBuffer("create view " + viewName + " as select * from " + tableName); for (int i = 1; i < 100; i ++) { createView.append(" UNION ALL (SELECT * FROM " + tableName + ")"); } String createViewString = createView.toString(); //System.out.println(createViewString); stmt.executeUpdate(createView.toString()); // 2000 unions caused method too big error in verifier largeUnionSelect(con, viewName, 2000); // 10000 unions overflows the number of constant pool entries largeUnionSelect(con, viewName, 10000); }
System.out.println("Derby boot failed: " + ":" + attrs.getProperty("databaseName"));
System.out.println("Derby boot failed: " + attrs.getProperty("databaseName") + " : " + e.getSQLState() + ": " + e.getMessage());
private static void bootDerby () throws SQLException { Properties attrs = new Properties(); attrs.setProperty("databaseName", makeDatabaseName()); attrs.setProperty("createDatabase", "create"); DataSource ds = TestUtil.getDataSource(attrs); try { Connection conn = ds.getConnection(); conn.close(); } catch (SQLException e) { System.out.println("Derby boot failed: " + ":" + attrs.getProperty("databaseName")); throw e; } }
assertEmpty(errStreamFile);
private static void checkField() throws AssertException, IOException, SQLException { openStreams(); resetProps(); sysProps.put(FIELD_PROP, "org.apache.derbyTesting.functionTests.tests.lang."+ "errorStream.fieldStream"); bootDerby(); shutdownDerby(); closeStreams(); assertNonExisting(fileStreamFile); assertEmpty(methodStreamFile); assertNonEmpty(fieldStreamFile); }
assertEmpty(errStreamFile);
private static void checkFile() throws AssertException, IOException, SQLException { openStreams(); resetProps(); sysProps.put(FILE_PROP, fileStreamFile.getCanonicalPath()); bootDerby(); shutdownDerby(); closeStreams(); assertNonEmpty(fileStreamFile); assertEmpty(methodStreamFile); assertEmpty(fieldStreamFile); }
assertEmpty(errStreamFile);
private static void checkFileOverField() throws AssertException, IOException, SQLException { openStreams(); resetProps(); sysProps.put(FILE_PROP, fileStreamFile.getCanonicalPath()); sysProps.put(FIELD_PROP, "org.apache.derbyTesting.functionTests.tests.lang."+ "errorStream.fieldStream"); bootDerby(); shutdownDerby(); closeStreams(); assertNonEmpty(fileStreamFile); assertEmpty(methodStreamFile); assertEmpty(fieldStreamFile); }
assertEmpty(errStreamFile);
private static void checkFileOverMethod() throws AssertException, IOException, SQLException { openStreams(); resetProps(); sysProps.put(FILE_PROP, fileStreamFile.getCanonicalPath()); sysProps.put(METHOD_PROP, "org.apache.derbyTesting.functionTests.tests.lang."+ "errorStream.getStream"); bootDerby(); shutdownDerby(); closeStreams(); assertNonEmpty(fileStreamFile); assertEmpty(methodStreamFile); assertEmpty(fieldStreamFile); }
assertEmpty(errStreamFile);
private static void checkFileOverMethodAndField() throws AssertException, IOException, SQLException { openStreams(); resetProps(); sysProps.put(FILE_PROP, fileStreamFile.getCanonicalPath()); sysProps.put(METHOD_PROP, "org.apache.derbyTesting.functionTests.tests.lang."+ "errorStream.getStream"); sysProps.put(FIELD_PROP, "org.apache.derbyTesting.functionTests.tests.lang."+ "errorStream.fieldStream"); bootDerby(); shutdownDerby(); closeStreams(); assertNonEmpty(fileStreamFile); assertEmpty(methodStreamFile); assertEmpty(fieldStreamFile); }
assertEmpty(errStreamFile);
private static void checkMethod() throws AssertException, IOException, SQLException { openStreams(); resetProps(); sysProps.put(METHOD_PROP, "org.apache.derbyTesting.functionTests.tests.lang."+ "errorStream.getStream"); bootDerby(); shutdownDerby(); closeStreams(); assertNonExisting(fileStreamFile); assertNonEmpty(methodStreamFile); assertEmpty(fieldStreamFile); }
assertEmpty(errStreamFile);
private static void checkMethodOverField() throws AssertException, IOException, SQLException { openStreams(); resetProps(); sysProps.put(METHOD_PROP, "org.apache.derbyTesting.functionTests.tests.lang."+ "errorStream.getStream"); sysProps.put(FIELD_PROP, "org.apache.derbyTesting.functionTests.tests.lang."+ "errorStream.fieldStream"); bootDerby(); shutdownDerby(); closeStreams(); assertNonExisting(fileStreamFile); assertNonEmpty(methodStreamFile); assertEmpty(fieldStreamFile); }
assertNonEmpty(errStreamFile);
private static void checkWrongField() throws AssertException, IOException, SQLException { openStreams(); resetProps(); sysProps.put(FIELD_PROP, "org.apache.derbyTesting.functionTests.tests.lang."+ "errorStream.nonExistingFieldStream"); bootDerby(); shutdownDerby(); closeStreams(); assertNonExisting(fileStreamFile); assertEmpty(methodStreamFile); assertEmpty(fieldStreamFile); }
assertNonEmpty(errStreamFile);
private static void checkWrongFile() throws AssertException, IOException, SQLException { openStreams(); sysProps.put(FILE_PROP, new File(derbyHome+"foo", // erroneous path makeStreamFilename("file")).getCanonicalPath()); bootDerby(); shutdownDerby(); closeStreams(); assertNonExisting(fileStreamFile); assertEmpty(methodStreamFile); assertEmpty(fieldStreamFile); }
assertNonEmpty(errStreamFile);
private static void checkWrongMethod() throws AssertException, IOException, SQLException { openStreams(); resetProps(); sysProps.put(METHOD_PROP, "org.apache.derbyTesting.functionTests.tests.lang."+ "errorStream.nonExistingGetStream"); bootDerby(); shutdownDerby(); closeStreams(); assertNonExisting(fileStreamFile); assertEmpty(methodStreamFile); assertEmpty(fieldStreamFile); }
System.setErr(System.out);
private static void closeStreams() throws IOException { try { methodStream.close(); fieldStream.close(); } catch (IOException e) { System.out.println("Could not close stream files"); throw e; } }
errStreamFile = new File(derbyHome, makeStreamFilename("err")); errStream =new FileOutputStream(errStreamFile); System.setErr(new PrintStream(errStream));
private static void openStreams() throws IOException{ runNo += 1; try { fileStreamFile = new File(derbyHome, makeStreamFilename("file")); methodStreamFile = new File(derbyHome, makeStreamFilename("method")); methodStream = new FileOutputStream(methodStreamFile); fieldStreamFile = new File(derbyHome, makeStreamFilename("field")); fieldStream = new FileOutputStream(fieldStreamFile); } catch (IOException e) { System.out.println("Could not open stream files"); throw e; } }
public <T> T createQueryObject(Class<T> ifc) throws SQLException{
public <T extends BaseQuery>T createQueryObject(Class<T> ifc) throws SQLException {
public <T> T createQueryObject(Class<T> ifc) throws SQLException{ throw Util.notImplemented(); }
userSuppliedOptimizerOverrides,
public RealNestedLoopLeftOuterJoinStatistics( int numOpens, int rowsSeen, int rowsFiltered, long constructorTime, long openTime, long nextTime, long closeTime, int resultSetNumber, int rowsSeenLeft, int rowsSeenRight, int rowsReturned, long restrictionTime, double optimizerEstimatedRowCount, double optimizerEstimatedCost, ResultSetStatistics leftResultSetStatistics, ResultSetStatistics rightResultSetStatistics, int emptyRightRowsReturned ) { super( numOpens, rowsSeen, rowsFiltered, constructorTime, openTime, nextTime, closeTime, resultSetNumber, rowsSeenLeft, rowsSeenRight, rowsReturned, restrictionTime, false, // We never do an EXISTS join for an outer join optimizerEstimatedRowCount, optimizerEstimatedCost, leftResultSetStatistics, rightResultSetStatistics ); this.emptyRightRowsReturned = emptyRightRowsReturned; }
try { s.execute("drop table t1"); } catch (SQLException se) {} try { s.execute("drop procedure za"); } catch (SQLException se) {}
public static void main(String[] args) { Connection con; PreparedStatement ps; Statement s; String nullString = null; System.out.println("Test nullSQLText starting"); try { // use the ij utility to read the property file and // make the initial connection. ij.getPropertyArg(args); con = ij.startJBMS(); con.setAutoCommit(true); // make sure it is true s = con.createStatement(); try { // test null String in prepared statement System.out.println("Test prepareStatement with null argument"); ps = con.prepareStatement(nullString); } catch (SQLException e) { System.out.println("FAIL -- expected exception"); dumpSQLExceptions(e); } try { // test null String in execute statement System.out.println("Test execute with null argument"); s.execute(nullString); } catch (SQLException e) { System.out.println("FAIL -- expected exception"); dumpSQLExceptions(e); } try { // test null String in execute query statement System.out.println("Test executeQuery with null argument"); s.executeQuery(nullString); } catch (SQLException e) { System.out.println("FAIL -- expected exception"); dumpSQLExceptions(e); } try { // test null String in execute update statement System.out.println("Test executeUpdate with null argument"); s.executeUpdate(nullString); } catch (SQLException e) { System.out.println("FAIL -- expected exception"); dumpSQLExceptions(e); } // Test comments in statements. derby522(s); con.close(); } catch (SQLException e) { dumpSQLExceptions(e); e.printStackTrace(System.out); } catch (Throwable e) { System.out.println("FAIL -- unexpected exception:"); e.printStackTrace(System.out); } System.out.println("Test nullSQLText finished"); }
bootingDictionary.upgradeMakeCatalog(tc, DataDictionary.SYSREQUIREDPERM_CATALOG_NUM);
/**
 * Do a full upgrade of the data dictionary to the current release.
 *
 * Rejects upgrades from anything older than Derby/Cloudscape 10.0,
 * refreshes the stored JDBC metadata queries, and then applies
 * version-gated catalog changes: 10.1 system procedures when coming
 * from exactly 10.0, and the 10.2 procedures plus the grant/revoke
 * permission catalogs (owned by {@code aid}) when coming from 10.1 or
 * earlier.
 *
 * @param tc                     transaction controller for the catalog writes
 * @param fromMajorVersionNumber dictionary major version being upgraded from
 * @param aid                    authorization id that becomes owner of the
 *                               system schemas (asserted non-null in debug
 *                               builds on the 10.2 path)
 * @exception StandardException  unsupported source version, or any failure
 *                               while updating the dictionary
 */
private void doFullUpgrade(TransactionController tc, int fromMajorVersionNumber, String aid)
        throws StandardException
{
    // Only supports upgrade from Derby 10.0 releases onwards
    if (fromMajorVersionNumber < DataDictionary.DD_VERSION_CS_10_0)
    {
        throw StandardException.newException(
                SQLState.UPGRADE_UNSUPPORTED,
                DD_Version.majorToString(fromMajorVersionNumber),
                this);
    }

    //Drop and recreate the stored versions of the JDBC database metadata queries
    //This is to make sure that we have the stored versions of JDBC database
    //metadata queries matching with this release of the engine.
    dropJDBCMetadataSPSes(tc, false);
    bootingDictionary.createSystemSps(tc);

    /*
     * OLD Cloudscape 5.1 upgrade code, Derby does not support
     * upgrade from Cloudscape 5.x databases. If it ever is changed
     * to do so, this code would be useful.
    if (fromMajorVersionNumber <= DataDictionary.DD_VERSION_CS_5_1)
    {
        // drop sps in SYSIBM, SYSIBM, recreate SYSIBM, SYSDUMMY1, populate SYSDUMMY1, create procs
        dropJDBCMetadataSPSes(tc, true);

        SchemaDescriptor sd = bootingDictionary.getSchemaDescriptor("SYSIBM", null, false);
        if (sd != null)
            bootingDictionary.dropSchemaDescriptor("SYSIBM", tc);
        sd = bootingDictionary.getSysIBMSchemaDescriptor();
        bootingDictionary.addDescriptor(sd, null, DataDictionary.SYSSCHEMAS_CATALOG_NUM, false, tc);
        bootingDictionary.upgradeMakeCatalog(tc, DataDictionary.SYSDUMMY1_CATALOG_NUM);
        bootingDictionary.populateSYSDUMMY1(tc);
        bootingDictionary.create_SYSIBM_procedures(tc);
        bootingDictionary.createSystemSps(tc);
    }
    */

    if (fromMajorVersionNumber == DataDictionary.DD_VERSION_CS_10_0)
    {
        // This upgrade depends on the SYSUTIL schema, which only exists
        // since 10.0. Will not work to upgrade any db previous to 10.0,
        // thus only checks for 10.0 rather than <= 10.0.
        bootingDictionary.create_10_1_system_procedures(
                tc,
                bootingDictionary.getSystemUtilSchemaDescriptor().getUUID());
    }

    if (fromMajorVersionNumber <= DataDictionary.DD_VERSION_DERBY_10_1)
    {
        // On upgrade from versions before 10.2, create system procedures
        // added in 10.2.
        bootingDictionary.create_10_2_system_procedures(
                tc,
                bootingDictionary.getSystemUtilSchemaDescriptor().getUUID());

        if (SanityManager.DEBUG)
            SanityManager.ASSERT((aid != null),
                    "Failed to get new DBA authorization");

        // Add new system catalogs created for grant and revoke
        bootingDictionary.upgradeMakeCatalog(tc, DataDictionary.SYSTABLEPERMS_CATALOG_NUM);
        bootingDictionary.upgradeMakeCatalog(tc, DataDictionary.SYSCOLPERMS_CATALOG_NUM);
        bootingDictionary.upgradeMakeCatalog(tc, DataDictionary.SYSROUTINEPERMS_CATALOG_NUM);
        bootingDictionary.upgradeMakeCatalog(tc, DataDictionary.SYSREQUIREDPERM_CATALOG_NUM);

        // Change system schemas to be owned by aid
        bootingDictionary.updateSystemSchemaAuthorization(aid, tc);
    }
}
columnInfo[ix].autoincInc != 0,
public void executeConstantAction( Activation activation ) throws StandardException { TableDescriptor td; UUID toid; SchemaDescriptor schemaDescriptor; ColumnDescriptor columnDescriptor; ExecRow template; LanguageConnectionContext lcc = activation.getLanguageConnectionContext(); DataDictionary dd = lcc.getDataDictionary(); DependencyManager dm = dd.getDependencyManager(); TransactionController tc = lcc.getTransactionExecute(); /* Mark the activation as being for create table */ activation.setForCreateTable(); /* ** Create a row template to tell the store what type of rows this table ** holds. */ template = RowUtil.getEmptyValueRow(columnInfo.length, lcc); /* Get a template value for each column */ for (int ix = 0; ix < columnInfo.length; ix++) { /* If there is a default value, use it, otherwise use null */ if (columnInfo[ix].defaultValue != null) template.setColumn(ix + 1, columnInfo[ix].defaultValue); else template.setColumn(ix + 1, columnInfo[ix].dataType.getNull() ); } /* create the conglomerate to hold the table's rows * RESOLVE - If we ever have a conglomerate creator * that lets us specify the conglomerate number then * we will need to handle it here. */ long conglomId = tc.createConglomerate( "heap", // we're requesting a heap conglomerate template.getRowArray(), // row template null, //column sort order - not required for heap properties, // properties tableType == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ? (TransactionController.IS_TEMPORARY | TransactionController.IS_KEPT) : TransactionController.IS_DEFAULT); /* ** Inform the data dictionary that we are about to write to it. ** There are several calls to data dictionary "get" methods here ** that might be done in "read" mode in the data dictionary, but ** it seemed safer to do this whole operation in "write" mode. ** ** We tell the data dictionary we're done writing at the end of ** the transaction. 
*/ if ( tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ) dd.startWriting(lcc); SchemaDescriptor sd; if (tableType == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) sd = dd.getSchemaDescriptor(schemaName, tc, true); else sd = DDLConstantAction.getSchemaDescriptorForCreate(dd, activation, schemaName); // // Create a new table descriptor. // DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator(); if ( tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ) { td = ddg.newTableDescriptor(tableName, sd, tableType, lockGranularity); dd.addDescriptor(td, sd, DataDictionary.SYSTABLES_CATALOG_NUM, false, tc); } else { td = ddg.newTableDescriptor(tableName, sd, tableType, onCommitDeleteRows, onRollbackDeleteRows); td.setUUID(dd.getUUIDFactory().createUUID()); } toid = td.getUUID(); // Save the TableDescriptor off in the Activation activation.setDDLTableDescriptor(td); /* NOTE: We must write the columns out to the system * tables before any of the conglomerates, including * the heap, since we read the columns before the * conglomerates when building a TableDescriptor. * This will hopefully reduce the probability of * a deadlock involving those system tables. */ // for each column, stuff system.column int index = 1; ColumnDescriptor[] cdlArray = new ColumnDescriptor[columnInfo.length]; for (int ix = 0; ix < columnInfo.length; ix++) { UUID defaultUUID = columnInfo[ix].newDefaultUUID; /* Generate a UUID for the default, if one exists * and there is no default id yet. 
*/ if (columnInfo[ix].defaultInfo != null && defaultUUID == null) { defaultUUID = dd.getUUIDFactory().createUUID(); } columnDescriptor = new ColumnDescriptor( columnInfo[ix].name, index++, columnInfo[ix].dataType, columnInfo[ix].defaultValue, columnInfo[ix].defaultInfo, td, defaultUUID, columnInfo[ix].autoincStart, columnInfo[ix].autoincInc, columnInfo[ix].autoincInc != 0, columnInfo[ix].autoinc_create_or_modify_Start_Increment ); cdlArray[ix] = columnDescriptor; } if ( tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ) { dd.addDescriptorArray(cdlArray, td, DataDictionary.SYSCOLUMNS_CATALOG_NUM, false, tc); } // now add the column descriptors to the table. ColumnDescriptorList cdl = td.getColumnDescriptorList(); for (int i = 0; i < cdlArray.length; i++) cdl.add(cdlArray[i]); // // Create a conglomerate desciptor with the conglomId filled in and // add it. // // RESOLVE: Get information from the conglomerate descriptor which // was provided. // ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, null, false, null, false, null, toid, sd.getUUID()); if ( tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ) { dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc); } // add the newly added conglomerate to the table descriptor ConglomerateDescriptorList conglomList = td.getConglomerateDescriptorList(); conglomList.add(cgd); /* Create any constraints */ if (constraintActions != null) { /* ** Do everything but FK constraints first, ** then FK constraints on 2nd pass. 
*/ for (int conIndex = 0; conIndex < constraintActions.length; conIndex++) { // skip fks if (!constraintActions[conIndex].isForeignKeyConstraint()) { constraintActions[conIndex].executeConstantAction(activation); } } for (int conIndex = 0; conIndex < constraintActions.length; conIndex++) { // only foreign keys if (constraintActions[conIndex].isForeignKeyConstraint()) { constraintActions[conIndex].executeConstantAction(activation); } } } if ( tableType == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ) { lcc.addDeclaredGlobalTempTable(td); } }
else columnDescriptor = new ColumnDescriptor( columnInfo[ix].name, index++, columnInfo[ix].dataType, columnInfo[ix].defaultValue, columnInfo[ix].defaultInfo, td, defaultUUID, columnInfo[ix].autoincStart, columnInfo[ix].autoincInc );
public void executeConstantAction( Activation activation ) throws StandardException { TableDescriptor td; UUID toid; SchemaDescriptor schemaDescriptor; ColumnDescriptor columnDescriptor; ExecRow template; LanguageConnectionContext lcc = activation.getLanguageConnectionContext(); DataDictionary dd = lcc.getDataDictionary(); DependencyManager dm = dd.getDependencyManager(); TransactionController tc = lcc.getTransactionExecute(); /* Mark the activation as being for create table */ activation.setForCreateTable(); /* ** Create a row template to tell the store what type of rows this table ** holds. */ template = RowUtil.getEmptyValueRow(columnInfo.length, lcc); /* Get a template value for each column */ for (int ix = 0; ix < columnInfo.length; ix++) { /* If there is a default value, use it, otherwise use null */ if (columnInfo[ix].defaultValue != null) template.setColumn(ix + 1, columnInfo[ix].defaultValue); else template.setColumn(ix + 1, columnInfo[ix].dataType.getNull() ); } /* create the conglomerate to hold the table's rows * RESOLVE - If we ever have a conglomerate creator * that lets us specify the conglomerate number then * we will need to handle it here. */ long conglomId = tc.createConglomerate( "heap", // we're requesting a heap conglomerate template.getRowArray(), // row template null, //column sort order - not required for heap properties, // properties tableType == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ? (TransactionController.IS_TEMPORARY | TransactionController.IS_KEPT) : TransactionController.IS_DEFAULT); /* ** Inform the data dictionary that we are about to write to it. ** There are several calls to data dictionary "get" methods here ** that might be done in "read" mode in the data dictionary, but ** it seemed safer to do this whole operation in "write" mode. ** ** We tell the data dictionary we're done writing at the end of ** the transaction. 
*/ if ( tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ) dd.startWriting(lcc); SchemaDescriptor sd; if (tableType == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) sd = dd.getSchemaDescriptor(schemaName, tc, true); else sd = DDLConstantAction.getSchemaDescriptorForCreate(dd, activation, schemaName); // // Create a new table descriptor. // DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator(); if ( tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ) { td = ddg.newTableDescriptor(tableName, sd, tableType, lockGranularity); dd.addDescriptor(td, sd, DataDictionary.SYSTABLES_CATALOG_NUM, false, tc); } else { td = ddg.newTableDescriptor(tableName, sd, tableType, onCommitDeleteRows, onRollbackDeleteRows); td.setUUID(dd.getUUIDFactory().createUUID()); } toid = td.getUUID(); // Save the TableDescriptor off in the Activation activation.setDDLTableDescriptor(td); /* NOTE: We must write the columns out to the system * tables before any of the conglomerates, including * the heap, since we read the columns before the * conglomerates when building a TableDescriptor. * This will hopefully reduce the probability of * a deadlock involving those system tables. */ // for each column, stuff system.column int index = 1; ColumnDescriptor[] cdlArray = new ColumnDescriptor[columnInfo.length]; for (int ix = 0; ix < columnInfo.length; ix++) { UUID defaultUUID = columnInfo[ix].newDefaultUUID; /* Generate a UUID for the default, if one exists * and there is no default id yet. 
*/ if (columnInfo[ix].defaultInfo != null && defaultUUID == null) { defaultUUID = dd.getUUIDFactory().createUUID(); } columnDescriptor = new ColumnDescriptor( columnInfo[ix].name, index++, columnInfo[ix].dataType, columnInfo[ix].defaultValue, columnInfo[ix].defaultInfo, td, defaultUUID, columnInfo[ix].autoincStart, columnInfo[ix].autoincInc, columnInfo[ix].autoincInc != 0, columnInfo[ix].autoinc_create_or_modify_Start_Increment ); cdlArray[ix] = columnDescriptor; } if ( tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ) { dd.addDescriptorArray(cdlArray, td, DataDictionary.SYSCOLUMNS_CATALOG_NUM, false, tc); } // now add the column descriptors to the table. ColumnDescriptorList cdl = td.getColumnDescriptorList(); for (int i = 0; i < cdlArray.length; i++) cdl.add(cdlArray[i]); // // Create a conglomerate desciptor with the conglomId filled in and // add it. // // RESOLVE: Get information from the conglomerate descriptor which // was provided. // ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, null, false, null, false, null, toid, sd.getUUID()); if ( tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ) { dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc); } // add the newly added conglomerate to the table descriptor ConglomerateDescriptorList conglomList = td.getConglomerateDescriptorList(); conglomList.add(cgd); /* Create any constraints */ if (constraintActions != null) { /* ** Do everything but FK constraints first, ** then FK constraints on 2nd pass. 
*/ for (int conIndex = 0; conIndex < constraintActions.length; conIndex++) { // skip fks if (!constraintActions[conIndex].isForeignKeyConstraint()) { constraintActions[conIndex].executeConstantAction(activation); } } for (int conIndex = 0; conIndex < constraintActions.length; conIndex++) { // only foreign keys if (constraintActions[conIndex].isForeignKeyConstraint()) { constraintActions[conIndex].executeConstantAction(activation); } } } if ( tableType == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ) { lcc.addDeclaredGlobalTempTable(td); } }
StorageRandomAccessFile newRaf = privGetRandomAccessFile(newFile);
newRaf = privGetRandomAccessFile(newFile);
protected void encryptContainer(BaseContainerHandle handle, String newFilePath) throws StandardException { BasePage page = null; StorageFile newFile = dataFactory.getStorageFactory().newStorageFile(newFilePath); try { long lastPageNumber= getLastPageNumber(handle); StorageRandomAccessFile newRaf = privGetRandomAccessFile(newFile); byte[] encryptionBuf = null; encryptionBuf = new byte[pageSize]; // copy all the pages from the current container to the // new container file after encryting the pages. for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER; pageNumber <= lastPageNumber; pageNumber++) { page = getLatchedPage(handle, pageNumber); // update the page array before writing to the disk // with container header and encrypt it. byte[] dataToWrite = updatePageArray(pageNumber, page.getPageArray(), encryptionBuf, true); newRaf.write(dataToWrite, 0, pageSize); // unlatch releases page from cache. page.unlatch(); page = null; } newRaf.close(); }catch (IOException ioe) { throw StandardException.newException( SQLState.FILE_CONTAINER_EXCEPTION, ioe, newFile); } finally { if (page != null) { page.unlatch(); page = null; } } }
if (newRaf != null) { try { newRaf.close(); }catch (IOException ioe) { newRaf = null; throw StandardException.newException( SQLState.FILE_CONTAINER_EXCEPTION, ioe, newFile); } }
protected void encryptContainer(BaseContainerHandle handle, String newFilePath) throws StandardException { BasePage page = null; StorageFile newFile = dataFactory.getStorageFactory().newStorageFile(newFilePath); try { long lastPageNumber= getLastPageNumber(handle); StorageRandomAccessFile newRaf = privGetRandomAccessFile(newFile); byte[] encryptionBuf = null; encryptionBuf = new byte[pageSize]; // copy all the pages from the current container to the // new container file after encryting the pages. for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER; pageNumber <= lastPageNumber; pageNumber++) { page = getLatchedPage(handle, pageNumber); // update the page array before writing to the disk // with container header and encrypt it. byte[] dataToWrite = updatePageArray(pageNumber, page.getPageArray(), encryptionBuf, true); newRaf.write(dataToWrite, 0, pageSize); // unlatch releases page from cache. page.unlatch(); page = null; } newRaf.close(); }catch (IOException ioe) { throw StandardException.newException( SQLState.FILE_CONTAINER_EXCEPTION, ioe, newFile); } finally { if (page != null) { page.unlatch(); page = null; } } }
return getWholeDigits(decimalValue) + decimalValue.scale();
return SQLDecimal.getWholeDigits(decimalValue) + decimalValue.scale();
/**
 * Compute the precision of a BigDecimal value as the number of whole
 * digits plus the scale.
 *
 * @param decimalValue the value to measure; may be null
 * @return 0 for null or an unscaled zero, otherwise
 *         whole digits + scale
 */
private static int getPrecision(BigDecimal decimalValue)
{
    if ((decimalValue == null) || decimalValue.equals(ZERO))
    {
        return 0;
    }

    // NOTE(review): BigDecimal.equals() is scale-sensitive, so a scaled
    // zero such as 0.00 is NOT equal to ZERO and falls through to the
    // general computation below.  Confirm this treatment of scaled zero
    // is intended (compareTo(ZERO) == 0 would match any zero).
    return getWholeDigits(decimalValue) + decimalValue.scale();
}
return getWholeDigits(getBigDecimal());
return SQLDecimal.getWholeDigits(getBigDecimal());
/**
 * Return the number of digits to the left of the decimal point in this
 * value, as computed by the static getWholeDigits(BigDecimal) helper on
 * this value's BigDecimal representation.
 *
 * @return the count of whole digits
 */
private int getWholeDigits()
{
    return getWholeDigits(getBigDecimal());
}
jira614Test_a(conn);
/**
 * Entry point for the prepStmt test.
 *
 * Exercises PreparedStatement end to end: executeUpdate/execute/
 * executeQuery with and without parameters, all the typed setters and
 * setObject/setNull across the SQL data types in table t2, very wide
 * tables ("bigtab") with up to 1000 parameter markers, several negative
 * cases (unset parameters, wrong column, missing table), and a series
 * of regression tests for specific bug numbers.
 *
 * NOTE(review): every printed string is presumably compared against a
 * canon output file (including the "executQuery" typos), so no output
 * text may be reworded.
 */
public static void main (String args[])
{
    try
    {
        System.out.println("prepStmt Test Starts");
        ij.getPropertyArg(args);
        conn = ij.startJBMS();

        if (conn == null)
        {
            System.out.println("conn didn't work");
            return;
        }

        Statement cleanstmt = conn.createStatement();
        TestUtil.cleanUpTest(cleanstmt, testObjects);

        PreparedStatement ps;
        ResultSet rs;
        boolean hasResultSet;
        int uc;

        // executeUpdate() without parameters
        System.out.println("executeUpdate() without parameters");
        ps = conn.prepareStatement("create table t1(c1 int, c2 int, c3 int)");
        uc = ps.executeUpdate();
        System.out.println("Update count is: " + uc);

        // executeUpdate() with parameters
        System.out.println("executeUpdate() with parameters");
        ps = conn.prepareStatement("insert into t1 values (?, 5, ?)");
        ps.setInt(1, 99);
        ps.setInt(2, 9);
        uc = ps.executeUpdate();
        System.out.println("Update count is: " + uc);

        // execute() with parameters, no result set returned
        System.out.println("execute() with parameters, no result set returned");
        ps = conn.prepareStatement("insert into t1 values (2, 6, ?), (?, 5, 8)");
        ps.setInt(1, 10);
        ps.setInt(2, 7);
        hasResultSet = ps.execute();
        while (hasResultSet)
        {
            rs = ps.getResultSet();
            while (rs.next())
                System.out.println("ERROR: should not get here!");
            hasResultSet = ps.getMoreResults();
        }
        uc = ps.getUpdateCount();
        if (uc != -1)
            System.out.println("Update count is: " + uc);

        // executeQuery() without parameters
        System.out.println("executQuery() without parameters");
        ps = conn.prepareStatement("select * from t1");
        rs = ps.executeQuery();
        while (rs.next())
            System.out.println("got row: "+" "+rs.getInt(1)+" "+rs.getInt(2)+" "+rs.getInt(3));
        System.out.println("end of rows");

        // executeQuery() with parameters
        System.out.println("executQuery() with parameters");
        ps = conn.prepareStatement("select * from t1 where c2 = ?");
        ps.setInt(1, 5);
        rs = ps.executeQuery();
        while (rs.next())
            System.out.println("got row: "+" "+rs.getInt(1)+" "+rs.getInt(2)+" "+rs.getInt(3));
        System.out.println("end of rows");

        // execute() with parameters, with result set returned
        System.out.println("execute() with parameters with result set returned");
        ps = conn.prepareStatement("select * from t1 where c2 = ?");
        ps.setInt(1, 5);
        hasResultSet = ps.execute();
        while (hasResultSet)
        {
            rs = ps.getResultSet();
            while (rs.next())
                System.out.println("got row: "+" "+rs.getInt(1)+" "+rs.getInt(2)+" "+rs.getInt(3));
            hasResultSet = ps.getMoreResults();
        }
        System.out.println("end of rows");
        uc = ps.getUpdateCount();
        if (uc != -1)
            System.out.println("Update count is: " + uc);

        // test different data types for input parameters of a PreparedStatement
        System.out.println("test different data types for input parameters of a Prepared Statement");
        ps = conn.prepareStatement("create table t2(si smallint,i int, bi bigint, r real, f float, d double precision, n5_2 numeric(5,2), dec10_3 decimal(10,3), ch20 char(20),vc varchar(20), lvc long varchar,b20 char(23) for bit data, vb varchar(23) for bit data, lvb long varchar for bit data, dt date, tm time, ts timestamp not null)");
        uc = ps.executeUpdate();
        System.out.println("Update count is: " + uc);

        // byte array for binary values.
        byte[] ba = new byte[] {0x00,0x1,0x2,0x3,0x4,0x5,0x6,0x7,0x8,0x9,0xa,0xb,0xc,
                                0xd,0xe,0xf,0x10,0x11,0x12,0x13 };

        ps = conn.prepareStatement("insert into t2 values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ,? , ?)");
        ps.setShort(1, (short) 1);
        ps.setInt(2, 2);
        ps.setLong(3, 3);
        ps.setFloat(4, (float) 4.0);
        ps.setDouble(5, 5.0);
        ps.setDouble(6, 6.0);
        ps.setBigDecimal(7, new BigDecimal("77.77"));
        ps.setBigDecimal(8, new BigDecimal("8.1"));
        ps.setString(9, "column9string");
        byte[] c10ba = new String("column10vcstring").getBytes();
        int len = c10ba.length;
        ps.setAsciiStream(10, new ByteArrayInputStream(c10ba), len);
        byte[] c11ba = new String("column11lvcstring").getBytes();
        len = c11ba.length;
        ps.setCharacterStream(11, new InputStreamReader(new ByteArrayInputStream(c11ba)),len);
        ps.setBytes(12,ba);
        // Calling setBytes on the varchar for bit data type because it
        // Appears DB2 UDB accepts this only for the BLOB data type...
        // ps.setBinaryStream(13, new ByteArrayInputStream(ba), ba.length);
        ps.setBytes(13,ba);
        ps.setBytes(14,ba);
        ps.setDate(15, Date.valueOf("2002-04-12"));
        ps.setTime(16, Time.valueOf("11:44:30"));
        ps.setTimestamp(17, Timestamp.valueOf("2002-04-12 11:44:30.000000000"));
        uc = ps.executeUpdate();
        System.out.println("Update count is: " + uc);

        // test setObject on different datatypes of the input parameters of
        // PreparedStatement
        System.out.println("test setObject on different data types for input parameters of a Prepared Statement");
        ps = conn.prepareStatement("insert into t2 values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ,? , ?)");
        ps.setObject(1, new Integer(1));
        ps.setObject(2, new Integer(2));
        ps.setObject(3, new Long(3));
        ps.setObject(4, new Float(4.0));
        ps.setObject(5, new Double(5.0));
        ps.setObject(6, new Double(6.0));
        ps.setObject(7, new BigDecimal("77.77"));
        ps.setObject(8, new BigDecimal("8.1"));
        ps.setObject(9, "column11string");
        ps.setObject(10, "column10vcstring");
        ps.setObject(11, "column11lvcstring");
        ps.setObject(12,ba);
        ps.setObject(13,ba);
        ps.setObject(14,ba);
        ps.setObject(15, Date.valueOf("2002-04-12"));
        ps.setObject(16, Time.valueOf("11:44:30"));
        ps.setObject(17, Timestamp.valueOf("2002-04-12 11:44:30.000000000"));
        uc = ps.executeUpdate();
        System.out.println("Update count is: " + uc);

        // test setNull on different datatypes of the input parameters of PreparedStatement
        System.out.println("test setNull on different data types for input parameters of a Prepared Statement");
        ps = conn.prepareStatement("insert into t2 values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ,? , ?)");
        ps.setNull(1, java.sql.Types.SMALLINT);
        ps.setNull(2, java.sql.Types.INTEGER);
        ps.setNull(3, java.sql.Types.BIGINT);
        ps.setNull(4, java.sql.Types.REAL);
        ps.setNull(5, java.sql.Types.FLOAT);
        ps.setNull(6, java.sql.Types.DOUBLE);
        ps.setNull(7, java.sql.Types.NUMERIC);
        ps.setNull(8, java.sql.Types.DECIMAL);
        ps.setNull(9, java.sql.Types.CHAR);
        ps.setNull(10, java.sql.Types.VARCHAR);
        ps.setNull(11, java.sql.Types.LONGVARCHAR);
        ps.setNull(12, java.sql.Types.BINARY);
        ps.setNull(13, java.sql.Types.VARBINARY);
        ps.setNull(14, java.sql.Types.LONGVARBINARY);
        ps.setNull(15, java.sql.Types.DATE);
        ps.setNull(16, java.sql.Types.TIME);

        ps.setTimestamp(17, Timestamp.valueOf("2002-04-12 11:44:31.000000000")); //slightly after
        hasResultSet = ps.execute();
        uc = ps.getUpdateCount();
        if (uc != -1)
            System.out.println("Update count is: " + uc);

        ps = conn.prepareStatement("select * from t2");
        rs = ps.executeQuery();
        while (rs.next())
        {
            System.out.println("got row: "+" "+rs.getShort(1)+
                               " "+rs.getInt(2)+" "+rs.getLong(3)+
                               " "+rs.getFloat(4)+" "+rs.getDouble(5)+
                               " "+rs.getDouble(6)+" "+rs.getBigDecimal(7)+
                               " "+rs.getBigDecimal(8)+" "+rs.getString(9)+
                               " "+rs.getString(10)+" "+rs.getString(11)+
                               " "+bytesToString(rs.getBytes(12)) +
                               " "+bytesToString(rs.getBytes(13)) +
                               " "+bytesToString(rs.getBytes(14)) +
                               " "+rs.getDate(15)+
                               " "+rs.getTime(16)+" "+rs.getTimestamp(17));
            Timestamp ts = rs.getTimestamp(17);
            Timestamp temp = Timestamp.valueOf("2002-04-12 11:44:30.000000000");
            if (ts.after(temp))
                System.out.println("After first Timestamp!");
            else if (ts.before(temp))
                System.out.println("Before first Timestamp!");
            else
                System.out.println("Timestamp match!");
        }
        System.out.println("end of rows");

        // negative case: executeQuery with an unset parameter
        try
        {
            ps = conn.prepareStatement("select * from t2 where i = ?");
            rs = ps.executeQuery();
        }
        catch (SQLException e)
        {
            System.out.println("SQLState: " + e.getSQLState() + " message: " + e.getMessage());
        }
        // negative case: executeUpdate with all parameters unset
        try
        {
            ps = conn.prepareStatement("insert into t2 values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
            ps.executeUpdate();
        }
        catch (SQLException e)
        {
            System.out.println("SQLState: " + e.getSQLState() + " message: " + e.getMessage());
        }

        // very wide table / many parameter markers, plus negative cases
        try
        {
            int tabSize = 1000;
            String createBigTabSql = "create table bigtab (";
            for (int i = 1; i <= tabSize; i++)
            {
                createBigTabSql += "c"+ i + " int";
                if (i != tabSize)
                    createBigTabSql += ", ";
                else
                    createBigTabSql += " )";
            }
            //System.out.println(createBigTabSql);
            ps = conn.prepareStatement(createBigTabSql);
            uc = ps.executeUpdate();

            insertTab(conn, "bigtab",50);
            insertTab(conn, "bigtab",200);
            insertTab(conn, "bigtab", 300);
            insertTab(conn, "bigtab",500);
            // prepared Statement with many params (bug 4863)
            insertTab(conn, "bigtab", 1000);
            selectFromBigTab(conn);
            // Negative Cases
            System.out.println("Insert wrong column name");
            insertTab(conn, "bigtab", 1001);
            // this one will give a sytax error
            System.out.println("Expected Syntax error ");
            insertTab(conn, "bigtab", 0);
            // table doesn't exist
            System.out.println("Expected Table does not exist ");
            insertTab(conn, "wrongtab",1000);
        }
        catch (SQLException e)
        {
            System.out.println("SQLState: " + e.getSQLState() +
                               " message: " + e.getMessage());
        }
        rs.close();
        ps.close();

        // individual regression tests
        testBigDecimalSetObject(conn);
        testBigDecimalSetObjectWithScale(conn);
        test4975(conn);
        test5130(conn);
        test5172(conn);
        jira614Test(conn);
        jira170Test(conn);
        jira125Test(conn);

        conn.close();
        // refresh conn before cleaning up
        conn = ij.startJBMS();
        cleanstmt = conn.createStatement();
        TestUtil.cleanUpTest(cleanstmt, testObjects);
        cleanstmt.close();
        conn.close();
        System.out.println("prepStmt Test Ends");
    }
    catch (Exception e)
    {
        e.printStackTrace();
    }
}
if (frameworkString == null) { String useprocessFramework = RunTest.framework; if (useprocessFramework != null) frameworkString = useprocessFramework; }
/**
 * Resolve which test framework is in effect from the "framework" system
 * property and cache the answer in the static {@code framework} field,
 * so the property is consulted at most once per JVM.
 *
 * @return one of the framework constants; an unrecognized property
 *         value leaves the field at its prior (unknown) value
 */
private static int getFramework()
{
    // Fast path: already resolved on an earlier call.
    if (framework != UNKNOWN_FRAMEWORK)
        return framework;

    String frameworkString = (String) AccessController.doPrivileged(
        new PrivilegedAction() {
            public Object run() {
                return System.getProperty("framework");
            }
        });

    // An absent property means the embedded framework; otherwise
    // normalize once and dispatch on the upper-cased value.
    String normalized = (frameworkString == null)
        ? "EMBEDDED"
        : frameworkString.toUpperCase(Locale.ENGLISH);

    if (normalized.equals("EMBEDDED"))
        framework = EMBEDDED_FRAMEWORK;
    else if (normalized.equals("DERBYNETCLIENT"))
        framework = DERBY_NET_CLIENT_FRAMEWORK;
    else if (normalized.equals("DERBYNET"))
        framework = DERBY_NET_FRAMEWORK;
    else if (normalized.indexOf("DB2JNET") != -1)
        framework = OLD_NET_FRAMEWORK;

    return framework;
}
ColumnTypeConversionException(LogWriter logWriter) {
ColumnTypeConversionException(LogWriter logWriter, String sourceType, String targetType) {
/**
 * Exception raised when a result column cannot be converted to the
 * type requested by the caller.
 *
 * @param logWriter log writer passed through to the superclass for
 *                  client-side tracing
 */
ColumnTypeConversionException(LogWriter logWriter)
{
    super(logWriter,
        "Invalid data conversion:" +
        " Wrong result column type for requested conversion.");
}
"Invalid data conversion:" + " Wrong result column type for requested conversion.");
new MessageId(SQLState.LANG_DATA_TYPE_GET_MISMATCH), sourceType, targetType);
/**
 * Exception raised when a result column cannot be converted to the
 * type requested by the caller.
 *
 * @param logWriter log writer passed through to the superclass for
 *                  client-side tracing
 */
ColumnTypeConversionException(LogWriter logWriter)
{
    super(logWriter,
        "Invalid data conversion:" +
        " Wrong result column type for requested conversion.");
}
public void copyFields(ValueNode oldVN)
public void copyFields(ValueNode oldVN) throws StandardException
/**
 * Copy the compile-time type information from another ValueNode into
 * this node.
 *
 * @param oldVN the node whose type descriptor and type id are copied
 */
public void copyFields(ValueNode oldVN)
{
    // Mirror the source node's type state; the two getters are
    // independent simple accessors.
    this.typeId = oldVN.getTypeId();
    this.dataTypeServices = oldVN.getTypeServices();
}
public TypeCompiler getTypeCompiler()
public TypeCompiler getTypeCompiler() throws StandardException
/**
 * Lazily look up and cache the TypeCompiler for this node's type.
 *
 * @return the type compiler for this node's TypeId
 */
public TypeCompiler getTypeCompiler()
{
    if (typeCompiler != null)
    {
        // Cached from a previous call.
        return typeCompiler;
    }

    /*
    ** getTypeId() is overridden by the parameter node, so
    ** don't get smart and inline it away here.
    */
    typeCompiler = getTypeCompiler(getTypeId());
    return typeCompiler;
}
public TypeId getTypeId()
public TypeId getTypeId() throws StandardException
/**
 * Get the TypeId of this node's type.
 *
 * @return the cached TypeId, or null if no type has been set yet
 */
public TypeId getTypeId()
{
    return typeId;
}
public DataTypeDescriptor getTypeServices()
public DataTypeDescriptor getTypeServices() throws StandardException
/**
 * Get the DataTypeDescriptor for this node's type.
 *
 * @return the cached type descriptor, or null if no type has been set
 */
public DataTypeDescriptor getTypeServices()
{
    return dataTypeServices;
}
throws StandardException
public double selectivity(Optimizable optTable) { // Return 1 if additional predicates have been generated from this one. if (transformed) { return 1.0; } else { return 0.5d; } }
public void setType(DataTypeDescriptor dataTypeServices)
public void setType(DataTypeDescriptor dataTypeServices) throws StandardException
public void setType(DataTypeDescriptor dataTypeServices) { this.dataTypeServices = dataTypeServices; /* Get this now so we only have to cast it once */ if (dataTypeServices == null) typeId = null; else typeId = dataTypeServices.getTypeId(); // Clear the typeCompiler, just in case type has changed typeCompiler = null; }
if ((arg1 instanceof ConstantNode) || (arg1.isParameterNode()))
if ((arg1 instanceof ConstantNode) || (arg1.requiresTypeFromContext()))
/**
 * Update the equijoin bookkeeping structures for an "=" predicate whose
 * right side (arg2) is a column of the current table.
 *
 * A column equated to a constant, a parameter, or a correlation column
 * (a column whose table number is not in {@code tableNumbers}, i.e. it
 * comes from an outer query block) is recorded in both the per-table
 * maps and the outer-equality array; a column equated to a column of
 * another table in this query block is recorded only in that table's
 * entry of {@code tableColMap}.
 *
 * @param tableColMap  per-table bit sets of equated column numbers;
 *                     may be null
 * @param eqOuterCols  flags for columns equated to outer values;
 *                     may be null (presumably -- setValueCols handles it)
 * @param tableNumbers table numbers belonging to this query block
 * @param tableNumber  table number of the table owning arg2
 * @param resultTable  index of the result table entry to update
 * @param arg1         the other side of the "=" predicate
 * @param arg2         a ColumnReference on the current table
 * @exception StandardException thrown on failure
 */
private void updateMaps(JBitSet[] tableColMap, boolean[] eqOuterCols,
                        int[] tableNumbers, int tableNumber,
                        int resultTable,
                        ValueNode arg1, ValueNode arg2)
    throws StandardException
{
    /* arg2 is a column from our table. This
     * is a good = for both All tables and Outer arrays
     * if the right side is a constant or a parameter
     * or a column from an outer table.
     * It is a good = for only the All array if
     * the right side is a column from this query block.
     */
    if ((arg1 instanceof ConstantNode) || (arg1.isParameterNode()))
    {
        setValueCols(tableColMap, eqOuterCols,
                     ((ColumnReference) arg2).getColumnNumber(), resultTable);
    }
    else if((arg1 instanceof ColumnReference &&
             ((ColumnReference) arg1).getTableNumber() != tableNumber))
    {
        /* See if other columns is a correlation column */
        int otherTN = ((ColumnReference) arg1).getTableNumber();
        int index = 0;
        int colNumber = ((ColumnReference) arg2).getColumnNumber();

        // Search this query block's table numbers for the other table.
        for ( ; index < tableNumbers.length; index++)
        {
            if (otherTN == tableNumbers[index])
            {
                break;
            }
        }
        /* Correlation column, so we can treat it as a constant */
        if (index == tableNumbers.length)
        {
            setValueCols(tableColMap, eqOuterCols, colNumber, resultTable);
        }
        else if (tableColMap != null)
        {
            tableColMap[index].set(colNumber);
        }
    }
    else
    {
        /* See if other side contains a column reference from the same table */
        JBitSet referencedTables = arg1.getTablesReferenced();
        /* See if other columns are all correlation columns */
        int index = 0;
        int colNumber = ((ColumnReference) arg2).getColumnNumber();

        for ( ; index < tableNumbers.length; index++)
        {
            if (referencedTables.get(tableNumbers[index]))
            {
                break;
            }
        }
        /* Correlation column, so we can treat it as a constant */
        if (index == tableNumbers.length)
        {
            setValueCols(tableColMap, eqOuterCols, colNumber, resultTable);
        }
        else if (tableColMap != null && !referencedTables.get(tableNumber))
        {
            tableColMap[index].set(colNumber);
        }
    }
}
public static void pmap(short in, short[] inout, short[] out) { inout[0] += 6; out[0] = 77;
public static void pmap(BigDecimal in, BigDecimal[] inout, BigDecimal[] out) { inout[0] = inout[0].add(new BigDecimal(2.3)); out[0] = new BigDecimal(84.1);
/**
 * Test stored-procedure body for SMALLINT parameter-mode mapping:
 * increments the INOUT argument by six and writes a fixed value into
 * the OUT argument. The IN argument is intentionally ignored.
 */
public static void pmap(short in, short[] inout, short[] out) {
    short bumped = (short) (inout[0] + 6);
    inout[0] = bumped;
    out[0] = (short) 77;
}
executeQuery, executeUpdate,
/**
 * Execute the statement held by the supplied activation and install its
 * results on this statement object.
 * <p>
 * This is the single execution path behind executeQuery, executeUpdate
 * and execute; the two boolean flags tell the language layer which JDBC
 * entry point was used so it can reject mismatched statement types.
 *
 * @param a             activation holding the prepared statement and its
 *                      parameter values
 * @param executeQuery  true when invoked via executeQuery
 * @param executeUpdate true when invoked via executeUpdate
 * @return true if execution produced a result set (directly, or as a
 *         procedure's dynamic result); false for update counts
 * @throws SQLException any execution failure, wrapped by handleException
 */
boolean executeStatement(Activation a, boolean executeQuery, boolean executeUpdate) throws SQLException {
    // we don't differentiate the update from the resultset case.
    // so, there could be a result set.

    // note: the statement interface will paste together
    // an activation and make sure the prepared statement
    // is still valid, so it is preferable, for now,
    // to creating our own activation and stuffing it in
    // the prepared statement.
    synchronized (getConnectionSynchronization()) {
        setupContextStack(); // make sure there's context
        boolean retval;
        pvs = a.getParameterValueSet();
        try {
            // The following is from the javadoc for java.sql.Statement:
            // Only one ResultSet per Statement can be open at any point in time.
            // Therefore, if the reading of one ResultSet is interleaved with the
            // reading of another, each must have been generated by different Statements.
            // All statement execute methods implicitly close a
            // statement's current ResultSet if an open one exists.
            if (results != null) {
                results.close();
                results = null;
            }

            clearWarnings();

            if (! forMetaData) {
                commitIfNeeded(); // commit the last statement if needed
                needCommit();
            } else {
                // Metadata statements share the caller's transaction; only
                // commit when no other activations are open on it.
                if (lcc.getActivationCount() > 1) {
                    // we do not want to commit here as there seems to be other
                    // statements/resultSets currently opened for this connection.
                } else {
                    commitIfNeeded(); // we can legitimately commit
                    needCommit();
                }
            }

            // if this was a prepared statement, this just
            // gets it for us, it won't recompile unless it is invalid.
            PreparedStatement ps = a.getPreparedStatement();
            ps.rePrepare(lcc);
            addWarning(ps.getCompileTimeWarnings());

            /*
            ** WARNING WARNING
            **
            ** Any state set in the activation before execution *must* be copied
            ** to the new activation in GenericActivationHolder.execute() when
            ** the statement has been recompiled. State such as
            ** singleExecution, cursorName, holdability, maxRows.
            */

            if (cursorName != null) {
                a.setCursorName(cursorName);
            }

            boolean executeHoldable = getExecuteHoldable();
            a.setResultSetHoldability(executeHoldable);

            // reset the activation to clear warnings
            // and clear existing result sets in case this has been cached
            a.reset();
            a.setMaxRows(maxRows);
            long timeoutMillis = (long)timeoutSeconds * 1000L;
            ResultSet resultsToWrap = ps.execute(a, executeQuery, executeUpdate, false, timeoutMillis);
            addWarning(a.getWarnings());

            if (resultsToWrap.returnsRows()) {
                // Row-returning statement: wrap the language result set in
                // a JDBC result set and keep it open for the caller.
                EmbedResultSet lresults = factory.newEmbedResultSet(getEmbedConnection(), resultsToWrap, forMetaData, this, ps.isAtomic());
                results = lresults;

                // Set up the finalization of the ResultSet to
                // mark the activation as unused. It will be
                // closed sometime later by the connection
                // outside of finalization.
                if (a.isSingleExecution())
                    lresults.finalizeActivation = a;

                updateCount = -1;
                retval = true;
            } else {
                // Only applicable for an insert statement, which does not return rows;
                // the auto-generated keys resultset will be null for a non-insert statement.
                if (a.getAutoGeneratedKeysResultsetMode() && (resultsToWrap.getAutoGeneratedKeysResultset() != null)) {
                    resultsToWrap.getAutoGeneratedKeysResultset().open();
                    autoGeneratedKeysResultSet = factory.newEmbedResultSet(getEmbedConnection(), resultsToWrap.getAutoGeneratedKeysResultset(), false, this, ps.isAtomic());
                }

                updateCount = resultsToWrap.modifiedRowCount();

                resultsToWrap.finish(); // Don't need the result set any more
                results = null; // note that we have none.

                boolean haveDynamicResults = false;
                if (a.getDynamicResults() != null) {
                    haveDynamicResults = processDynamicResults(a.getDynamicResults(), a.getMaxDynamicResults());
                }

                if (!haveDynamicResults) {
                    // No procedure result sets remain open: the activation
                    // can be retired and the work committed.
                    if (a.isSingleExecution()) {
                        a.close();
                    }

                    if (!forMetaData)
                        commitIfNeeded();
                    else {
                        if (lcc.getActivationCount() > 1) {
                            // we do not want to commit here as there seems to be other
                            // statements/resultSets currently opened for this connection.
                        } else {
                            commitIfNeeded(); // we can legitimately commit
                        }
                    }
                }

                retval = haveDynamicResults;
            }
        } catch (Throwable t) {
            // Single-use activations must not leak when execution fails.
            if (a.isSingleExecution()) {
                try { a.close(); } catch (Throwable tt) {;}
            }
            throw handleException(t);
        } finally {
            restoreContextStack();
        }
        return retval;
    }
}
boolean haveDynamicResults = false;
int dynamicResultCount = 0;
/**
 * Execute the statement held by the supplied activation and install its
 * results on this statement object.
 * <p>
 * This is the single execution path behind executeQuery, executeUpdate
 * and execute; the two boolean flags tell the language layer which JDBC
 * entry point was used so it can reject mismatched statement types.
 *
 * @param a             activation holding the prepared statement and its
 *                      parameter values
 * @param executeQuery  true when invoked via executeQuery
 * @param executeUpdate true when invoked via executeUpdate
 * @return true if execution produced a result set (directly, or as a
 *         procedure's dynamic result); false for update counts
 * @throws SQLException any execution failure, wrapped by handleException
 */
boolean executeStatement(Activation a, boolean executeQuery, boolean executeUpdate) throws SQLException {
    // we don't differentiate the update from the resultset case.
    // so, there could be a result set.

    // note: the statement interface will paste together
    // an activation and make sure the prepared statement
    // is still valid, so it is preferable, for now,
    // to creating our own activation and stuffing it in
    // the prepared statement.
    synchronized (getConnectionSynchronization()) {
        setupContextStack(); // make sure there's context
        boolean retval;
        pvs = a.getParameterValueSet();
        try {
            // The following is from the javadoc for java.sql.Statement:
            // Only one ResultSet per Statement can be open at any point in time.
            // Therefore, if the reading of one ResultSet is interleaved with the
            // reading of another, each must have been generated by different Statements.
            // All statement execute methods implicitly close a
            // statement's current ResultSet if an open one exists.
            if (results != null) {
                results.close();
                results = null;
            }

            clearWarnings();

            if (! forMetaData) {
                commitIfNeeded(); // commit the last statement if needed
                needCommit();
            } else {
                // Metadata statements share the caller's transaction; only
                // commit when no other activations are open on it.
                if (lcc.getActivationCount() > 1) {
                    // we do not want to commit here as there seems to be other
                    // statements/resultSets currently opened for this connection.
                } else {
                    commitIfNeeded(); // we can legitimately commit
                    needCommit();
                }
            }

            // if this was a prepared statement, this just
            // gets it for us, it won't recompile unless it is invalid.
            PreparedStatement ps = a.getPreparedStatement();
            ps.rePrepare(lcc);
            addWarning(ps.getCompileTimeWarnings());

            /*
            ** WARNING WARNING
            **
            ** Any state set in the activation before execution *must* be copied
            ** to the new activation in GenericActivationHolder.execute() when
            ** the statement has been recompiled. State such as
            ** singleExecution, cursorName, holdability, maxRows.
            */

            if (cursorName != null) {
                a.setCursorName(cursorName);
            }

            boolean executeHoldable = getExecuteHoldable();
            a.setResultSetHoldability(executeHoldable);

            // reset the activation to clear warnings
            // and clear existing result sets in case this has been cached
            a.reset();
            a.setMaxRows(maxRows);
            long timeoutMillis = (long)timeoutSeconds * 1000L;
            ResultSet resultsToWrap = ps.execute(a, executeQuery, executeUpdate, false, timeoutMillis);
            addWarning(a.getWarnings());

            if (resultsToWrap.returnsRows()) {
                // Row-returning statement: wrap the language result set in
                // a JDBC result set and keep it open for the caller.
                EmbedResultSet lresults = factory.newEmbedResultSet(getEmbedConnection(), resultsToWrap, forMetaData, this, ps.isAtomic());
                results = lresults;

                // Set up the finalization of the ResultSet to
                // mark the activation as unused. It will be
                // closed sometime later by the connection
                // outside of finalization.
                if (a.isSingleExecution())
                    lresults.finalizeActivation = a;

                updateCount = -1;
                retval = true;
            } else {
                // Only applicable for an insert statement, which does not return rows;
                // the auto-generated keys resultset will be null for a non-insert statement.
                if (a.getAutoGeneratedKeysResultsetMode() && (resultsToWrap.getAutoGeneratedKeysResultset() != null)) {
                    resultsToWrap.getAutoGeneratedKeysResultset().open();
                    autoGeneratedKeysResultSet = factory.newEmbedResultSet(getEmbedConnection(), resultsToWrap.getAutoGeneratedKeysResultset(), false, this, ps.isAtomic());
                }

                updateCount = resultsToWrap.modifiedRowCount();

                resultsToWrap.finish(); // Don't need the result set any more
                results = null; // note that we have none.

                boolean haveDynamicResults = false;
                if (a.getDynamicResults() != null) {
                    haveDynamicResults = processDynamicResults(a.getDynamicResults(), a.getMaxDynamicResults());
                }

                if (!haveDynamicResults) {
                    // No procedure result sets remain open: the activation
                    // can be retired and the work committed.
                    if (a.isSingleExecution()) {
                        a.close();
                    }

                    if (!forMetaData)
                        commitIfNeeded();
                    else {
                        if (lcc.getActivationCount() > 1) {
                            // we do not want to commit here as there seems to be other
                            // statements/resultSets currently opened for this connection.
                        } else {
                            commitIfNeeded(); // we can legitimately commit
                        }
                    }
                }

                retval = haveDynamicResults;
            }
        } catch (Throwable t) {
            // Single-use activations must not leak when execution fails.
            if (a.isSingleExecution()) {
                try { a.close(); } catch (Throwable tt) {;}
            }
            throw handleException(t);
        } finally {
            restoreContextStack();
        }
        return retval;
    }
}
haveDynamicResults = processDynamicResults(a.getDynamicResults(), a.getMaxDynamicResults());
dynamicResultCount = processDynamicResults(a.getDynamicResults(), a.getMaxDynamicResults());
/**
 * Execute the statement held by the supplied activation and install its
 * results on this statement object.
 * <p>
 * This is the single execution path behind executeQuery, executeUpdate
 * and execute; the two boolean flags tell the language layer which JDBC
 * entry point was used so it can reject mismatched statement types.
 *
 * @param a             activation holding the prepared statement and its
 *                      parameter values
 * @param executeQuery  true when invoked via executeQuery
 * @param executeUpdate true when invoked via executeUpdate
 * @return true if execution produced a result set (directly, or as a
 *         procedure's dynamic result); false for update counts
 * @throws SQLException any execution failure, wrapped by handleException
 */
boolean executeStatement(Activation a, boolean executeQuery, boolean executeUpdate) throws SQLException {
    // we don't differentiate the update from the resultset case.
    // so, there could be a result set.

    // note: the statement interface will paste together
    // an activation and make sure the prepared statement
    // is still valid, so it is preferable, for now,
    // to creating our own activation and stuffing it in
    // the prepared statement.
    synchronized (getConnectionSynchronization()) {
        setupContextStack(); // make sure there's context
        boolean retval;
        pvs = a.getParameterValueSet();
        try {
            // The following is from the javadoc for java.sql.Statement:
            // Only one ResultSet per Statement can be open at any point in time.
            // Therefore, if the reading of one ResultSet is interleaved with the
            // reading of another, each must have been generated by different Statements.
            // All statement execute methods implicitly close a
            // statement's current ResultSet if an open one exists.
            if (results != null) {
                results.close();
                results = null;
            }

            clearWarnings();

            if (! forMetaData) {
                commitIfNeeded(); // commit the last statement if needed
                needCommit();
            } else {
                // Metadata statements share the caller's transaction; only
                // commit when no other activations are open on it.
                if (lcc.getActivationCount() > 1) {
                    // we do not want to commit here as there seems to be other
                    // statements/resultSets currently opened for this connection.
                } else {
                    commitIfNeeded(); // we can legitimately commit
                    needCommit();
                }
            }

            // if this was a prepared statement, this just
            // gets it for us, it won't recompile unless it is invalid.
            PreparedStatement ps = a.getPreparedStatement();
            ps.rePrepare(lcc);
            addWarning(ps.getCompileTimeWarnings());

            /*
            ** WARNING WARNING
            **
            ** Any state set in the activation before execution *must* be copied
            ** to the new activation in GenericActivationHolder.execute() when
            ** the statement has been recompiled. State such as
            ** singleExecution, cursorName, holdability, maxRows.
            */

            if (cursorName != null) {
                a.setCursorName(cursorName);
            }

            boolean executeHoldable = getExecuteHoldable();
            a.setResultSetHoldability(executeHoldable);

            // reset the activation to clear warnings
            // and clear existing result sets in case this has been cached
            a.reset();
            a.setMaxRows(maxRows);
            long timeoutMillis = (long)timeoutSeconds * 1000L;
            ResultSet resultsToWrap = ps.execute(a, executeQuery, executeUpdate, false, timeoutMillis);
            addWarning(a.getWarnings());

            if (resultsToWrap.returnsRows()) {
                // Row-returning statement: wrap the language result set in
                // a JDBC result set and keep it open for the caller.
                EmbedResultSet lresults = factory.newEmbedResultSet(getEmbedConnection(), resultsToWrap, forMetaData, this, ps.isAtomic());
                results = lresults;

                // Set up the finalization of the ResultSet to
                // mark the activation as unused. It will be
                // closed sometime later by the connection
                // outside of finalization.
                if (a.isSingleExecution())
                    lresults.finalizeActivation = a;

                updateCount = -1;
                retval = true;
            } else {
                // Only applicable for an insert statement, which does not return rows;
                // the auto-generated keys resultset will be null for a non-insert statement.
                if (a.getAutoGeneratedKeysResultsetMode() && (resultsToWrap.getAutoGeneratedKeysResultset() != null)) {
                    resultsToWrap.getAutoGeneratedKeysResultset().open();
                    autoGeneratedKeysResultSet = factory.newEmbedResultSet(getEmbedConnection(), resultsToWrap.getAutoGeneratedKeysResultset(), false, this, ps.isAtomic());
                }

                updateCount = resultsToWrap.modifiedRowCount();

                resultsToWrap.finish(); // Don't need the result set any more
                results = null; // note that we have none.

                boolean haveDynamicResults = false;
                if (a.getDynamicResults() != null) {
                    haveDynamicResults = processDynamicResults(a.getDynamicResults(), a.getMaxDynamicResults());
                }

                if (!haveDynamicResults) {
                    // No procedure result sets remain open: the activation
                    // can be retired and the work committed.
                    if (a.isSingleExecution()) {
                        a.close();
                    }

                    if (!forMetaData)
                        commitIfNeeded();
                    else {
                        if (lcc.getActivationCount() > 1) {
                            // we do not want to commit here as there seems to be other
                            // statements/resultSets currently opened for this connection.
                        } else {
                            commitIfNeeded(); // we can legitimately commit
                        }
                    }
                }

                retval = haveDynamicResults;
            }
        } catch (Throwable t) {
            // Single-use activations must not leak when execution fails.
            if (a.isSingleExecution()) {
                try { a.close(); } catch (Throwable tt) {;}
            }
            throw handleException(t);
        } finally {
            restoreContextStack();
        }
        return retval;
    }
}
if (!haveDynamicResults) {
if (dynamicResultCount == 0) {
/**
 * Execute the statement held by the supplied activation and install its
 * results on this statement object.
 * <p>
 * This is the single execution path behind executeQuery, executeUpdate
 * and execute; the two boolean flags tell the language layer which JDBC
 * entry point was used so it can reject mismatched statement types.
 *
 * @param a             activation holding the prepared statement and its
 *                      parameter values
 * @param executeQuery  true when invoked via executeQuery
 * @param executeUpdate true when invoked via executeUpdate
 * @return true if execution produced a result set (directly, or as a
 *         procedure's dynamic result); false for update counts
 * @throws SQLException any execution failure, wrapped by handleException
 */
boolean executeStatement(Activation a, boolean executeQuery, boolean executeUpdate) throws SQLException {
    // we don't differentiate the update from the resultset case.
    // so, there could be a result set.

    // note: the statement interface will paste together
    // an activation and make sure the prepared statement
    // is still valid, so it is preferable, for now,
    // to creating our own activation and stuffing it in
    // the prepared statement.
    synchronized (getConnectionSynchronization()) {
        setupContextStack(); // make sure there's context
        boolean retval;
        pvs = a.getParameterValueSet();
        try {
            // The following is from the javadoc for java.sql.Statement:
            // Only one ResultSet per Statement can be open at any point in time.
            // Therefore, if the reading of one ResultSet is interleaved with the
            // reading of another, each must have been generated by different Statements.
            // All statement execute methods implicitly close a
            // statement's current ResultSet if an open one exists.
            if (results != null) {
                results.close();
                results = null;
            }

            clearWarnings();

            if (! forMetaData) {
                commitIfNeeded(); // commit the last statement if needed
                needCommit();
            } else {
                // Metadata statements share the caller's transaction; only
                // commit when no other activations are open on it.
                if (lcc.getActivationCount() > 1) {
                    // we do not want to commit here as there seems to be other
                    // statements/resultSets currently opened for this connection.
                } else {
                    commitIfNeeded(); // we can legitimately commit
                    needCommit();
                }
            }

            // if this was a prepared statement, this just
            // gets it for us, it won't recompile unless it is invalid.
            PreparedStatement ps = a.getPreparedStatement();
            ps.rePrepare(lcc);
            addWarning(ps.getCompileTimeWarnings());

            /*
            ** WARNING WARNING
            **
            ** Any state set in the activation before execution *must* be copied
            ** to the new activation in GenericActivationHolder.execute() when
            ** the statement has been recompiled. State such as
            ** singleExecution, cursorName, holdability, maxRows.
            */

            if (cursorName != null) {
                a.setCursorName(cursorName);
            }

            boolean executeHoldable = getExecuteHoldable();
            a.setResultSetHoldability(executeHoldable);

            // reset the activation to clear warnings
            // and clear existing result sets in case this has been cached
            a.reset();
            a.setMaxRows(maxRows);
            long timeoutMillis = (long)timeoutSeconds * 1000L;
            ResultSet resultsToWrap = ps.execute(a, executeQuery, executeUpdate, false, timeoutMillis);
            addWarning(a.getWarnings());

            if (resultsToWrap.returnsRows()) {
                // Row-returning statement: wrap the language result set in
                // a JDBC result set and keep it open for the caller.
                EmbedResultSet lresults = factory.newEmbedResultSet(getEmbedConnection(), resultsToWrap, forMetaData, this, ps.isAtomic());
                results = lresults;

                // Set up the finalization of the ResultSet to
                // mark the activation as unused. It will be
                // closed sometime later by the connection
                // outside of finalization.
                if (a.isSingleExecution())
                    lresults.finalizeActivation = a;

                updateCount = -1;
                retval = true;
            } else {
                // Only applicable for an insert statement, which does not return rows;
                // the auto-generated keys resultset will be null for a non-insert statement.
                if (a.getAutoGeneratedKeysResultsetMode() && (resultsToWrap.getAutoGeneratedKeysResultset() != null)) {
                    resultsToWrap.getAutoGeneratedKeysResultset().open();
                    autoGeneratedKeysResultSet = factory.newEmbedResultSet(getEmbedConnection(), resultsToWrap.getAutoGeneratedKeysResultset(), false, this, ps.isAtomic());
                }

                updateCount = resultsToWrap.modifiedRowCount();

                resultsToWrap.finish(); // Don't need the result set any more
                results = null; // note that we have none.

                boolean haveDynamicResults = false;
                if (a.getDynamicResults() != null) {
                    haveDynamicResults = processDynamicResults(a.getDynamicResults(), a.getMaxDynamicResults());
                }

                if (!haveDynamicResults) {
                    // No procedure result sets remain open: the activation
                    // can be retired and the work committed.
                    if (a.isSingleExecution()) {
                        a.close();
                    }

                    if (!forMetaData)
                        commitIfNeeded();
                    else {
                        if (lcc.getActivationCount() > 1) {
                            // we do not want to commit here as there seems to be other
                            // statements/resultSets currently opened for this connection.
                        } else {
                            commitIfNeeded(); // we can legitimately commit
                        }
                    }
                }

                retval = haveDynamicResults;
            }
        } catch (Throwable t) {
            // Single-use activations must not leak when execution fails.
            if (a.isSingleExecution()) {
                try { a.close(); } catch (Throwable tt) {;}
            }
            throw handleException(t);
        } finally {
            restoreContextStack();
        }
        return retval;
    }
}
retval = haveDynamicResults;
retval = (dynamicResultCount > 0);
/**
 * Execute the statement held by the supplied activation and install its
 * results on this statement object.
 * <p>
 * This is the single execution path behind executeQuery, executeUpdate
 * and execute; the two boolean flags tell the language layer which JDBC
 * entry point was used so it can reject mismatched statement types.
 *
 * @param a             activation holding the prepared statement and its
 *                      parameter values
 * @param executeQuery  true when invoked via executeQuery
 * @param executeUpdate true when invoked via executeUpdate
 * @return true if execution produced a result set (directly, or as a
 *         procedure's dynamic result); false for update counts
 * @throws SQLException any execution failure, wrapped by handleException
 */
boolean executeStatement(Activation a, boolean executeQuery, boolean executeUpdate) throws SQLException {
    // we don't differentiate the update from the resultset case.
    // so, there could be a result set.

    // note: the statement interface will paste together
    // an activation and make sure the prepared statement
    // is still valid, so it is preferable, for now,
    // to creating our own activation and stuffing it in
    // the prepared statement.
    synchronized (getConnectionSynchronization()) {
        setupContextStack(); // make sure there's context
        boolean retval;
        pvs = a.getParameterValueSet();
        try {
            // The following is from the javadoc for java.sql.Statement:
            // Only one ResultSet per Statement can be open at any point in time.
            // Therefore, if the reading of one ResultSet is interleaved with the
            // reading of another, each must have been generated by different Statements.
            // All statement execute methods implicitly close a
            // statement's current ResultSet if an open one exists.
            if (results != null) {
                results.close();
                results = null;
            }

            clearWarnings();

            if (! forMetaData) {
                commitIfNeeded(); // commit the last statement if needed
                needCommit();
            } else {
                // Metadata statements share the caller's transaction; only
                // commit when no other activations are open on it.
                if (lcc.getActivationCount() > 1) {
                    // we do not want to commit here as there seems to be other
                    // statements/resultSets currently opened for this connection.
                } else {
                    commitIfNeeded(); // we can legitimately commit
                    needCommit();
                }
            }

            // if this was a prepared statement, this just
            // gets it for us, it won't recompile unless it is invalid.
            PreparedStatement ps = a.getPreparedStatement();
            ps.rePrepare(lcc);
            addWarning(ps.getCompileTimeWarnings());

            /*
            ** WARNING WARNING
            **
            ** Any state set in the activation before execution *must* be copied
            ** to the new activation in GenericActivationHolder.execute() when
            ** the statement has been recompiled. State such as
            ** singleExecution, cursorName, holdability, maxRows.
            */

            if (cursorName != null) {
                a.setCursorName(cursorName);
            }

            boolean executeHoldable = getExecuteHoldable();
            a.setResultSetHoldability(executeHoldable);

            // reset the activation to clear warnings
            // and clear existing result sets in case this has been cached
            a.reset();
            a.setMaxRows(maxRows);
            long timeoutMillis = (long)timeoutSeconds * 1000L;
            ResultSet resultsToWrap = ps.execute(a, executeQuery, executeUpdate, false, timeoutMillis);
            addWarning(a.getWarnings());

            if (resultsToWrap.returnsRows()) {
                // Row-returning statement: wrap the language result set in
                // a JDBC result set and keep it open for the caller.
                EmbedResultSet lresults = factory.newEmbedResultSet(getEmbedConnection(), resultsToWrap, forMetaData, this, ps.isAtomic());
                results = lresults;

                // Set up the finalization of the ResultSet to
                // mark the activation as unused. It will be
                // closed sometime later by the connection
                // outside of finalization.
                if (a.isSingleExecution())
                    lresults.finalizeActivation = a;

                updateCount = -1;
                retval = true;
            } else {
                // Only applicable for an insert statement, which does not return rows;
                // the auto-generated keys resultset will be null for a non-insert statement.
                if (a.getAutoGeneratedKeysResultsetMode() && (resultsToWrap.getAutoGeneratedKeysResultset() != null)) {
                    resultsToWrap.getAutoGeneratedKeysResultset().open();
                    autoGeneratedKeysResultSet = factory.newEmbedResultSet(getEmbedConnection(), resultsToWrap.getAutoGeneratedKeysResultset(), false, this, ps.isAtomic());
                }

                updateCount = resultsToWrap.modifiedRowCount();

                resultsToWrap.finish(); // Don't need the result set any more
                results = null; // note that we have none.

                boolean haveDynamicResults = false;
                if (a.getDynamicResults() != null) {
                    haveDynamicResults = processDynamicResults(a.getDynamicResults(), a.getMaxDynamicResults());
                }

                if (!haveDynamicResults) {
                    // No procedure result sets remain open: the activation
                    // can be retired and the work committed.
                    if (a.isSingleExecution()) {
                        a.close();
                    }

                    if (!forMetaData)
                        commitIfNeeded();
                    else {
                        if (lcc.getActivationCount() > 1) {
                            // we do not want to commit here as there seems to be other
                            // statements/resultSets currently opened for this connection.
                        } else {
                            commitIfNeeded(); // we can legitimately commit
                        }
                    }
                }

                retval = haveDynamicResults;
            }
        } catch (Throwable t) {
            // Single-use activations must not leak when execution fails.
            if (a.isSingleExecution()) {
                try { a.close(); } catch (Throwable tt) {;}
            }
            throw handleException(t);
        } finally {
            restoreContextStack();
        }
        return retval;
    }
}
private boolean processDynamicResults(java.sql.ResultSet[][] holder, int maxDynamicResultSets) throws SQLException {
private int processDynamicResults(java.sql.ResultSet[][] holder, int maxDynamicResultSets) throws SQLException {
/**
 * Collect the dynamic result sets returned by a CALL statement.
 * <p>
 * Filters the raw holder array down to open EmbedResultSets created on
 * this (root) connection, orders them by creation, closes any beyond the
 * declared maximum (adding a warning), and installs the first one as the
 * statement's current result set.
 *
 * @param holder               one single-element ResultSet array per
 *                             dynamic result parameter of the procedure
 * @param maxDynamicResultSets maximum number of dynamic results the
 *                             procedure is declared to return
 * @return true if at least one usable dynamic result set was found
 * @throws SQLException if closing an excess result set fails
 */
private boolean processDynamicResults(java.sql.ResultSet[][] holder, int maxDynamicResultSets) throws SQLException {
    EmbedResultSet[] sorted = new EmbedResultSet[holder.length];
    int actualCount = 0;
    for (int i = 0; i < holder.length; i++) {
        // Take ownership of each returned result set, clearing the slot
        // so the procedure's holder no longer references it.
        java.sql.ResultSet[] param = holder[i];

        if (param[0] == null)
            continue;

        java.sql.ResultSet rs = param[0];
        param[0] = null;

        // ignore non-cloudscape result sets or results sets from another connection
        if (!(rs instanceof EmbedResultSet))
            continue;

        EmbedResultSet lrs = (EmbedResultSet) rs;

        if (lrs.getEmbedConnection().rootConnection != getEmbedConnection().rootConnection)
            continue;

        // ignore closed result sets.
        if (lrs.isClosed)
            continue;

        lrs.setDynamicResultSet(this);
        sorted[actualCount++] = lrs;
    }

    if (actualCount != 0) {
        // results are defined to be ordered according to their creation
        if (actualCount != 1) {
            java.util.Arrays.sort(sorted, 0, actualCount);
        }

        dynamicResults = sorted;

        if (actualCount > maxDynamicResultSets) {
            // Procedure returned more result sets than declared: warn and
            // close the extras.
            addWarning(StandardException.newWarning(SQLState.LANG_TOO_MANY_DYNAMIC_RESULTS_RETURNED));

            for (int i = maxDynamicResultSets; i < actualCount; i++) {
                sorted[i].close();
                sorted[i] = null;
            }

            actualCount = maxDynamicResultSets;
        }

        updateCount = -1;
        results = sorted[0];
        currentDynamicResultSet = 0;

        // 0100C is not returned for procedures written in Java, from the SQL2003 spec.
        // getWarnings(StandardException.newWarning(SQLState.LANG_DYNAMIC_RESULTS_RETURNED));

        return true;
    }
    return false;
}
return true;
/**
 * Collect the dynamic result sets returned by a CALL statement.
 * <p>
 * Filters the raw holder array down to open EmbedResultSets created on
 * this (root) connection, orders them by creation, closes any beyond the
 * declared maximum (adding a warning), and installs the first one as the
 * statement's current result set.
 *
 * @param holder               one single-element ResultSet array per
 *                             dynamic result parameter of the procedure
 * @param maxDynamicResultSets maximum number of dynamic results the
 *                             procedure is declared to return
 * @return true if at least one usable dynamic result set was found
 * @throws SQLException if closing an excess result set fails
 */
private boolean processDynamicResults(java.sql.ResultSet[][] holder, int maxDynamicResultSets) throws SQLException {
    EmbedResultSet[] sorted = new EmbedResultSet[holder.length];
    int actualCount = 0;
    for (int i = 0; i < holder.length; i++) {
        // Take ownership of each returned result set, clearing the slot
        // so the procedure's holder no longer references it.
        java.sql.ResultSet[] param = holder[i];

        if (param[0] == null)
            continue;

        java.sql.ResultSet rs = param[0];
        param[0] = null;

        // ignore non-cloudscape result sets or results sets from another connection
        if (!(rs instanceof EmbedResultSet))
            continue;

        EmbedResultSet lrs = (EmbedResultSet) rs;

        if (lrs.getEmbedConnection().rootConnection != getEmbedConnection().rootConnection)
            continue;

        // ignore closed result sets.
        if (lrs.isClosed)
            continue;

        lrs.setDynamicResultSet(this);
        sorted[actualCount++] = lrs;
    }

    if (actualCount != 0) {
        // results are defined to be ordered according to their creation
        if (actualCount != 1) {
            java.util.Arrays.sort(sorted, 0, actualCount);
        }

        dynamicResults = sorted;

        if (actualCount > maxDynamicResultSets) {
            // Procedure returned more result sets than declared: warn and
            // close the extras.
            addWarning(StandardException.newWarning(SQLState.LANG_TOO_MANY_DYNAMIC_RESULTS_RETURNED));

            for (int i = maxDynamicResultSets; i < actualCount; i++) {
                sorted[i].close();
                sorted[i] = null;
            }

            actualCount = maxDynamicResultSets;
        }

        updateCount = -1;
        results = sorted[0];
        currentDynamicResultSet = 0;

        // 0100C is not returned for procedures written in Java, from the SQL2003 spec.
        // getWarnings(StandardException.newWarning(SQLState.LANG_DYNAMIC_RESULTS_RETURNED));

        return true;
    }
    return false;
}
return false;
return actualCount;
/**
 * Collect the dynamic result sets returned by a CALL statement.
 * <p>
 * Filters the raw holder array down to open EmbedResultSets created on
 * this (root) connection, orders them by creation, closes any beyond the
 * declared maximum (adding a warning), and installs the first one as the
 * statement's current result set.
 *
 * @param holder               one single-element ResultSet array per
 *                             dynamic result parameter of the procedure
 * @param maxDynamicResultSets maximum number of dynamic results the
 *                             procedure is declared to return
 * @return true if at least one usable dynamic result set was found
 * @throws SQLException if closing an excess result set fails
 */
private boolean processDynamicResults(java.sql.ResultSet[][] holder, int maxDynamicResultSets) throws SQLException {
    EmbedResultSet[] sorted = new EmbedResultSet[holder.length];
    int actualCount = 0;
    for (int i = 0; i < holder.length; i++) {
        // Take ownership of each returned result set, clearing the slot
        // so the procedure's holder no longer references it.
        java.sql.ResultSet[] param = holder[i];

        if (param[0] == null)
            continue;

        java.sql.ResultSet rs = param[0];
        param[0] = null;

        // ignore non-cloudscape result sets or results sets from another connection
        if (!(rs instanceof EmbedResultSet))
            continue;

        EmbedResultSet lrs = (EmbedResultSet) rs;

        if (lrs.getEmbedConnection().rootConnection != getEmbedConnection().rootConnection)
            continue;

        // ignore closed result sets.
        if (lrs.isClosed)
            continue;

        lrs.setDynamicResultSet(this);
        sorted[actualCount++] = lrs;
    }

    if (actualCount != 0) {
        // results are defined to be ordered according to their creation
        if (actualCount != 1) {
            java.util.Arrays.sort(sorted, 0, actualCount);
        }

        dynamicResults = sorted;

        if (actualCount > maxDynamicResultSets) {
            // Procedure returned more result sets than declared: warn and
            // close the extras.
            addWarning(StandardException.newWarning(SQLState.LANG_TOO_MANY_DYNAMIC_RESULTS_RETURNED));

            for (int i = maxDynamicResultSets; i < actualCount; i++) {
                sorted[i].close();
                sorted[i] = null;
            }

            actualCount = maxDynamicResultSets;
        }

        updateCount = -1;
        results = sorted[0];
        currentDynamicResultSet = 0;

        // 0100C is not returned for procedures written in Java, from the SQL2003 spec.
        // getWarnings(StandardException.newWarning(SQLState.LANG_DYNAMIC_RESULTS_RETURNED));

        return true;
    }
    return false;
}
throw SQLExceptionFactory.notImplemented("getClientInfo()");
checkForNullPhysicalConnection(); return physicalConnection_.getClientInfo();
/**
 * Client info properties are not supported by this connection; every
 * call fails.
 *
 * @return never returns normally
 * @throws SQLException always ("not implemented")
 */
public Properties getClientInfo() throws SQLException {
    SQLException notImplemented =
        SQLExceptionFactory.notImplemented("getClientInfo()");
    throw notImplemented;
}
SQLException sqle = SQLExceptionFactory.notImplemented("setClientInfo(Properties)"); throw new ClientInfoException(sqle.getMessage(), sqle.getSQLState(), properties == null ? null : (Properties)properties.clone());
try { checkForNullPhysicalConnection(); } catch (SQLException se) { throw new ClientInfoException (se.getMessage(), se.getSQLState(), (new FailedProperties40(properties)).getProperties()); } physicalConnection_.setClientInfo(properties);
/**
 * Setting client info properties is not supported by this connection.
 * The failure is reported as a ClientInfoException that carries a
 * snapshot of the properties the caller attempted to set (or null when
 * none were supplied).
 *
 * @param properties the client info properties the caller tried to set
 * @throws ClientInfoException always ("not implemented")
 */
public void setClientInfo(Properties properties) throws ClientInfoException {
    SQLException sqle =
        SQLExceptionFactory.notImplemented("setClientInfo(Properties)");

    Properties failedProps;
    if (properties == null) {
        failedProps = null;
    } else {
        // Clone so later caller-side mutation cannot change the
        // exception's view of what was rejected.
        failedProps = (Properties) properties.clone();
    }

    throw new ClientInfoException(sqle.getMessage(),
                                  sqle.getSQLState(),
                                  failedProps);
}
ps = null; pmd = null;
/**
 * Release JDBC fixture state after each test.
 * <p>
 * Closes the shared prepared statement if it is still open, then drops
 * the fixture references so the statement (and its cached metadata) can
 * be garbage collected even while the harness keeps the test-case
 * instance alive.
 *
 * @throws Exception propagated from closing resources or the superclass
 */
protected void tearDown() throws Exception {
    if (ps != null && !ps.isClosed()) {
        ps.close();
    }
    // Null out fixture references so they do not outlive the test.
    ps = null;
    pmd = null;
    super.tearDown();
}
int offset = origString.indexOf(DERBY_SYSTEM_HOME);
String replaceString = DERBY_SYSTEM_HOME + File.separator; int offset = origString.indexOf(replaceString);
/**
 * Mask the derby.system.home prefix in a path string so canonized test
 * output is machine- and platform-independent.
 * <p>
 * Fix: the search string now includes the trailing platform separator
 * and the replacement always emits a forward slash, so the masked path
 * compares equal against canonical master output on every platform.
 * (The original matched DERBY_SYSTEM_HOME alone, leaving a
 * platform-specific separator behind.)
 *
 * @param origString string that may contain the system-home path
 * @return the string with the prefix replaced by "[DERBY_SYSTEM_HOME]/",
 *         or the input unchanged when the prefix is absent
 */
private static String replaceSystemHome(String origString) {
    String replaceString = DERBY_SYSTEM_HOME + java.io.File.separator;
    int offset = origString.indexOf(replaceString);
    if (offset == -1)
        return origString;
    else
        return origString.substring(0, offset) + "[DERBY_SYSTEM_HOME]/" +
            origString.substring(offset + replaceString.length());
}
return origString.substring(0,offset) + "[DERBY_SYSTEM_HOME]"+ origString.substring(offset + DERBY_SYSTEM_HOME.length());
return origString.substring(0,offset) + "[DERBY_SYSTEM_HOME]/"+ origString.substring(offset + replaceString.length());
/**
 * Mask the derby.system.home prefix in a path string so canonized test
 * output is machine- and platform-independent.
 * <p>
 * Fix: the search string now includes the trailing platform separator
 * and the replacement always emits a forward slash, so the masked path
 * compares equal against canonical master output on every platform.
 * (The original matched DERBY_SYSTEM_HOME alone, leaving a
 * platform-specific separator behind.)
 *
 * @param origString string that may contain the system-home path
 * @return the string with the prefix replaced by "[DERBY_SYSTEM_HOME]/",
 *         or the input unchanged when the prefix is absent
 */
private static String replaceSystemHome(String origString) {
    String replaceString = DERBY_SYSTEM_HOME + java.io.File.separator;
    int offset = origString.indexOf(replaceString);
    if (offset == -1)
        return origString;
    else
        return origString.substring(0, offset) + "[DERBY_SYSTEM_HOME]/" +
            origString.substring(offset + replaceString.length());
}
throws SQLException { throw Util.notImplemented();
throws SQLException { try { return getRealConnection().createArray (typeName, elements); } catch (SQLException sqle) { notifyException(sqle); throw sqle; }
/**
 * JDBC 4.0 pass-through: create an Array on the underlying physical
 * connection, consistent with the other delegating methods of this
 * brokered connection (e.g. getTypeMap). Any failure is reported to the
 * connection's listeners via notifyException before being rethrown.
 *
 * @param typeName SQL type name of the array elements
 * @param elements the elements of the array
 * @return the Array created by the real connection
 * @throws SQLException propagated from the underlying connection
 */
public Array createArray(String typeName, Object[] elements)
    throws SQLException {
    try {
        return getRealConnection().createArray(typeName, elements);
    } catch (SQLException sqle) {
        notifyException(sqle);
        throw sqle;
    }
}
throw Util.notImplemented();
try { return getRealConnection().createNClob(); } catch (SQLException sqle) { notifyException(sqle); throw sqle; }
/**
 * JDBC 4.0 pass-through: create an NClob on the underlying physical
 * connection, consistent with the other delegating methods of this
 * brokered connection. Failures are reported to listeners via
 * notifyException before being rethrown.
 *
 * @return the NClob created by the real connection
 * @throws SQLException propagated from the underlying connection
 */
public NClob createNClob() throws SQLException {
    try {
        return getRealConnection().createNClob();
    } catch (SQLException sqle) {
        notifyException(sqle);
        throw sqle;
    }
}
throw Util.notImplemented();
try { return getRealConnection().createSQLXML (); } catch (SQLException sqle) { notifyException(sqle); throw sqle; }
/**
 * JDBC 4.0 pass-through: create a SQLXML object on the underlying
 * physical connection, consistent with the other delegating methods of
 * this brokered connection. Failures are reported to listeners via
 * notifyException before being rethrown.
 *
 * @return the SQLXML object created by the real connection
 * @throws SQLException propagated from the underlying connection
 */
public SQLXML createSQLXML() throws SQLException {
    try {
        return getRealConnection().createSQLXML();
    } catch (SQLException sqle) {
        notifyException(sqle);
        throw sqle;
    }
}
throws SQLException { throw Util.notImplemented();
throws SQLException { try { return getRealConnection().createStruct (typeName, attributes); } catch (SQLException sqle) { notifyException(sqle); throw sqle; }
/**
 * JDBC 4.0 pass-through: create a Struct on the underlying physical
 * connection, consistent with the other delegating methods of this
 * brokered connection. Failures are reported to listeners via
 * notifyException before being rethrown.
 *
 * @param typeName   SQL structured type name
 * @param attributes the attribute values of the structured type
 * @return the Struct created by the real connection
 * @throws SQLException propagated from the underlying connection
 */
public Struct createStruct(String typeName, Object[] attributes)
    throws SQLException {
    try {
        return getRealConnection().createStruct(typeName, attributes);
    } catch (SQLException sqle) {
        notifyException(sqle);
        throw sqle;
    }
}
int getJDBCLevel() { return 4;}
final int getJDBCLevel() { return 4;}
/**
 * JDBC level implemented by this brokered connection: JDBC 4.0.
 * Declared final so subclasses cannot desynchronize the reported level
 * from the JDBC-4 behavior of this class.
 */
final int getJDBCLevel() { return 4; }
public java.util.Map<String,Class<?>> getTypeMap() throws SQLException {
public final java.util.Map<String,Class<?>> getTypeMap() throws SQLException {
/**
 * JDBC 4.0 pass-through: return the type map of the underlying physical
 * connection. Failures are reported to listeners via notifyException
 * before being rethrown. Declared final so subclasses cannot bypass the
 * delegation/notification contract.
 *
 * @return the real connection's type map
 * @throws SQLException propagated from the underlying connection
 */
public final java.util.Map<String,Class<?>> getTypeMap() throws SQLException {
    try {
        return getRealConnection().getTypeMap();
    } catch (SQLException se) {
        notifyException(se);
        throw se;
    }
}
public boolean isWrapperFor(Class<?> interfaces) throws SQLException {
public final boolean isWrapperFor(Class<?> interfaces) throws SQLException {
/**
 * JDBC 4.0 Wrapper support: report whether this object implements the
 * given interface. Fails fast when the connection is closed, per the
 * java.sql.Wrapper contract. Declared final so subclasses cannot alter
 * the wrapper semantics.
 *
 * @param interfaces the interface to test for
 * @return true if this object is an instance of the interface
 * @throws SQLException if the connection is closed
 */
public final boolean isWrapperFor(Class<?> interfaces) throws SQLException {
    checkIfClosed();
    return interfaces.isInstance(this);
}
public BrokeredStatement newBrokeredStatement
public final BrokeredStatement newBrokeredStatement
/**
 * Factory hook: create the JDBC 4.0 flavor of BrokeredStatement for this
 * connection. Declared final so subclasses cannot hand back a statement
 * implementation inconsistent with this connection's JDBC level.
 *
 * @param statementControl control object managing the real statement
 * @return a new BrokeredStatement40 bound to this connection's JDBC level
 * @throws SQLException propagated from statement construction
 */
public final BrokeredStatement newBrokeredStatement
    (BrokeredStatementControl statementControl) throws SQLException {
    return new BrokeredStatement40(statementControl, getJDBCLevel());
}