Columns: rem (string, lengths 0–477k), add (string, lengths 0–313k), context (string, lengths 6–599k)
return new BrokeredStatement40(statementControl, getJDBCLevel());
try { return new BrokeredStatement40(statementControl, getJDBCLevel()); } catch (SQLException sqle) { notifyException(sqle); throw sqle; }
public BrokeredStatement newBrokeredStatement (BrokeredStatementControl statementControl) throws SQLException { return new BrokeredStatement40(statementControl, getJDBCLevel()); }
public <T> T unwrap(java.lang.Class<T> interfaces)
public final <T> T unwrap(java.lang.Class<T> interfaces)
public <T> T unwrap(java.lang.Class<T> interfaces) throws SQLException{ checkIfClosed(); //Derby does not implement non-standard methods on //JDBC objects try { return interfaces.cast(this); } catch (ClassCastException cce) { throw Util.generateCsSQLException(SQLState.UNABLE_TO_UNWRAP, interfaces); } }
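For reference, a minimal standalone sketch (not taken from the dataset) of the JDBC 4.0 java.sql.Wrapper contract that the unwrap() hunk above implements; the class name is hypothetical, and Derby's real implementation throws its own UNABLE_TO_UNWRAP SQLState rather than this generic message.

import java.sql.SQLException;

// Minimal sketch of java.sql.Wrapper: unwrap() returns the receiver cast to the
// requested interface, isWrapperFor() reports whether that cast would succeed.
public class WrapperSketch implements java.sql.Wrapper {
    public <T> T unwrap(Class<T> iface) throws SQLException {
        if (iface.isInstance(this)) {
            return iface.cast(this);
        }
        throw new SQLException("Cannot unwrap to " + iface.getName());
    }

    public boolean isWrapperFor(Class<?> iface) throws SQLException {
        return iface.isInstance(this);
    }
}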
for (int i = 0; i < 65532; i++)
for (int i = 0; i < 32765; i++)
private static void jira428Test(Connection conn) throws Exception { Statement stmt = conn.createStatement(); PreparedStatement ps ; try { stmt.execute("drop table jira428"); } catch (Throwable t) { } stmt.execute("create table jira428 (i integer)"); boolean savedAutoCommit = conn.getAutoCommit(); conn.setAutoCommit(false); ps = conn.prepareStatement("insert into jira428 values (?)"); for (int i = 0; i < 65532; i++) { ps.setInt(1, i); ps.addBatch(); } ps.executeBatch(); conn.commit(); // We don't run this part of the test for the JCC client because // the exception forces the connection closed. For DerbyNetClient, it's // a clean exception that we can catch and recover from, so we test // that code path: if (TestUtil.isDerbyNetClientFramework()) { ps = conn.prepareStatement("insert into jira428 values (?)"); for (int i = 0; i < 100000; i++) { ps.setInt(1, i); ps.addBatch(); } try { ps.executeBatch(); System.out.println("JIRA428 FAILURE: expected an exception saying no more than 65534 statements in a single batch"); } catch (BatchUpdateException bue) { // We don't print anything here because we use the same // master files for DerbyNet and DerbyNetClient, and we only // run this portion of the test for DerbyNetClient. // The exception that we get says "no more than 65534 stmts". } conn.commit(); } conn.setAutoCommit(savedAutoCommit); }
if (stmt.getQryprctyp() == CodePoint.LMTBLKPRC) {
if (stmt.getQryprctyp() == CodePoint.LMTBLKPRC && stmt.getQryrowset() != 0) {
private void processCommands() throws DRDAProtocolException { DRDAStatement stmt = null; int updateCount = 0; boolean PRPSQLSTTfailed = false; boolean checkSecurityCodepoint = session.requiresSecurityCodepoint(); do { correlationID = reader.readDssHeader(); int codePoint = reader.readLengthAndCodePoint(); int writerMark = writer.markDSSClearPoint(); if (checkSecurityCodepoint) verifyInOrderACCSEC_SECCHK(codePoint,session.getRequiredSecurityCodepoint()); switch(codePoint) { case CodePoint.CNTQRY: try{ stmt = parseCNTQRY(); if (stmt != null) { writeQRYDTA(stmt); if (stmt.rsIsClosed()) { writeENDQRYRM(CodePoint.SVRCOD_WARNING); writeNullSQLCARDobject(); } // Send any warnings if JCC can handle them checkWarning(null, null, stmt.getResultSet(), 0, false, sendWarningsOnCNTQRY); } } catch(SQLException e) { // if we got a SQLException we need to clean up and // close the result set Beetle 4758 cleanUpAndCloseResultSet(stmt, e, writerMark); } break; case CodePoint.EXCSQLIMM: try { updateCount = parseEXCSQLIMM(); // RESOLVE: checking updateCount is not sufficient // since it will be 0 for creates, we need to know when // any logged changes are made to the database // Not getting this right for JCC is probably O.K., this // will probably be a problem for ODBC and XA // The problem is that JDBC doesn't provide this information // so we would have to expand the JDBC API or call a // builtin method to check(expensive) // For now we will assume that every execute immediate // does an update (that is the most conservative thing) if (database.RDBUPDRM_sent == false) { writeRDBUPDRM(); } // we need to set update count in SQLCARD checkWarning(null, database.getDefaultStatement().getStatement(), null, updateCount, true, true); } catch (SQLException e) { writer.clearDSSesBackToMark(writerMark); writeSQLCARDs(e, 0); errorInChain(e); } break; case CodePoint.EXCSQLSET: try { if (parseEXCSQLSET()) // all went well. writeSQLCARDs(null,0); } catch (SQLWarning w) { writeSQLCARD(w, CodePoint.SVRCOD_WARNING, 0, 0); } catch (SQLException e) { writer.clearDSSesBackToMark(writerMark); writeSQLCARDs(e, 0); errorInChain(e); } break; case CodePoint.PRPSQLSTT: int sqldaType; PRPSQLSTTfailed = false; try { database.getConnection().clearWarnings(); sqldaType = parsePRPSQLSTT(); if (sqldaType > 0) // do write SQLDARD writeSQLDARD(database.getCurrentStatement(), (sqldaType == CodePoint.TYPSQLDA_LIGHT_OUTPUT), database.getConnection().getWarnings()); else checkWarning(database.getConnection(), null, null, 0, true, true); } catch (SQLException e) { writer.clearDSSesBackToMark(writerMark); writeSQLCARDs(e, 0, true); PRPSQLSTTfailed = true; errorInChain(e); } break; case CodePoint.OPNQRY: PreparedStatement ps = null; try { if (PRPSQLSTTfailed) { // read the command objects // for ps with parameter // Skip objects/parameters skipRemainder(true); // If we failed to prepare, then we fail // to open, which means OPNQFLRM. writeOPNQFLRM(null); break; } Pkgnamcsn pkgnamcsn = parseOPNQRY(); if (pkgnamcsn != null) { stmt = database.getDRDAStatement(pkgnamcsn); ps = stmt.getPreparedStatement(); ps.clearWarnings(); if (pendingStatementTimeout >= 0) { ps.setQueryTimeout(pendingStatementTimeout); pendingStatementTimeout = -1; } stmt.execute(); writeOPNQRYRM(false, stmt); checkWarning(null, ps, null, 0, false, true); writeQRYDSC(stmt, false); stmt.rsSuspend(); if (stmt.getQryprctyp() == CodePoint.LMTBLKPRC) { // The DRDA spec allows us to send // QRYDTA here if there are no LOB // columns. 
DRDAResultSet drdars = stmt.getCurrentDrdaResultSet(); try { if (drdars != null && !drdars.hasLobColumns()) { writeQRYDTA(stmt); } } catch (SQLException sqle) { cleanUpAndCloseResultSet(stmt, sqle, writerMark); } } } } catch (SQLException e) { writer.clearDSSesBackToMark(writerMark); // The fix for DERBY-1196 removed code // here to close the prepared statement // if OPNQRY failed. writeOPNQFLRM(e); } break; case CodePoint.RDBCMM: try { if (SanityManager.DEBUG) trace("Received commit"); if (!database.getConnection().getAutoCommit()) { database.getConnection().clearWarnings(); database.commit(); writeENDUOWRM(COMMIT); checkWarning(database.getConnection(), null, null, 0, true, true); } // we only want to write one of these per transaction // so set to false in preparation for next command database.RDBUPDRM_sent = false; } catch (SQLException e) { writer.clearDSSesBackToMark(writerMark); // Even in case of error, we have to write the ENDUOWRM. writeENDUOWRM(COMMIT); writeSQLCARDs(e, 0); errorInChain(e); } break; case CodePoint.RDBRLLBCK: try { if (SanityManager.DEBUG) trace("Received rollback"); database.getConnection().clearWarnings(); database.rollback(); writeENDUOWRM(ROLLBACK); checkWarning(database.getConnection(), null, null, 0, true, true); // we only want to write one of these per transaction // so set to false in preparation for next command database.RDBUPDRM_sent = false; } catch (SQLException e) { writer.clearDSSesBackToMark(writerMark); // Even in case of error, we have to write the ENDUOWRM. writeENDUOWRM(ROLLBACK); writeSQLCARDs(e, 0); errorInChain(e); } break; case CodePoint.CLSQRY: try{ stmt = parseCLSQRY(); stmt.rsClose(); writeSQLCARDs(null, 0); } catch (SQLException e) { writer.clearDSSesBackToMark(writerMark); writeSQLCARDs(e, 0); errorInChain(e); } break; case CodePoint.EXCSAT: parseEXCSAT(); writeEXCSATRD(); break; case CodePoint.ACCSEC: int securityCheckCode = parseACCSEC(); writeACCSECRD(securityCheckCode); checkSecurityCodepoint = true; break; case CodePoint.SECCHK: if(parseDRDAConnection()) // security all checked and connection ok checkSecurityCodepoint = false; break; /* since we don't support sqlj, we won't get bind commands from jcc, we * might get it from ccc; just skip them. 
*/ case CodePoint.BGNBND: reader.skipBytes(); writeSQLCARDs(null, 0); break; case CodePoint.BNDSQLSTT: reader.skipBytes(); parseSQLSTTDss(); writeSQLCARDs(null, 0); break; case CodePoint.SQLSTTVRB: // optional reader.skipBytes(); break; case CodePoint.ENDBND: reader.skipBytes(); writeSQLCARDs(null, 0); break; case CodePoint.DSCSQLSTT: if (PRPSQLSTTfailed) { reader.skipBytes(); writeSQLCARDs(null, 0); break; } try { boolean rtnOutput = parseDSCSQLSTT(); writeSQLDARD(database.getCurrentStatement(), rtnOutput, null); } catch (SQLException e) { writer.clearDSSesBackToMark(writerMark); server.consoleExceptionPrint(e); try { writeSQLDARD(database.getCurrentStatement(), true, e); } catch (SQLException e2) { // should not get here since doing nothing with ps agentError("Why am I getting another SQLException?"); } errorInChain(e); } break; case CodePoint.EXCSQLSTT: if (PRPSQLSTTfailed) { // Skip parameters too if they are chained Beetle 4867 skipRemainder(true); writeSQLCARDs(null, 0); break; } try { parseEXCSQLSTT(); DRDAStatement curStmt = database.getCurrentStatement(); if (curStmt != null) curStmt.rsSuspend(); } catch (SQLException e) { writer.clearDSSesBackToMark(writerMark); if (SanityManager.DEBUG) { server.consoleExceptionPrint(e); } writeSQLCARDs(e, 0); errorInChain(e); } break; case CodePoint.SYNCCTL: if (xaProto == null) xaProto = new DRDAXAProtocol(this); xaProto.parseSYNCCTL(); break; default: codePointNotSupported(codePoint); } // Set the correct chaining bits for whatever // reply DSS(es) we just wrote. If we've reached // the end of the chain, this method will send // the DSS(es) across. finalizeChain(); } while (reader.isChainedWithSameID() || reader.isChainedWithDiffID()); }
systemStartupProperties = props;
private TestConfiguration(Properties props) throws NumberFormatException { dbName = props.getProperty(KEY_DBNAME, DEFAULT_DBNAME); userName = props.getProperty(KEY_USER_NAME, DEFAULT_USER_NAME); userPassword = props.getProperty(KEY_USER_PASSWORD, DEFAULT_USER_PASSWORD); hostName = props.getProperty(KEY_HOSTNAME, DEFAULT_HOSTNAME); isVerbose = Boolean.valueOf(props.getProperty(KEY_VERBOSE)).booleanValue(); String portStr = props.getProperty(KEY_PORT); if (portStr != null) { try { port = Integer.parseInt(portStr); } catch (NumberFormatException nfe) { // We lose stacktrace here, but it is not important. throw new NumberFormatException( "Port number must be an integer. Value: " + portStr); } } else { port = DEFAULT_PORT; } String framework = props.getProperty(KEY_FRAMEWORK, DEFAULT_FRAMEWORK); if ("DerbyNetClient".equals(framework)) { jdbcClient = JDBCClient.DERBYNETCLIENT; } else if ("DerbyNet".equals(framework)) { jdbcClient = JDBCClient.DERBYNET; } else { jdbcClient = JDBCClient.EMBEDDED; } url = createJDBCUrlWithDatabaseName(dbName); }
public DateTimeDataValue getTimestamp( DataValueDescriptor date, DataValueDescriptor time) throws StandardException
public DateTimeDataValue getTimestamp( DataValueDescriptor operand) throws StandardException
public DateTimeDataValue getTimestamp( DataValueDescriptor date, DataValueDescriptor time) throws StandardException { return new SQLTimestamp( date, time); }
return new SQLTimestamp( date, time);
return SQLTimestamp.computeTimestampFunction( operand, this);
public DateTimeDataValue getTimestamp( DataValueDescriptor date, DataValueDescriptor time) throws StandardException { return new SQLTimestamp( date, time); }
reader.setLimit(valueLength);
if (valueLength != UNKNOWN_LENGTH) { reader.setLimit(valueLength); } if (SanityManager.DEBUG && valueLength == UNKNOWN_LENGTH) { SanityManager.ASSERT(numCharsToTruncate == 0); }
public ReaderToUTF8Stream(Reader appReader, int valueLength,int numCharsToTruncate) { this.reader = new LimitReader(appReader); reader.setLimit(valueLength); buffer = new byte[BUFSIZE]; blen = -1; this.charsToTruncate = numCharsToTruncate; this.valueLength = valueLength; }
return new EmbedSQLException( MessageService.getCompleteMessage(messageId, args), messageId, next, severity, t, args);
String message = MessageService.getCompleteMessage (messageId, args); return exceptionFactory.getSQLException ( message, messageId, next, severity, t, args);
private static SQLException newEmbedSQLException(String messageId, Object[] args, SQLException next, int severity, Throwable t) { return new EmbedSQLException( MessageService.getCompleteMessage(messageId, args), messageId, next, severity, t, args); }
XADataSource xadatasource = getXADataSource(); ConnectionPoolDataSource cpds = getConnectionPoolDataSource();
XADataSource xadatasource = TestDataSourceFactory.getXADataSource(); ConnectionPoolDataSource cpds = TestDataSourceFactory.getConnectionPoolDataSource();
public void setUp() throws SQLException { XADataSource xadatasource = getXADataSource(); ConnectionPoolDataSource cpds = getConnectionPoolDataSource(); pooledConnection = cpds.getPooledConnection(); xaconnection = xadatasource.getXAConnection(); //register this class as a event listener for the //statement events //registering as a listener for the //PooledConnection object pooledConnection.addStatementEventListener(this); //registering as a listener for the //XAConnection xaconnection.addStatementEventListener(this); }
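As context for the listener registration in the setUp() above, here is a minimal sketch (not part of the dataset) of the JDBC 4.0 javax.sql.StatementEventListener interface that such a test class would implement; the class name and println output are illustrative only.

import javax.sql.StatementEvent;
import javax.sql.StatementEventListener;

// Minimal StatementEventListener: both PooledConnection and XAConnection accept
// such a listener via addStatementEventListener(), as done in the setUp() above.
public class LoggingStatementListener implements StatementEventListener {
    public void statementClosed(StatementEvent event) {
        System.out.println("prepared statement closed: " + event.getStatement());
    }

    public void statementErrorOccurred(StatementEvent event) {
        System.out.println("prepared statement error: " + event.getSQLException());
    }
}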
getGoal(name).attain();
getProject().attainGoal(name);
public void doTag(final XMLOutput output) throws Exception { log.debug( "Trying to invoke target: " + name ); // attain the goal and throw up any exception to be caught by parent tags getGoal(name).attain(); }
createTestDatabase();
createTestDatabase(dbCreationScript_1);
protected void doTest() { try { createTestDatabase(); // Don't let error stream ruin the diff. System.err.close(); // The only test we need to run is the one for // Network Server; see functionTests/tools/ // dblook_test.java. runTest(3, testDBName, testDBName + "_new"); } catch (Exception e) { System.out.println("-=- FAILED: to complete the test:"); e.printStackTrace(); } }
"22005".equals(sqle.getSQLState())||
private static void testSetStringInvalidValue(int type, PreparedStatement psi) { // Do not perform this test for string types. // Only test for types which will fail with setString("InvalidValue"); switch (jdbcTypes[type]) { case Types.CHAR: case Types.VARCHAR: case Types.LONGVARCHAR: case Types.CLOB: return; } String sqlType = SQLTypes[type]; try { System.out.print(" setString(\"Invalid Value\") " ); psi.setString(1,"Invalid Value"); psi.executeUpdate(); // Should have gotten exception. Test fails String error = "FAIL - setString(1,\"Invalid Value\") for type " + sqlType + " did not throw an exception as expected"; } catch (SQLException sqle) { if ("22018".equals(sqle.getSQLState())|| "XCL12".equals(sqle.getSQLState())|| "22007".equals(sqle.getSQLState())|| (sqle.getMessage().indexOf("Invalid data conversion") != -1) || (sqle.getMessage().indexOf("Illegal Conversion") != -1)) System.out.println(" IC (Expected)"); else dumpSQLExceptions(sqle); } catch (Exception e) { // JCC may throw Illegal argument exception for // String conversion error for date/time/timestamp if (TestUtil.isJCCFramework() && e instanceof IllegalArgumentException) System.out.println( e.getMessage()); else System.out.println("FAIL: Unexpected Exception " + e.getMessage()); } }
Enumeration enum;
Enumeration e;
public String getUniqueConnectionName() { int newNum = 0; boolean newConnectionNameOk = false; String newConnectionName = ""; Enumeration enum; while (!newConnectionNameOk){ newConnectionName = Session.DEFAULT_NAME + newNum; newConnectionNameOk = true; enum = sessions.keys(); while (enum.hasMoreElements() && newConnectionNameOk){ if (((String)enum.nextElement()).equals(newConnectionName)) newConnectionNameOk = false; } newNum = newNum + 1; } return newConnectionName; }
enum = sessions.keys(); while (enum.hasMoreElements() && newConnectionNameOk){ if (((String)enum.nextElement()).equals(newConnectionName))
e = sessions.keys(); while (e.hasMoreElements() && newConnectionNameOk){ if (((String)e.nextElement()).equals(newConnectionName))
public String getUniqueConnectionName() { int newNum = 0; boolean newConnectionNameOk = false; String newConnectionName = ""; Enumeration enum; while (!newConnectionNameOk){ newConnectionName = Session.DEFAULT_NAME + newNum; newConnectionNameOk = true; enum = sessions.keys(); while (enum.hasMoreElements() && newConnectionNameOk){ if (((String)enum.nextElement()).equals(newConnectionName)) newConnectionNameOk = false; } newNum = newNum + 1; } return newConnectionName; }
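The two hunks above rename the local variable enum to e: enum became a reserved keyword in Java 5, so pre-1.5 code declaring "Enumeration enum" no longer compiles and must pick another identifier. Below is a hypothetical, self-contained illustration (not from the dataset) of the same pattern.

import java.util.Enumeration;
import java.util.Hashtable;

// Hypothetical illustration: since Java 5, "enum" is a keyword, so an Enumeration
// local must use another name such as "e", exactly as the diff above does.
public class EnumRenameSketch {
    public static void main(String[] args) {
        Hashtable<String, String> sessions = new Hashtable<String, String>();
        sessions.put("CONNECTION0", "open");
        Enumeration<String> e = sessions.keys(); // "Enumeration enum" would be a syntax error
        while (e.hasMoreElements()) {
            System.out.println(e.nextElement());
        }
    }
}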
getCompilerContext().addRequiredColumnPriv(cd);
if (isPrivilegeCollectionRequired()) getCompilerContext().addRequiredColumnPriv(cd);
protected void bind(DDLStatementNode ddlNode, DataDictionary dd) throws StandardException { super.bind(ddlNode, dd); refTableSd = getSchemaDescriptor(refTableName.getSchemaName()); if (refTableSd.isSystemSchema()) { throw StandardException.newException(SQLState.LANG_NO_FK_ON_SYSTEM_SCHEMA); } // check the referenced table, unless this is a self-referencing constraint if (refTableName.equals(ddlNode.getObjectName())) return; // error when the referenced table does not exist TableDescriptor td = getTableDescriptor(refTableName.getTableName(), refTableSd); if (td == null) throw StandardException.newException(SQLState.LANG_INVALID_FK_NO_REF_TAB, getConstraintMoniker(), refTableName.getTableName()); // Verify if REFERENCES_PRIV is granted to columns referenced getCompilerContext().pushCurrentPrivType(getPrivType()); // If references clause doesn't have columnlist, get primary key info if (refRcl.size()==0 && (td.getPrimaryKey() != null)) { // Get the primary key columns int[] refCols = td.getPrimaryKey().getReferencedColumns(); for (int i=0; i<refCols.length; i++) { ColumnDescriptor cd = td.getColumnDescriptor(refCols[i]); // Set tableDescriptor for this column descriptor. Needed for adding required table // access permission. Column descriptors may not have this set already. cd.setTableDescriptor(td); getCompilerContext().addRequiredColumnPriv(cd); } } else { for (int i=0; i<refRcl.size(); i++) { ResultColumn rc = (ResultColumn) refRcl.elementAt(i); ColumnDescriptor cd = td.getColumnDescriptor(rc.getName()); if (cd != null) { // Set tableDescriptor for this column descriptor. Needed for adding required table // access permission. Column descriptors may not have this set already. cd.setTableDescriptor(td); getCompilerContext().addRequiredColumnPriv(cd); } } } getCompilerContext().popCurrentPrivType(); }
TransactionController tc = activation.getTransactionController();
private void insertToPositionIndex(int position, RowLocation rl ) throws StandardException { if(!positionIndexCreated) { int numKeys = 2; position_sqllong = new SQLLongint(); positionIndexRow = new DataValueDescriptor[numKeys]; positionIndexRow[0] = position_sqllong; positionIndexRow[1] = rl; Properties props = makeIndexProperties(positionIndexRow, CID); positionIndexConglomId = tc.createConglomerate("BTREE", positionIndexRow, null, props, TransactionController.IS_TEMPORARY | TransactionController.IS_KEPT); positionIndex_cc = tc.openConglomerate( positionIndexConglomId, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE); positionIndexCreated = true; } position_sqllong.setValue(position); positionIndexRow[0] = position_sqllong; positionIndexRow[1] = rl; //insert the row location to position index positionIndex_cc.insert(positionIndexRow); }
TransactionController tc = activation.getTransactionController();
private boolean isRowAlreadyExist(ExecRow inputRow) throws StandardException { DataValueDescriptor rlColumn; RowLocation baseRowLocation; rlColumn = inputRow.getColumn(inputRow.nColumns()); if(CID!=0 && rlColumn instanceof SQLRef) { baseRowLocation = (RowLocation) (rlColumn).getObject(); if(!uniqueIndexCreated) { int numKeys = 2; uniqueIndexRow = new DataValueDescriptor[numKeys]; uniqueIndexRow[0] = baseRowLocation; uniqueIndexRow[1] = baseRowLocation; Properties props = makeIndexProperties(uniqueIndexRow, CID); uniqueIndexConglomId = tc.createConglomerate("BTREE",uniqueIndexRow , null, props, TransactionController.IS_TEMPORARY | TransactionController.IS_KEPT); uniqueIndex_cc = tc.openConglomerate( uniqueIndexConglomId, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE); uniqueIndexCreated = true; } uniqueIndexRow[0] = baseRowLocation; uniqueIndexRow[1] = baseRowLocation; // Insert the row into the secondary index. int status; if ((status = uniqueIndex_cc.insert(uniqueIndexRow))!= 0) { if(status == ConglomerateController.ROWISDUPLICATE) { return true ; // okay; we don't insert duplicates } else { if (SanityManager.DEBUG) { if (status != 0) { SanityManager.THROWASSERT("got funky status ("+status+") back from "+ "Unique Index insert()"); } } } } } return false; }
return (connection_.xaState_ == Connection.XA_LOCAL) || (connection_.xaState_ == Connection.XA_LOCAL_START_SENT);
return (connection_.xaState_ == Connection.XA_LOCAL) ;
private boolean willTickleServer(int number, boolean allowAutoCommits) throws SqlException { boolean requiresAutocommit = false; if (resultSetList_ != null) { for (int i = 0; i < number; i++) { if (resultSetList_[i] != null) { if (resultSetList_[i].openOnServer_) { return true; // for the writeClose flow } if (!resultSetList_[i].autoCommitted_ && allowAutoCommits) { requiresAutocommit = true; // for the commit flow } } } } else if (generatedKeysResultSet_ != null && generatedKeysResultSet_.openOnServer_) { generatedKeysResultSet_.writeClose(); } else if (resultSet_ != null) { if (resultSet_.openOnServer_) { return true; // for the writeClose flow } if (!resultSet_.autoCommitted_ && allowAutoCommits) { requiresAutocommit = true; } } if (connection_.autoCommit_ && requiresAutocommit) { // for the auto-commit; if (connection_.isXAConnection_) { return (connection_.xaState_ == Connection.XA_LOCAL) || (connection_.xaState_ == Connection.XA_LOCAL_START_SENT); } else { return true; } } return false; }
Enumeration enum = attributes.elements(); while (enum.hasMoreElements()) { AttributeHolder anAttribute = (AttributeHolder)enum.nextElement();
Enumeration e = attributes.elements(); while (e.hasMoreElements()) { AttributeHolder anAttribute = (AttributeHolder)e.nextElement();
public void check(){ Enumeration enum = attributes.elements(); while (enum.hasMoreElements()) { AttributeHolder anAttribute = (AttributeHolder)enum.nextElement(); //The check for duplicate must be done at the URLCheck level //and not by each specific attribute. Only URLCheck knows about //all of the attributes and names. checkForDuplicate(anAttribute); //Have each attribute check as much about themselves as possible. anAttribute.check( validProps); }
}
public void check(){ Enumeration enum = attributes.elements(); while (enum.hasMoreElements()) { AttributeHolder anAttribute = (AttributeHolder)enum.nextElement(); //The check for duplicate must be done at the URLCheck level //and not by each specific attribute. Only URLCheck knows about //all of the attributes and names. checkForDuplicate(anAttribute); //Have each attribute check as much about themselves as possible. anAttribute.check( validProps); }
getCompilerContext().popCurrentPrivType();
public QueryTreeNode bind() throws StandardException { FromList fromList = (FromList) getNodeFactory().getNode( C_NodeTypes.FROM_LIST, getNodeFactory().doJoinOrderOptimization(), getContextManager()); /* If any underlying ResultSetNode is a SelectNode, then we * need to do a full bind(), including the expressions * (since the fromList may include a FromSubquery). */ DataDictionary dataDictionary = getDataDictionary(); super.bindResultSetsWithTables(dataDictionary); /* ** Get the TableDescriptor for the table we are inserting into */ verifyTargetTable(); // Check the validity of the targetProperties, if they exist if (targetProperties != null) { verifyTargetProperties(dataDictionary); } /* ** Get the resultColumnList representing the columns in the base ** table or VTI. */ getResultColumnList(); /* If we have a target column list, then it must have the same # of * entries as the result set's RCL. */ if (targetColumnList != null) { /* Bind the target column list */ if (targetTableDescriptor != null) { targetColumnList.bindResultColumnsByName(targetTableDescriptor, (DMLStatementNode) this); } else { targetColumnList.bindResultColumnsByName(targetVTI.getResultColumns(), targetVTI, this); } } /* Verify that all underlying ResultSets reclaimed their FromList */ if (SanityManager.DEBUG) { SanityManager.ASSERT(fromList.size() == 0, "fromList.size() is expected to be 0, not " + fromList.size() + " on return from RS.bindExpressions()"); } /* Replace any DEFAULTs with the associated tree */ resultSet.replaceDefaults(targetTableDescriptor, targetColumnList); /* Bind the expressions now that the result columns are bound * NOTE: This will be the 2nd time for those underlying ResultSets * that have tables (no harm done), but it is necessary for those * that do not have tables. It's too hard/not work the effort to * avoid the redundancy. */ super.bindExpressions(); /* ** If the result set is a union, it could be a table constructor. ** Bind any nulls in the result columns of the table constructor ** to the types of the table being inserted into. ** ** The types of ? parameters in row constructors and table constructors ** in an INSERT statement come from the result columns. ** ** If there is a target column list, use that instead of the result ** columns for the whole table, since the columns in the result set ** correspond to the target column list. 
*/ if (targetColumnList != null) { if (resultSet.getResultColumns().size() > targetColumnList.size()) throw StandardException.newException(SQLState.LANG_DB2_INVALID_COLS_SPECIFIED); resultSet.bindUntypedNullsToResultColumns(targetColumnList); resultSet.setTableConstructorTypes(targetColumnList); } else { if (resultSet.getResultColumns().size() > resultColumnList.size()) throw StandardException.newException(SQLState.LANG_DB2_INVALID_COLS_SPECIFIED); resultSet.bindUntypedNullsToResultColumns(resultColumnList); resultSet.setTableConstructorTypes(resultColumnList); } /* Bind the columns of the result set to their expressions */ resultSet.bindResultColumns(fromList); int resCols = resultSet.getResultColumns().size(); DataDictionary dd = getDataDictionary(); if (targetColumnList != null) { if (targetColumnList.size() != resCols) throw StandardException.newException(SQLState.LANG_DB2_INVALID_COLS_SPECIFIED); } else { if (targetTableDescriptor != null && targetTableDescriptor.getNumberOfColumns() != resCols) throw StandardException.newException(SQLState.LANG_DB2_INVALID_COLS_SPECIFIED); } /* See if the ResultSet's RCL needs to be ordered to match the target * list, or "enhanced" to accommodate defaults. It can only need to * be ordered if there is a target column list. It needs to be * enhanced if there are fewer source columns than there are columns * in the table. */ boolean inOrder = true; int numTableColumns = resultColumnList.size(); /* colMap[] will be the size of the target list, which could be larger * than the current size of the source list. In that case, the source * list will be "enhanced" to include defaults. */ int[] colMap = new int[numTableColumns]; // set the fields to an unused value for (int i = 0; i < colMap.length; i++) { colMap[i] = -1; } /* Create the source/target list mapping */ if (targetColumnList != null) { /* ** There is a target column list, so the result columns might ** need to be ordered. Step through the target column list ** and remember the position in the target table of each column. ** Remember if any of the columns are out of order. */ int targetSize = targetColumnList.size(); for (int index = 0; index < targetSize; index++) { int position = ((ResultColumn) (targetColumnList.elementAt(index))). columnDescriptor.getPosition(); if (index != position-1) { inOrder = false; } // position is 1-base; colMap indexes and entries are 0-based. colMap[position-1] = index; } } else { /* ** There is no target column list, so the result columns in the ** source are presumed to be in the same order as the target ** table. */ for (int position = 0; position < resultSet.getResultColumns().size(); position++) { colMap[position] = position; } } // colmap[x] == y means that column x in the target table // maps to column y in the source result set. // colmap[x] == -1 means that column x in the target table // maps to its default value. // both colmap indexes and values are 0-based. /* if the list is in order and complete, we don't have to change * the tree. If it is not, then we call RSN.enhanceRCLForInsert() * which will either * (reorder and/or "enhance" the source RCL within the same RSN) or * (generate and return a PRN with a new reordered/enhanced RCL above * the existing RSN). This way, RSN's that understand how to do projections * can avoid the additional PRN while those that do not will get one. */ /* NOTE - javascope gives confusing branch coverage info here. By * breaking apart the following if condition, I have verified that * we test all cases. (Jerry 7/17/97) */ if (! 
inOrder || resultSet.resultColumns.size() < numTableColumns) { // one thing we do know is that all of the resultsets underneath // us have their resultColumn names filled in with the names of // the target table columns. That makes generating the mapping // "easier" -- we simply generate the names of the target table columns // that are included. For the missing columns, we generate default // value expressions. resultSet = resultSet.enhanceRCLForInsert(numTableColumns, colMap, dataDictionary, targetTableDescriptor, targetVTI); } if (resultSet instanceof UnionNode) { // If we are inserting a number of rows in VALUES clause, we need to // examine each row for 'autoincrement'. resultColumnList.checkAutoincrementUnion(resultSet); } else resultColumnList.checkAutoincrement(resultSet.getResultColumns()); resultColumnList.checkStorableExpressions(resultSet.getResultColumns()); /* Insert a NormalizeResultSetNode above the source if the source * and target column types and lengths do not match. */ if (! resultColumnList.columnTypesAndLengthsMatch( resultSet.getResultColumns())) { resultSet = resultSet.genNormalizeResultSetNode(resultSet, false); resultColumnList.copyTypesAndLengthsToSource(resultSet.getResultColumns()); } if (targetTableDescriptor != null) { /* Get and bind all constraints on the table */ ResultColumnList sourceRCL = resultSet.getResultColumns(); sourceRCL.copyResultColumnNames(resultColumnList); checkConstraints = bindConstraints(dataDictionary, getNodeFactory(), targetTableDescriptor, null, sourceRCL, (int[]) null, (FormatableBitSet) null, false, true); /* we always include * triggers in core language */ /* Do we need to do a deferred mode insert */ /* ** Deferred if: ** If the target table is also a source table ** Self-referencing foreign key constraint ** trigger */ if (resultSet.referencesTarget( targetTableDescriptor.getName(), true) || requiresDeferredProcessing()) { deferred = true; /* Disallow bulk insert replace when target table * is also a source table. */ if (bulkInsertReplace && resultSet.referencesTarget( targetTableDescriptor.getName(), true)) { throw StandardException.newException(SQLState.LANG_INVALID_BULK_INSERT_REPLACE, targetTableDescriptor.getQualifiedName()); } } /* Get the list of indexes on the table being inserted into */ getAffectedIndexes(targetTableDescriptor); TransactionController tc = getLanguageConnectionContext().getTransactionCompile(); autoincRowLocation = dd.computeAutoincRowLocations(tc, targetTableDescriptor); } else { deferred = VTIDeferModPolicy.deferIt( DeferModification.INSERT_STATEMENT, targetVTI, null, resultSet); } return this; }
DropConstraintConstantAction.dropConstraintAndIndex( getDataDictionary().getDependencyManager(), table, getDataDictionary(), this, lcc.getTransactionExecute(), lcc, true);
public void makeInvalid(int action, LanguageConnectionContext lcc) throws StandardException { if (action == DependencyManager.REVOKE_PRIVILEGE) { //for now, ignore revoke privilege action return; } /* ** SET_CONSTRAINTS/TRIGGERS is the only valid action */ if ((action != DependencyManager.SET_CONSTRAINTS_DISABLE) && (action != DependencyManager.SET_CONSTRAINTS_ENABLE) && (action != DependencyManager.SET_TRIGGERS_ENABLE) && (action != DependencyManager.SET_TRIGGERS_DISABLE) ) { /* ** We should never get here, we should have barfed on ** prepareToInvalidate(). */ if (SanityManager.DEBUG) { DependencyManager dm; dm = getDataDictionary().getDependencyManager(); SanityManager.THROWASSERT("makeInvalid("+ dm.getActionString(action)+ ") not expected to get called"); } } }
if (SanityManager.DEBUG) checkIndexNumber(indexNumber);
public ExecIndexRow buildEmptyIndexRow( int indexNumber, RowLocation rowLocation) throws StandardException { if (SanityManager.DEBUG) checkIndexNumber(indexNumber); /* Build the row */ ExecIndexRow row = getExecutionFactory().getIndexableRow(2); /* 1st column is TABLEID (char(36)) */ row.setColumn(1, getDataValueFactory().getCharDataValue((String) null)); row.setColumn(2, rowLocation); return row; }
if (!add) { rf.setUUIDOfThePassedDescriptor(existingRow, perm); }
public void addRemovePermissionsDescriptor( boolean add, PermissionsDescriptor perm, String grantee, TransactionController tc) throws StandardException { // It is possible for grant statements to look like following // grant execute on function f_abs to mamata2, mamata3; // grant all privileges on t11 to mamata2, mamata3; // This means that dd.addRemovePermissionsDescriptor will be called // twice for TablePermsDescriptor and twice for RoutinePermsDescriptor, // once for each grantee. // First it's called for mamta2. When a row is inserted for mamta2 // into the correct system table for the permission descriptor, the // permission descriptor's uuid gets populated with the uuid of // the row that just got inserted into the system table for mamta2 // Now, before dd.addRemovePermissionsDescriptor leaves so it can // get called for MAMTA3, we should reset the Permission Descriptor's // uuid to null or otherwise, for the next call to // dd.addRemovePermissionDescriptor, we will think that there is a // duplicate row getting inserted for the same uuid. // Same logic applies to ColPermsDescriptor int catalogNumber = perm.getCatalogNumber(); perm.setGrantee( grantee); TabInfo ti = getNonCoreTI( catalogNumber); PermissionsCatalogRowFactory rf = (PermissionsCatalogRowFactory) ti.getCatalogRowFactory(); int primaryIndexNumber = rf.getPrimaryIndexNumber(); ConglomerateController heapCC = tc.openConglomerate( ti.getHeapConglomerate(), false, // do not keep open across commits 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_REPEATABLE_READ); RowLocation rl = null; try { rl = heapCC.newRowLocationTemplate(); } finally { heapCC.close(); heapCC = null; } ExecIndexRow key = rf.buildIndexKeyRow( primaryIndexNumber, perm); ExecRow existingRow = ti.getRow( tc, key, primaryIndexNumber); if( existingRow == null) { if( ! add) //we didn't find an entry in system catalog and this is revoke //so that means there is nothing to revoke. Simply return. //No need to reset permission descriptor's uuid because //no row was ever found in system catalog for the given //permission and hence uuid can't be non-null return; //We didn't find an entry in system catalog and this is grant so //so that means we have to enter a new row in system catalog for //this grant. 
ExecRow row = ti.getCatalogRowFactory().makeRow( perm, (TupleDescriptor) null); int insertRetCode = ti.insertRow(row, tc, true /* wait */); if( SanityManager.DEBUG) SanityManager.ASSERT( insertRetCode == TabInfo.ROWNOTDUPLICATE, "Race condition in inserting table privilege."); } else { // add/remove these permissions to/from the existing permissions boolean[] colsChanged = new boolean[ existingRow.nColumns()]; boolean[] indicesToUpdate = new boolean[ rf.getNumIndexes()]; int changedColCount = 0; if( add) changedColCount = rf.orPermissions( existingRow, perm, colsChanged); else changedColCount = rf.removePermissions( existingRow, perm, colsChanged); if( changedColCount == 0) { //grant/revoke privilege didn't change anything and hence just //return after resetting the uuid in the permission descriptor perm.setUUID(null); return; } if( changedColCount < 0) { // No permissions left in the current row ti.deleteRow( tc, key, primaryIndexNumber); } else if( changedColCount > 0) { int[] colsToUpdate = new int[changedColCount]; changedColCount = 0; for( int i = 0; i < colsChanged.length; i++) { if( colsChanged[i]) colsToUpdate[ changedColCount++] = i + 1; } if( SanityManager.DEBUG) SanityManager.ASSERT( changedColCount == colsToUpdate.length, "return value of " + rf.getClass().getName() + ".orPermissions does not match the number of booleans it set in colsChanged."); ti.updateRow( key, existingRow, primaryIndexNumber, indicesToUpdate, colsToUpdate, tc, true /* wait */); } } // Remove cached permissions data. The cache may hold permissions data for this key even if // the row in the permissions table is new. In that case the cache may have an entry indicating no // permissions Cacheable cacheEntry = getPermissionsCache().findCached( perm); if( cacheEntry != null) getPermissionsCache().remove( cacheEntry); //Before leaving, reset the uuid in the permission descriptor perm.setUUID(null); } // end of addPermissionsDescriptor
if (aliasType == null) { nullable = 0; return; } if (isFunction) { nullable = (short) JDBC40Translation.FUNCTION_NULLABLE; sqlType = procedure.getReturnType(); columnName = ""; columnType = (short) JDBC40Translation.FUNCTION_RETURN; paramCursor = -2; return; } nullable = (short) DatabaseMetaData.procedureNullable; paramCursor = -1;
public GetProcedureColumns(AliasInfo aliasInfo, String aliasType) throws SQLException { // compile time aliasInfo will be null. if (aliasInfo != null) { isProcedure = aliasType.equals("P"); procedure = (RoutineAliasInfo) aliasInfo; method_count = (short) procedure.getParameterCount(); } }
return (short) (procedure.getParameterModes()[paramCursor]);
return columnType;
public short getShort(int column) throws SQLException { switch (column) { case 2: // COLUMN_TYPE: return (short) (procedure.getParameterModes()[paramCursor]); case 3: // DATA_TYPE: if (sqlType != null) return (short)sqlType.getJDBCTypeId(); else return (short) java.sql.Types.JAVA_OBJECT; case 7: // SCALE: if (sqlType != null) return (short)sqlType.getScale(); // No corresponding SQL type return 0; case 8: // RADIX: if (sqlType != null) { int sqlTypeID = sqlType.getJDBCTypeId(); if (sqlTypeID == java.sql.Types.REAL || sqlTypeID == java.sql.Types.FLOAT || sqlTypeID == java.sql.Types.DOUBLE) { return 2; } return 10; } // No corresponding SQL type return 0; case 9: // NULLABLE: return (short)java.sql.DatabaseMetaData.procedureNullable; case 11: // METHOD_ID: return method_count; case 12: // PARAMETER_ID: return param_number; default: return super.getShort(column); // throw exception } }
return (short)java.sql.DatabaseMetaData.procedureNullable;
return nullable;
public short getShort(int column) throws SQLException { switch (column) { case 2: // COLUMN_TYPE: return (short) (procedure.getParameterModes()[paramCursor]); case 3: // DATA_TYPE: if (sqlType != null) return (short)sqlType.getJDBCTypeId(); else return (short) java.sql.Types.JAVA_OBJECT; case 7: // SCALE: if (sqlType != null) return (short)sqlType.getScale(); // No corresponding SQL type return 0; case 8: // RADIX: if (sqlType != null) { int sqlTypeID = sqlType.getJDBCTypeId(); if (sqlTypeID == java.sql.Types.REAL || sqlTypeID == java.sql.Types.FLOAT || sqlTypeID == java.sql.Types.DOUBLE) { return 2; } return 10; } // No corresponding SQL type return 0; case 9: // NULLABLE: return (short)java.sql.DatabaseMetaData.procedureNullable; case 11: // METHOD_ID: return method_count; case 12: // PARAMETER_ID: return param_number; default: return super.getShort(column); // throw exception } }
return procedure.getParameterNames()[paramCursor];
return columnName;
public String getString(int column) throws SQLException { switch (column) { case 1: // COLUMN_NAME: return procedure.getParameterNames()[paramCursor]; case 4: //_TYPE_NAME: return sqlType.getTypeName(); case 10: // REMARKS: return null; default: return super.getString(column); // throw exception } }
sqlType = procedure.getParameterTypes()[paramCursor];
if (paramCursor > -1) { sqlType = procedure.getParameterTypes()[paramCursor]; columnName = procedure.getParameterNames()[paramCursor]; columnType = (short)translate(procedure.getParameterModes()[paramCursor]); }
public boolean next() throws SQLException { if (++paramCursor >= procedure.getParameterCount()) return false; sqlType = procedure.getParameterTypes()[paramCursor]; param_number = (short) paramCursor; return true; }
protected PreparedStatement getPreparedStatement()
protected PreparedStatement getPreparedStatement() throws SQLException
protected PreparedStatement getPreparedStatement() { return ps; }
return ps;
if (ps instanceof BrokeredPreparedStatement) return (PreparedStatement)( ((BrokeredPreparedStatement) ps).getStatement()); else return ps;
protected PreparedStatement getPreparedStatement() { return ps; }
holdValue = ((Integer) sh.invoke(ps,null)).intValue();
holdValue = ((Integer) sh.invoke(rsstmt,null)).intValue();
protected int getResultSetHoldability() throws SQLException { Statement rsstmt = null; ResultSet rs = getResultSet(); int holdValue = -1; if (rs != null) rsstmt = rs.getStatement(); else rsstmt = getPreparedStatement(); Class[] getResultSetHoldabilityParam = {}; try { Method sh = rsstmt.getClass().getMethod("getResultSetHoldability", getResultSetHoldabilityParam); holdValue = ((Integer) sh.invoke(ps,null)).intValue(); } catch (Exception e) { handleReflectionException(e); } return holdValue; }
this.blksize = blksize; this.qryprctyp = qryblkctl; this.maxblkext = maxblkext; this.outovropt = outovropt; this.qryrowset = qryrowset;
protected void setOPNQRYOptions(int blksize, int qryblkctl, int maxblkext, int outovropt,int qryrowset,int qryclsimpl) { currentDrdaRs.setOPNQRYOptions( blksize, qryblkctl, maxblkext, outovropt, qryrowset, qryclsimpl); }
}
private int parseACCSEC() throws DRDAProtocolException { int securityCheckCode = 0; int securityMechanism = 0; byte [] publicKeyIn = null; reader.markCollection(); int codePoint = reader.getCodePoint(); while (codePoint != -1) { switch(codePoint) { //optional case CodePoint.SECMGRNM: // this is defined to be 0 length if (reader.getDdmLength() != 0) badObjectLength(CodePoint.SECMGRNM); break; //required case CodePoint.SECMEC: checkLength(CodePoint.SECMEC, 2); securityMechanism = reader.readNetworkShort(); if (SanityManager.DEBUG) trace("Security mechanism = " + securityMechanism); // if Property.DRDA_PROP_SECURITYMECHANISM has been set, then // network server only accepts connections which use that // security mechanism. No other types of connections // are accepted. // Make check to see if this property has been set. // if set, and if the client requested security mechanism // is not the same, then return a security check code // that the server does not support/allow this security // mechanism if ( (server.getSecurityMechanism() != NetworkServerControlImpl.INVALID_OR_NOTSET_SECURITYMECHANISM) && securityMechanism != server.getSecurityMechanism()) securityCheckCode = CodePoint.SECCHKCD_NOTSUPPORTED; else { // for plain text userid,password USRIDPWD, and USRIDONL // no need of decryptionManager if (securityMechanism != CodePoint.SECMEC_USRIDPWD && securityMechanism != CodePoint.SECMEC_USRIDONL) { //this is the only other one we understand if (securityMechanism != CodePoint.SECMEC_EUSRIDPWD) securityCheckCode = CodePoint.SECCHKCD_NOTSUPPORTED; else { try { if (decryptionManager == null) decryptionManager = new DecryptionManager(); myPublicKey = decryptionManager.obtainPublicKey(); } catch (SQLException e) { println2Log(null, session.drdaID, e.getMessage()); // Local security service non-retryable error. securityCheckCode = CodePoint.SECCHKCD_0A; } } } } break; //optional (currently required for Cloudscape - may need to revisit) case CodePoint.RDBNAM: String dbname = parseRDBNAM(); Database d = session.getDatabase(dbname); if (d == null) addDatabase(dbname); else database = d; break; //optional - depending on security Mechanism case CodePoint.SECTKN: publicKeyIn = reader.readBytes(); break; default: invalidCodePoint(codePoint); } codePoint = reader.getCodePoint(); } // check for required CodePoint's if (securityMechanism == 0) missingCodePoint(CodePoint.SECMEC); // RESOLVE - when we look further into security we might want to // handle this part of the protocol at the session level without // requiring a database for when authentication is used but there // is no database level security if (database == null) missingCodePoint(CodePoint.RDBNAM); database.securityMechanism = securityMechanism; database.publicKeyIn = publicKeyIn; // need security token if (securityCheckCode == 0 && database.securityMechanism == CodePoint.SECMEC_EUSRIDPWD && database.publicKeyIn == null) securityCheckCode = CodePoint.SECCHKCD_SECTKNMISSING_OR_INVALID; // shouldn't have security token if (securityCheckCode == 0 && (database.securityMechanism == CodePoint.SECMEC_USRIDPWD || database.securityMechanism == CodePoint.SECMEC_USRIDONL) && database.publicKeyIn != null) securityCheckCode = CodePoint.SECCHKCD_SECTKNMISSING_OR_INVALID; if (SanityManager.DEBUG) trace("** ACCSECRD securityCheckCode is: "+securityCheckCode); // If the security check was successful set the session state to // security accesseed. 
Otherwise go back to attributes exchanged so we // require another ACCSEC if (securityCheckCode == 0) session.setState(session.SECACC); else session.setState(session.ATTEXC); return securityCheckCode; }
String answer = null; switch (questionIndex) { case 1: answer = fillForm.getUnique1(); break; case 2: answer = fillForm.getUnique2(); break; case 3: answer = fillForm.getUnique3(); break; case 4: answer = fillForm.getUnique4(); default: answer = fillForm.getUnique5(); break; }
private String findMatch(FillForm fillForm, HttpSession session, ArrayList<String> commands) { String nextPos = null; Iterator iter = commands.iterator(); while (iter.hasNext() && nextPos == null) { // StringTokenizer st = new StringTokenizer((String) iter.next(), " "); LineParser st = new LineParser((String) iter.next(), ' '); try { int questionIndex = 0; if (st.countTokens() == 3) questionIndex = Integer.parseInt(st.nextToken()); String condition = st.nextToken(); String operation = st.nextToken(); String answer = null; switch (questionIndex) { case 1: answer = fillForm.getUnique1(); break; case 2: answer = fillForm.getUnique2(); break; case 3: answer = fillForm.getUnique3(); break; case 4: answer = fillForm.getUnique4(); default: answer = fillForm.getUnique5(); break; } // Before parsing check if its a jump condition if(condition.equals("Jump")){ nextPos = operation; break; } // parse condition to see if it's a q or a p // a condition should be some like p1==asdajds 11 // 11 p1==asda 11 String operator = null; if (condition.indexOf("==") != -1) { int internalPos = condition.indexOf("=="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf(">=") != -1) { int internalPos = condition.indexOf(">="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans >= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() >= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("<=") != -1) { int internalPos = condition.indexOf("<="); operator 
= condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans <= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() <= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("!=") != -1) { int internalPos = condition.indexOf("!="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (!answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } } catch (Exception e) { logger.info("Error on line of script, ignoring it"); } } return nextPos; }
if (answer.equals(condition.substring(internalPos + 2))) {
if (isEqualTo(questionIndex, condition .substring(internalPos + 2), section, fillForm)) {
private String findMatch(FillForm fillForm, HttpSession session, ArrayList<String> commands) { String nextPos = null; Iterator iter = commands.iterator(); while (iter.hasNext() && nextPos == null) { // StringTokenizer st = new StringTokenizer((String) iter.next(), " "); LineParser st = new LineParser((String) iter.next(), ' '); try { int questionIndex = 0; if (st.countTokens() == 3) questionIndex = Integer.parseInt(st.nextToken()); String condition = st.nextToken(); String operation = st.nextToken(); String answer = null; switch (questionIndex) { case 1: answer = fillForm.getUnique1(); break; case 2: answer = fillForm.getUnique2(); break; case 3: answer = fillForm.getUnique3(); break; case 4: answer = fillForm.getUnique4(); default: answer = fillForm.getUnique5(); break; } // Before parsing check if its a jump condition if(condition.equals("Jump")){ nextPos = operation; break; } // parse condition to see if it's a q or a p // a condition should be some like p1==asdajds 11 // 11 p1==asda 11 String operator = null; if (condition.indexOf("==") != -1) { int internalPos = condition.indexOf("=="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf(">=") != -1) { int internalPos = condition.indexOf(">="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans >= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() >= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("<=") != -1) { int internalPos = condition.indexOf("<="); operator 
= condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans <= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() <= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("!=") != -1) { int internalPos = condition.indexOf("!="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (!answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } } catch (Exception e) { logger.info("Error on line of script, ignoring it"); } } return nextPos; }
int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans >= res) {
if (this.isGreaterOrEqual(questionIndex, condition .substring(internalPos + 2), section, fillForm)) {
private String findMatch(FillForm fillForm, HttpSession session, ArrayList<String> commands) { String nextPos = null; Iterator iter = commands.iterator(); while (iter.hasNext() && nextPos == null) { // StringTokenizer st = new StringTokenizer((String) iter.next(), " "); LineParser st = new LineParser((String) iter.next(), ' '); try { int questionIndex = 0; if (st.countTokens() == 3) questionIndex = Integer.parseInt(st.nextToken()); String condition = st.nextToken(); String operation = st.nextToken(); String answer = null; switch (questionIndex) { case 1: answer = fillForm.getUnique1(); break; case 2: answer = fillForm.getUnique2(); break; case 3: answer = fillForm.getUnique3(); break; case 4: answer = fillForm.getUnique4(); default: answer = fillForm.getUnique5(); break; } // Before parsing check if its a jump condition if(condition.equals("Jump")){ nextPos = operation; break; } // parse condition to see if it's a q or a p // a condition should be some like p1==asdajds 11 // 11 p1==asda 11 String operator = null; if (condition.indexOf("==") != -1) { int internalPos = condition.indexOf("=="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf(">=") != -1) { int internalPos = condition.indexOf(">="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans >= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() >= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("<=") != -1) { int internalPos = condition.indexOf("<="); operator 
= condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans <= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() <= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("!=") != -1) { int internalPos = condition.indexOf("!="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (!answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } } catch (Exception e) { logger.info("Error on line of script, ignoring it"); } } return nextPos; }
int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans <= res) {
if (this.isLessOrEqual(questionIndex, condition .substring(internalPos + 2), section, fillForm)) {
private String findMatch(FillForm fillForm, HttpSession session, ArrayList<String> commands) { String nextPos = null; Iterator iter = commands.iterator(); while (iter.hasNext() && nextPos == null) { // StringTokenizer st = new StringTokenizer((String) iter.next(), " "); LineParser st = new LineParser((String) iter.next(), ' '); try { int questionIndex = 0; if (st.countTokens() == 3) questionIndex = Integer.parseInt(st.nextToken()); String condition = st.nextToken(); String operation = st.nextToken(); String answer = null; switch (questionIndex) { case 1: answer = fillForm.getUnique1(); break; case 2: answer = fillForm.getUnique2(); break; case 3: answer = fillForm.getUnique3(); break; case 4: answer = fillForm.getUnique4(); default: answer = fillForm.getUnique5(); break; } // Before parsing check if its a jump condition if(condition.equals("Jump")){ nextPos = operation; break; } // parse condition to see if it's a q or a p // a condition should be some like p1==asdajds 11 // 11 p1==asda 11 String operator = null; if (condition.indexOf("==") != -1) { int internalPos = condition.indexOf("=="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf(">=") != -1) { int internalPos = condition.indexOf(">="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans >= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() >= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("<=") != -1) { int internalPos = condition.indexOf("<="); operator 
= condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans <= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() <= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("!=") != -1) { int internalPos = condition.indexOf("!="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (!answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } } catch (Exception e) { logger.info("Error on line of script, ignoring it"); } } return nextPos; }
if (!answer.equals(condition.substring(internalPos + 2))) {
if (!this.isEqualTo(questionIndex, condition .substring(internalPos + 2), section, fillForm)) {
private String findMatch(FillForm fillForm, HttpSession session, ArrayList<String> commands) { String nextPos = null; Iterator iter = commands.iterator(); while (iter.hasNext() && nextPos == null) { // StringTokenizer st = new StringTokenizer((String) iter.next(), " "); LineParser st = new LineParser((String) iter.next(), ' '); try { int questionIndex = 0; if (st.countTokens() == 3) questionIndex = Integer.parseInt(st.nextToken()); String condition = st.nextToken(); String operation = st.nextToken(); String answer = null; switch (questionIndex) { case 1: answer = fillForm.getUnique1(); break; case 2: answer = fillForm.getUnique2(); break; case 3: answer = fillForm.getUnique3(); break; case 4: answer = fillForm.getUnique4(); default: answer = fillForm.getUnique5(); break; } // Before parsing check if its a jump condition if(condition.equals("Jump")){ nextPos = operation; break; } // parse condition to see if it's a q or a p // a condition should be some like p1==asdajds 11 // 11 p1==asda 11 String operator = null; if (condition.indexOf("==") != -1) { int internalPos = condition.indexOf("=="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf(">=") != -1) { int internalPos = condition.indexOf(">="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans >= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() >= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("<=") != -1) { int internalPos = condition.indexOf("<="); operator 
= condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans <= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() <= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("!=") != -1) { int internalPos = condition.indexOf("!="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (!answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } } catch (Exception e) { logger.info("Error on line of script, ignoring it"); } } return nextPos; }
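The three hunks above swap findMatch's inline answer comparisons for calls to helper methods (isGreaterOrEqual, isLessOrEqual, isEqualTo) that take the question index, the right-hand value parsed out of the script condition, the current Section, and the FillForm. Their bodies are not part of this hunk, and the section variable they receive is presumably added to findMatch elsewhere in the change. The following is a minimal, hedged sketch of what such helpers could look like, assuming answers are still resolved from the FillForm by one-based question index; the Section parameter is accepted only to match the call sites and left unused, and note that the sketch returns per case instead of reproducing the fall-through in the original switch (case 4 drops into default there).

    // Hedged sketch only: the real helper bodies are not shown in this diff.
    // The Section parameter is unused here; it just mirrors the call sites.
    private String answerFor(int questionIndex, FillForm fillForm) {
        switch (questionIndex) {
        case 1:  return fillForm.getUnique1();
        case 2:  return fillForm.getUnique2();
        case 3:  return fillForm.getUnique3();
        case 4:  return fillForm.getUnique4();
        default: return fillForm.getUnique5();
        }
    }

    private boolean isGreaterOrEqual(int questionIndex, String value,
            Section section, FillForm fillForm) {
        // Numeric comparison, mirroring the old "ans >= res" branch.
        // A NumberFormatException is caught by the caller's surrounding try block.
        return Integer.parseInt(answerFor(questionIndex, fillForm))
                >= Integer.parseInt(value);
    }

    private boolean isLessOrEqual(int questionIndex, String value,
            Section section, FillForm fillForm) {
        return Integer.parseInt(answerFor(questionIndex, fillForm))
                <= Integer.parseInt(value);
    }

    private boolean isEqualTo(int questionIndex, String value,
            Section section, FillForm fillForm) {
        // String equality, mirroring the old "answer.equals(...)" branch.
        String answer = answerFor(questionIndex, fillForm);
        return answer != null && answer.equals(value);
    }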
logger.info("Error on line of script, ignoring it");
logger.info("Error on line of script, ignoring it: " + e.getMessage());
private String findMatch(FillForm fillForm, HttpSession session, ArrayList<String> commands) { String nextPos = null; Iterator iter = commands.iterator(); while (iter.hasNext() && nextPos == null) { // StringTokenizer st = new StringTokenizer((String) iter.next(), " "); LineParser st = new LineParser((String) iter.next(), ' '); try { int questionIndex = 0; if (st.countTokens() == 3) questionIndex = Integer.parseInt(st.nextToken()); String condition = st.nextToken(); String operation = st.nextToken(); String answer = null; switch (questionIndex) { case 1: answer = fillForm.getUnique1(); break; case 2: answer = fillForm.getUnique2(); break; case 3: answer = fillForm.getUnique3(); break; case 4: answer = fillForm.getUnique4(); default: answer = fillForm.getUnique5(); break; } // Before parsing check if its a jump condition if(condition.equals("Jump")){ nextPos = operation; break; } // parse condition to see if it's a q or a p // a condition should be some like p1==asdajds 11 // 11 p1==asda 11 String operator = null; if (condition.indexOf("==") != -1) { int internalPos = condition.indexOf("=="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf(">=") != -1) { int internalPos = condition.indexOf(">="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans >= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() >= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("<=") != -1) { int internalPos = condition.indexOf("<="); operator 
= condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { int ans = Integer.parseInt(answer); int res = Integer.parseInt(condition .substring(internalPos + 2)); if (ans <= res) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() <= Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } else if (condition.indexOf("!=") != -1) { int internalPos = condition.indexOf("!="); operator = condition.substring(0, internalPos); if (operator.charAt(0) == 'p') { if (!answer.equals(condition.substring(internalPos + 2))) { try { Integer.parseInt(operation); nextPos = operation; } catch (NumberFormatException nfe) { ArrayList<String> quotas = null; if (session.getAttribute("quotaUpdates") != null) { quotas = (ArrayList<String>) session .getAttribute("quotaUpdates"); } else { quotas = new ArrayList<String>(); } if (!quotas.contains(operation)) quotas.add(operation); session.setAttribute("quotaUpdates", quotas); } } } else if (operator.charAt(0) == 'q') { int qIndex = Integer.parseInt(operator.substring(1)) - 1; Survey s = (Survey) session .getAttribute("CurrentClientSurvey"); s = new CustomSurveyDAO().findBySurrogateKey(s); Iterator<Quota> qIter = s.getQuotas().iterator(); int index = 0; while (qIter.hasNext()) { Quota q = qIter.next(); if (qIndex == index) { if (q.getCompleted() == Integer .parseInt(condition .substring(internalPos + 2))) { // operation should be a string // representation of an int index of a // section nextPos = operation; } break; } index++; } } } } catch (Exception e) { logger.info("Error on line of script, ignoring it"); } } return nextPos; }
pos = Integer.parseInt(nextSection);
pos = Integer.parseInt(nextSection) - 1;
public FlowManageDTO getNextStep(FillForm fillForm, HttpSession session) { FlowManageDTO flowDTO = null; Survey survey = (Survey) session.getAttribute("CurrentClientSurvey"); Section section = (Section) session.getAttribute("CurrentClientSection"); String quotaScript = section.getQuotaMgmtScript(); String flowScript = section.getFlowMgmtScript(); String nextSection = null; try { if (quotaScript != null && !quotaScript.trim().equals("")) { ArrayList<String> commands = new ArrayList<String>(); StringTokenizer st = new StringTokenizer(quotaScript, ";"); while (st.hasMoreElements()) { String temp = st.nextToken(); if (temp.indexOf("\r\n") != -1) temp = temp.substring(temp.indexOf("\r\n") + 2); commands.add(temp); } nextSection = findMatch(fillForm, session, commands); } if (nextSection == null && flowScript != null && !flowScript.trim().equals("")) { ArrayList<String> commands = new ArrayList<String>(); StringTokenizer st = new StringTokenizer(flowScript, ";"); while (st.hasMoreElements()) { String temp = st.nextToken(); if (temp.indexOf("\r\n") != -1) temp = temp.substring(temp.indexOf("\r\n") + 2); commands.add(temp); } nextSection = findMatch(fillForm, session, commands); } } catch (Exception e) { logger.error(e.getMessage()); nextSection = null; } int pos; if (nextSection == null) pos = (fillForm.getNextPos() == null) ? 0 : Integer .parseInt(fillForm.getNextPos()); else pos = Integer.parseInt(nextSection); int size = survey.getSections().size(); int result = size - pos; if (result > 1) { if (nextSection == null) flowDTO = new FlowManageDTO(NEXT, SUBMIT, pos + 1); else flowDTO = new FlowManageDTO(NEXT, SUBMIT, pos); } else { if (nextSection == null) flowDTO = new FlowManageDTO(FINISH, CLOSE, pos + 1); else flowDTO = new FlowManageDTO(FINISH, CLOSE, pos); } return flowDTO; }
flowDTO = new FlowManageDTO(NEXT, SUBMIT, pos);
flowDTO = new FlowManageDTO(NEXT, SUBMIT, pos + 1);
public FlowManageDTO getNextStep(FillForm fillForm, HttpSession session) { FlowManageDTO flowDTO = null; Survey survey = (Survey) session.getAttribute("CurrentClientSurvey"); Section section = (Section) session.getAttribute("CurrentClientSection"); String quotaScript = section.getQuotaMgmtScript(); String flowScript = section.getFlowMgmtScript(); String nextSection = null; try { if (quotaScript != null && !quotaScript.trim().equals("")) { ArrayList<String> commands = new ArrayList<String>(); StringTokenizer st = new StringTokenizer(quotaScript, ";"); while (st.hasMoreElements()) { String temp = st.nextToken(); if (temp.indexOf("\r\n") != -1) temp = temp.substring(temp.indexOf("\r\n") + 2); commands.add(temp); } nextSection = findMatch(fillForm, session, commands); } if (nextSection == null && flowScript != null && !flowScript.trim().equals("")) { ArrayList<String> commands = new ArrayList<String>(); StringTokenizer st = new StringTokenizer(flowScript, ";"); while (st.hasMoreElements()) { String temp = st.nextToken(); if (temp.indexOf("\r\n") != -1) temp = temp.substring(temp.indexOf("\r\n") + 2); commands.add(temp); } nextSection = findMatch(fillForm, session, commands); } } catch (Exception e) { logger.error(e.getMessage()); nextSection = null; } int pos; if (nextSection == null) pos = (fillForm.getNextPos() == null) ? 0 : Integer .parseInt(fillForm.getNextPos()); else pos = Integer.parseInt(nextSection); int size = survey.getSections().size(); int result = size - pos; if (result > 1) { if (nextSection == null) flowDTO = new FlowManageDTO(NEXT, SUBMIT, pos + 1); else flowDTO = new FlowManageDTO(NEXT, SUBMIT, pos); } else { if (nextSection == null) flowDTO = new FlowManageDTO(FINISH, CLOSE, pos + 1); else flowDTO = new FlowManageDTO(FINISH, CLOSE, pos); } return flowDTO; }
flowDTO = new FlowManageDTO(FINISH, CLOSE, pos);
flowDTO = new FlowManageDTO(FINISH, CLOSE, pos + 1);
public FlowManageDTO getNextStep(FillForm fillForm, HttpSession session) { FlowManageDTO flowDTO = null; Survey survey = (Survey) session.getAttribute("CurrentClientSurvey"); Section section = (Section) session.getAttribute("CurrentClientSection"); String quotaScript = section.getQuotaMgmtScript(); String flowScript = section.getFlowMgmtScript(); String nextSection = null; try { if (quotaScript != null && !quotaScript.trim().equals("")) { ArrayList<String> commands = new ArrayList<String>(); StringTokenizer st = new StringTokenizer(quotaScript, ";"); while (st.hasMoreElements()) { String temp = st.nextToken(); if (temp.indexOf("\r\n") != -1) temp = temp.substring(temp.indexOf("\r\n") + 2); commands.add(temp); } nextSection = findMatch(fillForm, session, commands); } if (nextSection == null && flowScript != null && !flowScript.trim().equals("")) { ArrayList<String> commands = new ArrayList<String>(); StringTokenizer st = new StringTokenizer(flowScript, ";"); while (st.hasMoreElements()) { String temp = st.nextToken(); if (temp.indexOf("\r\n") != -1) temp = temp.substring(temp.indexOf("\r\n") + 2); commands.add(temp); } nextSection = findMatch(fillForm, session, commands); } } catch (Exception e) { logger.error(e.getMessage()); nextSection = null; } int pos; if (nextSection == null) pos = (fillForm.getNextPos() == null) ? 0 : Integer .parseInt(fillForm.getNextPos()); else pos = Integer.parseInt(nextSection); int size = survey.getSections().size(); int result = size - pos; if (result > 1) { if (nextSection == null) flowDTO = new FlowManageDTO(NEXT, SUBMIT, pos + 1); else flowDTO = new FlowManageDTO(NEXT, SUBMIT, pos); } else { if (nextSection == null) flowDTO = new FlowManageDTO(FINISH, CLOSE, pos + 1); else flowDTO = new FlowManageDTO(FINISH, CLOSE, pos); } return flowDTO; }
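The three getNextStep edits make the script-driven path use the same indexing as the default path: the section number produced by the script is treated as one-based and converted to a zero-based pos (hence the "- 1"), and the FlowManageDTO always receives pos + 1, just as in the no-match case. The visible effect is on the NEXT/FINISH decision, since result = size - pos is now computed from a zero-based position on both paths. Below is a small hedged walk-through with invented numbers; the class name is only for illustration.

    // Hedged walk-through; mirrors the arithmetic in getNextStep after the change.
    public class FlowIndexingDemo {
        public static void main(String[] args) {
            int size = 4;                     // survey.getSections().size()
            String nextSection = "3";         // one-based section number from the script

            int pos = Integer.parseInt(nextSection) - 1;  // zero-based: 2
            int result = size - pos;                      // 4 - 2 = 2

            // result > 1, so the flow stays on NEXT/SUBMIT and the DTO carries the
            // one-based target: new FlowManageDTO(NEXT, SUBMIT, pos + 1) -> 3.
            // With the old code (pos = 3, result = 1) the same jump would have been
            // treated as the final section and produced FINISH/CLOSE instead.
            System.out.println("pos=" + pos + " result=" + result + " dtoPos=" + (pos + 1));
        }
    }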
protected int flushScalarStreamSegment (int leftToRead, int bytesToRead)
private void flushScalarStreamSegment ( boolean lastSegment, OutputStream out)
protected int flushScalarStreamSegment (int leftToRead, int bytesToRead) throws DRDAProtocolException { int newBytesToRead = bytesToRead; // either at end of data, end of dss segment, or both. if (leftToRead != 0) { // 32k segment filled and not at end of data. if ((Math.min (2 + leftToRead, 32767)) > (bytes.length - offset)) { try { // Mark current DSS as continued, set its chaining state, // then send the data across. markDssAsContinued(true); // true => for lobs sendBytes (agent.getOutputStream()); } catch (java.io.IOException ioe) { agent.markCommunicationsFailure ("DDMWriter.flushScalarStreamSegment()", "", ioe.getMessage(), "*"); } } else { // DSS is full, but we still have space in the buffer. So // end the DSS, then start the next DSS right after it. endDss(false); // false => don't finalize length. } // Prepare a DSS continuation header for next DSS. dssLengthLocation = offset; bytes[offset++] = (byte) (0xff); bytes[offset++] = (byte) (0xff); newBytesToRead = Math.min (leftToRead,32765); isContinuationDss = true; } else { // we're done writing the data, so end the DSS. endDss(); } return newBytesToRead; }
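Before the rewrite, flushScalarStreamSegment was driven by byte counts: the caller reported how much of the stream remained (leftToRead), and the method returned how many bytes could go into the next continuation segment, capped at 32,765 because two of the 32,767 bytes of a DSS segment are taken by the 0xFFFF continuation-length placeholder. A minimal, hedged restatement of that arithmetic follows; the constant and class names are invented, the numbers come from the context above.

    // Hedged restatement of the old segment-size arithmetic.
    public class DssSegmentMath {
        static int nextSegmentPayload(int leftToRead) {
            final int maxDss = 32767;          // largest DSS segment, length field included
            final int continuationHeader = 2;  // the two 0xFF placeholder bytes per segment
            return Math.min(leftToRead, maxDss - continuationHeader);  // at most 32765
        }

        public static void main(String[] args) {
            System.out.println(nextSegmentPayload(100_000)); // 32765
            System.out.println(nextSegmentPayload(1_000));   // 1000
        }
    }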
int newBytesToRead = bytesToRead;
protected int flushScalarStreamSegment (int leftToRead, int bytesToRead) throws DRDAProtocolException { int newBytesToRead = bytesToRead; // either at end of data, end of dss segment, or both. if (leftToRead != 0) { // 32k segment filled and not at end of data. if ((Math.min (2 + leftToRead, 32767)) > (bytes.length - offset)) { try { // Mark current DSS as continued, set its chaining state, // then send the data across. markDssAsContinued(true); // true => for lobs sendBytes (agent.getOutputStream()); } catch (java.io.IOException ioe) { agent.markCommunicationsFailure ("DDMWriter.flushScalarStreamSegment()", "", ioe.getMessage(), "*"); } } else { // DSS is full, but we still have space in the buffer. So // end the DSS, then start the next DSS right after it. endDss(false); // false => don't finalize length. } // Prepare a DSS continuation header for next DSS. dssLengthLocation = offset; bytes[offset++] = (byte) (0xff); bytes[offset++] = (byte) (0xff); newBytesToRead = Math.min (leftToRead,32765); isContinuationDss = true; } else { // we're done writing the data, so end the DSS. endDss(); } return newBytesToRead; }
if (leftToRead != 0) {
if (! lastSegment) {
protected int flushScalarStreamSegment (int leftToRead, int bytesToRead) throws DRDAProtocolException { int newBytesToRead = bytesToRead; // either at end of data, end of dss segment, or both. if (leftToRead != 0) { // 32k segment filled and not at end of data. if ((Math.min (2 + leftToRead, 32767)) > (bytes.length - offset)) { try { // Mark current DSS as continued, set its chaining state, // then send the data across. markDssAsContinued(true); // true => for lobs sendBytes (agent.getOutputStream()); } catch (java.io.IOException ioe) { agent.markCommunicationsFailure ("DDMWriter.flushScalarStreamSegment()", "", ioe.getMessage(), "*"); } } else { // DSS is full, but we still have space in the buffer. So // end the DSS, then start the next DSS right after it. endDss(false); // false => don't finalize length. } // Prepare a DSS continuation header for next DSS. dssLengthLocation = offset; bytes[offset++] = (byte) (0xff); bytes[offset++] = (byte) (0xff); newBytesToRead = Math.min (leftToRead,32765); isContinuationDss = true; } else { // we're done writing the data, so end the DSS. endDss(); } return newBytesToRead; }
if ((Math.min (2 + leftToRead, 32767)) > (bytes.length - offset)) {
protected int flushScalarStreamSegment (int leftToRead, int bytesToRead) throws DRDAProtocolException { int newBytesToRead = bytesToRead; // either at end of data, end of dss segment, or both. if (leftToRead != 0) { // 32k segment filled and not at end of data. if ((Math.min (2 + leftToRead, 32767)) > (bytes.length - offset)) { try { // Mark current DSS as continued, set its chaining state, // then send the data across. markDssAsContinued(true); // true => for lobs sendBytes (agent.getOutputStream()); } catch (java.io.IOException ioe) { agent.markCommunicationsFailure ("DDMWriter.flushScalarStreamSegment()", "", ioe.getMessage(), "*"); } } else { // DSS is full, but we still have space in the buffer. So // end the DSS, then start the next DSS right after it. endDss(false); // false => don't finalize length. } // Prepare a DSS continuation header for next DSS. dssLengthLocation = offset; bytes[offset++] = (byte) (0xff); bytes[offset++] = (byte) (0xff); newBytesToRead = Math.min (leftToRead,32765); isContinuationDss = true; } else { // we're done writing the data, so end the DSS. endDss(); } return newBytesToRead; }
sendBytes (agent.getOutputStream()); } catch (java.io.IOException ioe) {
sendBytes (out, false); }catch (java.io.IOException ioe) {
protected int flushScalarStreamSegment (int leftToRead, int bytesToRead) throws DRDAProtocolException { int newBytesToRead = bytesToRead; // either at end of data, end of dss segment, or both. if (leftToRead != 0) { // 32k segment filled and not at end of data. if ((Math.min (2 + leftToRead, 32767)) > (bytes.length - offset)) { try { // Mark current DSS as continued, set its chaining state, // then send the data across. markDssAsContinued(true); // true => for lobs sendBytes (agent.getOutputStream()); } catch (java.io.IOException ioe) { agent.markCommunicationsFailure ("DDMWriter.flushScalarStreamSegment()", "", ioe.getMessage(), "*"); } } else { // DSS is full, but we still have space in the buffer. So // end the DSS, then start the next DSS right after it. endDss(false); // false => don't finalize length. } // Prepare a DSS continuation header for next DSS. dssLengthLocation = offset; bytes[offset++] = (byte) (0xff); bytes[offset++] = (byte) (0xff); newBytesToRead = Math.min (leftToRead,32765); isContinuationDss = true; } else { // we're done writing the data, so end the DSS. endDss(); } return newBytesToRead; }
} else { endDss(false); }
protected int flushScalarStreamSegment (int leftToRead, int bytesToRead) throws DRDAProtocolException { int newBytesToRead = bytesToRead; // either at end of data, end of dss segment, or both. if (leftToRead != 0) { // 32k segment filled and not at end of data. if ((Math.min (2 + leftToRead, 32767)) > (bytes.length - offset)) { try { // Mark current DSS as continued, set its chaining state, // then send the data across. markDssAsContinued(true); // true => for lobs sendBytes (agent.getOutputStream()); } catch (java.io.IOException ioe) { agent.markCommunicationsFailure ("DDMWriter.flushScalarStreamSegment()", "", ioe.getMessage(), "*"); } } else { // DSS is full, but we still have space in the buffer. So // end the DSS, then start the next DSS right after it. endDss(false); // false => don't finalize length. } // Prepare a DSS continuation header for next DSS. dssLengthLocation = offset; bytes[offset++] = (byte) (0xff); bytes[offset++] = (byte) (0xff); newBytesToRead = Math.min (leftToRead,32765); isContinuationDss = true; } else { // we're done writing the data, so end the DSS. endDss(); } return newBytesToRead; }
newBytesToRead = Math.min (leftToRead,32765);
protected int flushScalarStreamSegment (int leftToRead, int bytesToRead) throws DRDAProtocolException { int newBytesToRead = bytesToRead; // either at end of data, end of dss segment, or both. if (leftToRead != 0) { // 32k segment filled and not at end of data. if ((Math.min (2 + leftToRead, 32767)) > (bytes.length - offset)) { try { // Mark current DSS as continued, set its chaining state, // then send the data across. markDssAsContinued(true); // true => for lobs sendBytes (agent.getOutputStream()); } catch (java.io.IOException ioe) { agent.markCommunicationsFailure ("DDMWriter.flushScalarStreamSegment()", "", ioe.getMessage(), "*"); } } else { // DSS is full, but we still have space in the buffer. So // end the DSS, then start the next DSS right after it. endDss(false); // false => don't finalize length. } // Prepare a DSS continuation header for next DSS. dssLengthLocation = offset; bytes[offset++] = (byte) (0xff); bytes[offset++] = (byte) (0xff); newBytesToRead = Math.min (leftToRead,32765); isContinuationDss = true; } else { // we're done writing the data, so end the DSS. endDss(); } return newBytesToRead; }
} return newBytesToRead;
protected int flushScalarStreamSegment (int leftToRead, int bytesToRead) throws DRDAProtocolException { int newBytesToRead = bytesToRead; // either at end of data, end of dss segment, or both. if (leftToRead != 0) { // 32k segment filled and not at end of data. if ((Math.min (2 + leftToRead, 32767)) > (bytes.length - offset)) { try { // Mark current DSS as continued, set its chaining state, // then send the data across. markDssAsContinued(true); // true => for lobs sendBytes (agent.getOutputStream()); } catch (java.io.IOException ioe) { agent.markCommunicationsFailure ("DDMWriter.flushScalarStreamSegment()", "", ioe.getMessage(), "*"); } } else { // DSS is full, but we still have space in the buffer. So // end the DSS, then start the next DSS right after it. endDss(false); // false => don't finalize length. } // Prepare a DSS continuation header for next DSS. dssLengthLocation = offset; bytes[offset++] = (byte) (0xff); bytes[offset++] = (byte) (0xff); newBytesToRead = Math.min (leftToRead,32765); isContinuationDss = true; } else { // we're done writing the data, so end the DSS. endDss(); } return newBytesToRead; }
}
protected int flushScalarStreamSegment (int leftToRead, int bytesToRead) throws DRDAProtocolException { int newBytesToRead = bytesToRead; // either at end of data, end of dss segment, or both. if (leftToRead != 0) { // 32k segment filled and not at end of data. if ((Math.min (2 + leftToRead, 32767)) > (bytes.length - offset)) { try { // Mark current DSS as continued, set its chaining state, // then send the data across. markDssAsContinued(true); // true => for lobs sendBytes (agent.getOutputStream()); } catch (java.io.IOException ioe) { agent.markCommunicationsFailure ("DDMWriter.flushScalarStreamSegment()", "", ioe.getMessage(), "*"); } } else { // DSS is full, but we still have space in the buffer. So // end the DSS, then start the next DSS right after it. endDss(false); // false => don't finalize length. } // Prepare a DSS continuation header for next DSS. dssLengthLocation = offset; bytes[offset++] = (byte) (0xff); bytes[offset++] = (byte) (0xff); newBytesToRead = Math.min (leftToRead,32765); isContinuationDss = true; } else { // we're done writing the data, so end the DSS. endDss(); } return newBytesToRead; }
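Taken together, the flushScalarStreamSegment hunks replace the count-based contract with a simpler one: the caller now passes the OutputStream and a flag saying whether the segment just filled is the last one, and the method either ships the continued DSS and starts a 0xFFFF continuation header, or ends the DSS; nothing is returned. The sketch below is a rough reconstruction assembled from the add lines and the surviving context. It is meant to live inside DDMWriter and reuse fields that already appear there (bytes, offset, dssLengthLocation, isContinuationDss, agent); the real new body may differ in detail.

    // Hedged reconstruction, not the verbatim new method.
    private void flushScalarStreamSegment(boolean lastSegment, java.io.OutputStream out)
            throws DRDAProtocolException {
        if (!lastSegment) {
            try {
                // Mark the current DSS as continued, then push it to the client.
                markDssAsContinued(true);   // true => for lobs
                sendBytes(out, false);
            } catch (java.io.IOException ioe) {
                agent.markCommunicationsFailure(
                    "DDMWriter.flushScalarStreamSegment()", "", ioe.getMessage(), "*");
            }
            // Start the next continuation DSS with a two-byte 0xFFFF length placeholder.
            dssLengthLocation = offset;
            bytes[offset++] = (byte) 0xff;
            bytes[offset++] = (byte) 0xff;
            isContinuationDss = true;
        } else {
            // Final segment: close out the DSS.
            endDss();
        }
    }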
protected int prepScalarStream (boolean chainedWithSameCorrelator,
private int prepScalarStream( boolean chainedWithSameCorrelator,
protected int prepScalarStream (boolean chainedWithSameCorrelator, int codePoint, boolean writeNullByte, int leftToRead) throws DRDAProtocolException { int extendedLengthByteCount; int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize); // flush the existing DSS segment if this stream will not fit in the send buffer if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) { try { // The existing DSS segment was finalized by endDss; all // we have to do is send it across the wire. sendBytes(agent.getOutputStream()); } catch (java.io.IOException e) { agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "OutputStream.flush()", e.getMessage(),"*"); } } // buildStreamDss should not call ensure length. beginDss(chainedWithSameCorrelator, DssConstants.GDSFMT_OBJDSS); if (extendedLengthByteCount > 0) { // method should never ensure length writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint); if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); } // write the null byte, if necessary if (writeNullByte) writeByte(0x0); int bytesToRead; if (writeNullByte) bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount); else bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount); return bytesToRead; }
boolean writeNullByte, int leftToRead) throws DRDAProtocolException
boolean writeNullByte) throws DRDAProtocolException
protected int prepScalarStream (boolean chainedWithSameCorrelator, int codePoint, boolean writeNullByte, int leftToRead) throws DRDAProtocolException { int extendedLengthByteCount; int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize); // flush the existing DSS segment if this stream will not fit in the send buffer if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) { try { // The existing DSS segment was finalized by endDss; all // we have to do is send it across the wire. sendBytes(agent.getOutputStream()); } catch (java.io.IOException e) { agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "OutputStream.flush()", e.getMessage(),"*"); } } // buildStreamDss should not call ensure length. beginDss(chainedWithSameCorrelator, DssConstants.GDSFMT_OBJDSS); if (extendedLengthByteCount > 0) { // method should never ensure length writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint); if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); } // write the null byte, if necessary if (writeNullByte) writeByte(0x0); int bytesToRead; if (writeNullByte) bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount); else bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount); return bytesToRead; }
int extendedLengthByteCount;
protected int prepScalarStream (boolean chainedWithSameCorrelator, int codePoint, boolean writeNullByte, int leftToRead) throws DRDAProtocolException { int extendedLengthByteCount; int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize); // flush the existing DSS segment if this stream will not fit in the send buffer if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) { try { // The existing DSS segment was finalized by endDss; all // we have to do is send it across the wire. sendBytes(agent.getOutputStream()); } catch (java.io.IOException e) { agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "OutputStream.flush()", e.getMessage(),"*"); } } // buildStreamDss should not call ensure length. beginDss(chainedWithSameCorrelator, DssConstants.GDSFMT_OBJDSS); if (extendedLengthByteCount > 0) { // method should never ensure length writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint); if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); } // write the null byte, if necessary if (writeNullByte) writeByte(0x0); int bytesToRead; if (writeNullByte) bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount); else bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount); return bytesToRead; }
int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize);
ensureLength( DEFAULT_BUFFER_SIZE - offset ); final int nullIndicatorSize = writeNullByte ? 1:0;
protected int prepScalarStream (boolean chainedWithSameCorrelator, int codePoint, boolean writeNullByte, int leftToRead) throws DRDAProtocolException { int extendedLengthByteCount; int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize); // flush the existing DSS segment if this stream will not fit in the send buffer if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) { try { // The existing DSS segment was finalized by endDss; all // we have to do is send it across the wire. sendBytes(agent.getOutputStream()); } catch (java.io.IOException e) { agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "OutputStream.flush()", e.getMessage(),"*"); } } // buildStreamDss should not call ensure length. beginDss(chainedWithSameCorrelator, DssConstants.GDSFMT_OBJDSS); if (extendedLengthByteCount > 0) { // method should never ensure length writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint); if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); } // write the null byte, if necessary if (writeNullByte) writeByte(0x0); int bytesToRead; if (writeNullByte) bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount); else bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount); return bytesToRead; }
if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) {
protected int prepScalarStream (boolean chainedWithSameCorrelator, int codePoint, boolean writeNullByte, int leftToRead) throws DRDAProtocolException { int extendedLengthByteCount; int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize); // flush the existing DSS segment if this stream will not fit in the send buffer if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) { try { // The existing DSS segment was finalized by endDss; all // we have to do is send it across the wire. sendBytes(agent.getOutputStream()); } catch (java.io.IOException e) { agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "OutputStream.flush()", e.getMessage(),"*"); } } // buildStreamDss should not call ensure length. beginDss(chainedWithSameCorrelator, DssConstants.GDSFMT_OBJDSS); if (extendedLengthByteCount > 0) { // method should never ensure length writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint); if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); } // write the null byte, if necessary if (writeNullByte) writeByte(0x0); int bytesToRead; if (writeNullByte) bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount); else bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount); return bytesToRead; }
}
protected int prepScalarStream (boolean chainedWithSameCorrelator, int codePoint, boolean writeNullByte, int leftToRead) throws DRDAProtocolException { int extendedLengthByteCount; int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize); // flush the existing DSS segment if this stream will not fit in the send buffer if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) { try { // The existing DSS segment was finalized by endDss; all // we have to do is send it across the wire. sendBytes(agent.getOutputStream()); } catch (java.io.IOException e) { agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "OutputStream.flush()", e.getMessage(),"*"); } } // buildStreamDss should not call ensure length. beginDss(chainedWithSameCorrelator, DssConstants.GDSFMT_OBJDSS); if (extendedLengthByteCount > 0) { // method should never ensure length writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint); if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); } // write the null byte, if necessary if (writeNullByte) writeByte(0x0); int bytesToRead; if (writeNullByte) bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount); else bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount); return bytesToRead; }
if (extendedLengthByteCount > 0) { writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint);
writeLengthCodePoint(0x8004,codePoint);
protected int prepScalarStream (boolean chainedWithSameCorrelator, int codePoint, boolean writeNullByte, int leftToRead) throws DRDAProtocolException { int extendedLengthByteCount; int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize); // flush the existing DSS segment if this stream will not fit in the send buffer if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) { try { // The existing DSS segment was finalized by endDss; all // we have to do is send it across the wire. sendBytes(agent.getOutputStream()); } catch (java.io.IOException e) { agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "OutputStream.flush()", e.getMessage(),"*"); } } // buildStreamDss should not call ensure length. beginDss(chainedWithSameCorrelator, DssConstants.GDSFMT_OBJDSS); if (extendedLengthByteCount > 0) { // method should never ensure length writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint); if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); } // write the null byte, if necessary if (writeNullByte) writeByte(0x0); int bytesToRead; if (writeNullByte) bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount); else bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount); return bytesToRead; }
if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); }
protected int prepScalarStream (boolean chainedWithSameCorrelator, int codePoint, boolean writeNullByte, int leftToRead) throws DRDAProtocolException { int extendedLengthByteCount; int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize); // flush the existing DSS segment if this stream will not fit in the send buffer if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) { try { // The existing DSS segment was finalized by endDss; all // we have to do is send it across the wire. sendBytes(agent.getOutputStream()); } catch (java.io.IOException e) { agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "OutputStream.flush()", e.getMessage(),"*"); } } // buildStreamDss should not call ensure length. beginDss(chainedWithSameCorrelator, DssConstants.GDSFMT_OBJDSS); if (extendedLengthByteCount > 0) { // method should never ensure length writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint); if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); } // write the null byte, if necessary if (writeNullByte) writeByte(0x0); int bytesToRead; if (writeNullByte) bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount); else bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount); return bytesToRead; }
int bytesToRead;
return DssConstants.MAX_DSS_LENGTH - 6 - 4 - nullIndicatorSize;
protected int prepScalarStream (boolean chainedWithSameCorrelator, int codePoint, boolean writeNullByte, int leftToRead) throws DRDAProtocolException { int extendedLengthByteCount; int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize); // flush the existing DSS segment if this stream will not fit in the send buffer if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) { try { // The existing DSS segment was finalized by endDss; all // we have to do is send it across the wire. sendBytes(agent.getOutputStream()); } catch (java.io.IOException e) { agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "OutputStream.flush()", e.getMessage(),"*"); } } // buildStreamDss should not call ensure length. beginDss(chainedWithSameCorrelator, DssConstants.GDSFMT_OBJDSS); if (extendedLengthByteCount > 0) { // method should never ensure length writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint); if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); } // write the null byte, if necessary if (writeNullByte) writeByte(0x0); int bytesToRead; if (writeNullByte) bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount); else bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount); return bytesToRead; }
if (writeNullByte)
    bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount);
else
    bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount);
protected int prepScalarStream (boolean chainedWithSameCorrelator, int codePoint, boolean writeNullByte, int leftToRead) throws DRDAProtocolException { int extendedLengthByteCount; int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize); // flush the existing DSS segment if this stream will not fit in the send buffer if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) { try { // The existing DSS segment was finalized by endDss; all // we have to do is send it across the wire. sendBytes(agent.getOutputStream()); } catch (java.io.IOException e) { agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "OutputStream.flush()", e.getMessage(),"*"); } } // buildStreamDss should not call ensure length. beginDss(chainedWithSameCorrelator, DssConstants.GDSFMT_OBJDSS); if (extendedLengthByteCount > 0) { // method should never ensure length writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint); if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); } // write the null byte, if necessary if (writeNullByte) writeByte(0x0); int bytesToRead; if (writeNullByte) bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount); else bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount); return bytesToRead; }
return bytesToRead;
protected int prepScalarStream (boolean chainedWithSameCorrelator, int codePoint, boolean writeNullByte, int leftToRead) throws DRDAProtocolException { int extendedLengthByteCount; int nullIndicatorSize = 0; if (writeNullByte) nullIndicatorSize = 1; extendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize); // flush the existing DSS segment if this stream will not fit in the send buffer if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) { try { // The existing DSS segment was finalized by endDss; all // we have to do is send it across the wire. sendBytes(agent.getOutputStream()); } catch (java.io.IOException e) { agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "OutputStream.flush()", e.getMessage(),"*"); } } // buildStreamDss should not call ensure length. beginDss(chainedWithSameCorrelator, DssConstants.GDSFMT_OBJDSS); if (extendedLengthByteCount > 0) { // method should never ensure length writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint); if (writeNullByte) writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1); else writeExtendedLengthBytes (extendedLengthByteCount, leftToRead); } else { if (writeNullByte) writeLengthCodePoint (leftToRead + 4 + 1, codePoint); else writeLengthCodePoint (leftToRead + 4, codePoint); } // write the null byte, if necessary if (writeNullByte) writeByte(0x0); int bytesToRead; if (writeNullByte) bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount); else bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount); return bytesToRead; }
private void sendBytes (java.io.OutputStream socketOutputStream) throws java.io.IOException
{
    resetChainState();
    try {
        socketOutputStream.write (bytes, 0, offset);
        socketOutputStream.flush();
private void sendBytes (java.io.OutputStream socketOutputStream) throws java.io.IOException{
    sendBytes(socketOutputStream, true);
private void sendBytes (java.io.OutputStream socketOutputStream) throws java.io.IOException { resetChainState(); try { socketOutputStream.write (bytes, 0, offset); socketOutputStream.flush(); } finally { if ((dssTrace != null) && dssTrace.isComBufferTraceOn()) { dssTrace.writeComBufferData (bytes, 0, offset, DssTrace.TYPE_TRACE_SEND, "Reply", "flush", 5); } clearBuffer(); } }
finally {
    if ((dssTrace != null) && dssTrace.isComBufferTraceOn()) {
        dssTrace.writeComBufferData (bytes, 0, offset, DssTrace.TYPE_TRACE_SEND, "Reply", "flush", 5);
    }
    clearBuffer();
}
}
private void sendBytes (java.io.OutputStream socketOutputStream) throws java.io.IOException { resetChainState(); try { socketOutputStream.write (bytes, 0, offset); socketOutputStream.flush(); } finally { if ((dssTrace != null) && dssTrace.isComBufferTraceOn()) { dssTrace.writeComBufferData (bytes, 0, offset, DssTrace.TYPE_TRACE_SEND, "Reply", "flush", 5); } clearBuffer(); } }
protected int writeScalarStream (boolean chainedWithSameCorrelator,
protected void writeScalarStream (boolean chainedWithSameCorrelator,
protected int writeScalarStream (boolean chainedWithSameCorrelator, int codePoint, int length, java.io.InputStream in, boolean writeNullByte) throws DRDAProtocolException { // Stream equivalent of "beginDss"... int leftToRead = length; int bytesToRead = prepScalarStream (chainedWithSameCorrelator, codePoint, writeNullByte, leftToRead); if (length == 0) return 0; // write the data int bytesRead = 0; int totalBytesRead = 0; do { do { try { bytesRead = in.read (bytes, offset, bytesToRead); totalBytesRead += bytesRead; } catch (java.io.IOException e) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } if (bytesRead == -1) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } else { bytesToRead -= bytesRead; offset += bytesRead; leftToRead -= bytesRead; } } while (bytesToRead > 0); bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead); } while (leftToRead > 0); // check to make sure that the specified length wasn't too small try { if (in.read() != -1) { totalBytesRead += 1; } } catch (java.io.IOException e) { // Encountered error in stream length verification for // InputStream, parameter #" + parameterIndex + ". // Don't think we need to error for this condition } return totalBytesRead; }
int length, java.io.InputStream in,
EXTDTAInputStream in,
protected int writeScalarStream (boolean chainedWithSameCorrelator, int codePoint, int length, java.io.InputStream in, boolean writeNullByte) throws DRDAProtocolException { // Stream equivalent of "beginDss"... int leftToRead = length; int bytesToRead = prepScalarStream (chainedWithSameCorrelator, codePoint, writeNullByte, leftToRead); if (length == 0) return 0; // write the data int bytesRead = 0; int totalBytesRead = 0; do { do { try { bytesRead = in.read (bytes, offset, bytesToRead); totalBytesRead += bytesRead; } catch (java.io.IOException e) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } if (bytesRead == -1) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } else { bytesToRead -= bytesRead; offset += bytesRead; leftToRead -= bytesRead; } } while (bytesToRead > 0); bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead); } while (leftToRead > 0); // check to make sure that the specified length wasn't too small try { if (in.read() != -1) { totalBytesRead += 1; } } catch (java.io.IOException e) { // Encountered error in stream length verification for // InputStream, parameter #" + parameterIndex + ". // Don't think we need to error for this condition } return totalBytesRead; }
int leftToRead = length;
int bytesToRead = prepScalarStream (chainedWithSameCorrelator,
int spareDssLength = prepScalarStream( chainedWithSameCorrelator,
protected int writeScalarStream (boolean chainedWithSameCorrelator, int codePoint, int length, java.io.InputStream in, boolean writeNullByte) throws DRDAProtocolException { // Stream equivalent of "beginDss"... int leftToRead = length; int bytesToRead = prepScalarStream (chainedWithSameCorrelator, codePoint, writeNullByte, leftToRead); if (length == 0) return 0; // write the data int bytesRead = 0; int totalBytesRead = 0; do { do { try { bytesRead = in.read (bytes, offset, bytesToRead); totalBytesRead += bytesRead; } catch (java.io.IOException e) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } if (bytesRead == -1) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } else { bytesToRead -= bytesRead; offset += bytesRead; leftToRead -= bytesRead; } } while (bytesToRead > 0); bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead); } while (leftToRead > 0); // check to make sure that the specified length wasn't too small try { if (in.read() != -1) { totalBytesRead += 1; } } catch (java.io.IOException e) { // Encountered error in stream length verification for // InputStream, parameter #" + parameterIndex + ". // Don't think we need to error for this condition } return totalBytesRead; }
writeNullByte, leftToRead);
if (length == 0)
    return 0;
writeNullByte);
protected int writeScalarStream (boolean chainedWithSameCorrelator, int codePoint, int length, java.io.InputStream in, boolean writeNullByte) throws DRDAProtocolException { // Stream equivalent of "beginDss"... int leftToRead = length; int bytesToRead = prepScalarStream (chainedWithSameCorrelator, codePoint, writeNullByte, leftToRead); if (length == 0) return 0; // write the data int bytesRead = 0; int totalBytesRead = 0; do { do { try { bytesRead = in.read (bytes, offset, bytesToRead); totalBytesRead += bytesRead; } catch (java.io.IOException e) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } if (bytesRead == -1) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } else { bytesToRead -= bytesRead; offset += bytesRead; leftToRead -= bytesRead; } } while (bytesToRead > 0); bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead); } while (leftToRead > 0); // check to make sure that the specified length wasn't too small try { if (in.read() != -1) { totalBytesRead += 1; } } catch (java.io.IOException e) { // Encountered error in stream length verification for // InputStream, parameter #" + parameterIndex + ". // Don't think we need to error for this condition } return totalBytesRead; }
do { do {
protected int writeScalarStream (boolean chainedWithSameCorrelator, int codePoint, int length, java.io.InputStream in, boolean writeNullByte) throws DRDAProtocolException { // Stream equivalent of "beginDss"... int leftToRead = length; int bytesToRead = prepScalarStream (chainedWithSameCorrelator, codePoint, writeNullByte, leftToRead); if (length == 0) return 0; // write the data int bytesRead = 0; int totalBytesRead = 0; do { do { try { bytesRead = in.read (bytes, offset, bytesToRead); totalBytesRead += bytesRead; } catch (java.io.IOException e) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } if (bytesRead == -1) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } else { bytesToRead -= bytesRead; offset += bytesRead; leftToRead -= bytesRead; } } while (bytesToRead > 0); bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead); } while (leftToRead > 0); // check to make sure that the specified length wasn't too small try { if (in.read() != -1) { totalBytesRead += 1; } } catch (java.io.IOException e) { // Encountered error in stream length verification for // InputStream, parameter #" + parameterIndex + ". // Don't think we need to error for this condition } return totalBytesRead; }
bytesRead = in.read (bytes, offset, bytesToRead);
OutputStream out = placeLayerBStreamingBuffer( agent.getOutputStream() );
boolean isLastSegment = false;
while( !isLastSegment ){
    int spareBufferLength = bytes.length - offset;
    if( SanityManager.DEBUG ){
        if( PropertyUtil.getSystemProperty("derby.debug.suicideOfLayerBStreaming") != null )
            throw new IOException();
    }
    bytesRead = in.read(bytes, offset, Math.min(spareDssLength, spareBufferLength));
protected int writeScalarStream (boolean chainedWithSameCorrelator, int codePoint, int length, java.io.InputStream in, boolean writeNullByte) throws DRDAProtocolException { // Stream equivalent of "beginDss"... int leftToRead = length; int bytesToRead = prepScalarStream (chainedWithSameCorrelator, codePoint, writeNullByte, leftToRead); if (length == 0) return 0; // write the data int bytesRead = 0; int totalBytesRead = 0; do { do { try { bytesRead = in.read (bytes, offset, bytesToRead); totalBytesRead += bytesRead; } catch (java.io.IOException e) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } if (bytesRead == -1) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } else { bytesToRead -= bytesRead; offset += bytesRead; leftToRead -= bytesRead; } } while (bytesToRead > 0); bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead); } while (leftToRead > 0); // check to make sure that the specified length wasn't too small try { if (in.read() != -1) { totalBytesRead += 1; } } catch (java.io.IOException e) { // Encountered error in stream length verification for // InputStream, parameter #" + parameterIndex + ". // Don't think we need to error for this condition } return totalBytesRead; }
} catch (java.io.IOException e) {
    padScalarStreamForError (leftToRead, bytesToRead);
    return totalBytesRead;
}
if (bytesRead == -1) {
    padScalarStreamForError (leftToRead, bytesToRead);
    return totalBytesRead;
}
else {
    bytesToRead -= bytesRead;
protected int writeScalarStream (boolean chainedWithSameCorrelator, int codePoint, int length, java.io.InputStream in, boolean writeNullByte) throws DRDAProtocolException { // Stream equivalent of "beginDss"... int leftToRead = length; int bytesToRead = prepScalarStream (chainedWithSameCorrelator, codePoint, writeNullByte, leftToRead); if (length == 0) return 0; // write the data int bytesRead = 0; int totalBytesRead = 0; do { do { try { bytesRead = in.read (bytes, offset, bytesToRead); totalBytesRead += bytesRead; } catch (java.io.IOException e) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } if (bytesRead == -1) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } else { bytesToRead -= bytesRead; offset += bytesRead; leftToRead -= bytesRead; } } while (bytesToRead > 0); bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead); } while (leftToRead > 0); // check to make sure that the specified length wasn't too small try { if (in.read() != -1) { totalBytesRead += 1; } } catch (java.io.IOException e) { // Encountered error in stream length verification for // InputStream, parameter #" + parameterIndex + ". // Don't think we need to error for this condition } return totalBytesRead; }
    leftToRead -= bytesRead;
}
} while (bytesToRead > 0);
spareDssLength -= bytesRead;
spareBufferLength -= bytesRead;
protected int writeScalarStream (boolean chainedWithSameCorrelator, int codePoint, int length, java.io.InputStream in, boolean writeNullByte) throws DRDAProtocolException { // Stream equivalent of "beginDss"... int leftToRead = length; int bytesToRead = prepScalarStream (chainedWithSameCorrelator, codePoint, writeNullByte, leftToRead); if (length == 0) return 0; // write the data int bytesRead = 0; int totalBytesRead = 0; do { do { try { bytesRead = in.read (bytes, offset, bytesToRead); totalBytesRead += bytesRead; } catch (java.io.IOException e) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } if (bytesRead == -1) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } else { bytesToRead -= bytesRead; offset += bytesRead; leftToRead -= bytesRead; } } while (bytesToRead > 0); bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead); } while (leftToRead > 0); // check to make sure that the specified length wasn't too small try { if (in.read() != -1) { totalBytesRead += 1; } } catch (java.io.IOException e) { // Encountered error in stream length verification for // InputStream, parameter #" + parameterIndex + ". // Don't think we need to error for this condition } return totalBytesRead; }
    bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead);
} while (leftToRead > 0);
try {
    if (in.read() != -1) {
        totalBytesRead += 1;
isLastSegment = peekStream(in) < 0;
if(isLastSegment || spareDssLength == 0){
    flushScalarStreamSegment (isLastSegment, out);
    if( ! isLastSegment )
        spareDssLength = DssConstants.MAX_DSS_LENGTH - 2;
protected int writeScalarStream (boolean chainedWithSameCorrelator, int codePoint, int length, java.io.InputStream in, boolean writeNullByte) throws DRDAProtocolException { // Stream equivalent of "beginDss"... int leftToRead = length; int bytesToRead = prepScalarStream (chainedWithSameCorrelator, codePoint, writeNullByte, leftToRead); if (length == 0) return 0; // write the data int bytesRead = 0; int totalBytesRead = 0; do { do { try { bytesRead = in.read (bytes, offset, bytesToRead); totalBytesRead += bytesRead; } catch (java.io.IOException e) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } if (bytesRead == -1) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } else { bytesToRead -= bytesRead; offset += bytesRead; leftToRead -= bytesRead; } } while (bytesToRead > 0); bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead); } while (leftToRead > 0); // check to make sure that the specified length wasn't too small try { if (in.read() != -1) { totalBytesRead += 1; } } catch (java.io.IOException e) { // Encountered error in stream length verification for // InputStream, parameter #" + parameterIndex + ". // Don't think we need to error for this condition } return totalBytesRead; }
catch (java.io.IOException e) {
    out.flush();
}catch(IOException e){
    agent.markCommunicationsFailure ("DDMWriter.writeScalarStream()", "", e.getMessage(), "*");
protected int writeScalarStream (boolean chainedWithSameCorrelator, int codePoint, int length, java.io.InputStream in, boolean writeNullByte) throws DRDAProtocolException { // Stream equivalent of "beginDss"... int leftToRead = length; int bytesToRead = prepScalarStream (chainedWithSameCorrelator, codePoint, writeNullByte, leftToRead); if (length == 0) return 0; // write the data int bytesRead = 0; int totalBytesRead = 0; do { do { try { bytesRead = in.read (bytes, offset, bytesToRead); totalBytesRead += bytesRead; } catch (java.io.IOException e) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } if (bytesRead == -1) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } else { bytesToRead -= bytesRead; offset += bytesRead; leftToRead -= bytesRead; } } while (bytesToRead > 0); bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead); } while (leftToRead > 0); // check to make sure that the specified length wasn't too small try { if (in.read() != -1) { totalBytesRead += 1; } } catch (java.io.IOException e) { // Encountered error in stream length verification for // InputStream, parameter #" + parameterIndex + ". // Don't think we need to error for this condition } return totalBytesRead; }
return totalBytesRead;
protected int writeScalarStream (boolean chainedWithSameCorrelator, int codePoint, int length, java.io.InputStream in, boolean writeNullByte) throws DRDAProtocolException { // Stream equivalent of "beginDss"... int leftToRead = length; int bytesToRead = prepScalarStream (chainedWithSameCorrelator, codePoint, writeNullByte, leftToRead); if (length == 0) return 0; // write the data int bytesRead = 0; int totalBytesRead = 0; do { do { try { bytesRead = in.read (bytes, offset, bytesToRead); totalBytesRead += bytesRead; } catch (java.io.IOException e) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } if (bytesRead == -1) { padScalarStreamForError (leftToRead, bytesToRead); return totalBytesRead; } else { bytesToRead -= bytesRead; offset += bytesRead; leftToRead -= bytesRead; } } while (bytesToRead > 0); bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead); } while (leftToRead > 0); // check to make sure that the specified length wasn't too small try { if (in.read() != -1) { totalBytesRead += 1; } } catch (java.io.IOException e) { // Encountered error in stream length verification for // InputStream, parameter #" + parameterIndex + ". // Don't think we need to error for this condition } return totalBytesRead; }
if (isPrivilegeCollectionRequired())
{
    cc.pushCurrentPrivType(Authorizer.SELECT_PRIV);
    cc.addRequiredTablePriv(lockTableDescriptor);
    cc.popCurrentPrivType();
}
public QueryTreeNode bind() throws StandardException { CompilerContext cc = getCompilerContext(); ConglomerateDescriptor cd; DataDictionary dd = getDataDictionary(); SchemaDescriptor sd; String schemaName = tableName.getSchemaName(); sd = getSchemaDescriptor(schemaName); // Users are not allowed to lock system tables if (sd.isSystemSchema()) { throw StandardException.newException(SQLState.LANG_NO_USER_DDL_IN_SYSTEM_SCHEMA, statementToString(), schemaName); } lockTableDescriptor = getTableDescriptor(tableName.getTableName(), sd); if (lockTableDescriptor == null) { // Check if the reference is for a synonym. TableName synonymTab = resolveTableToSynonym(tableName); if (synonymTab == null) throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND, tableName); tableName = synonymTab; sd = getSchemaDescriptor(tableName.getSchemaName()); lockTableDescriptor = getTableDescriptor(synonymTab.getTableName(), sd); if (lockTableDescriptor == null) throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND, tableName); } //throw an exception if user is attempting to lock a temporary table if (lockTableDescriptor.getTableType() == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) { throw StandardException.newException(SQLState.LANG_NOT_ALLOWED_FOR_DECLARED_GLOBAL_TEMP_TABLE); } conglomerateNumber = lockTableDescriptor.getHeapConglomerateId(); /* Get the base conglomerate descriptor */ cd = lockTableDescriptor.getConglomerateDescriptor(conglomerateNumber); /* Statement is dependent on the TableDescriptor and ConglomerateDescriptor */ cc.createDependency(lockTableDescriptor); cc.createDependency(cd); return this; }
throw new IOException(ALREADY_CLOSED_ERR_MEASSAGE);
throw new IOException(ALREADY_CLOSED_ERR_MESSAGE);
public int available() throws IOException{ if(closed){ throw new IOException(ALREADY_CLOSED_ERR_MEASSAGE); } return super.available(); }
throw new IOException(ALREADY_CLOSED_ERR_MEASSAGE);
throw new IOException(ALREADY_CLOSED_ERR_MESSAGE);
public int read() throws IOException { if(closed){ throw new IOException(ALREADY_CLOSED_ERR_MEASSAGE); } return super.read(); }
throw new IOException(ALREADY_CLOSED_ERR_MEASSAGE);
throw new IOException(ALREADY_CLOSED_ERR_MESSAGE);
public long skip(long n) throws IOException{ if(closed){ throw new IOException(ALREADY_CLOSED_ERR_MEASSAGE); } return super.skip(n); }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_getHoldability() { try { int i = rs.getHoldability(); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_getNClob1() { try { rs.getNClob(0); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_getNClob2() { try { rs.getNClob(null); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_getRowId1() { try { rs.getRowId(0); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_getRowId2(){ try { rs.getRowId(null); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_getSQLXML1() { try { rs.getSQLXML(0); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_getSQLXML2() { try { rs.getSQLXML(null); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_isClosed(){ try { boolean b = rs.isClosed(); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_updateNClob1() { try { rs.updateNClob(0,null); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_updateNClob2() { try { rs.updateNClob(null,null); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_updateNString1() { try { rs.updateNString(0,null); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_updateNString2() { try { rs.updateNString(null,null); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_updateRowId1() { try { rs.updateRowId(0,null); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
if(!message.equals(e.getMessage())) {
if(SQLState.NOT_IMPLEMENTED.equals (e.getSQLState())) {
void t_updateRowId2(){ try { rs.updateRowId(null,null); System.out.println("unImplemented Exception not thrown in code"); } catch(SQLException e) { if(!message.equals(e.getMessage())) { System.out.println("Unexpected SQLException"+e); } } catch(Exception e) { System.out.println("Unexpected exception caught"+e); e.printStackTrace(); } }
s.execute("CREATE PROCEDURE NONAME(TIMESTAMP, IN P2 VARCHAR(10)) LANGUAGE JAVA PARAMETER STYLE JAVA EXTERNAL NAME 'org.apache.derbyTesting.functionTests.util.ProcedureTest.noname2'"); s.execute("{call noname(current_timestamp, 'foo')}"); s.execute("DROP PROCEDURE NONAME");
public static void testNoParameterNames(Connection conn) throws SQLException { System.out.println("testNoParameterNames"); Statement s = conn.createStatement(); s.execute("CREATE PROCEDURE NONAME(IN INT, IN VARCHAR(10)) LANGUAGE JAVA PARAMETER STYLE JAVA EXTERNAL NAME 'org.apache.derbyTesting.functionTests.util.ProcedureTest.noname'"); s.execute("{call noname(1, 'foo')}"); s.execute("DROP PROCEDURE NONAME"); s.execute("CREATE PROCEDURE NONAME(IN INT, IN P2 VARCHAR(10)) LANGUAGE JAVA PARAMETER STYLE JAVA EXTERNAL NAME 'org.apache.derbyTesting.functionTests.util.ProcedureTest.noname'"); s.execute("{call noname(1, 'foo')}"); s.execute("DROP PROCEDURE NONAME"); }
byte wakeupReason = waitingLock.waitForGrant(actualTimeout);
ActiveLock nextWaitingLock = null;
Object[] deadlockData = null;
try {
    boolean willQuitWait;
    Enumeration timeoutLockTable = null;
    long currentTime = 0;
    synchronized (this) {
        if (control.isGrantable(control.firstWaiter() == waitingLock, compatabilitySpace, qualifier)) {
            control.grant(waitingLock);
            nextWaitingLock = control.getNextWaiter(waitingLock, true, this);
            if (latch != null) {
                lockObject(
                    compatabilitySpace, latch.getLockable(), latch.getQualifier(),
                    C_LockFactory.WAIT_FOREVER, (Latch) null);
            }
            return waitingLock;
        }
        waitingLock.clearPotentiallyGranted();
        willQuitWait = (wakeupReason != Constants.WAITING_LOCK_GRANT);
        StandardException deadlockException = null;
        if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) && deadlockWait) ||
            (wakeupReason == Constants.WAITING_LOCK_DEADLOCK)) {
            deadlockData = Deadlock.look(factory, this, control, waitingLock, wakeupReason);
            if (deadlockData == null) {
                deadlockWait = false;
                actualTimeout = timeout;
                startWaitTime = 0;
                willQuitWait = false;
            } else {
                willQuitWait = true;
            }
        }
        nextWaitingLock = control.getNextWaiter(waitingLock, willQuitWait, this);
        if (willQuitWait) {
            if (latch != null) {
                lockObject(
                    compatabilitySpace, latch.getLockable(), latch.getQualifier(),
                    C_LockFactory.WAIT_FOREVER, (Latch) null);
            }
            if (SanityManager.DEBUG)
byte wakeupReason = waitingLock.waitForGrant(actualTimeout);
ActiveLock nextWaitingLock = null;
Object[] deadlockData = null;
try {
    boolean willQuitWait;
    Enumeration timeoutLockTable = null;
    long currentTime = 0;
    synchronized (this) {
        if (control.isGrantable(
                control.firstWaiter() == waitingLock,
                compatabilitySpace, qualifier)) {
            control.grant(waitingLock);
            nextWaitingLock = control.getNextWaiter(waitingLock, true, this);
            if (latch != null) {
                lockObject(
                    compatabilitySpace, latch.getLockable(), latch.getQualifier(),
                    C_LockFactory.WAIT_FOREVER, (Latch) null);
            }
            return waitingLock;
        }
        waitingLock.clearPotentiallyGranted();
        willQuitWait = (wakeupReason != Constants.WAITING_LOCK_GRANT);
        StandardException deadlockException = null;
        if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) && deadlockWait) ||
            (wakeupReason == Constants.WAITING_LOCK_DEADLOCK))
public Lock lockObject(Object compatabilitySpace, Lockable ref, Object qualifier, int timeout, Latch latch) throws StandardException { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("memoryLeakTrace")) { if (size() > 1000) System.out.println("memoryLeakTrace:LockSet: " + size()); } } Control gc; LockControl control; Lock lockItem; String lockDebug = null; synchronized (this) { gc = getControl(ref); if (gc == null) { // object is not locked, can be granted Lock gl = new Lock(compatabilitySpace, ref, qualifier); gl.grant(); put(ref, gl); return gl; } control = gc.getLockControl(); if (control != gc) { put(ref, control); } if (SanityManager.DEBUG) { SanityManager.ASSERT(ref.equals(control.getLockable())); // ASSERT item is in the list if (getControl(control.getLockable()) != control) { SanityManager.THROWASSERT( "lockObject mismatched lock items " + getControl(control.getLockable()) + " " + control); } } lockItem = control.addLock(this, compatabilitySpace, qualifier); if (lockItem.getCount() != 0) { return lockItem; } if (timeout == C_LockFactory.NO_WAIT) { // remove all trace of lock control.giveUpWait(lockItem, this); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(lockItem) + "\nCould not grant lock with zero timeout, here's the table" + this.toDebugString(); } } return null; } // this is where we need to release the latch if (latch != null) unlock(latch, 1); } // synchronized block boolean deadlockWait = false; int actualTimeout; if (timeout == C_LockFactory.WAIT_FOREVER) { // always check for deadlocks as there should not be any deadlockWait = true; if ((actualTimeout = deadlockTimeout) == C_LockFactory.WAIT_FOREVER) actualTimeout = Property.DEADLOCK_TIMEOUT_DEFAULT * 1000; } else { if (timeout == C_LockFactory.TIMED_WAIT) timeout = actualTimeout = waitTimeout; else actualTimeout = timeout; // five posible cases // i) timeout -1, deadlock -1 -> just wait forever, no deadlock check // ii) timeout >= 0, deadlock -1 -> just wait for timeout, no deadlock check // iii) timeout -1, deadlock >= 0 -> wait for deadlock, then deadlock check, then infinite timeout // iv) timeout >=0, deadlock < timeout -> wait for deadlock, then deadlock check, then wait for (timeout - deadlock) // v) timeout >=0, deadlock >= timeout -> just wait for timeout, no deadlock check if (deadlockTimeout >= 0) { if (actualTimeout < 0) { // infinite wait but perform a deadlock check first deadlockWait = true; actualTimeout = deadlockTimeout; } else if (deadlockTimeout < actualTimeout) { // deadlock wait followed by a timeout wait deadlockWait = true; actualTimeout = deadlockTimeout; // leave timeout as the remaining time timeout -= deadlockTimeout; } } } ActiveLock waitingLock = (ActiveLock) lockItem; lockItem = null; if (deadlockTrace) { // we want to keep a stack trace of this thread just before it goes // into wait state, no need to synchronized because Hashtable.put // is synchronized and the new throwable is local to this thread. 
lockTraces.put(waitingLock, new Throwable()); } int earlyWakeupCount = 0; long startWaitTime = 0; try {forever: for (;;) { byte wakeupReason = waitingLock.waitForGrant(actualTimeout); ActiveLock nextWaitingLock = null; Object[] deadlockData = null; try { boolean willQuitWait; Enumeration timeoutLockTable = null; long currentTime = 0; synchronized (this) { if (control.isGrantable(control.firstWaiter() == waitingLock, compatabilitySpace, qualifier)) { // Yes, we are granted, put us on the granted queue. control.grant(waitingLock); // Remove from the waiting queue & get next waiter nextWaitingLock = control.getNextWaiter(waitingLock, true, this); // this is where we need to re-obtain the latch, it's // safe to call this lockObject() which will get the // synchronization we already hold, because java allows // nested synchronization and it will be released // automatically if we have to wait if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } return waitingLock; } waitingLock.clearPotentiallyGranted(); // try again later willQuitWait = (wakeupReason != Constants.WAITING_LOCK_GRANT); StandardException deadlockException = null; if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) && deadlockWait) || (wakeupReason == Constants.WAITING_LOCK_DEADLOCK)) { // check for a deadlock, even if we were woken up to because // we were selected as a victim we still check because the situation // may have changed. deadlockData = Deadlock.look(factory, this, control, waitingLock, wakeupReason); if (deadlockData == null) { // we don't have a deadlock deadlockWait = false; actualTimeout = timeout; startWaitTime = 0; willQuitWait = false; } else { willQuitWait = true; } } nextWaitingLock = control.getNextWaiter(waitingLock, willQuitWait, this); // If we were not woken by another then we have // timed out. Either deadlock out or timeout if (willQuitWait) { // Even if we deadlocked trying to get the lock, still // reget the latch so that client's need not know // latch was released. if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(waitingLock) + "\nGot deadlock/timeout, here's the table" + this.toDebugString(); } } if(!deadlockWait) { if( deadlockTrace ) { // want a copy of the LockTable and the time currentTime = System.currentTimeMillis(); timeoutLockTable = factory.makeVirtualLockTable(); } } } } // synchronized block // need to do this outside of the synchronized block as the // message text building (timeouts and deadlocks) may involve // getting locks to look up table names from identifiers. if (willQuitWait) { if (SanityManager.DEBUG) { if (lockDebug != null) { String type = (deadlockWait ? "deadlock:" : "timeout:"); SanityManager.DEBUG_PRINT( type, "wait on lockitem caused " + type + lockDebug); } } if(!deadlockWait) { if( deadlockTrace ) { //Turn ON derby.locks.deadlockTrace to build the lockTable. 
throw Timeout.buildException(waitingLock, timeoutLockTable, currentTime); } else { StandardException se = StandardException.newException( SQLState.LOCK_TIMEOUT); throw se; } } if (deadlockData != null) { throw Deadlock.buildException(factory, deadlockData); } } } finally { if (nextWaitingLock != null) { nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT); nextWaitingLock = null; } } if (actualTimeout != C_LockFactory.WAIT_FOREVER) { if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT) earlyWakeupCount++; if (earlyWakeupCount > 5) { long now = System.currentTimeMillis(); if (startWaitTime != 0) { long sleepTime = now - startWaitTime; actualTimeout -= sleepTime; } startWaitTime = now; } } } // for(;;) } finally { if (deadlockTrace) { // I am out of the wait state, either I got my lock or I am the // one who is going to detect the deadlock, don't need the // stack trace anymore. lockTraces.remove(waitingLock); } } }
if (SanityManager.DEBUG_ON("DeadlockTrace"))
deadlockData = Deadlock.look(
    factory, this, control, waitingLock, wakeupReason);
if (deadlockData == null) {
    deadlockWait = false;
    actualTimeout = timeout;
    startWaitTime = 0;
    willQuitWait = false;
} else {
    willQuitWait = true;
}
}
nextWaitingLock = control.getNextWaiter(
    waitingLock, willQuitWait, this);
if (willQuitWait) {
    if (latch != null) {
        lockObject(
            compatabilitySpace, latch.getLockable(), latch.getQualifier(),
            C_LockFactory.WAIT_FOREVER, (Latch) null);
    }
    if (SanityManager.DEBUG)
public Lock lockObject(Object compatabilitySpace, Lockable ref, Object qualifier, int timeout, Latch latch) throws StandardException { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("memoryLeakTrace")) { if (size() > 1000) System.out.println("memoryLeakTrace:LockSet: " + size()); } } Control gc; LockControl control; Lock lockItem; String lockDebug = null; synchronized (this) { gc = getControl(ref); if (gc == null) { // object is not locked, can be granted Lock gl = new Lock(compatabilitySpace, ref, qualifier); gl.grant(); put(ref, gl); return gl; } control = gc.getLockControl(); if (control != gc) { put(ref, control); } if (SanityManager.DEBUG) { SanityManager.ASSERT(ref.equals(control.getLockable())); // ASSERT item is in the list if (getControl(control.getLockable()) != control) { SanityManager.THROWASSERT( "lockObject mismatched lock items " + getControl(control.getLockable()) + " " + control); } } lockItem = control.addLock(this, compatabilitySpace, qualifier); if (lockItem.getCount() != 0) { return lockItem; } if (timeout == C_LockFactory.NO_WAIT) { // remove all trace of lock control.giveUpWait(lockItem, this); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(lockItem) + "\nCould not grant lock with zero timeout, here's the table" + this.toDebugString(); } } return null; } // this is where we need to release the latch if (latch != null) unlock(latch, 1); } // synchronized block boolean deadlockWait = false; int actualTimeout; if (timeout == C_LockFactory.WAIT_FOREVER) { // always check for deadlocks as there should not be any deadlockWait = true; if ((actualTimeout = deadlockTimeout) == C_LockFactory.WAIT_FOREVER) actualTimeout = Property.DEADLOCK_TIMEOUT_DEFAULT * 1000; } else { if (timeout == C_LockFactory.TIMED_WAIT) timeout = actualTimeout = waitTimeout; else actualTimeout = timeout; // five posible cases // i) timeout -1, deadlock -1 -> just wait forever, no deadlock check // ii) timeout >= 0, deadlock -1 -> just wait for timeout, no deadlock check // iii) timeout -1, deadlock >= 0 -> wait for deadlock, then deadlock check, then infinite timeout // iv) timeout >=0, deadlock < timeout -> wait for deadlock, then deadlock check, then wait for (timeout - deadlock) // v) timeout >=0, deadlock >= timeout -> just wait for timeout, no deadlock check if (deadlockTimeout >= 0) { if (actualTimeout < 0) { // infinite wait but perform a deadlock check first deadlockWait = true; actualTimeout = deadlockTimeout; } else if (deadlockTimeout < actualTimeout) { // deadlock wait followed by a timeout wait deadlockWait = true; actualTimeout = deadlockTimeout; // leave timeout as the remaining time timeout -= deadlockTimeout; } } } ActiveLock waitingLock = (ActiveLock) lockItem; lockItem = null; if (deadlockTrace) { // we want to keep a stack trace of this thread just before it goes // into wait state, no need to synchronized because Hashtable.put // is synchronized and the new throwable is local to this thread. 
lockTraces.put(waitingLock, new Throwable()); } int earlyWakeupCount = 0; long startWaitTime = 0; try {forever: for (;;) { byte wakeupReason = waitingLock.waitForGrant(actualTimeout); ActiveLock nextWaitingLock = null; Object[] deadlockData = null; try { boolean willQuitWait; Enumeration timeoutLockTable = null; long currentTime = 0; synchronized (this) { if (control.isGrantable(control.firstWaiter() == waitingLock, compatabilitySpace, qualifier)) { // Yes, we are granted, put us on the granted queue. control.grant(waitingLock); // Remove from the waiting queue & get next waiter nextWaitingLock = control.getNextWaiter(waitingLock, true, this); // this is where we need to re-obtain the latch, it's // safe to call this lockObject() which will get the // synchronization we already hold, because java allows // nested synchronization and it will be released // automatically if we have to wait if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } return waitingLock; } waitingLock.clearPotentiallyGranted(); // try again later willQuitWait = (wakeupReason != Constants.WAITING_LOCK_GRANT); StandardException deadlockException = null; if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) && deadlockWait) || (wakeupReason == Constants.WAITING_LOCK_DEADLOCK)) { // check for a deadlock, even if we were woken up to because // we were selected as a victim we still check because the situation // may have changed. deadlockData = Deadlock.look(factory, this, control, waitingLock, wakeupReason); if (deadlockData == null) { // we don't have a deadlock deadlockWait = false; actualTimeout = timeout; startWaitTime = 0; willQuitWait = false; } else { willQuitWait = true; } } nextWaitingLock = control.getNextWaiter(waitingLock, willQuitWait, this); // If we were not woken by another then we have // timed out. Either deadlock out or timeout if (willQuitWait) { // Even if we deadlocked trying to get the lock, still // reget the latch so that client's need not know // latch was released. if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(waitingLock) + "\nGot deadlock/timeout, here's the table" + this.toDebugString(); } } if(!deadlockWait) { if( deadlockTrace ) { // want a copy of the LockTable and the time currentTime = System.currentTimeMillis(); timeoutLockTable = factory.makeVirtualLockTable(); } } } } // synchronized block // need to do this outside of the synchronized block as the // message text building (timeouts and deadlocks) may involve // getting locks to look up table names from identifiers. if (willQuitWait) { if (SanityManager.DEBUG) { if (lockDebug != null) { String type = (deadlockWait ? "deadlock:" : "timeout:"); SanityManager.DEBUG_PRINT( type, "wait on lockitem caused " + type + lockDebug); } } if(!deadlockWait) { if( deadlockTrace ) { //Turn ON derby.locks.deadlockTrace to build the lockTable. 
throw Timeout.buildException(waitingLock, timeoutLockTable, currentTime); } else { StandardException se = StandardException.newException( SQLState.LOCK_TIMEOUT); throw se; } } if (deadlockData != null) { throw Deadlock.buildException(factory, deadlockData); } } } finally { if (nextWaitingLock != null) { nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT); nextWaitingLock = null; } } if (actualTimeout != C_LockFactory.WAIT_FOREVER) { if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT) earlyWakeupCount++; if (earlyWakeupCount > 5) { long now = System.currentTimeMillis(); if (startWaitTime != 0) { long sleepTime = now - startWaitTime; actualTimeout -= sleepTime; } startWaitTime = now; } } } // for(;;) } finally { if (deadlockTrace) { // I am out of the wait state, either I got my lock or I am the // one who is going to detect the deadlock, don't need the // stack trace anymore. lockTraces.remove(waitingLock); } } }
SanityManager.showTrace(new Throwable());
lockDebug =
if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); lockDebug =
public Lock lockObject(Object compatabilitySpace, Lockable ref, Object qualifier, int timeout, Latch latch) throws StandardException { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("memoryLeakTrace")) { if (size() > 1000) System.out.println("memoryLeakTrace:LockSet: " + size()); } } Control gc; LockControl control; Lock lockItem; String lockDebug = null; synchronized (this) { gc = getControl(ref); if (gc == null) { // object is not locked, can be granted Lock gl = new Lock(compatabilitySpace, ref, qualifier); gl.grant(); put(ref, gl); return gl; } control = gc.getLockControl(); if (control != gc) { put(ref, control); } if (SanityManager.DEBUG) { SanityManager.ASSERT(ref.equals(control.getLockable())); // ASSERT item is in the list if (getControl(control.getLockable()) != control) { SanityManager.THROWASSERT( "lockObject mismatched lock items " + getControl(control.getLockable()) + " " + control); } } lockItem = control.addLock(this, compatabilitySpace, qualifier); if (lockItem.getCount() != 0) { return lockItem; } if (timeout == C_LockFactory.NO_WAIT) { // remove all trace of lock control.giveUpWait(lockItem, this); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(lockItem) + "\nCould not grant lock with zero timeout, here's the table" + this.toDebugString(); } } return null; } // this is where we need to release the latch if (latch != null) unlock(latch, 1); } // synchronized block boolean deadlockWait = false; int actualTimeout; if (timeout == C_LockFactory.WAIT_FOREVER) { // always check for deadlocks as there should not be any deadlockWait = true; if ((actualTimeout = deadlockTimeout) == C_LockFactory.WAIT_FOREVER) actualTimeout = Property.DEADLOCK_TIMEOUT_DEFAULT * 1000; } else { if (timeout == C_LockFactory.TIMED_WAIT) timeout = actualTimeout = waitTimeout; else actualTimeout = timeout; // five posible cases // i) timeout -1, deadlock -1 -> just wait forever, no deadlock check // ii) timeout >= 0, deadlock -1 -> just wait for timeout, no deadlock check // iii) timeout -1, deadlock >= 0 -> wait for deadlock, then deadlock check, then infinite timeout // iv) timeout >=0, deadlock < timeout -> wait for deadlock, then deadlock check, then wait for (timeout - deadlock) // v) timeout >=0, deadlock >= timeout -> just wait for timeout, no deadlock check if (deadlockTimeout >= 0) { if (actualTimeout < 0) { // infinite wait but perform a deadlock check first deadlockWait = true; actualTimeout = deadlockTimeout; } else if (deadlockTimeout < actualTimeout) { // deadlock wait followed by a timeout wait deadlockWait = true; actualTimeout = deadlockTimeout; // leave timeout as the remaining time timeout -= deadlockTimeout; } } } ActiveLock waitingLock = (ActiveLock) lockItem; lockItem = null; if (deadlockTrace) { // we want to keep a stack trace of this thread just before it goes // into wait state, no need to synchronized because Hashtable.put // is synchronized and the new throwable is local to this thread. 
lockTraces.put(waitingLock, new Throwable()); } int earlyWakeupCount = 0; long startWaitTime = 0; try {forever: for (;;) { byte wakeupReason = waitingLock.waitForGrant(actualTimeout); ActiveLock nextWaitingLock = null; Object[] deadlockData = null; try { boolean willQuitWait; Enumeration timeoutLockTable = null; long currentTime = 0; synchronized (this) { if (control.isGrantable(control.firstWaiter() == waitingLock, compatabilitySpace, qualifier)) { // Yes, we are granted, put us on the granted queue. control.grant(waitingLock); // Remove from the waiting queue & get next waiter nextWaitingLock = control.getNextWaiter(waitingLock, true, this); // this is where we need to re-obtain the latch, it's // safe to call this lockObject() which will get the // synchronization we already hold, because java allows // nested synchronization and it will be released // automatically if we have to wait if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } return waitingLock; } waitingLock.clearPotentiallyGranted(); // try again later willQuitWait = (wakeupReason != Constants.WAITING_LOCK_GRANT); StandardException deadlockException = null; if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) && deadlockWait) || (wakeupReason == Constants.WAITING_LOCK_DEADLOCK)) { // check for a deadlock, even if we were woken up to because // we were selected as a victim we still check because the situation // may have changed. deadlockData = Deadlock.look(factory, this, control, waitingLock, wakeupReason); if (deadlockData == null) { // we don't have a deadlock deadlockWait = false; actualTimeout = timeout; startWaitTime = 0; willQuitWait = false; } else { willQuitWait = true; } } nextWaitingLock = control.getNextWaiter(waitingLock, willQuitWait, this); // If we were not woken by another then we have // timed out. Either deadlock out or timeout if (willQuitWait) { // Even if we deadlocked trying to get the lock, still // reget the latch so that client's need not know // latch was released. if (latch != null) { lockObject( compatabilitySpace, latch.getLockable(), latch.getQualifier(), C_LockFactory.WAIT_FOREVER, (Latch) null); } if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(waitingLock) + "\nGot deadlock/timeout, here's the table" + this.toDebugString(); } } if(!deadlockWait) { if( deadlockTrace ) { // want a copy of the LockTable and the time currentTime = System.currentTimeMillis(); timeoutLockTable = factory.makeVirtualLockTable(); } } } } // synchronized block // need to do this outside of the synchronized block as the // message text building (timeouts and deadlocks) may involve // getting locks to look up table names from identifiers. if (willQuitWait) { if (SanityManager.DEBUG) { if (lockDebug != null) { String type = (deadlockWait ? "deadlock:" : "timeout:"); SanityManager.DEBUG_PRINT( type, "wait on lockitem caused " + type + lockDebug); } } if(!deadlockWait) { if( deadlockTrace ) { //Turn ON derby.locks.deadlockTrace to build the lockTable. 
throw Timeout.buildException(waitingLock, timeoutLockTable, currentTime); } else { StandardException se = StandardException.newException( SQLState.LOCK_TIMEOUT); throw se; } } if (deadlockData != null) { throw Deadlock.buildException(factory, deadlockData); } } } finally { if (nextWaitingLock != null) { nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT); nextWaitingLock = null; } } if (actualTimeout != C_LockFactory.WAIT_FOREVER) { if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT) earlyWakeupCount++; if (earlyWakeupCount > 5) { long now = System.currentTimeMillis(); if (startWaitTime != 0) { long sleepTime = now - startWaitTime; actualTimeout -= sleepTime; } startWaitTime = now; } } } // for(;;) } finally { if (deadlockTrace) { // I am out of the wait state, either I got my lock or I am the // one who is going to detect the deadlock, don't need the // stack trace anymore. lockTraces.remove(waitingLock); } } }
}
}
if(!deadlockWait)
}
}
if (deadlockTrace && (deadlockData == null)) {
    currentTime = System.currentTimeMillis();
    timeoutLockTable = factory.makeVirtualLockTable();
}
}
}
if (willQuitWait) {
    if (SanityManager.DEBUG)
public Lock lockObject(Object compatabilitySpace, Lockable ref, Object qualifier, int timeout, Latch latch) throws StandardException { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("memoryLeakTrace")) { if (size() > 1000) System.out.println("memoryLeakTrace:LockSet: " + size()); } } Control gc; LockControl control; Lock lockItem; String lockDebug = null; synchronized (this) { gc = getControl(ref); if (gc == null) { // object is not locked, can be granted Lock gl = new Lock(compatabilitySpace, ref, qualifier); gl.grant(); put(ref, gl); return gl; } control = gc.getLockControl(); if (control != gc) { put(ref, control); } if (SanityManager.DEBUG) { SanityManager.ASSERT(ref.equals(control.getLockable())); // ASSERT item is in the list if (getControl(control.getLockable()) != control) { SanityManager.THROWASSERT( "lockObject mismatched lock items " + getControl(control.getLockable()) + " " + control); } } lockItem = control.addLock(this, compatabilitySpace, qualifier); if (lockItem.getCount() != 0) { return lockItem; } if (timeout == C_LockFactory.NO_WAIT) { // remove all trace of lock control.giveUpWait(lockItem, this); if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("DeadlockTrace")) { SanityManager.showTrace(new Throwable()); // The following dumps the lock table as it // exists at the time a timeout is about to // cause a deadlock exception to be thrown. lockDebug = DiagnosticUtil.toDiagString(lockItem) + "\nCould not grant lock with zero timeout, here's the table" + this.toDebugString(); } } return null; } // this is where we need to release the latch if (latch != null) unlock(latch, 1); } // synchronized block boolean deadlockWait = false; int actualTimeout; if (timeout == C_LockFactory.WAIT_FOREVER) { // always check for deadlocks as there should not be any deadlockWait = true; if ((actualTimeout = deadlockTimeout) == C_LockFactory.WAIT_FOREVER) actualTimeout = Property.DEADLOCK_TIMEOUT_DEFAULT * 1000; } else { if (timeout == C_LockFactory.TIMED_WAIT) timeout = actualTimeout = waitTimeout; else actualTimeout = timeout; // five posible cases // i) timeout -1, deadlock -1 -> just wait forever, no deadlock check // ii) timeout >= 0, deadlock -1 -> just wait for timeout, no deadlock check // iii) timeout -1, deadlock >= 0 -> wait for deadlock, then deadlock check, then infinite timeout // iv) timeout >=0, deadlock < timeout -> wait for deadlock, then deadlock check, then wait for (timeout - deadlock) // v) timeout >=0, deadlock >= timeout -> just wait for timeout, no deadlock check if (deadlockTimeout >= 0) { if (actualTimeout < 0) { // infinite wait but perform a deadlock check first deadlockWait = true; actualTimeout = deadlockTimeout; } else if (deadlockTimeout < actualTimeout) { // deadlock wait followed by a timeout wait deadlockWait = true; actualTimeout = deadlockTimeout; // leave timeout as the remaining time timeout -= deadlockTimeout; } } } ActiveLock waitingLock = (ActiveLock) lockItem; lockItem = null; if (deadlockTrace) { // we want to keep a stack trace of this thread just before it goes // into wait state, no need to synchronized because Hashtable.put // is synchronized and the new throwable is local to this thread. 
        lockTraces.put(waitingLock, new Throwable());
    }

    int earlyWakeupCount = 0;
    long startWaitTime = 0;

    try {
forever:
        for (;;) {

            byte wakeupReason = waitingLock.waitForGrant(actualTimeout);

            ActiveLock nextWaitingLock = null;

            Object[] deadlockData = null;

            try {

                boolean willQuitWait;
                Enumeration timeoutLockTable = null;
                long currentTime = 0;

                synchronized (this) {

                    if (control.isGrantable(
                            control.firstWaiter() == waitingLock,
                            compatabilitySpace,
                            qualifier)) {

                        // Yes, we are granted, put us on the granted queue.
                        control.grant(waitingLock);

                        // Remove from the waiting queue & get next waiter
                        nextWaitingLock =
                            control.getNextWaiter(waitingLock, true, this);

                        // this is where we need to re-obtain the latch, it's
                        // safe to call this lockObject() which will get the
                        // synchronization we already hold, because java allows
                        // nested synchronization and it will be released
                        // automatically if we have to wait
                        if (latch != null) {
                            lockObject(
                                compatabilitySpace,
                                latch.getLockable(),
                                latch.getQualifier(),
                                C_LockFactory.WAIT_FOREVER,
                                (Latch) null);
                        }

                        return waitingLock;
                    }

                    waitingLock.clearPotentiallyGranted(); // try again later

                    willQuitWait =
                        (wakeupReason != Constants.WAITING_LOCK_GRANT);

                    StandardException deadlockException = null;

                    if (((wakeupReason == Constants.WAITING_LOCK_IN_WAIT) &&
                             deadlockWait) ||
                        (wakeupReason == Constants.WAITING_LOCK_DEADLOCK)) {

                        // check for a deadlock; even if we were woken up
                        // because we were selected as a victim we still check
                        // because the situation may have changed.
                        deadlockData = Deadlock.look(
                            factory, this, control, waitingLock, wakeupReason);

                        if (deadlockData == null) {
                            // we don't have a deadlock
                            deadlockWait = false;

                            actualTimeout = timeout;
                            startWaitTime = 0;
                            willQuitWait = false;
                        } else {
                            willQuitWait = true;
                        }
                    }

                    nextWaitingLock =
                        control.getNextWaiter(waitingLock, willQuitWait, this);

                    // If we were not woken by another then we have
                    // timed out. Either deadlock out or timeout
                    if (willQuitWait) {

                        // Even if we deadlocked trying to get the lock, still
                        // reget the latch so that clients need not know the
                        // latch was released.
                        if (latch != null) {
                            lockObject(
                                compatabilitySpace,
                                latch.getLockable(),
                                latch.getQualifier(),
                                C_LockFactory.WAIT_FOREVER,
                                (Latch) null);
                        }

                        if (SanityManager.DEBUG) {
                            if (SanityManager.DEBUG_ON("DeadlockTrace")) {

                                SanityManager.showTrace(new Throwable());

                                // The following dumps the lock table as it
                                // exists at the time a timeout is about to
                                // cause a deadlock exception to be thrown.
                                lockDebug =
                                    DiagnosticUtil.toDiagString(waitingLock) +
                                    "\nGot deadlock/timeout, here's the table" +
                                    this.toDebugString();
                            }
                        }

                        if (!deadlockWait) {
                            if (deadlockTrace) {
                                // want a copy of the LockTable and the time
                                currentTime = System.currentTimeMillis();
                                timeoutLockTable = factory.makeVirtualLockTable();
                            }
                        }
                    }
                } // synchronized block

                // need to do this outside of the synchronized block as the
                // message text building (timeouts and deadlocks) may involve
                // getting locks to look up table names from identifiers.
                if (willQuitWait) {

                    if (SanityManager.DEBUG) {
                        if (lockDebug != null) {
                            String type =
                                (deadlockWait ? "deadlock:" : "timeout:");

                            SanityManager.DEBUG_PRINT(
                                type,
                                "wait on lockitem caused " + type + lockDebug);
                        }
                    }

                    if (!deadlockWait) {
                        if (deadlockTrace) {
                            // Turn ON derby.locks.deadlockTrace to build the lockTable.
                            throw Timeout.buildException(
                                waitingLock, timeoutLockTable, currentTime);
                        } else {
                            StandardException se = StandardException.newException(
                                SQLState.LOCK_TIMEOUT);

                            throw se;
                        }
                    }

                    if (deadlockData != null) {
                        throw Deadlock.buildException(factory, deadlockData);
                    }
                }
            } finally {
                if (nextWaitingLock != null) {
                    nextWaitingLock.wakeUp(Constants.WAITING_LOCK_GRANT);
                    nextWaitingLock = null;
                }
            }

            if (actualTimeout != C_LockFactory.WAIT_FOREVER) {

                if (wakeupReason != Constants.WAITING_LOCK_IN_WAIT)
                    earlyWakeupCount++;

                if (earlyWakeupCount > 5) {
                    long now = System.currentTimeMillis();
                    if (startWaitTime != 0) {
                        long sleepTime = now - startWaitTime;
                        actualTimeout -= sleepTime;
                    }
                    startWaitTime = now;
                }
            }
        } // for(;;)

    } finally {
        if (deadlockTrace) {
            // I am out of the wait state, either I got my lock or I am the
            // one who is going to detect the deadlock, don't need the
            // stack trace anymore.
            lockTraces.remove(waitingLock);
        }
    }
}
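The context above contains the "five possible cases" comment describing how a lock timeout and a deadlock-check timeout are combined into a first wait (possibly ending in a deadlock check) plus a remaining wait. The sketch below restates only that case analysis in isolation; the class, record, and constant are invented for illustration, mirror the comment's "-1 means wait forever" convention, and are not part of the Derby code (it uses a Java 16+ record purely for brevity).

```java
// Sketch only: the timeout / deadlockTimeout case analysis from the comment above,
// restated with invented names. -1 means "wait forever", as in the comment.
public final class WaitPlanSketch {

    static final int WAIT_FOREVER = -1; // assumption: mirrors the -1 convention in the comment

    /** First wait length, whether it ends in a deadlock check, and the leftover timeout. */
    record WaitPlan(int firstWaitMillis, boolean deadlockCheck, int remainingMillis) {}

    static WaitPlan plan(int timeoutMillis, int deadlockTimeoutMillis) {
        // cases i) and ii): deadlock checking disabled, just honour the timeout
        if (deadlockTimeoutMillis < 0) {
            return new WaitPlan(timeoutMillis, false, 0);
        }
        // case iii): infinite wait, but run a deadlock check after deadlockTimeout
        if (timeoutMillis == WAIT_FOREVER) {
            return new WaitPlan(deadlockTimeoutMillis, true, WAIT_FOREVER);
        }
        // case iv): deadlock check first, then wait out the rest of the timeout
        if (deadlockTimeoutMillis < timeoutMillis) {
            return new WaitPlan(deadlockTimeoutMillis, true,
                                timeoutMillis - deadlockTimeoutMillis);
        }
        // case v): the timeout fires before a deadlock check would, so never check
        return new WaitPlan(timeoutMillis, false, 0);
    }

    public static void main(String[] args) {
        System.out.println(plan(WAIT_FOREVER, WAIT_FOREVER)); // case i)
        System.out.println(plan(60_000, 20_000));             // case iv)
    }
}
```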