use of java.sql.SQLWarning in project derby by apache.
the class DRDAConnThread method parseEXCSQLSETobjects.
/**
* Parse EXCSQLSET objects
* Objects
* TYPDEFNAM - Data type definition name - optional
* TYPDEFOVR - Type definition overrides - optional
* SQLSTT - SQL Statement - required (a list of at least one)
*
* Objects may follow in one DSS or in several DSS chained together.
*
* @throws DRDAProtocolException
* @throws SQLException
*/
private void parseEXCSQLSETobjects() throws DRDAProtocolException, SQLException {
boolean gotSqlStt = false;
boolean hadUnrecognizedStmt = false;
DRDAStatement drdaStmt = database.getDefaultStatement();
drdaStmt.initialize();
do {
correlationID = reader.readDssHeader();
while (reader.moreDssData()) {
int codePoint = reader.readLengthAndCodePoint(false);
switch(codePoint) {
// optional
case CodePoint.TYPDEFNAM:
setStmtOrDbByteOrder(false, drdaStmt, parseTYPDEFNAM());
break;
// optional
case CodePoint.TYPDEFOVR:
parseTYPDEFOVR(drdaStmt);
break;
// required
case CodePoint.SQLSTT:
String sqlStmt = parseEncodedString();
if (sqlStmt != null) {
// then we have at least one SQL Statement.
gotSqlStt = true;
}
if (sqlStmt.startsWith(TIMEOUT_STATEMENT)) {
String timeoutString = sqlStmt.substring(TIMEOUT_STATEMENT.length());
pendingStatementTimeout = Integer.parseInt(timeoutString);
break;
}
if (canIgnoreStmt(sqlStmt)) {
// hadUnrecognizedStmt = true;
break;
}
if (SanityManager.DEBUG) {
trace("sqlStmt = " + sqlStmt);
}
// initialize statement for reuse
drdaStmt.initialize();
drdaStmt.getStatement().clearWarnings();
try {
drdaStmt.getStatement().executeUpdate(sqlStmt);
} catch (SQLException e) {
// If this is a syntax error, the SET statement was not recognized; note it (so we can return a warning later), but don't interfere otherwise.
if (e.getSQLState().equals(SYNTAX_ERR)) {
hadUnrecognizedStmt = true;
} else {
// something else; assume it's serious.
throw e;
}
}
break;
default:
invalidCodePoint(codePoint);
}
}
} while (reader.isChainedWithSameID());
// SQLSTT is required.
if (!gotSqlStt) {
missingCodePoint(CodePoint.SQLSTT);
}
// If any SET statements were not recognized, raise a warning (this does not cause the EXCSQLSET statement to fail).
if (hadUnrecognizedStmt) {
SQLWarning warn = new SQLWarning("One or more SET statements " + "not recognized.", "01000");
throw warn;
}
// end if.
}
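Because java.sql.SQLWarning is a subclass of SQLException, a method can throw a warning like the one above and let callers decide whether to treat it as a soft or hard failure. The sketch below is a minimal, self-contained illustration of that pattern; the setSessionProperty helper and its recognition rule are hypothetical stand-ins for the SET-statement handling shown above, not Derby code.
import java.sql.SQLException;
import java.sql.SQLWarning;

public class WarningVsErrorDemo {

    // Hypothetical helper standing in for the SET-statement handling above:
    // an unrecognized statement is reported by throwing an SQLWarning with
    // the generic warning SQLSTATE "01000" instead of a hard error.
    static void setSessionProperty(String sqlStmt) throws SQLException {
        boolean recognized = sqlStmt.toUpperCase().startsWith("SET ");
        if (!recognized) {
            throw new SQLWarning("One or more SET statements not recognized.", "01000");
        }
        // ... apply the setting here ...
    }

    public static void main(String[] args) throws SQLException {
        try {
            setSessionProperty("ALTER SESSION FOO");
        } catch (SQLWarning w) {
            // Catching SQLWarning before any broader SQLException handler lets
            // the caller downgrade the condition to a logged warning.
            System.out.println("warning " + w.getSQLState() + ": " + w.getMessage());
        }
    }
}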
use of java.sql.SQLWarning in project derby by apache.
the class DRDAConnThread method writeFDODTA.
/**
* This routine places some data into the current QRYDTA block using
* FDODTA (Formatted Data Object DaTA rules).
*
* There are 3 basic types of processing flow for this routine:
* - In normal non-rowset, non-scrollable cursor flow, this routine
* places a single row into the QRYDTA block and returns TRUE,
* indicating that the caller can call us back to place another
* row into the result set if he wishes. (The caller may need to
* send Externalized Data, which would be a reason for him NOT to
* place any more rows into the QRYDTA).
* - In ROWSET processing, this routine places an entire ROWSET of
* rows into the QRYDTA block and returns FALSE, indicating that
* the QRYDTA block is full and should now be sent.
* - In callable statement processing, this routine places the
* results from the output parameters of the called procedure into
* the QRYDTA block. This code path is really dramatically
* different from the other two paths and shares only a very small
* amount of common code in this routine.
*
* In all cases, it is possible that the data we wish to return may
* not fit into the QRYDTA block, in which case we call splitQRYDTA
* to split the data and remember the remainder data in the result set.
* Splitting the data is relatively rare in the normal cursor case,
* because our caller (writeQRYDTA) uses a coarse estimation
* technique to avoid calling us if he thinks a split is likely.
*
* The overall structure of this routine is implemented as two
* loops:
* - the outer "do ... while ... " loop processes a ROWSET, one row
* at a time. For non-ROWSET cursors, and for callable statements,
* this loop executes only once.
* - the inner "for ... i < numCols ..." loop processes each column
* in the current row, or each output parameter in the procedure.
*
* Most column data is written directly inline in the QRYDTA block.
* Some data, however, is written as Externalized Data. This is
* commonly used for Large Objects. In that case, an Externalized
* Data Pointer is written into the QRYDTA block, and the actual
* data flows in separate EXTDTA blocks which are returned
* after this QRYDTA block.
*/
private boolean writeFDODTA(DRDAStatement stmt) throws DRDAProtocolException, SQLException {
boolean hasdata;
int blksize = stmt.getBlksize() > 0 ? stmt.getBlksize() : CodePoint.QRYBLKSZ_MAX;
long rowCount = 0;
ResultSet rs = null;
boolean moreData = (stmt.getQryprctyp() == CodePoint.LMTBLKPRC);
int numCols;
if (!stmt.needsToSendParamData) {
rs = stmt.getResultSet();
}
if (rs != null) {
numCols = stmt.getNumRsCols();
if (stmt.isScrollable()) {
hasdata = positionCursor(stmt, rs);
} else {
hasdata = rs.next();
}
} else { // it's for a CallableStatement
hasdata = stmt.hasOutputParams();
numCols = stmt.getDrdaParamCount();
}
do {
if (!hasdata) {
doneData(stmt, rs);
moreData = false;
return moreData;
}
// Send ResultSet warnings if there are any
SQLWarning sqlw = (rs != null) ? rs.getWarnings() : null;
if (rs != null) {
rs.clearWarnings();
}
// If the row has been updated, chain a warning for the client (this warning should not reach the API level).
if (rs != null && rs.rowUpdated()) {
SQLWarning w = new SQLWarning("", SQLState.ROW_UPDATED, ExceptionSeverity.WARNING_SEVERITY);
if (sqlw != null) {
sqlw.setNextWarning(w);
} else {
sqlw = w;
}
}
// Likewise, if the row has been deleted, chain a warning with an SQLSTATE of 02502.
if (rs != null && rs.rowDeleted()) {
SQLWarning w = new SQLWarning("", SQLState.ROW_DELETED, ExceptionSeverity.WARNING_SEVERITY);
if (sqlw != null) {
sqlw.setNextWarning(w);
} else {
sqlw = w;
}
}
// Save the position where we start writing the warnings in case
// we need to add more warnings later.
final int sqlcagrpStart = writer.getBufferPosition();
if (sqlw == null) {
writeSQLCAGRP(nullSQLState, 0, -1, -1);
} else {
writeSQLCAGRP(sqlw, 1, -1);
}
// Save the position right after the warnings so we know where to
// insert more warnings later.
final int sqlcagrpEnd = writer.getBufferPosition();
// if we were asked not to return data, mark QRYDTA null; do not
// return yet, need to make rowCount right
// if the row has been deleted return QRYDTA null (delete hole)
boolean noRetrieveRS = (rs != null && (!stmt.getQryrtndta() || rs.rowDeleted()));
if (noRetrieveRS) {
// QRYDTA null indicator: IS NULL
writer.writeByte(0xFF);
} else {
// QRYDTA null indicator: not null
writer.writeByte(0);
}
for (int i = 1; i <= numCols; i++) {
if (noRetrieveRS) {
break;
}
int drdaType;
int ndrdaType;
int precision;
int scale;
boolean valNull;
if (rs != null) {
drdaType = stmt.getRsDRDAType(i) & 0xff;
precision = stmt.getRsPrecision(i);
scale = stmt.getRsScale(i);
ndrdaType = drdaType | 1;
if (SanityManager.DEBUG) {
trace("!!drdaType = " + java.lang.Integer.toHexString(drdaType) + " precision=" + precision + " scale = " + scale);
}
switch(ndrdaType) {
case DRDAConstants.DRDA_TYPE_NLOBBYTES:
case DRDAConstants.DRDA_TYPE_NLOBCMIXED:
EXTDTAInputStream extdtaStream = EXTDTAInputStream.getEXTDTAStream(rs, i, drdaType);
writeFdocaVal(i, extdtaStream, drdaType, precision, scale, extdtaStream.isNull(), stmt, false);
break;
case DRDAConstants.DRDA_TYPE_NINTEGER:
int ival = rs.getInt(i);
valNull = rs.wasNull();
if (SanityManager.DEBUG) {
trace("====== writing int: " + ival + " is null: " + valNull);
}
writeNullability(drdaType, valNull);
if (!valNull) {
writer.writeInt(ival);
}
break;
case DRDAConstants.DRDA_TYPE_NSMALL:
short sval = rs.getShort(i);
valNull = rs.wasNull();
if (SanityManager.DEBUG) {
trace("====== writing small: " + sval + " is null: " + valNull);
}
writeNullability(drdaType, valNull);
if (!valNull) {
writer.writeShort(sval);
}
break;
case DRDAConstants.DRDA_TYPE_NINTEGER8:
long lval = rs.getLong(i);
valNull = rs.wasNull();
if (SanityManager.DEBUG) {
trace("====== writing long: " + lval + " is null: " + valNull);
}
writeNullability(drdaType, valNull);
if (!valNull) {
writer.writeLong(lval);
}
break;
case DRDAConstants.DRDA_TYPE_NFLOAT4:
float fval = rs.getFloat(i);
valNull = rs.wasNull();
if (SanityManager.DEBUG) {
trace("====== writing float: " + fval + " is null: " + valNull);
}
writeNullability(drdaType, valNull);
if (!valNull) {
writer.writeFloat(fval);
}
break;
case DRDAConstants.DRDA_TYPE_NFLOAT8:
double dval = rs.getDouble(i);
valNull = rs.wasNull();
if (SanityManager.DEBUG) {
trace("====== writing double: " + dval + " is null: " + valNull);
}
writeNullability(drdaType, valNull);
if (!valNull) {
writer.writeDouble(dval);
}
break;
case DRDAConstants.DRDA_TYPE_NCHAR:
case DRDAConstants.DRDA_TYPE_NVARCHAR:
case DRDAConstants.DRDA_TYPE_NVARMIX:
case DRDAConstants.DRDA_TYPE_NLONG:
case DRDAConstants.DRDA_TYPE_NLONGMIX:
String valStr = rs.getString(i);
if (SanityManager.DEBUG) {
trace("====== writing char/varchar/mix :" + valStr + ":");
}
writeFdocaVal(i, valStr, drdaType, precision, scale, rs.wasNull(), stmt, false);
break;
default:
Object val = getObjectForWriteFdoca(rs, i, drdaType);
writeFdocaVal(i, val, drdaType, precision, scale, rs.wasNull(), stmt, false);
}
} else {
drdaType = stmt.getParamDRDAType(i) & 0xff;
precision = stmt.getParamPrecision(i);
scale = stmt.getParamScale(i);
if (stmt.isOutputParam(i)) {
int[] outlen = new int[1];
drdaType = FdocaConstants.mapJdbcTypeToDrdaType(stmt.getOutputParamType(i), true, appRequester, outlen);
precision = stmt.getOutputParamPrecision(i);
scale = stmt.getOutputParamScale(i);
if (SanityManager.DEBUG) {
trace("***getting Object " + i);
}
Object val = getObjectForWriteFdoca((CallableStatement) stmt.ps, i, drdaType);
valNull = (val == null);
writeFdocaVal(i, val, drdaType, precision, scale, valNull, stmt, true);
} else {
writeFdocaVal(i, null, drdaType, precision, scale, true, stmt, true);
}
}
}
DataTruncation truncated = stmt.getTruncationWarnings();
if (truncated != null) {
// Some of the data was truncated, so we need to add a
// truncation warning. Save a copy of the row data, then move
// back to the SQLCAGRP section and overwrite it with the new
// warnings, and finally re-insert the row data after the new
// SQLCAGRP section.
byte[] data = writer.getBufferContents(sqlcagrpEnd);
writer.setBufferPosition(sqlcagrpStart);
if (sqlw != null) {
truncated.setNextWarning(sqlw);
}
writeSQLCAGRP(truncated, 1, -1);
writer.writeBytes(data);
stmt.clearTruncationWarnings();
}
// does all this fit in one QRYDTA
if (writer.getDSSLength() > blksize) {
splitQRYDTA(stmt, blksize);
return false;
}
if (rs == null) {
return moreData;
}
// get the next row
rowCount++;
if (rowCount < stmt.getQryrowset()) {
hasdata = rs.next();
} else if (stmt.isScrollable() || noRetrieveRS) {
// (1) scrollable: we return at most one row set; or (2) no retrieve data
moreData = false;
}
} while (hasdata && rowCount < stmt.getQryrowset());
// for non-scrollable cursors
if (!stmt.isScrollable()) {
stmt.rowCount += rowCount;
}
if (!hasdata) {
doneData(stmt, rs);
moreData = false;
}
if (!stmt.isScrollable()) {
stmt.setHasdata(hasdata);
}
return moreData;
}
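The DataTruncation handling above chains the truncation warning in front of any warnings already collected for the row before rewriting the SQLCAGRP section. The buffer splicing is specific to the DDMWriter, but the chaining order can be shown in isolation; the SQLSTATE used for the pre-existing warning below is a placeholder, not Derby's actual value.
import java.sql.DataTruncation;
import java.sql.SQLWarning;

public class TruncationWarningChain {
    public static void main(String[] args) {
        // A warning already collected for the row (placeholder SQLSTATE).
        SQLWarning existing = new SQLWarning("row updated", "01000");

        // A truncation detected while writing column 3 of the row:
        // (columnIndex, parameter?, read?, dataSize, transferSize).
        DataTruncation truncated = new DataTruncation(3, false, true, 10, 5);

        // Same ordering as writeFDODTA: the truncation warning leads the
        // chain and the previously collected warnings are linked behind it.
        truncated.setNextWarning(existing);

        for (SQLWarning w = truncated; w != null; w = w.getNextWarning()) {
            System.out.println(w.getSQLState() + ": " + w.getMessage());
        }
    }
}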
use of java.sql.SQLWarning in project derby by apache.
the class BrokeredConnection method statementHoldabilityCheck.
/*
** Methods private to the class.
*/
/**
* Check the result set holdability when creating a statement
* object. Section 16.1.3.1 of JDBC 4.0 (proposed final draft)
* says the driver may change the holdability and add a SQLWarning
* to the Connection object.
*
* This work-in-progress implementation throws an exception
* to match the old behaviour just as part of incremental development.
*/
final int statementHoldabilityCheck(int resultSetHoldability) throws SQLException {
int holdability = control.checkHoldCursors(resultSetHoldability, true);
if (holdability != resultSetHoldability) {
SQLWarning w = SQLWarningFactory.newSQLWarning(SQLState.HOLDABLE_RESULT_SET_NOT_AVAILABLE);
addWarning(w);
}
return holdability;
}
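From the application side, such a downgrade is only visible through the statement's actual holdability and the warning chained onto the Connection. A minimal sketch of checking for it, assuming a Derby embedded in-memory database is available on the classpath (the JDBC URL is illustrative):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLWarning;
import java.sql.Statement;

public class HoldabilityWarningDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn =
                DriverManager.getConnection("jdbc:derby:memory:demo;create=true")) {
            Statement s = conn.createStatement(
                    ResultSet.TYPE_FORWARD_ONLY,
                    ResultSet.CONCUR_READ_ONLY,
                    ResultSet.HOLD_CURSORS_OVER_COMMIT);

            // JDBC allows the driver to change the requested holdability and
            // report the change as a warning on the Connection, which is what
            // statementHoldabilityCheck does via addWarning(...).
            if (s.getResultSetHoldability() != ResultSet.HOLD_CURSORS_OVER_COMMIT) {
                for (SQLWarning w = conn.getWarnings(); w != null; w = w.getNextWarning()) {
                    System.out.println("holdability warning: " + w.getMessage());
                }
            }
        }
    }
}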
use of java.sql.SQLWarning in project derby by apache.
the class BasicNoPutResultSetImpl method getWarnings.
public final SQLWarning getWarnings() {
SQLWarning w = warnings;
warnings = null;
return w;
}
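This getter hands the entire warning chain to the caller exactly once and then resets it, so the same warnings are never reported twice. A small self-contained sketch of the same clear-on-read contract (the holder class is hypothetical):
import java.sql.SQLWarning;

public class ClearOnReadWarningHolder {
    private SQLWarning warnings;

    // Append a warning to the chain (setNextWarning links it at the end).
    public void addWarning(SQLWarning w) {
        if (warnings == null) {
            warnings = w;
        } else {
            warnings.setNextWarning(w);
        }
    }

    // Same contract as getWarnings() above: return the chain once, then reset.
    public SQLWarning getWarnings() {
        SQLWarning w = warnings;
        warnings = null;
        return w;
    }

    public static void main(String[] args) {
        ClearOnReadWarningHolder holder = new ClearOnReadWarningHolder();
        holder.addWarning(new SQLWarning("first", "01000"));
        holder.addWarning(new SQLWarning("second", "01000"));
        for (SQLWarning w = holder.getWarnings(); w != null; w = w.getNextWarning()) {
            System.out.println(w.getMessage());
        }
        System.out.println("second read: " + holder.getWarnings()); // prints null
    }
}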
use of java.sql.SQLWarning in project debezium by debezium.
the class PostgresReplicationConnection method createReplicationStream.
private ReplicationStream createReplicationStream(final LogSequenceNumber lsn) throws SQLException {
PGReplicationStream s;
try {
s = startPgReplicationStream(lsn, plugin.forceRds() ? messageDecoder::optionsWithoutMetadata : messageDecoder::optionsWithMetadata);
messageDecoder.setContainsMetadata(plugin.forceRds() ? false : true);
} catch (PSQLException e) {
if (e.getMessage().matches("(?s)ERROR: option .* is unknown.*")) {
// It is possible we are connecting to an old wal2json plug-in
LOGGER.warn("Could not register for streaming with metadata in messages, falling back to messages without metadata");
s = startPgReplicationStream(lsn, messageDecoder::optionsWithoutMetadata);
messageDecoder.setContainsMetadata(false);
} else if (e.getMessage().matches("(?s)ERROR: requested WAL segment .* has already been removed.*")) {
LOGGER.error("Cannot rewind to last processed WAL position", e);
throw new ConnectException("The offset to start reading from has been removed from the database write-ahead log. Create a new snapshot and consider setting of PostgreSQL parameter wal_keep_segments = 0.");
} else {
throw e;
}
}
final PGReplicationStream stream = s;
final long lsnLong = lsn.asLong();
return new ReplicationStream() {
private static final int CHECK_WARNINGS_AFTER_COUNT = 100;
private int warningCheckCounter = CHECK_WARNINGS_AFTER_COUNT;
// make sure this is volatile since multiple threads may be interested in this value
private volatile LogSequenceNumber lastReceivedLSN;
@Override
public void read(ReplicationMessageProcessor processor) throws SQLException, InterruptedException {
ByteBuffer read = stream.read();
// the lsn we started from is inclusive, so we need to avoid sending back the same message twice
if (lsnLong >= stream.getLastReceiveLSN().asLong()) {
return;
}
deserializeMessages(read, processor);
}
@Override
public void readPending(ReplicationMessageProcessor processor) throws SQLException, InterruptedException {
ByteBuffer read = stream.readPending();
// the lsn we started from is inclusive, so we need to avoid sending back the same message twice
if (read == null || lsnLong >= stream.getLastReceiveLSN().asLong()) {
return;
}
deserializeMessages(read, processor);
}
private void deserializeMessages(ByteBuffer buffer, ReplicationMessageProcessor processor) throws SQLException, InterruptedException {
lastReceivedLSN = stream.getLastReceiveLSN();
messageDecoder.processMessage(buffer, processor, typeRegistry);
}
@Override
public void close() throws SQLException {
processWarnings(true);
stream.close();
}
@Override
public void flushLastReceivedLsn() throws SQLException {
if (lastReceivedLSN == null) {
// nothing to flush yet, since we haven't read anything...
return;
}
doFlushLsn(lastReceivedLSN);
}
@Override
public void flushLsn(long lsn) throws SQLException {
doFlushLsn(LogSequenceNumber.valueOf(lsn));
}
private void doFlushLsn(LogSequenceNumber lsn) throws SQLException {
stream.setFlushedLSN(lsn);
stream.setAppliedLSN(lsn);
stream.forceUpdateStatus();
}
@Override
public Long lastReceivedLsn() {
return lastReceivedLSN != null ? lastReceivedLSN.asLong() : null;
}
private void processWarnings(final boolean forced) throws SQLException {
if (--warningCheckCounter == 0 || forced) {
warningCheckCounter = CHECK_WARNINGS_AFTER_COUNT;
for (SQLWarning w = connection().getWarnings(); w != null; w = w.getNextWarning()) {
LOGGER.debug("Server-side message: '{}', state = {}, code = {}", w.getMessage(), w.getSQLState(), w.getErrorCode());
}
}
}
};
}
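The processWarnings method above drains the connection's warning chain only every CHECK_WARNINGS_AFTER_COUNT reads, or immediately when forced at close. The standalone sketch below mirrors that throttling; the call to clearWarnings() is an addition of this sketch (the original does not clear the chain) to avoid re-logging the same warnings on the next pass.
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.SQLWarning;

public class PeriodicWarningDrain {
    private static final int CHECK_WARNINGS_AFTER_COUNT = 100;
    private int warningCheckCounter = CHECK_WARNINGS_AFTER_COUNT;

    // Walk and log the server-side warning chain every N calls, or when forced.
    void maybeLogWarnings(Connection connection, boolean forced) throws SQLException {
        if (--warningCheckCounter == 0 || forced) {
            warningCheckCounter = CHECK_WARNINGS_AFTER_COUNT;
            for (SQLWarning w = connection.getWarnings(); w != null; w = w.getNextWarning()) {
                System.out.printf("Server-side message: '%s', state = %s, code = %d%n",
                        w.getMessage(), w.getSQLState(), w.getErrorCode());
            }
            // Not in the original: clear so the same warnings are not re-logged.
            connection.clearWarnings();
        }
    }
}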