Use of java.sql.BatchUpdateException in the Apache Ignite project.
Class DmlStatementsProcessor, method doInsertBatched:
/**
 * Executes an INSERT statement plan in batched mode.
 *
 * @param plan Plan to execute.
 * @param cursor Cursor to take inserted data from, i.e. the list of batch arguments for each query.
 * @param pageSize Batch size for streaming; anything <= 0 means single page operations.
 * @return Per-row update results.
 * @throws IgniteCheckedException if the batch failed, in particular on duplicate keys.
 */
private List<UpdateResult> doInsertBatched(UpdatePlan plan, List<List<List<?>>> cursor, int pageSize) throws IgniteCheckedException {
    GridCacheContext cacheCtx = plan.cacheContext();

    DmlBatchSender sender = new DmlBatchSender(cacheCtx, pageSize, cursor.size());

    SQLException batchErr = null;
    int batchRowIdx = 0;

    // Feed every row of every batch argument set into the sender. Row conversion
    // failures are recorded per batch row and chained into a single SQLException.
    for (List<List<?>> argRows : cursor) {
        for (List<?> args : argRows) {
            try {
                final IgniteBiTuple keyVal = plan.processRow(args);

                sender.add(keyVal.getKey(), new InsertEntryProcessor(keyVal.getValue()), batchRowIdx);
            } catch (Exception e) {
                String state;
                int errCode;

                if (e instanceof IgniteSQLException) {
                    IgniteSQLException sqlEx = (IgniteSQLException) e;

                    state = sqlEx.sqlState();
                    errCode = sqlEx.statusCode();
                } else {
                    state = SqlStateCode.INTERNAL_ERROR;
                    errCode = IgniteQueryErrorCode.UNKNOWN;
                }

                batchErr = chainException(batchErr, new SQLException(e.getMessage(), state, errCode, e));

                sender.setFailed(batchRowIdx);
            }
        }

        batchRowIdx++;
    }

    // Push out whatever is still buffered; flush failures are chained as well.
    try {
        sender.flush();
    } catch (Exception e) {
        batchErr = chainException(batchErr, new SQLException(e.getMessage(), SqlStateCode.INTERNAL_ERROR, IgniteQueryErrorCode.UNKNOWN, e));
    }

    batchErr = chainException(batchErr, sender.error());

    // Duplicate keys are reported collectively as a constraint violation.
    if (!F.isEmpty(sender.failedKeys())) {
        SQLException dupEx = new SQLException("Failed to INSERT some keys because they are already in cache [keys=" + sender.failedKeys() + ']', SqlStateCode.CONSTRAINT_VIOLATION, DUPLICATE_KEY);

        batchErr = chainException(batchErr, dupEx);
    }

    // Any accumulated error is surfaced as a BatchUpdateException that still
    // carries the per-row update counters.
    if (batchErr != null) {
        BatchUpdateException buEx = new BatchUpdateException(batchErr.getMessage(), batchErr.getSQLState(), batchErr.getErrorCode(), sender.perRowCounterAsArray(), batchErr);

        throw new IgniteCheckedException(buEx);
    }

    int[] counts = sender.perRowCounterAsArray();
    List<UpdateResult> res = new ArrayList<>(counts.length);

    for (int cnt : counts)
        res.add(new UpdateResult(cnt, X.EMPTY_OBJECT_ARRAY));

    return res;
}
Use of java.sql.BatchUpdateException in the Apache Derby project.
Class BatchUpdateTest, method testTransactionErrorBatch:
// Try a transaction error, in this particular case a time out while
// getting the lock.
public void testTransactionErrorBatch() throws SQLException {
    // conn is just the default connection.
    Connection conn = getConnection();
    Connection conn2 = openDefaultConnection();
    conn.setAutoCommit(false);
    conn2.setAutoCommit(false);
    Statement stmt = conn.createStatement();
    Statement stmt2 = conn2.createStatement();
    int[] updateCount = null;
    println("Negative Statement: statement testing time out" + " while getting the lock in the batch");
    // Each connection inserts one row, then queues an update that needs a
    // lock held by the other connection, so executeBatch() times out.
    stmt.execute("insert into t1 values(1)");
    stmt2.execute("insert into t1 values(2)");
    stmt.addBatch("update t1 set c1=3 where c1=2");
    stmt2.addBatch("update t1 set c1=4 where c1=1");
    try {
        stmt.executeBatch();
        fail("Batch is expected to fail");
        // NOTE(review): an unreachable "updateCount = stmt2.executeBatch();"
        // used to follow here; fail() always throws, so that statement was
        // dead code and has been removed.
    } catch (BatchUpdateException bue) {
        /* Ensure the exception is a time out while getting the lock. */
        if (usingEmbedded())
            assertSQLState("40XL1", bue);
        else if (usingDerbyNetClient())
            assertSQLState("XJ208", bue);
        // bue is already a BatchUpdateException; the old redundant cast is gone.
        updateCount = bue.getUpdateCounts();
        if (updateCount != null) {
            if (usingEmbedded())
                assertEquals("first statement in the batch caused time out" + " while getting the lock, there should be no update count", 0, updateCount.length);
            else if (usingDerbyNetClient())
                /* first statement in the batch caused time out while getting
                 * the lock, there should be 1 update count of -3 */
                assertBatchUpdateCounts(new int[] { -3 }, updateCount);
        }
    }
    conn.rollback();
    conn2.rollback();
    stmt.clearBatch();
    stmt2.clearBatch();
    stmt.close();
    stmt2.close();
    commit();
    conn2.close();
}
Use of java.sql.BatchUpdateException in the Apache Derby project.
Class PrepareStatementTest, method testLargeBatch:
/**
 * Tests large batch sizes for Statement.addBatch and
 * Statement.executeBatch (a test for Jira 428). Currently there is a
 * hard DRDA limit of 65535 statements per batch (prior to DERBY-428 the
 * server failed at around 9000 statements). The various JDBC clients
 * support slightly lower limits: the Network Client supports 65534
 * statements per batch, the DB2JCC driver v2.4 supports 65532, and the
 * DB2JCC driver v2.6 supports 32765. This test verifies that a batch of
 * 32765 statements works, and that a batch of 100000 statements raises a
 * BatchUpdateException from the Network Client.
 */
public void testLargeBatch() throws Exception {
    Statement ddl = createStatement();
    ddl.execute("create table jira428 (i integer)");
    getConnection().setAutoCommit(false);

    // 32765 statements in one batch must succeed on every supported client.
    PreparedStatement insert = prepareStatement("insert into jira428 values (?)");
    for (int row = 0; row < 32765; ++row) {
        insert.setInt(1, row);
        insert.addBatch();
    }
    insert.executeBatch();
    insert.close();
    commit();

    // The part below cannot run under the JCC client because the exception
    // forces the connection closed. For DerbyNetClient it is a clean
    // exception that we can catch and recover from, so that path is tested.
    insert = prepareStatement("insert into jira428 values (?)");
    for (int row = 0; row < 100000; ++row) {
        insert.setInt(1, row);
        insert.addBatch();
    }
    try {
        insert.executeBatch();
        assertFalse("Expected exception when >65534 statements per batch", usingDerbyNetClient());
    } catch (BatchUpdateException bue) {
        assertSQLState("XJ116", bue);
        assertFalse("Unexpected exception in embedded framework", usingEmbedded());
    }
    insert.close();
    commit();
}
Use of java.sql.BatchUpdateException in the Apache Derby project.
Class EmbedStatement, method executeLargeBatch:
/**
 * JDBC 4.2
 *
 * Submit a batch of commands to the database for execution.
 * This method is optional. For use with
 * statements which may touch more than Integer.MAX_VALUE rows.
 *
 * @return per-element update counts for the batch (one entry per
 *         batched statement, taken from getLargeUpdateCount())
 * @throws SQLException on failure; batch-element failures are wrapped
 *         in a BatchUpdateException that carries the update counts of
 *         the elements that completed before the failure
 */
public long[] executeLargeBatch() throws SQLException {
    checkExecStatus();
    synchronized (getConnectionSynchronization()) {
        setupContextStack();
        // Index of the batch element being executed; declared outside the
        // try so the failure path below knows how many elements completed.
        int i = 0;
        // As per the jdbc 2.0 specs, close the statement object's current resultset
        // if one is open.
        // Are there results?
        // outside of the lower try/finally since results will
        // setup and restore themselves.
        clearResultSets();
        // Detach the accumulated batch up front so the statement's batch
        // list is cleared even if execution fails part-way through.
        Vector<Object> stmts = batchStatements;
        batchStatements = null;
        int size;
        if (stmts == null)
            size = 0;
        else
            size = stmts.size();
        long[] returnUpdateCountForBatch = new long[size];
        SQLException sqle;
        try {
            for (; i < size; i++) {
                // If we saw an interrupt, stop execution of batch now.
                // throwIf will likely only throw after at least one stm
                // has been executed, since first time around we probably
                // didn't do anything to notice interrupts yet.
                InterruptStatus.throwIf(lcc);
                // A batch element returning a result set is an error.
                if (executeBatchElement(stmts.get(i)))
                    throw newSQLException(SQLState.RESULTSET_RETURN_NOT_ALLOWED);
                returnUpdateCountForBatch[i] = getLargeUpdateCount();
            }
            InterruptStatus.restoreIntrFlagIfSeen(lcc);
            return returnUpdateCountForBatch;
        } catch (StandardException se) {
            sqle = handleException(se);
        } catch (SQLException sqle2) {
            sqle = sqle2;
        } finally {
            restoreContextStack();
        }
        // Only reached on failure: report the update counts of the first i
        // (successfully executed) elements via a BatchUpdateException.
        long[] successfulUpdateCount = Arrays.copyOf(returnUpdateCountForBatch, i);
        throw new BatchUpdateException(sqle.getMessage(), sqle.getSQLState(), sqle.getErrorCode(), successfulUpdateCount, sqle);
    }
}
Use of java.sql.BatchUpdateException in the Apache Jackrabbit Oak project.
Class RDBDocumentStoreJDBCTest, method batchFailingInsertResult:
@Test
public void batchFailingInsertResult() throws SQLException {
    String table = ((RDBDocumentStore) super.ds).getTable(Collection.NODES).getName();
    Connection con = super.rdbDataSource.getConnection();
    con.setReadOnly(false);
    try {
        // Start from a known state: delete key-1, key-2 and key-3.
        PreparedStatement delete = con.prepareStatement("DELETE FROM " + table + " WHERE ID in (?, ?, ?)");
        setIdInStatement(delete, 1, "key-1");
        setIdInStatement(delete, 2, "key-2");
        setIdInStatement(delete, 3, "key-3");
        delete.executeUpdate();
        delete.close();
        con.commit();

        // Pre-insert key-3 so the batch insert below collides on its last row.
        removeMe.add("key-3");
        PreparedStatement insert = con.prepareStatement("INSERT INTO " + table + " (id) VALUES (?)");
        setIdInStatement(insert, 1, "key-3");
        insert.executeUpdate();
        insert.close();
        con.commit();

        removeMe.add("key-1");
        removeMe.add("key-2");

        // Batch-insert key-1, key-2 and key-3; key-3 already exists, so the
        // batch must fail with a BatchUpdateException.
        PreparedStatement batch = con.prepareStatement("INSERT INTO " + table + " (id) VALUES (?)");
        setIdInStatement(batch, 1, "key-1");
        batch.addBatch();
        setIdInStatement(batch, 1, "key-2");
        batch.addBatch();
        setIdInStatement(batch, 1, "key-3");
        batch.addBatch();
        int[] counts = null;
        try {
            batch.executeBatch();
            fail("Batch operation should fail");
        } catch (BatchUpdateException e) {
            counts = e.getUpdateCounts();
        }
        batch.close();
        con.commit();

        // Drivers may report success for the first two rows even though the
        // batch failed on the third.
        boolean partialSuccess = counts.length >= 2 && isSuccess(counts[0]) && isSuccess(counts[1]);
        if (counts.length == 3) {
            assertTrue("Row already exists, shouldn't be inserted.", !isSuccess(counts[2]));
        }

        // Check what actually landed in the table.
        PreparedStatement query = con.prepareStatement("SELECT id FROM " + table + " WHERE id in (?, ?, ?)");
        setIdInStatement(query, 1, "key-1");
        setIdInStatement(query, 2, "key-2");
        setIdInStatement(query, 3, "key-3");
        ResultSet rs = query.executeQuery();
        Set<String> ids = new HashSet<String>();
        while (rs.next()) {
            ids.add(getIdFromRS(rs, 1));
        }
        rs.close();
        query.close();

        if (partialSuccess) {
            assertEquals("Some of the rows weren't inserted.", of("key-1", "key-2", "key-3"), ids);
        } else {
            assertEquals("Failure reported, but rows inserted.", of("key-3"), ids);
        }
    } finally {
        con.close();
    }
}
Aggregations