Use of java.sql.BatchUpdateException in project jackrabbit-oak by apache.
Class RDBDocumentStoreJDBC, method update:
/**
 * Update a list of documents using JDBC batches. Some of the updates may fail because of concurrent
 * changes. The method returns the set of successfully updated documents. It is the caller's responsibility
 * to compare that set with the list of input documents, find out which documents conflicted, and take
 * appropriate action.
 * <p>
 * If the {@code upsert} parameter is set to true, the method will also try to insert new documents,
 * i.e. those whose modcount equals 1.
 * <p>
 * The updates are not applied in the order of the passed list, so the list must not contain two updates
 * for the same document; an {@link IllegalArgumentException} is thrown if it does.
 *
 * @param connection the JDBC connection
 * @param tmd the table metadata
 * @param documents the list of documents to update
 * @param upsert whether to also insert new documents
 * @return the set of ids of successfully updated documents
 * @throws SQLException if a database access error occurs
 */
public <T extends Document> Set<String> update(Connection connection, RDBTableMetaData tmd, List<T> documents, boolean upsert) throws SQLException {
    assertNoDuplicatedIds(documents);
    Set<String> successfulUpdates = new HashSet<String>();
    List<String> updatedKeys = new ArrayList<String>();
    int[] batchResults = new int[0];
    PreparedStatement stmt = connection.prepareStatement("update " + tmd.getName()
            + " set MODIFIED = ?, HASBINARY = ?, DELETEDONCE = ?, MODCOUNT = ?, CMODCOUNT = ?, DSIZE = ?, DATA = ?, "
            + (tmd.hasVersion() ? (" VERSION = " + SCHEMAVERSION + ", ") : "")
            + "BDATA = ? where ID = ? and MODCOUNT = ?");
    try {
        boolean batchIsEmpty = true;
        for (T document : sortDocuments(documents)) {
            Long modcount = (Long) document.get(MODCOUNT);
            if (modcount == 1) {
                // This is a new document. We'll deal with the inserts later.
                continue;
            }
            String data = this.ser.asString(document, tmd.getColumnOnlyProperties());
            Number hasBinary = (Number) document.get(NodeDocument.HAS_BINARY_FLAG);
            Boolean deletedOnce = (Boolean) document.get(NodeDocument.DELETED_ONCE);
            Long cmodcount = (Long) document.get(COLLISIONSMODCOUNT);
            int si = 1;
            stmt.setObject(si++, document.get(MODIFIED), Types.BIGINT);
            stmt.setObject(si++, hasBinaryAsNullOrInteger(hasBinary), Types.SMALLINT);
            stmt.setObject(si++, deletedOnceAsNullOrInteger(deletedOnce), Types.SMALLINT);
            stmt.setObject(si++, modcount, Types.BIGINT);
            stmt.setObject(si++, cmodcount == null ? Long.valueOf(0) : cmodcount, Types.BIGINT);
            stmt.setObject(si++, data.length(), Types.BIGINT);
            if (data.length() < tmd.getDataLimitInOctets() / CHAR2OCTETRATIO) {
                stmt.setString(si++, data);
                stmt.setBinaryStream(si++, null, 0);
            } else {
                stmt.setString(si++, "\"blob\"");
                byte[] bytes = asBytes(data);
                stmt.setBytes(si++, bytes);
            }
            setIdInStatement(tmd, stmt, si++, document.getId());
            stmt.setObject(si++, modcount - 1, Types.BIGINT);
            stmt.addBatch();
            updatedKeys.add(document.getId());
            batchIsEmpty = false;
        }
        if (!batchIsEmpty) {
            batchResults = stmt.executeBatch();
            connection.commit();
        }
    } catch (BatchUpdateException ex) {
        LOG.debug("Some of the batch updates failed", ex);
        batchResults = ex.getUpdateCounts();
    } finally {
        stmt.close();
    }
    for (int i = 0; i < batchResults.length; i++) {
        int result = batchResults[i];
        if (result == 1 || result == Statement.SUCCESS_NO_INFO) {
            successfulUpdates.add(updatedKeys.get(i));
        }
    }
    if (upsert) {
        List<T> toBeInserted = new ArrayList<T>(documents.size());
        for (T doc : documents) {
            if ((Long) doc.get(MODCOUNT) == 1) {
                toBeInserted.add(doc);
            }
        }
        if (!toBeInserted.isEmpty()) {
            for (String id : insert(connection, tmd, toBeInserted)) {
                successfulUpdates.add(id);
            }
        }
    }
    return successfulUpdates;
}
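As the Javadoc above notes, conflict handling is left to the caller: any input document whose id is missing from the returned set was not updated, typically because its MODCOUNT no longer matched. A minimal caller-side sketch of that comparison follows; the names rdbJdbc, updateAndResolve and handleConflict are hypothetical and are not part of Oak's API.

<T extends Document> void updateAndResolve(Connection connection, RDBTableMetaData tmd, List<T> documents) throws SQLException {
    // rdbJdbc is assumed to be an RDBDocumentStoreJDBC instance
    Set<String> updatedIds = rdbJdbc.update(connection, tmd, documents, true);
    for (T doc : documents) {
        if (!updatedIds.contains(doc.getId())) {
            // this document lost the optimistic-concurrency race (its MODCOUNT no longer matched)
            handleConflict(doc);
        }
    }
}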
Use of java.sql.BatchUpdateException in project sqlite-jna by gwenn.
Class SqliteStatementTest, method testExecuteBatch:
@Test
public void testExecuteBatch() throws Exception {
    try (Statement stmt = conn.createStatement()) {
        stmt.addBatch("INSERT INTO test_table VALUES (2, 'testing')");
        stmt.addBatch("ATTACH ':memory:' as db2");
        // stmt.addBatch("SELECT * FROM test_table");
        stmt.addBatch("INSERT INTO test_table VALUES (3, 'testing again')");
        assertArrayEquals(new int[] { 1, 0, /*, Statement.SUCCESS_NO_INFO*/ 1 }, stmt.executeBatch());
        final ResultSet catalogs = conn.getMetaData().getCatalogs();
        assertArrayEquals(BATCH_ATTACH_RESULT, formatResultSet(catalogs));
        catalogs.close();
        assertArrayEquals(new int[0], stmt.executeBatch());
        stmt.addBatch("INSERT INTO test_table VALUES (4, 'testing again too')");
        stmt.addBatch("INSERT INTO test_table VALUES (4, 'testing again too')");
        try {
            stmt.executeBatch();
            fail("executeBatch should not have succeeded");
        } catch (BatchUpdateException e) {
            // expected: the second, duplicate INSERT fails
        }
        assertArrayEquals(new int[0], stmt.executeBatch());
        final String[] tableDump = { "|1|test|", "|2|testing|", "|3|testing again|", "|4|testing again too|" };
        try (ResultSet rs = stmt.executeQuery("SELECT * FROM test_table")) {
            assertArrayEquals(tableDump, formatResultSet(rs));
        }
        stmt.addBatch("INSERT INTO test_table VALUES (2, 'testing')");
        stmt.clearBatch();
        assertArrayEquals(new int[0], stmt.executeBatch());
    }
}
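The empty catch block above only verifies that a BatchUpdateException is thrown. A caller that needs to know which batched statements failed could inspect the exception roughly as sketched below; this is not part of the test, and how getUpdateCounts() reports failed entries varies between drivers.

try {
    stmt.executeBatch();
} catch (BatchUpdateException e) {
    int[] counts = e.getUpdateCounts();
    for (int i = 0; i < counts.length; i++) {
        if (counts[i] == Statement.EXECUTE_FAILED) {
            // the i-th batched statement failed; the underlying SQLException(s) may be chained to e
            System.err.println("batch entry " + i + " failed");
        }
    }
}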
Use of java.sql.BatchUpdateException in project sqlite-jna by gwenn.
Class PrepStmt, method executeBatch:
@Override
public int[] executeBatch() throws SQLException {
    final org.sqlite.Stmt stmt = getStmt();
    batching = false;
    if (batch == null) {
        // FIXME
        return new int[0];
    }
    final int size = batch.size();
    SQLException cause = null;
    Object[] params;
    final int[] changes = new int[size];
    for (int i = 0; i < size; ++i) {
        try {
            params = batch.get(i);
            if (params != null) {
                for (int j = 0; j < params.length; j++) {
                    stmt.bindByIndex(j + 1, params[j]);
                }
            }
            changes[i] = executeUpdate();
        } catch (SQLException e) {
            if (cause != null) {
                e.setNextException(cause);
            }
            cause = e;
            changes[i] = EXECUTE_FAILED;
        }
    }
    clearBatch();
    if (cause != null) {
        throw new BatchUpdateException("batch failed", changes, cause);
    }
    return changes;
}
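Note how each failure is linked to the previous one with setNextException and the most recent failure is passed to the BatchUpdateException as its cause, so every individual error stays reachable. A consumer could walk that chain roughly as sketched below; prepStmt is a hypothetical PrepStmt instance with a populated batch, and starting the traversal from getCause() is specific to this implementation (other drivers, as in the JdbcRecordWriter example further down, chain directly on the BatchUpdateException via getNextException()).

try {
    prepStmt.executeBatch();
} catch (BatchUpdateException bue) {
    // the last failure is the cause; earlier failures hang off its getNextException() chain
    for (SQLException next = (SQLException) bue.getCause(); next != null; next = next.getNextException()) {
        System.err.println("batched statement failed: " + next.getMessage());
    }
}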
Use of java.sql.BatchUpdateException in project sqlite-jna by gwenn.
Class Stmt, method executeBatch:
@Override
public int[] executeBatch() throws SQLException {
    checkOpen();
    if (batch == null) {
        // FIXME
        return new int[0];
    }
    final int size = batch.size();
    SQLException cause = null;
    final int[] changes = new int[size];
    for (int i = 0; i < size; ++i) {
        try {
            changes[i] = executeUpdate(batch.get(i));
        } catch (SQLException e) {
            if (cause != null) {
                e.setNextException(cause);
            }
            cause = e;
            changes[i] = EXECUTE_FAILED;
        }
    }
    clearBatch();
    if (cause != null) {
        throw new BatchUpdateException("batch failed", changes, cause);
    }
    return changes;
}
Use of java.sql.BatchUpdateException in project com.revolsys.open by revolsys.
Class JdbcRecordWriter, method write:
@Override
public synchronized void write(final Record record) {
    try {
        final JdbcRecordDefinition recordDefinition = getRecordDefinition(record);
        final RecordStore recordStore = recordDefinition.getRecordStore();
        final RecordState state = record.getState();
        if (recordStore != this.recordStore) {
            if (state != RecordState.DELETED) {
                insert(recordDefinition, record);
            }
        } else {
            switch (state) {
                case NEW:
                    insert(recordDefinition, record);
                    break;
                case MODIFIED:
                    update(recordDefinition, record);
                    break;
                case PERSISTED:
                    // No action required
                    break;
                case DELETED:
                    delete(recordDefinition, record);
                    break;
                default:
                    throw new IllegalStateException("State not known");
            }
        }
    } catch (final RuntimeException e) {
        throw e;
    } catch (final Error e) {
        throw e;
    } catch (final BatchUpdateException e) {
        for (SQLException e1 = e.getNextException(); e1 != null; e1 = e1.getNextException()) {
            LOG.error("Unable to write", e1);
        }
        throw new RuntimeException("Unable to write", e);
    } catch (final Exception e) {
        throw new RuntimeException("Unable to write", e);
    }
}