Use of java.sql.BatchUpdateException in project voltdb by VoltDB.
The class JDBC4Statement, method executeBatch.
// Submits a batch of commands to the database for execution and, if all commands
// execute successfully, returns an array of update counts.
@Override
public int[] executeBatch() throws SQLException {
    checkClosed();
    closeCurrentResult();
    if (batch == null || batch.size() == 0) {
        return new int[0];
    }
    int[] updateCounts = new int[batch.size()];
    // keep a running total of update counts
    int runningUpdateCount = 0;
    int i = 0;
    try {
        for (; i < batch.size(); i++) {
            setCurrentResult(null, (int) batch.get(i)
                    .execute(sourceConnection.NativeConnection, this.m_timeout, sourceConnection.queryTimeOutUnit)[0]
                    .fetchRow(0).getLong(0));
            updateCounts[i] = this.lastUpdateCount;
            runningUpdateCount += this.lastUpdateCount;
        }
    } catch (SQLException x) {
        // mark the failing command and report the counts gathered so far
        updateCounts[i] = EXECUTE_FAILED;
        throw new BatchUpdateException(Arrays.copyOf(updateCounts, i + 1), x);
    } finally {
        clearBatch();
    }
    // replace the update count from the last statement with the running total
    // for the whole batch.
    this.lastUpdateCount = runningUpdateCount;
    return updateCounts;
}
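A minimal caller-side sketch (not from the VoltDB source) of how the truncated counts thrown above can be used. The JDBC URL and table name are hypothetical; note that because the driver appends EXECUTE_FAILED before copying, the failing command is the last entry in the array.
import java.sql.BatchUpdateException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class BatchCaller {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:voltdb://localhost:21212");
             Statement stmt = conn.createStatement()) {
            stmt.addBatch("INSERT INTO towns (name) VALUES ('Springfield')");
            stmt.addBatch("INSERT INTO towns (name) VALUES ('Shelbyville')");
            try {
                int[] counts = stmt.executeBatch();
                System.out.println("All " + counts.length + " commands succeeded");
            } catch (BatchUpdateException e) {
                // The array is truncated at the failure (Arrays.copyOf(updateCounts, i + 1)
                // above), so the failing command is at index counts.length - 1 and holds
                // Statement.EXECUTE_FAILED.
                int[] counts = e.getUpdateCounts();
                System.err.println("Batch failed at command #" + (counts.length - 1));
            }
        }
    }
}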
Use of java.sql.BatchUpdateException in project ignite by apache.
The class JdbcBatchUpdateTask, method call.
/**
* {@inheritDoc}
*/
@Override
public int[] call() throws Exception {
    IgniteCache<?, ?> cache = ignite.cache(cacheName);
    // Don't create caches on server nodes in order to avoid data rebalancing.
    boolean start = ignite.configuration().isClientMode();
    if (cache == null && cacheName == null)
        cache = ((IgniteKernal) ignite).context().cache().getOrStartPublicCache(start, !loc && locQry);
    if (cache == null) {
        if (cacheName == null) {
            throw createJdbcSqlException("Failed to execute query. No suitable caches found.",
                IgniteQueryErrorCode.CACHE_NOT_FOUND);
        } else {
            throw createJdbcSqlException("Cache not found [cacheName=" + cacheName + ']',
                IgniteQueryErrorCode.CACHE_NOT_FOUND);
        }
    }
    int batchSize = F.isEmpty(sql) ? sqlBatch.size() : batchArgs.size();
    int[] updCntrs = new int[batchSize];
    int idx = 0;
    try {
        if (F.isEmpty(sql)) {
            for (; idx < batchSize; idx++)
                updCntrs[idx] = doSingleUpdate(cache, sqlBatch.get(idx), null);
        } else {
            for (; idx < batchSize; idx++)
                updCntrs[idx] = doSingleUpdate(cache, sql, batchArgs.get(idx));
        }
    } catch (Exception ex) {
        throw new BatchUpdateException(Arrays.copyOf(updCntrs, idx), ex);
    }
    return updCntrs;
}
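A simplified, self-contained sketch of the pattern above (the BatchRunner class and executor function are hypothetical): run each batched command individually and, on the first failure, wrap the counts accumulated so far in a BatchUpdateException so the client still learns what succeeded.
import java.sql.BatchUpdateException;
import java.util.Arrays;
import java.util.List;
import java.util.function.ToIntFunction;

final class BatchRunner {
    static int[] runBatch(List<String> commands, ToIntFunction<String> executor) throws BatchUpdateException {
        int[] updCntrs = new int[commands.size()];
        int idx = 0;
        try {
            for (; idx < commands.size(); idx++)
                updCntrs[idx] = executor.applyAsInt(commands.get(idx));
        } catch (RuntimeException ex) {
            // Note that the Ignite code copies only the first idx entries: the failing
            // command gets no counter at all, unlike the VoltDB variant above, which
            // appends EXECUTE_FAILED for it before copying.
            throw new BatchUpdateException(Arrays.copyOf(updCntrs, idx), ex);
        }
        return updCntrs;
    }
}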
Use of java.sql.BatchUpdateException in project ignite by apache.
The class JdbcRequestHandler, method executeBatchedQuery.
/**
* Executes query and updates result counters.
*
* @param qry Query.
* @param updCntsAcc Per query rows updates counter.
* @param firstErr First error data - code and message.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
private void executeBatchedQuery(SqlFieldsQueryEx qry, List<Integer> updCntsAcc,
    IgniteBiTuple<Integer, String> firstErr) {
    try {
        if (cliCtx.isStream()) {
            List<Long> cnt = ctx.query().streamBatchedUpdateQuery(qry.getSchema(), cliCtx, qry.getSql(),
                qry.batchedArguments());
            for (int i = 0; i < cnt.size(); i++)
                updCntsAcc.add(cnt.get(i).intValue());
            return;
        }
        List<FieldsQueryCursor<List<?>>> qryRes = ctx.query().querySqlFields(null, qry, cliCtx, true, true);
        for (FieldsQueryCursor<List<?>> cur : qryRes) {
            if (cur instanceof BulkLoadContextCursor)
                throw new IgniteSQLException("COPY command cannot be executed in batch mode.");
            assert !((QueryCursorImpl) cur).isQuery();
            Iterator<List<?>> it = cur.iterator();
            if (it.hasNext()) {
                int val = ((Long) it.next().get(0)).intValue();
                updCntsAcc.add(val);
            }
        }
    } catch (Exception e) {
        int code;
        String msg;
        if (e instanceof IgniteSQLException) {
            BatchUpdateException batchCause = X.cause(e, BatchUpdateException.class);
            if (batchCause != null) {
                int[] updCntsOnErr = batchCause.getUpdateCounts();
                for (int i = 0; i < updCntsOnErr.length; i++)
                    updCntsAcc.add(updCntsOnErr[i]);
                msg = batchCause.getMessage();
                code = batchCause.getErrorCode();
            } else {
                for (int i = 0; i < qry.batchedArguments().size(); i++)
                    updCntsAcc.add(Statement.EXECUTE_FAILED);
                msg = e.getMessage();
                code = ((IgniteSQLException) e).statusCode();
            }
        } else {
            for (int i = 0; i < qry.batchedArguments().size(); i++)
                updCntsAcc.add(Statement.EXECUTE_FAILED);
            msg = e.getMessage();
            code = IgniteQueryErrorCode.UNKNOWN;
        }
        if (firstErr.isEmpty())
            firstErr.set(code, msg);
        else
            U.error(log, "Failed to execute batch query [qry=" + qry + ']', e);
    }
}
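The handler prefers the nested BatchUpdateException's per-statement counts over blanket EXECUTE_FAILED markers; X.cause(e, BatchUpdateException.class) is an Ignite utility for finding that nested cause. A plain-Java equivalent that walks the cause chain might look roughly like this sketch (the Causes class is hypothetical):
import java.sql.BatchUpdateException;

final class Causes {
    static BatchUpdateException findBatchCause(Throwable e) {
        // Walk the cause chain; bound the depth to guard against cycles.
        int depth = 0;
        for (Throwable t = e; t != null && depth < 100; t = t.getCause(), depth++) {
            if (t instanceof BatchUpdateException)
                return (BatchUpdateException) t;
        }
        return null;
    }
}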
Use of java.sql.BatchUpdateException in project dbflute-core by dbflute.
The class DfXlsDataHandlerImpl, method doWriteDataTable.
// -----------------------------------------------------
// DataTable
// ---------
protected int doWriteDataTable(DfXlsDataResource resource, File file, DfDataTable dataTable) {
    final String tableDbName = dataTable.getTableDbName();
    if (dataTable.getRowSize() == 0) {
        _log.info("*Not found row at the table: " + tableDbName);
        return 0;
    }
    final Map<String, DfColumnMeta> columnMetaMap = getColumnMetaMap(tableDbName);
    if (columnMetaMap.isEmpty()) {
        throwTableNotFoundException(file, tableDbName);
    }
    beforeHandlingTable(tableDbName, columnMetaMap);
    checkHeaderColumnIfNeeds(resource, file, dataTable, columnMetaMap);
    final List<String> columnNameList = extractColumnNameList(dataTable);
    final String dataDirectory = resource.getDataDirectory();
    final LoggingInsertType loggingInsertType = getLoggingInsertType(dataDirectory);
    final boolean suppressBatchUpdate = isMergedSuppressBatchUpdate(resource.getDataDirectory());
    Connection conn = null;
    PreparedStatement ps = null;
    String preparedSql = null;
    SQLException retryEx = null;
    DfDataRow retryDataRow = null;
    try {
        conn = _dataSource.getConnection();
        int loadedRowCount = 0;
        final int rowSize = dataTable.getRowSize();
        boolean existsEmptyRow = false;
        for (int i = 0; i < rowSize; i++) {
            final DfDataRow dataRow = dataTable.getRow(i);
            if (ps == null) {
                final MyCreatedState myCreatedState = new MyCreatedState();
                preparedSql = myCreatedState.buildPreparedSql(dataRow);
                ps = conn.prepareStatement(preparedSql);
            }
            if (doWriteDataRow(resource, file, dataTable, dataRow // basic resources
                    , columnMetaMap // meta data
                    , conn, ps // JDBC resources
                    , loggingInsertType, suppressBatchUpdate)) { // option
                ++loadedRowCount;
                if (existsEmptyRow) {
                    final int emptyRowNumber = dataRow.getRowNumber() - 1;
                    throwXlsDataEmptyRowDataException(dataDirectory, file, dataTable, emptyRowNumber);
                }
            } else {
                existsEmptyRow = true;
            }
        }
        if (existsEmptyRow) {
            _log.info("...Skipping the terminal garbage row");
        }
        if (!suppressBatchUpdate) {
            boolean beginTransaction = false;
            boolean transactionClosed = false;
            try {
                // begin a transaction so the batch can be retried after a failure
                conn.setAutoCommit(false);
                beginTransaction = true;
                ps.executeBatch();
                conn.commit();
                transactionClosed = true;
            } catch (SQLException e) {
                conn.rollback();
                transactionClosed = true;
                if (!(e instanceof BatchUpdateException)) {
                    throw e;
                }
                _log.info("...Retrying by suppressing batch update: " + tableDbName);
                final PreparedStatement retryPs = conn.prepareStatement(preparedSql);
                for (int i = 0; i < rowSize; i++) {
                    final DfDataRow dataRow = dataTable.getRow(i);
                    try {
                        doWriteDataRow(resource, file, dataTable, dataRow // basic resources
                                , columnMetaMap // meta data
                                , conn, retryPs // JDBC resources
                                , LoggingInsertType.NONE, true); // option (no logging and suppress batch)
                    } catch (SQLException rowEx) {
                        retryEx = rowEx;
                        retryDataRow = dataRow;
                        break;
                    }
                }
                try {
                    retryPs.close();
                } catch (SQLException ignored) {
                }
                throw e;
            } finally {
                if (!transactionClosed) {
                    conn.rollback(); // for other exceptions
                }
                if (beginTransaction) {
                    conn.setAutoCommit(true);
                }
            }
        }
        noticeLoadedRowSize(tableDbName, loadedRowCount);
        checkImplicitClassification(file, tableDbName, columnNameList);
        return loadedRowCount;
    } catch (RuntimeException e) {
        handleXlsDataRegistartionFailureException(dataDirectory, file, tableDbName, e);
        return -1; // unreachable
    } catch (SQLException e) {
        handleWriteTableException(dataDirectory, file, dataTable, e, retryEx, retryDataRow, columnNameList);
        return -1; // unreachable
    } finally {
        closeResource(conn, ps);
        // process after (finally) handling table
        finallyHandlingTable(tableDbName, columnMetaMap);
    }
}
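The retry idea above, distilled into a minimal standalone sketch (the BatchRetryLoader class, table, and column names are hypothetical): when executeBatch() fails with a BatchUpdateException, roll back and replay the rows one at a time so the exact failing row can be reported.
import java.sql.BatchUpdateException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

final class BatchRetryLoader {
    static void load(Connection conn, List<String> names) throws SQLException {
        final String sql = "INSERT INTO member (name) VALUES (?)";
        conn.setAutoCommit(false);
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            for (String name : names) {
                ps.setString(1, name);
                ps.addBatch();
            }
            ps.executeBatch();
            conn.commit();
        } catch (BatchUpdateException e) {
            conn.rollback();
            // Replay row by row; the first SQLException pinpoints the bad row.
            try (PreparedStatement retryPs = conn.prepareStatement(sql)) {
                for (int i = 0; i < names.size(); i++) {
                    try {
                        retryPs.setString(1, names.get(i));
                        retryPs.executeUpdate();
                    } catch (SQLException rowEx) {
                        System.err.println("Row #" + i + " failed: " + rowEx.getMessage());
                        break;
                    }
                }
            }
            throw e;
        } finally {
            conn.setAutoCommit(true);
        }
    }
}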
Use of java.sql.BatchUpdateException in project nifi by apache.
The class PutSQL, method onBatchUpdateError.
private ExceptionHandler.OnError<FunctionContext, StatementFlowFileEnclosure> onBatchUpdateError(
        final ProcessContext context, final ProcessSession session, final RoutingResult result) {
    return RollbackOnFailure.createOnError((c, enclosure, r, e) -> {
        // If rollbackOnFailure is enabled, the error will be thrown as ProcessException instead.
        if (e instanceof BatchUpdateException && !c.isRollbackOnFailure()) {
            // If we get a BatchUpdateException, then we want to determine which FlowFile caused the failure,
            // and route that FlowFile to failure while routing those that finished processing to success and those
            // that have not yet been executed to retry.
            // Currently fragmented transaction does not use batch update.
            final int[] updateCounts = ((BatchUpdateException) e).getUpdateCounts();
            final List<FlowFile> batchFlowFiles = enclosure.getFlowFiles();
            // In the presence of a BatchUpdateException, the driver has the option of either stopping when an error
            // occurs, or continuing. If it continues, then it must account for all statements in the batch and for
            // those that fail return a Statement.EXECUTE_FAILED for the number of rows updated.
            // So we will iterate over all of the update counts returned. If any is equal to Statement.EXECUTE_FAILED,
            // we will route the corresponding FlowFile to failure. Otherwise, the FlowFile will go to success
            // unless it has not yet been processed (its index in the List > updateCounts.length).
            int failureCount = 0;
            int successCount = 0;
            int retryCount = 0;
            for (int i = 0; i < updateCounts.length; i++) {
                final int updateCount = updateCounts[i];
                final FlowFile flowFile = batchFlowFiles.get(i);
                if (updateCount == Statement.EXECUTE_FAILED) {
                    result.routeTo(flowFile, REL_FAILURE);
                    failureCount++;
                } else {
                    result.routeTo(flowFile, REL_SUCCESS);
                    successCount++;
                }
            }
            if (failureCount == 0) {
                // if no failures found, the driver decided not to execute the statements after the
                // failure, so route the last one to failure.
                final FlowFile failedFlowFile = batchFlowFiles.get(updateCounts.length);
                result.routeTo(failedFlowFile, REL_FAILURE);
                failureCount++;
            }
            if (updateCounts.length < batchFlowFiles.size()) {
                final List<FlowFile> unexecuted = batchFlowFiles.subList(updateCounts.length + 1, batchFlowFiles.size());
                for (final FlowFile flowFile : unexecuted) {
                    result.routeTo(flowFile, REL_RETRY);
                    retryCount++;
                }
            }
            getLogger().error("Failed to update database due to a failed batch update, {}. There were a total of {} FlowFiles that failed, "
                    + "{} that succeeded, and {} that were not executed and will be routed to retry; ",
                    new Object[] { e, failureCount, successCount, retryCount }, e);
            return;
        }
        // Apply default error handling and logging for other Exceptions.
        ExceptionHandler.OnError<RollbackOnFailure, FlowFileGroup> onGroupError
                = ExceptionHandler.createOnGroupError(context, session, result, REL_FAILURE, REL_RETRY);
        onGroupError = onGroupError.andThen((cl, il, rl, el) -> {
            switch (r.destination()) {
                case Failure:
                    getLogger().error("Failed to update database for {} due to {}; routing to failure",
                            new Object[] { il.getFlowFiles(), e }, e);
                    break;
                case Retry:
                    getLogger().error("Failed to update database for {} due to {}; it is possible that retrying the operation will succeed, "
                            + "so routing to retry", new Object[] { il.getFlowFiles(), e }, e);
                    break;
            }
        });
        onGroupError.apply(c, enclosure, r, e);
    });
}
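A standalone sketch that approximates the routing rule above, detached from NiFi (the BatchRouting class and Route enum are hypothetical): given the update counts from a BatchUpdateException and the batch size, classify each batched item as succeeded, failed, or not executed.
import java.sql.Statement;

final class BatchRouting {
    enum Route { SUCCESS, FAILURE, RETRY }

    static Route[] classify(int[] updateCounts, int batchSize) {
        Route[] routes = new Route[batchSize];
        boolean sawFailure = false;
        // Items the driver reported on: EXECUTE_FAILED means failure, anything else success.
        for (int i = 0; i < updateCounts.length; i++) {
            if (updateCounts[i] == Statement.EXECUTE_FAILED) {
                routes[i] = Route.FAILURE;
                sawFailure = true;
            } else {
                routes[i] = Route.SUCCESS;
            }
        }
        // Items beyond the counts were never executed, so they are retryable.
        for (int i = updateCounts.length; i < batchSize; i++)
            routes[i] = Route.RETRY;
        // If the driver stopped at the failure instead of continuing, the counts
        // array ends just before the failing item: mark that one as the failure.
        if (!sawFailure && updateCounts.length < batchSize)
            routes[updateCounts.length] = Route.FAILURE;
        return routes;
    }
}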