Use of java.sql.BatchUpdateException in the project dbflute-core by dbflute:
the class DfXlsDataHandlingWriter, method doWriteDataTable.
// -----------------------------------------------------
// DataTable
// ---------
/**
 * Write all rows of one xls data table into its corresponding database table.
 * <p>
 * All rows share one lazily-created PreparedStatement. Unless batch update is
 * suppressed for the data directory, the inserts are executed as a single JDBC
 * batch inside an explicit transaction; when the batch fails with a
 * BatchUpdateException, the rows are re-executed one by one (batch suppressed,
 * no logging) so that the first failing row can be captured for the error
 * handling in the outer catch.
 * @param resource The resource of xls data, providing the data directory. (NotNull)
 * @param file The xls file that contains the data table. (NotNull)
 * @param dataTable The data table read from the xls file. (NotNull)
 * @return The count of rows actually handled as loaded.
 */
protected int doWriteDataTable(DfXlsDataResource resource, File file, DfDataTable dataTable) {
    final String tableDbName = dataTable.getTableDbName();
    if (dataTable.getRowSize() == 0) {
        // nothing to insert for this sheet
        _log.info("*Not found row at the table: " + tableDbName);
        return 0;
    }
    // column meta data of the target table; empty means the table was not found
    final Map<String, DfColumnMeta> columnMetaMap = getColumnMetaMap(tableDbName);
    if (columnMetaMap.isEmpty()) {
        throwTableNotFoundException(file, tableDbName);
    }
    // process before handling table
    beforeHandlingTable(tableDbName, columnMetaMap);
    checkHeaderColumnIfNeeds(resource, file, dataTable, columnMetaMap);
    final List<String> columnNameList = extractColumnNameList(dataTable);
    final String dataDirectory = resource.getDataDirectory();
    final LoggingInsertType loggingInsertType = getLoggingInsertType(dataDirectory);
    final boolean suppressBatchUpdate = isMergedSuppressBatchUpdate(resource.getDataDirectory());
    Connection conn = null;
    PreparedStatement ps = null;
    String preparedSql = null;
    SQLException retryEx = null; // exception of the first failing row on retry (if any)
    DfDataRow retryDataRow = null; // the first failing row on retry (if any)
    try {
        conn = _dataSource.getConnection();
        int loadedRowCount = 0;
        final int rowSize = dataTable.getRowSize();
        boolean existsEmptyRow = false;
        for (int i = 0; i < rowSize; i++) {
            final DfDataRow dataRow = dataTable.getRow(i);
            if (ps == null) { // lazily prepared from the first row
                final MyCreatedState myCreatedState = new MyCreatedState();
                preparedSql = myCreatedState.buildPreparedSql(dataRow);
                ps = conn.prepareStatement(preparedSql);
            }
            if (doWriteDataRow(resource, file, dataTable, dataRow // basic resources
                    , columnMetaMap // meta data
                    , conn, ps // JDBC resources
                    , loggingInsertType, suppressBatchUpdate)) { // option
                ++loadedRowCount;
                if (existsEmptyRow) {
                    // an empty row appeared before a non-empty one, i.e. in the middle of the data
                    final int emptyRowNumber = dataRow.getRowNumber() - 1;
                    throwXlsDataEmptyRowDataException(dataDirectory, file, dataTable, emptyRowNumber);
                }
            } else {
                // the row was not written (treated as empty)
                existsEmptyRow = true;
            }
        }
        if (existsEmptyRow) {
            // only trailing empty rows reach here without the exception above
            _log.info("...Skipping the terminal garbage row");
        }
        if (!suppressBatchUpdate) {
            boolean beginTransaction = false;
            boolean transactionClosed = false;
            try {
                conn.setAutoCommit(false); // transaction to retry after
                beginTransaction = true;
                ps.executeBatch();
                conn.commit();
                transactionClosed = true;
            } catch (SQLException e) {
                conn.rollback();
                transactionClosed = true;
                if (!(e instanceof BatchUpdateException)) {
                    throw e;
                }
                // re-execute row by row to locate the first failing row for the error message
                _log.info("...Retrying by suppressing batch update: " + tableDbName);
                final PreparedStatement retryPs = conn.prepareStatement(preparedSql);
                for (int i = 0; i < rowSize; i++) {
                    final DfDataRow dataRow = dataTable.getRow(i);
                    try {
                        doWriteDataRow(resource, file, dataTable, dataRow // basic resources
                                , columnMetaMap // meta data
                                , conn, retryPs // JDBC resources
                                , LoggingInsertType.NONE, true); // option (no logging and suppress batch)
                    } catch (SQLException rowEx) {
                        // keep only the first failure; reported via the outer SQLException handler
                        retryEx = rowEx;
                        retryDataRow = dataRow;
                        break;
                    }
                }
                try {
                    retryPs.close();
                } catch (SQLException ignored) {
                    // closing failure is non-fatal; the original batch exception is rethrown below
                }
                throw e; // the original batch failure is the exception to report
            } finally {
                if (!transactionClosed) {
                    conn.rollback(); // for other exceptions
                }
                if (beginTransaction) {
                    conn.setAutoCommit(true);
                }
            }
        }
        noticeLoadedRowSize(tableDbName, loadedRowCount);
        checkImplicitClassification(file, tableDbName, columnNameList);
        return loadedRowCount;
    } catch (RuntimeException e) {
        handleWriteTableFailureException(dataDirectory, file, tableDbName, e);
        return -1; // unreachable (the handler throws)
    } catch (SQLException e) {
        handleWriteTableSQLException(dataDirectory, file, dataTable, e, retryEx, retryDataRow, columnNameList);
        return -1; // unreachable (the handler throws)
    } finally {
        closeResource(conn, ps);
        // process after (finally) handling table
        finallyHandlingTable(tableDbName, columnMetaMap);
    }
}
Use of java.sql.BatchUpdateException in the project teiid by teiid:
the class TestJDBCSocketTransport, method testBatchedUpdateException.
/**
 * Verifies BatchUpdateException update-count reporting over the socket transport:
 * when the second batch entry fails, one successful count is reported; when the
 * first entry fails, no counts are reported.
 * Fix: the Statement and both PreparedStatements were never closed (resource
 * leak); they are now managed with try-with-resources. The local temporary
 * table is session-scoped, so closing the creating statement keeps it alive.
 */
@Test
public void testBatchedUpdateException() throws Exception {
    try (Statement s = conn.createStatement()) {
        s.execute("create local temporary table x (y integer, primary key (y))");
        s.addBatch("insert into x values (1)");
        s.addBatch("insert into x values (1)");
        try {
            s.executeBatch();
            fail();
        } catch (BatchUpdateException e) {
            // the first insert succeeded; the second violated the primary key
            assertEquals(1, e.getUpdateCounts()[0]);
        }
    }
    try (PreparedStatement ps = conn.prepareStatement("insert into x values (?)")) {
        ps.setInt(1, 2);
        ps.addBatch();
        ps.setInt(1, 2);
        ps.addBatch();
        try {
            ps.executeBatch();
            fail();
        } catch (BatchUpdateException e) {
            assertEquals(1, e.getUpdateCounts()[0]);
        }
    }
    // make sure no update counts are reported when there's an issue on the first item
    try (PreparedStatement ps = conn.prepareStatement("insert into x values (?)")) {
        ps.setInt(1, 2);
        ps.addBatch();
        ps.setInt(1, 2);
        ps.addBatch();
        try {
            ps.executeBatch();
            fail();
        } catch (BatchUpdateException e) {
            assertEquals(0, e.getUpdateCounts().length);
        }
    }
}
Use of java.sql.BatchUpdateException in the project ballerina by ballerina-lang:
the class AbstractSQLAction, method executeBatchUpdate.
/**
 * Execute the given SQL as a JDBC batch update — one batch entry per element of
 * {@code parameters}, or a single entry when {@code parameters} is null — and set
 * the per-entry update counts as the action's return value.
 * <p>
 * A BatchUpdateException is not surfaced as an error: the counts it carries are
 * returned instead, and entries the driver did not report are filled with
 * {@link Statement#EXECUTE_FAILED} (-3). Any other SQLException is rethrown as a
 * BallerinaException.
 * @param context The ballerina context that receives the return values.
 * @param datasource The SQL datasource providing the connection.
 * @param query The SQL text to prepare and batch.
 * @param parameters Array of parameter arrays, one per batch entry; may be null.
 */
protected void executeBatchUpdate(Context context, SQLDatasource datasource, String query, BRefValueArray parameters) {
    Connection conn = null;
    PreparedStatement stmt = null;
    int[] updatedCount;
    int paramArrayCount = 0;
    try {
        conn = datasource.getSQLConnection();
        stmt = conn.prepareStatement(query);
        // disable auto-commit so the whole batch runs in one transaction
        setConnectionAutoCommit(conn, false);
        if (parameters != null) {
            paramArrayCount = (int) parameters.size();
            for (int index = 0; index < paramArrayCount; index++) {
                BRefValueArray params = (BRefValueArray) parameters.get(index);
                createProcessedStatement(conn, stmt, params);
                stmt.addBatch();
            }
        } else {
            // no parameters: still execute the statement once as a batch of one
            stmt.addBatch();
        }
        updatedCount = stmt.executeBatch();
        conn.commit();
    } catch (BatchUpdateException e) {
        // report the failure through the returned counts instead of throwing;
        // NOTE(review): neither commit nor rollback happens on this path, so
        // restoring auto-commit in the finally block decides the fate of the
        // partially-executed batch — confirm this is the intended semantics
        updatedCount = e.getUpdateCounts();
    } catch (SQLException e) {
        throw new BallerinaException("execute batch update failed: " + e.getMessage(), e);
    } finally {
        setConnectionAutoCommit(conn, true);
        SQLDatasourceUtils.cleanupConnection(null, stmt, conn, false);
    }
    // After a command in a batch update fails to execute properly and a BatchUpdateException is thrown, the driver
    // may or may not continue to process the remaining commands in the batch. If the driver does not continue
    // processing after a failure, the array returned by the method will have -3 (EXECUTE_FAILED) for those updates.
    long[] returnedCount = new long[paramArrayCount];
    Arrays.fill(returnedCount, Statement.EXECUTE_FAILED);
    BIntArray countArray = new BIntArray(returnedCount);
    if (updatedCount != null) {
        // overwrite the EXECUTE_FAILED defaults with the counts the driver reported
        int iSize = updatedCount.length;
        for (int i = 0; i < iSize; ++i) {
            countArray.add(i, updatedCount[i]);
        }
    }
    context.setReturnValues(countArray);
}
Use of java.sql.BatchUpdateException in the project vertigo by KleeGroup:
the class AbstractTaskEngineSQL, method execute.
/**
 * {@inheritDoc}
 * <p>
 * Builds the SQL statement with its named parameters, executes it on the task's
 * connection, and records the affected row count when one is reported. JDBC
 * failures are translated through {@code handleSQLException}.
 */
@Override
public void execute() {
    final SqlConnection connection = obtainConnection();
    final SqlStatementBuilder statementBuilder = SqlStatement.builder(getSqlQuery().trim());
    setNamedParameters(statementBuilder);
    final SqlStatement sqlStatement = statementBuilder.build();
    try {
        // Execute the JDBC statement.
        final OptionalInt sqlRowcountOpt = doExecute(sqlStatement, connection);
        // Record the number of affected rows, when the execution reports one.
        sqlRowcountOpt.ifPresent(this::setRowCount);
    } catch (final BatchUpdateException sqle) {
        // Handle JDBC batch execution errors. Per the JDBC spec, getNextException()
        // may return null; fall back to the BatchUpdateException itself so the error
        // handler never receives null and no failure information is lost.
        final SQLException cause = sqle.getNextException();
        throw handleSQLException(connection, cause != null ? cause : sqle, sqlStatement.getSqlQuery());
    } catch (final SQLException sqle) {
        // Handle plain JDBC execution errors.
        throw handleSQLException(connection, sqle, sqlStatement.getSqlQuery());
    }
}
Use of java.sql.BatchUpdateException in the project mybatis-3 by mybatis:
the class BatchExecutor, method doFlushStatements.
/**
 * Executes every pending batched statement in the order it was accumulated and
 * returns one BatchResult (with update counts) per statement.
 * <p>
 * On rollback the pending batches are discarded and an empty list is returned.
 * A BatchUpdateException from any statement is wrapped in a
 * BatchExecutorException that carries the results completed so far. The finally
 * block always closes every statement and resets the batch state.
 */
@Override
public List<BatchResult> doFlushStatements(boolean isRollback) throws SQLException {
    try {
        List<BatchResult> results = new ArrayList<>();
        if (isRollback) {
            // discard the pending batches without executing them
            return Collections.emptyList();
        }
        for (int i = 0, n = statementList.size(); i < n; i++) {
            Statement stmt = statementList.get(i);
            applyTransactionTimeout(stmt);
            BatchResult batchResult = batchResultList.get(i);
            try {
                batchResult.setUpdateCounts(stmt.executeBatch());
                MappedStatement ms = batchResult.getMappedStatement();
                List<Object> parameterObjects = batchResult.getParameterObjects();
                KeyGenerator keyGenerator = ms.getKeyGenerator();
                if (Jdbc3KeyGenerator.class.equals(keyGenerator.getClass())) {
                    // JDBC-generated keys: read them back in one pass for the whole batch
                    Jdbc3KeyGenerator jdbc3KeyGenerator = (Jdbc3KeyGenerator) keyGenerator;
                    jdbc3KeyGenerator.processBatch(ms, stmt, parameterObjects);
                } else if (!NoKeyGenerator.class.equals(keyGenerator.getClass())) {
                    // issue #141
                    for (Object parameter : parameterObjects) {
                        keyGenerator.processAfter(this, ms, stmt, parameter);
                    }
                }
                // Close statement to close cursor #1109
                // (also closed again in the finally block below; NOTE(review): this
                // assumes closeStatement tolerates an already-closed statement — confirm)
                closeStatement(stmt);
            } catch (BatchUpdateException e) {
                StringBuilder message = new StringBuilder();
                message.append(batchResult.getMappedStatement().getId()).append(" (batch index #").append(i + 1).append(")").append(" failed.");
                if (i > 0) {
                    message.append(" ").append(i).append(" prior sub executor(s) completed successfully, but will be rolled back.");
                }
                // the partial results are attached so callers can see what succeeded
                throw new BatchExecutorException(message.toString(), e, results, batchResult);
            }
            results.add(batchResult);
        }
        return results;
    } finally {
        // always release every statement and reset the accumulated batch state
        for (Statement stmt : statementList) {
            closeStatement(stmt);
        }
        currentSql = null;
        statementList.clear();
        batchResultList.clear();
    }
}
Aggregations