Use of java.sql.BatchUpdateException in the project mssql-jdbc by Microsoft:
the method getResultSetForForeignKeyInformation of the class IntColumnIdentityFilter.
/**
 * The original sp_fkeys stored procedure does not give the required values from the JDBC specification. This method creates 2 temporary
 * tables, fills the first with the raw sp_fkeys output, joins it against sys.foreign_keys, and rewrites the UPDATE_RULE / DELETE_RULE
 * values so the returned result set matches what DatabaseMetaData.getImportedKeys/getExportedKeys callers expect.
 *
 * @param fkeysRS the result set produced by sp_fkeys; fully consumed and closed by this method
 * @param cat the requested catalog name (not referenced in this method — presumably the caller has already switched database context; TODO confirm)
 * @return a result set with the JDBC-specified foreign-key columns and corrected rule values
 * @throws SQLServerException if a database access error occurs
 * @throws SQLTimeoutException if a statement exceeds its query timeout
 */
private ResultSet getResultSetForForeignKeyInformation(SQLServerResultSet fkeysRS, String cat) throws SQLServerException, SQLTimeoutException {
    // Random suffix keeps the temp table names unique if this method runs more than once on the same session.
    UUID uuid = UUID.randomUUID();
    String fkeys_results_tableName = "[#fkeys_results" + uuid + "]";
    String foreign_keys_combined_tableName = "[#foreign_keys_combined_results" + uuid + "]";
    String sys_foreign_keys = "sys.foreign_keys";
    String fkeys_results_column_definition = "PKTABLE_QUALIFIER sysname, PKTABLE_OWNER sysname, PKTABLE_NAME sysname, PKCOLUMN_NAME sysname, FKTABLE_QUALIFIER sysname, FKTABLE_OWNER sysname, FKTABLE_NAME sysname, FKCOLUMN_NAME sysname, KEY_SEQ smallint, UPDATE_RULE smallint, DELETE_RULE smallint, FK_NAME sysname, PK_NAME sysname, DEFERRABILITY smallint";
    String foreign_keys_combined_column_definition = "name sysname, delete_referential_action_desc nvarchar(60), update_referential_action_desc nvarchar(60)," + fkeys_results_column_definition;
    // cannot close this statement, otherwise the returned resultset would be closed too.
    SQLServerStatement stmt = (SQLServerStatement) connection.createStatement();
    // create a temp table #fkeys_results that has the same column definition as the result of sp_fkeys
    stmt.execute("create table " + fkeys_results_tableName + " (" + fkeys_results_column_definition + ")");
    // Copy the sp_fkeys rows into the temp table.
    // Fixed: use prepareStatement (this is a plain INSERT, not a procedure call) and add the previously
    // missing space so the generated SQL reads "...] values(..." rather than "...]values(...".
    SQLServerPreparedStatement ps = (SQLServerPreparedStatement) connection.prepareStatement("insert into " + fkeys_results_tableName + " values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)");
    try {
        while (fkeysRS.next()) {
            ps.setString(1, fkeysRS.getString(1));
            ps.setString(2, fkeysRS.getString(2));
            ps.setString(3, fkeysRS.getString(3));
            ps.setString(4, fkeysRS.getString(4));
            ps.setString(5, fkeysRS.getString(5));
            ps.setString(6, fkeysRS.getString(6));
            ps.setString(7, fkeysRS.getString(7));
            ps.setString(8, fkeysRS.getString(8));
            ps.setInt(9, fkeysRS.getInt(9));
            ps.setInt(10, fkeysRS.getInt(10));
            ps.setInt(11, fkeysRS.getInt(11));
            ps.setString(12, fkeysRS.getString(12));
            ps.setString(13, fkeysRS.getString(13));
            ps.setInt(14, fkeysRS.getInt(14));
            ps.execute();
        }
    } finally {
        // always release the insert statement and the source result set, even if copying fails
        if (null != ps) {
            ps.close();
        }
        if (null != fkeysRS) {
            fkeysRS.close();
        }
    }
    /**
     * create another temp table #foreign_keys_combined_results that has 3 columns from sys.foreign_keys
     * (name, delete_referential_action_desc, update_referential_action_desc) followed by the same columns
     * as #fkeys_results
     */
    stmt.addBatch("create table " + foreign_keys_combined_tableName + " (" + foreign_keys_combined_column_definition + ")");
    /**
     * right join the content of sys.foreign_keys and the content of #fkeys_results based on foreign key name and save the result to the new
     * temp table #foreign_keys_combined_results (right join so sp_fkeys rows survive even without a sys.foreign_keys match)
     */
    stmt.addBatch("insert into " + foreign_keys_combined_tableName + " select " + sys_foreign_keys + ".name, " + sys_foreign_keys + ".delete_referential_action_desc, " + sys_foreign_keys + ".update_referential_action_desc," + fkeys_results_tableName + ".PKTABLE_QUALIFIER," + fkeys_results_tableName + ".PKTABLE_OWNER," + fkeys_results_tableName + ".PKTABLE_NAME," + fkeys_results_tableName + ".PKCOLUMN_NAME," + fkeys_results_tableName + ".FKTABLE_QUALIFIER," + fkeys_results_tableName + ".FKTABLE_OWNER," + fkeys_results_tableName + ".FKTABLE_NAME," + fkeys_results_tableName + ".FKCOLUMN_NAME," + fkeys_results_tableName + ".KEY_SEQ," + fkeys_results_tableName + ".UPDATE_RULE," + fkeys_results_tableName + ".DELETE_RULE," + fkeys_results_tableName + ".FK_NAME," + fkeys_results_tableName + ".PK_NAME," + fkeys_results_tableName + ".DEFERRABILITY from " + sys_foreign_keys + " right join " + fkeys_results_tableName + " on " + sys_foreign_keys + ".name=" + fkeys_results_tableName + ".FK_NAME");
    /**
     * the DELETE_RULE value and UPDATE_RULE value returned from sp_fkeys are not the same as required by the JDBC spec. Therefore, we need
     * to update those values to the JDBC-required values based on delete_referential_action_desc and update_referential_action_desc
     * returned from sys.foreign_keys:
     * No Action: 3, Cascade: 0, Set Null: 2, Set Default: 4
     *
     * Fixed: sys.foreign_keys stores the descriptor as 'CASCADE' (upper case, like the other three values); the original comparison
     * against 'Cascade' only matched under case-insensitive collations.
     */
    stmt.addBatch("update " + foreign_keys_combined_tableName + " set DELETE_RULE=3 where delete_referential_action_desc='NO_ACTION';" + "update " + foreign_keys_combined_tableName + " set DELETE_RULE=0 where delete_referential_action_desc='CASCADE';" + "update " + foreign_keys_combined_tableName + " set DELETE_RULE=2 where delete_referential_action_desc='SET_NULL';" + "update " + foreign_keys_combined_tableName + " set DELETE_RULE=4 where delete_referential_action_desc='SET_DEFAULT';" + "update " + foreign_keys_combined_tableName + " set UPDATE_RULE=3 where update_referential_action_desc='NO_ACTION';" + "update " + foreign_keys_combined_tableName + " set UPDATE_RULE=0 where update_referential_action_desc='CASCADE';" + "update " + foreign_keys_combined_tableName + " set UPDATE_RULE=2 where update_referential_action_desc='SET_NULL';" + "update " + foreign_keys_combined_tableName + " set UPDATE_RULE=4 where update_referential_action_desc='SET_DEFAULT';");
    try {
        stmt.executeBatch();
    } catch (BatchUpdateException e) {
        // surface the batch failure as the driver's own exception type, preserving SQLState and error code
        throw new SQLServerException(e.getMessage(), e.getSQLState(), e.getErrorCode(), null);
    }
    /**
     * now the #foreign_keys_combined_results table has the correct values for DELETE_RULE and UPDATE_RULE. Return the result of the table
     * with the same column definition and order as the result set returned by sp_fkeys.
     */
    return stmt.executeQuery("select PKTABLE_QUALIFIER as 'PKTABLE_CAT',PKTABLE_OWNER as 'PKTABLE_SCHEM',PKTABLE_NAME,PKCOLUMN_NAME,FKTABLE_QUALIFIER as 'FKTABLE_CAT',FKTABLE_OWNER as 'FKTABLE_SCHEM',FKTABLE_NAME,FKCOLUMN_NAME,KEY_SEQ,UPDATE_RULE,DELETE_RULE,FK_NAME,PK_NAME,DEFERRABILITY from " + foreign_keys_combined_tableName + " order by FKTABLE_QUALIFIER, FKTABLE_OWNER, FKTABLE_NAME, KEY_SEQ");
}
use of java.sql.BatchUpdateException in project mssql-jdbc by Microsoft.
the class BatchExecuteWithErrorsTest method Repro47239large.
/**
 * Regression test for the JDBC 4.2 "large" batch methods ({@code executeLargeBatch} /
 * {@code BatchUpdateException.getLargeUpdateCounts}), supported in the JDBC42 build.
 * Exercises interleaved inserts and warnings, an error before inserts, an insert followed by a
 * non-fatal error, "soft" errors from result-set-producing statements, a rollback with a date
 * conversion error, and (non-Azure only) a fatal connection-closing error.
 *
 * @throws Exception if an unexpected database error occurs
 */
@Test
@DisplayName("Regression test for using 'large' methods")
public void Repro47239large() throws Exception {
// skip entirely unless the driver was built/configured with JDBC 4.2 support
assumeTrue("JDBC42".equals(Utils.getConfiguredProperty("JDBC_Version")), "Aborting test case as JDBC version is not compatible. ");
// the DBConnection for detecting whether the server is SQL Azure or SQL Server.
con = DriverManager.getConnection(connectionString);
final String warning;
final String error;
final String severe;
if (DBConnection.isSqlAzure(con)) {
// SQL Azure will throw an exception for "raiserror WITH LOG", so the following RAISERROR statements do not use the "with log" option
warning = "RAISERROR ('raiserror level 4',4,1)";
error = "RAISERROR ('raiserror level 11',11,1)";
// On SQL Azure, raising FATAL error by RAISERROR() is not supported and there is no way to
// cut the current connection by a statement inside a SQL batch.
// Details: Although one can simulate a fatal error (that cuts the connections) by dropping the database,
// this simulation cannot be written entirely in TSQL (because it needs a new connection),
// and thus it cannot be put into a TSQL batch and it is useless here.
// So we have to skip the last scenario of this test case, i.e. "Test Severe (connection-closing) errors"
// It is worthwhile to still execute the first 5 test scenarios of this test case, in order to have best test coverage.
// this is a dummy statement that never being executed on SQL Azure
severe = "--Not executed when testing against SQL Azure";
} else {
warning = "RAISERROR ('raiserror level 4',4,1) WITH LOG";
error = "RAISERROR ('raiserror level 11',11,1) WITH LOG";
severe = "RAISERROR ('raiserror level 20',20,1) WITH LOG";
}
con.close();
long[] actualUpdateCounts;
long[] expectedUpdateCounts;
String actualExceptionText;
// SQL Server 2005 driver
Class.forName("com.microsoft.sqlserver.jdbc.SQLServerDriver");
Connection conn = DriverManager.getConnection(connectionString);
Statement stmt = conn.createStatement();
// best-effort cleanup/setup of the test table; failures here are deliberately ignored
try {
stmt.executeLargeUpdate("drop table " + tableName);
} catch (Exception ignored) {
}
try {
stmt.executeLargeUpdate("create table " + tableName + " (c1_int int, c2_varchar varchar(20), c3_date datetime, c4_int int identity(1,1) primary key)");
} catch (Exception ignored) {
}
// Scenario 1: Regular Statement batch update with interleaved inserts and warnings.
// Expected counts: 1 per insert, -2 (Statement.SUCCESS_NO_INFO) per RAISERROR warning.
expectedUpdateCounts = new long[] { 1, -2, 1, -2, 1, -2 };
Statement batchStmt = conn.createStatement();
batchStmt.addBatch(insertStmt);
batchStmt.addBatch(warning);
batchStmt.addBatch(insertStmt);
batchStmt.addBatch(warning);
batchStmt.addBatch(insertStmt);
batchStmt.addBatch(warning);
try {
actualUpdateCounts = batchStmt.executeLargeBatch();
actualExceptionText = "";
} catch (BatchUpdateException bue) {
// the JDBC 4.2 accessor must be used to retrieve long[] counts
actualUpdateCounts = bue.getLargeUpdateCounts();
actualExceptionText = bue.getMessage();
log.fine("BatchUpdateException occurred. Message:" + actualExceptionText);
} finally {
batchStmt.close();
}
log.fine("UpdateCounts:");
for (long updateCount : actualUpdateCounts) {
log.fine("" + updateCount + ",");
}
log.fine("");
assertTrue(Arrays.equals(actualUpdateCounts, expectedUpdateCounts), "Test interleaved inserts and warnings");
// Scenario 2: an error as the first batch entry; -3 is Statement.EXECUTE_FAILED
expectedUpdateCounts = new long[] { -3, 1, 1, 1 };
stmt.addBatch(error);
stmt.addBatch(insertStmt);
stmt.addBatch(insertStmt);
stmt.addBatch(insertStmt);
try {
actualUpdateCounts = stmt.executeLargeBatch();
actualExceptionText = "";
} catch (BatchUpdateException bue) {
actualUpdateCounts = bue.getLargeUpdateCounts();
actualExceptionText = bue.getMessage();
}
log.fine("UpdateCounts:");
for (long updateCount : actualUpdateCounts) {
log.fine("" + updateCount + ",");
}
log.fine("");
assertTrue(Arrays.equals(actualUpdateCounts, expectedUpdateCounts), "Test error followed by inserts");
// Scenario 3: insert followed by a non-fatal error (regression for defect 50280)
// 50280
expectedUpdateCounts = new long[] { 1, -3 };
stmt.addBatch(insertStmt);
stmt.addBatch(error16);
try {
actualUpdateCounts = stmt.executeLargeBatch();
actualExceptionText = "";
} catch (BatchUpdateException bue) {
actualUpdateCounts = bue.getLargeUpdateCounts();
actualExceptionText = bue.getMessage();
}
for (long updateCount : actualUpdateCounts) {
log.fine("" + updateCount + ",");
}
log.fine("");
assertTrue(Arrays.equals(actualUpdateCounts, expectedUpdateCounts), "Test insert followed by non-fatal error (50280)");
// Scenario 4: "soft" errors — SELECT statements in a batch generate result sets, which must fail
// Test "soft" errors
conn.setAutoCommit(false);
stmt.addBatch(select);
stmt.addBatch(insertStmt);
stmt.addBatch(select);
stmt.addBatch(insertStmt);
try {
stmt.executeLargeBatch();
// executeLargeBatch must throw because the SELECTs produce result sets
assertEquals(false, true, "Soft error test: executeLargeBatch unexpectedly succeeded");
} catch (BatchUpdateException bue) {
assertEquals("A result set was generated for update.", bue.getMessage(), "Soft error test: wrong error message in BatchUpdateException");
assertEquals(Arrays.equals(bue.getLargeUpdateCounts(), new long[] { -3, 1, -3, 1 }), true, "Soft error test: wrong update counts in BatchUpdateException");
}
conn.rollback();
// Scenario 5: Defect 128801: Rollback (with conversion error) should throw SQLException
stmt.addBatch(dateConversionError);
stmt.addBatch(insertStmt);
stmt.addBatch(insertStmt);
stmt.addBatch(insertStmt);
try {
stmt.executeLargeBatch();
} catch (BatchUpdateException bue) {
assertThat(bue.getMessage(), containsString("Syntax error converting date"));
} catch (SQLException e) {
// newer servers report the conversion failure with different wording
assertThat(e.getMessage(), containsString("Conversion failed when converting date"));
}
conn.setAutoCommit(true);
// Scenario 6 (skipped on Azure, see the comment near the top of this method):
// It is worthwhile to still execute the first 5 test scenarios of this test case, in order to have best test coverage.
if (!DBConnection.isSqlAzure(DriverManager.getConnection(connectionString))) {
// Test Severe (connection-closing) errors
stmt.addBatch(error);
stmt.addBatch(insertStmt);
stmt.addBatch(warning);
stmt.addBatch(insertStmt);
stmt.addBatch(severe);
stmt.addBatch(insertStmt);
stmt.addBatch(insertStmt);
try {
stmt.executeLargeBatch();
assertEquals(false, true, "Test fatal errors batch execution succeeded (should have failed)");
} catch (BatchUpdateException bue) {
// a severity-20 error must surface as a plain SQLException, not a BatchUpdateException
assertEquals(false, true, "Test fatal errors returned BatchUpdateException rather than SQLException");
} catch (SQLException e) {
actualExceptionText = e.getMessage();
if (actualExceptionText.endsWith("reset")) {
assertTrue(actualExceptionText.equalsIgnoreCase("Connection reset"), "Test fatal errors");
} else {
assertTrue(actualExceptionText.equalsIgnoreCase("raiserror level 20"), "Test fatal errors");
}
}
}
// best-effort cleanup; the table may already be gone if the connection was killed above
try {
stmt.executeLargeUpdate("drop table " + tableName);
} catch (Exception ignored) {
}
stmt.close();
conn.close();
}
use of java.sql.BatchUpdateException in project openmrs-core by openmrs.
the class AddConceptMapTypesChangeset method runBatchInsert.
/**
 * Inserts the visible and hidden concept map types as a single JDBC batch, committing on success
 * and rolling back on failure.
 *
 * @param connection The database connection
 * @throws CustomChangeException if the batch fails or a database error occurs, so the changeset is marked as failed
 */
private void runBatchInsert(JdbcConnection connection) throws CustomChangeException {
    PreparedStatement pStmt = null;
    try {
        connection.setAutoCommit(false);
        Integer userId = DatabaseUpdater.getAuthenticatedUserId();
        // if we have no authenticated user (for API users), fall back to the earliest user
        if (userId == null || userId < 1) {
            userId = getInt(connection, "SELECT min(user_id) FROM users");
            // leave it as null rather than setting it to 0
            if (userId < 1) {
                userId = null;
            }
        }
        // userId is concatenated rather than bound so that a null value renders literally as SQL NULL
        pStmt = connection.prepareStatement("INSERT INTO concept_map_type " + "(concept_map_type_id, name, is_hidden, retired, creator, date_created, uuid) VALUES(?,?,?,?," + userId + ",?,?)");
        // ids are assigned sequentially: visible map types first, then hidden ones
        int mapTypeId = 1;
        for (String map : visibleConceptMapTypeArray) {
            addMapTypeToBatch(pStmt, map, mapTypeId, false);
            mapTypeId++;
        }
        for (String map : hiddenConceptMapTypeArray) {
            addMapTypeToBatch(pStmt, map, mapTypeId, true);
            mapTypeId++;
        }
        try {
            int[] updateCounts = pStmt.executeBatch();
            for (int updateCount : updateCounts) {
                if (updateCount > -1) {
                    log.debug("Successfully executed: updateCount=" + updateCount);
                } else if (updateCount == Statement.SUCCESS_NO_INFO) {
                    log.debug("Successfully executed; No Success info");
                } else if (updateCount == Statement.EXECUTE_FAILED) {
                    log.warn("Failed to execute insert");
                }
            }
            log.debug("Committing inserts...");
            connection.commit();
        } catch (BatchUpdateException be) {
            log.warn("Error generated while processing batch insert", be);
            int[] updateCounts = be.getUpdateCounts();
            for (int updateCount : updateCounts) {
                if (updateCount > -1) {
                    log.warn("Executed with exception: insertCount=" + updateCount);
                } else if (updateCount == Statement.SUCCESS_NO_INFO) {
                    log.warn("Executed with exception; No Success info");
                } else if (updateCount == Statement.EXECUTE_FAILED) {
                    log.warn("Failed to execute insert with exception");
                }
            }
            try {
                log.debug("Rolling back batch", be);
                connection.rollback();
            } catch (Exception rbe) {
                // Fixed: log the rollback failure itself (rbe), not the original batch exception again
                log.warn("Error generated while rolling back batch insert", rbe);
            }
            // marks the changeset as a failed one
            throw new CustomChangeException("Failed to insert one or more concept map types", be);
        }
    } catch (DatabaseException | SQLException e) {
        throw new CustomChangeException("Failed to insert one or more concept map types:", e);
    } finally {
        // reset to auto commit mode
        try {
            connection.setAutoCommit(true);
        } catch (DatabaseException e) {
            log.warn("Failed to reset auto commit back to true", e);
        }
        if (pStmt != null) {
            try {
                pStmt.close();
            } catch (SQLException e) {
                log.warn("Failed to close the prepared statement object");
            }
        }
    }
}

/**
 * Binds one "mapType|uuid" entry to the shared insert statement and adds it to the batch.
 *
 * @param pStmt the prepared insert statement (creator already baked into the SQL)
 * @param map a pipe-delimited "name|uuid" entry
 * @param mapTypeId the concept_map_type_id to assign
 * @param hidden whether the map type is hidden
 * @throws SQLException if binding any parameter fails
 */
private void addMapTypeToBatch(PreparedStatement pStmt, String map, int mapTypeId, boolean hidden) throws SQLException {
    String[] mapTypeAndUuid = map.trim().split("\\|");
    String mapType = mapTypeAndUuid[0];
    String mapUuid = mapTypeAndUuid[1];
    pStmt.setInt(1, mapTypeId);
    pStmt.setString(2, mapType);
    pStmt.setBoolean(3, hidden);
    pStmt.setBoolean(4, false);
    pStmt.setDate(5, new Date(Calendar.getInstance().getTimeInMillis()));
    pStmt.setString(6, mapUuid);
    pStmt.addBatch();
}
use of java.sql.BatchUpdateException in project openmrs-core by openmrs.
the class DuplicateLocationAttributeTypeNameChangeSet method execute.
/**
 * Finds LocationAttributeType rows that share a name and renames all but one of each group by
 * appending a numeric suffix ("name_1", "name_2", ...), probing the table to ensure the new name
 * is itself unused.
 *
 * @param database the Liquibase database whose connection is used for the updates
 * @throws CustomChangeException if detection or renaming fails
 */
@Override
public void execute(Database database) throws CustomChangeException {
    JdbcConnection connection = (JdbcConnection) database.getConnection();
    Map<String, HashSet<Integer>> duplicates = new HashMap<>();
    Statement stmt = null;
    PreparedStatement pStmt = null;
    ResultSet rs = null;
    boolean autoCommit = true;
    try {
        // set auto commit mode to false for UPDATE action; remember the previous mode to restore it later
        autoCommit = connection.getAutoCommit();
        connection.setAutoCommit(false);
        stmt = connection.createStatement();
        rs = stmt.executeQuery("SELECT * FROM location_attribute_type " + "INNER JOIN (SELECT name FROM location_attribute_type GROUP BY name HAVING count(name) > 1) " + "dup ON location_attribute_type.name = dup.name");
        // group the ids of every row that participates in a duplicated name
        while (rs.next()) {
            Integer id = rs.getInt("location_attribute_type_id");
            String name = rs.getString("name");
            duplicates.computeIfAbsent(name, k -> new HashSet<>()).add(id);
        }
        for (Map.Entry<String, HashSet<Integer>> entry : duplicates.entrySet()) {
            List<Integer> duplicateNames = new ArrayList<>(entry.getValue());
            int duplicateNameId = 1;
            // keep the first id's name as-is; rename every subsequent duplicate
            for (int i = 1; i < duplicateNames.size(); i++) {
                String newName = entry.getKey() + "_" + duplicateNameId;
                boolean duplicateName;
                // NOTE(review): this connection is obtained per iteration and never closed — looks like a leak; TODO confirm ownership
                Connection con = DatabaseUpdater.getConnection();
                do {
                    // probe until the candidate name is unused.
                    // NOTE(review): newName is concatenated into the SQL; the values derive from existing
                    // column data, but a parameterized query would be safer if DatabaseUtil supports it.
                    String sqlValidatorString = "select * from location_attribute_type where name = '" + newName + "'";
                    List<List<Object>> duplicateResult = DatabaseUtil.executeSQL(con, sqlValidatorString, true);
                    if (!duplicateResult.isEmpty()) {
                        duplicateNameId += 1;
                        newName = entry.getKey() + "_" + duplicateNameId;
                        duplicateName = true;
                    } else {
                        duplicateName = false;
                    }
                } while (duplicateName);
                // Fixed: removed the redundant second setString(1, newName) and the dead
                // "if (!duplicateResult.isEmpty())" branch — the probe loop above guarantees emptiness.
                pStmt = connection.prepareStatement("update location_attribute_type set name = ?, changed_by = ?, date_changed = ? where location_attribute_type_id = ?");
                pStmt.setString(1, newName);
                pStmt.setInt(2, DatabaseUpdater.getAuthenticatedUserId());
                pStmt.setDate(3, new Date(Calendar.getInstance().getTimeInMillis()));
                pStmt.setInt(4, duplicateNames.get(i));
                duplicateNameId += 1;
                pStmt.executeUpdate();
            }
        }
    } catch (BatchUpdateException e) {
        log.warn("Error generated while processing batch insert", e);
        try {
            log.debug("Rolling back batch", e);
            connection.rollback();
        } catch (Exception rbe) {
            // log the rollback failure itself, not the original exception again
            log.warn("Error generated while rolling back batch insert", rbe);
        }
        // marks the changeset as a failed one
        throw new CustomChangeException("Failed to update one or more duplicate LocationAttributeType names", e);
    } catch (Exception e) {
        throw new CustomChangeException("Error while updating duplicate LocationAttributeType object names", e);
    } finally {
        // commit the renames and restore the original auto-commit mode
        try {
            connection.commit();
            connection.setAutoCommit(autoCommit);
        } catch (DatabaseException e) {
            log.warn("Failed to reset auto commit back to true", e);
        }
        if (rs != null) {
            try {
                rs.close();
            } catch (SQLException e) {
                log.warn("Failed to close the resultset object");
            }
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException e) {
                log.warn("Failed to close the select statement used to identify duplicate LocationAttributeType object names");
            }
        }
        if (pStmt != null) {
            try {
                pStmt.close();
            } catch (SQLException e) {
                log.warn("Failed to close the prepared statement used to update duplicate LocationAttributeType object names");
            }
        }
    }
}
use of java.sql.BatchUpdateException in project h2database by h2database.
the class TestCrashAPI method printIfBad.
/**
 * Decides whether a throwable caught during randomized API calls is worth reporting.
 * Known-benign exception types are ignored; SQLExceptions are triaged by error code;
 * anything else is printed as a genuine failure.
 */
private void printIfBad(int seed, int id, int objectId, Throwable t) {
    // Expected exception types from random API abuse — not errors, ignore them.
    // (SQLClientInfoException is matched by class name to also cover driver-specific subclasses.)
    if (t instanceof BatchUpdateException
            || t instanceof UnsupportedOperationException
            || t instanceof SQLFeatureNotSupportedException
            || t.getClass().getName().contains("SQLClientInfoException")) {
        return;
    }
    if (!(t instanceof SQLException)) {
        // any non-SQL throwable is unexpected — report it
        printError(seed, id, t);
        return;
    }
    SQLException s = (SQLException) t;
    int errorCode = s.getErrorCode();
    if (errorCode == 0 || errorCode == ErrorCode.GENERAL_ERROR_1) {
        // missing error code or General error [HY000] — both indicate a real problem
        printError(seed, id, s);
    } else if (errorCode == ErrorCode.OBJECT_CLOSED && objectId >= 0 && objects.size() > 0) {
        // TODO at least call a few more times after close - maybe
        // there is still an error
        objects.remove(objectId);
    }
}
Aggregations