Use of java.sql.SQLTransactionRollbackException in project jdk8u_jdk by JetBrains.
From class SQLTransactionRollbackExceptionTests, method test7.
/**
 * Create SQLTransactionRollbackException with message and Throwable
*/
@Test
public void test7() {
    SQLTransactionRollbackException ex = new SQLTransactionRollbackException(reason, t);
    assertTrue(ex.getMessage().equals(reason) && ex.getSQLState() == null
            && cause.equals(ex.getCause().toString()) && ex.getErrorCode() == 0);
}
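For readability, here is a minimal sketch of the fixture fields the assertion above relies on. The names reason, t, and cause come from the snippet; the concrete values are assumptions chosen so the assertion holds, not the actual jdk8u_jdk test fixture:

// Hypothetical fixture (assumption): values chosen so test7's assertion passes.
private final String reason = "reason";
private final Throwable t = new Throwable("cause");
// test7 compares getCause().toString() against this string.
private final String cause = t.toString();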
Use of java.sql.SQLTransactionRollbackException in project jdk8u_jdk by JetBrains.
From class SQLTransactionRollbackExceptionTests, method test1.
/**
* Create SQLTransactionRollbackException with no-arg constructor
*/
@Test
public void test1() {
    SQLTransactionRollbackException ex = new SQLTransactionRollbackException();
    assertTrue(ex.getMessage() == null && ex.getSQLState() == null
            && ex.getCause() == null && ex.getErrorCode() == 0);
}
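For reference, SQLTransactionRollbackException sits under SQLTransientException in the JDBC exception hierarchy, which is why the retry loops in the snippets below treat it as a transient, retryable failure. A quick check in the same assertion style as these tests:

// Hierarchy: SQLException -> SQLTransientException -> SQLTransactionRollbackException
SQLTransactionRollbackException ex = new SQLTransactionRollbackException();
assertTrue(ex instanceof java.sql.SQLTransientException);
assertTrue(ex instanceof java.sql.SQLException);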
Use of java.sql.SQLTransactionRollbackException in project cloudstack by apache.
From class VmRulesetLogDaoImpl, method executeWithRetryOnDeadlock.
private int executeWithRetryOnDeadlock(TransactionLegacy txn, String pstmt, List<Long> vmIds) throws SQLException {
    int numUpdated = 0;
    final int maxTries = 3;
    for (int i = 0; i < maxTries; i++) {
        try {
            PreparedStatement stmtInsert = txn.prepareAutoCloseStatement(pstmt);
            int argIndex = 1;
            for (Long vmId : vmIds) {
                stmtInsert.setLong(argIndex++, vmId);
            }
            numUpdated = stmtInsert.executeUpdate();
            break; // success, no further retries needed
        } catch (SQLTransactionRollbackException e1) {
            if (i < maxTries - 1) {
                int delayMs = (i + 1) * 1000;
                s_logger.debug("Caught a deadlock exception while inserting security group rule log, retrying in " + delayMs + " ms");
                try {
                    Thread.sleep(delayMs);
                } catch (InterruptedException ie) {
                    s_logger.debug("[ignored] interrupted while inserting security group rule log.");
                }
            } else {
                s_logger.warn("Caught another deadlock exception while retrying inserting security group rule log, giving up");
            }
        }
    }
    if (s_logger.isTraceEnabled()) {
        s_logger.trace("Inserted or updated " + numUpdated + " rows");
    }
    return numUpdated;
}
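A hedged sketch of how a caller might build the parameterized statement this helper expects, with one placeholder per VM id. The SQL text and table name below are illustrative assumptions, not the actual cloudstack schema:

// Illustrative only: the real statement text lives elsewhere in VmRulesetLogDaoImpl.
List<Long> vmIds = Arrays.asList(1L, 2L, 3L);
StringBuilder sql = new StringBuilder(
        "UPDATE op_vm_ruleset_log SET logsequence = logsequence + 1 WHERE vm_id IN (");
for (int i = 0; i < vmIds.size(); i++) {
    sql.append(i == 0 ? "?" : ", ?"); // one placeholder per id
}
sql.append(")");
int updated = executeWithRetryOnDeadlock(txn, sql.toString(), vmIds);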
Use of java.sql.SQLTransactionRollbackException in project hive by apache.
From class TxnDbUtil, method prepDb (Configuration-taking variant).
public static void prepDb(Configuration conf) throws Exception {
  // This is a bogus hack because it copies the contents of the SQL file
  // intended for creating derby databases, and thus will inexorably get
  // out of date with it. I'm open to any suggestions on how to make this
  // read the file in a build friendly way.
  Connection conn = null;
  Statement stmt = null;
  try {
    conn = getConnection(conf);
    stmt = conn.createStatement();
    stmt.execute("CREATE TABLE TXNS (" + " TXN_ID bigint PRIMARY KEY," + " TXN_STATE char(1) NOT NULL," + " TXN_STARTED bigint NOT NULL," + " TXN_LAST_HEARTBEAT bigint NOT NULL," + " TXN_USER varchar(128) NOT NULL," + " TXN_HOST varchar(128) NOT NULL)");
    stmt.execute("CREATE TABLE TXN_COMPONENTS (" + " TC_TXNID bigint REFERENCES TXNS (TXN_ID)," + " TC_DATABASE varchar(128) NOT NULL," + " TC_TABLE varchar(128)," + " TC_PARTITION varchar(767)," + " TC_OPERATION_TYPE char(1) NOT NULL," + " TC_WRITEID bigint)");
    stmt.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" + " CTC_TXNID bigint," + " CTC_DATABASE varchar(128) NOT NULL," + " CTC_TABLE varchar(128)," + " CTC_PARTITION varchar(767)," + " CTC_ID bigint GENERATED ALWAYS AS IDENTITY (START WITH 1, INCREMENT BY 1) NOT NULL," + " CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL," + " CTC_WRITEID bigint)");
    stmt.execute("CREATE TABLE NEXT_TXN_ID (" + " NTXN_NEXT bigint NOT NULL)");
    stmt.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
    stmt.execute("CREATE TABLE TXN_TO_WRITE_ID (" + " T2W_TXNID bigint NOT NULL," + " T2W_DATABASE varchar(128) NOT NULL," + " T2W_TABLE varchar(256) NOT NULL," + " T2W_WRITEID bigint NOT NULL)");
    stmt.execute("CREATE TABLE NEXT_WRITE_ID (" + " NWI_DATABASE varchar(128) NOT NULL," + " NWI_TABLE varchar(256) NOT NULL," + " NWI_NEXT bigint NOT NULL)");
    stmt.execute("CREATE TABLE HIVE_LOCKS (" + " HL_LOCK_EXT_ID bigint NOT NULL," + " HL_LOCK_INT_ID bigint NOT NULL," + " HL_TXNID bigint NOT NULL," + " HL_DB varchar(128) NOT NULL," + " HL_TABLE varchar(128)," + " HL_PARTITION varchar(767)," + " HL_LOCK_STATE char(1) NOT NULL," + " HL_LOCK_TYPE char(1) NOT NULL," + " HL_LAST_HEARTBEAT bigint NOT NULL," + " HL_ACQUIRED_AT bigint," + " HL_USER varchar(128) NOT NULL," + " HL_HOST varchar(128) NOT NULL," + " HL_HEARTBEAT_COUNT integer," + " HL_AGENT_INFO varchar(128)," + " HL_BLOCKEDBY_EXT_ID bigint," + " HL_BLOCKEDBY_INT_ID bigint," + " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
    stmt.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
    stmt.execute("CREATE TABLE NEXT_LOCK_ID (" + " NL_NEXT bigint NOT NULL)");
    stmt.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
    stmt.execute("CREATE TABLE COMPACTION_QUEUE (" + " CQ_ID bigint PRIMARY KEY," + " CQ_DATABASE varchar(128) NOT NULL," + " CQ_TABLE varchar(128) NOT NULL," + " CQ_PARTITION varchar(767)," + " CQ_STATE char(1) NOT NULL," + " CQ_TYPE char(1) NOT NULL," + " CQ_TBLPROPERTIES varchar(2048)," + " CQ_WORKER_ID varchar(128)," + " CQ_START bigint," + " CQ_RUN_AS varchar(128)," + " CQ_HIGHEST_WRITE_ID bigint," + " CQ_META_INFO varchar(2048) for bit data," + " CQ_HADOOP_JOB_ID varchar(32))");
    stmt.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
    stmt.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
    stmt.execute("CREATE TABLE COMPLETED_COMPACTIONS (" + " CC_ID bigint PRIMARY KEY," + " CC_DATABASE varchar(128) NOT NULL," + " CC_TABLE varchar(128) NOT NULL," + " CC_PARTITION varchar(767)," + " CC_STATE char(1) NOT NULL," + " CC_TYPE char(1) NOT NULL," + " CC_TBLPROPERTIES varchar(2048)," + " CC_WORKER_ID varchar(128)," + " CC_START bigint," + " CC_END bigint," + " CC_RUN_AS varchar(128)," + " CC_HIGHEST_WRITE_ID bigint," + " CC_META_INFO varchar(2048) for bit data," + " CC_HADOOP_JOB_ID varchar(32))");
    stmt.execute("CREATE TABLE AUX_TABLE (" + " MT_KEY1 varchar(128) NOT NULL," + " MT_KEY2 bigint NOT NULL," + " MT_COMMENT varchar(255)," + " PRIMARY KEY(MT_KEY1, MT_KEY2))");
    stmt.execute("CREATE TABLE WRITE_SET (" + " WS_DATABASE varchar(128) NOT NULL," + " WS_TABLE varchar(128) NOT NULL," + " WS_PARTITION varchar(767)," + " WS_TXNID bigint NOT NULL," + " WS_COMMIT_ID bigint NOT NULL," + " WS_OPERATION_TYPE char(1) NOT NULL)");
  } catch (SQLException e) {
    // conn is still null if getConnection(conf) itself failed
    if (conn != null) {
      try {
        conn.rollback();
      } catch (SQLException re) {
        LOG.error("Error rolling back: " + re.getMessage());
      }
    }
    // Another thread might have already created these tables.
    if (e.getMessage() != null && e.getMessage().contains("already exists")) {
      LOG.info("Txn tables already exist, returning");
      return;
    }
    // This might be a deadlock; if so, retry.
    if (e instanceof SQLTransactionRollbackException && deadlockCnt++ < 5) {
      LOG.warn("Caught deadlock, retrying db creation");
      prepDb(conf);
    } else {
      throw e;
    }
  } finally {
    deadlockCnt = 0;
    closeResources(conn, stmt, null);
  }
}
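The catch block above retries by recursing into prepDb while the class-level deadlockCnt counter caps the attempts at five. A self-contained sketch of the same retry-on-SQLTransactionRollbackException idiom written as a loop instead of recursion; this is generic JDBC, not Hive code, and getConnection() here stands in for any assumed connection supplier:

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.SQLTransactionRollbackException;
import java.sql.Statement;

// Minimal sketch: retry a DDL statement when the driver reports a rollback.
static void createTablesWithRetry(int maxAttempts) throws SQLException {
    for (int attempt = 1; ; attempt++) {
        try (Connection conn = getConnection(); // assumed connection supplier
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE DEMO (ID bigint PRIMARY KEY)");
            return; // success
        } catch (SQLTransactionRollbackException e) {
            // The driver rolled the transaction back (e.g. a deadlock); retry up to the cap.
            if (attempt >= maxAttempts) {
                throw e;
            }
        }
    }
}

Keeping the attempt counter local, as here, avoids the shared mutable state that the recursive version has to reset in its finally block.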
Use of java.sql.SQLTransactionRollbackException in project hive by apache.
From class TxnDbUtil, method prepDb (no-argument variant).
public static void prepDb() throws Exception {
  // This is a bogus hack because it copies the contents of the SQL file
  // intended for creating derby databases, and thus will inexorably get
  // out of date with it. I'm open to any suggestions on how to make this
  // read the file in a build friendly way.
  Connection conn = null;
  Statement stmt = null;
  try {
    conn = getConnection();
    stmt = conn.createStatement();
    stmt.execute("CREATE TABLE TXNS (" + " TXN_ID bigint PRIMARY KEY," + " TXN_STATE char(1) NOT NULL," + " TXN_STARTED bigint NOT NULL," + " TXN_LAST_HEARTBEAT bigint NOT NULL," + " TXN_USER varchar(128) NOT NULL," + " TXN_HOST varchar(128) NOT NULL)");
    stmt.execute("CREATE TABLE TXN_COMPONENTS (" + " TC_TXNID bigint REFERENCES TXNS (TXN_ID)," + " TC_DATABASE varchar(128) NOT NULL," + " TC_TABLE varchar(128)," + " TC_PARTITION varchar(767)," + " TC_OPERATION_TYPE char(1) NOT NULL)");
    stmt.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" + " CTC_TXNID bigint," + " CTC_DATABASE varchar(128) NOT NULL," + " CTC_TABLE varchar(128)," + " CTC_PARTITION varchar(767))");
    stmt.execute("CREATE TABLE NEXT_TXN_ID (" + " NTXN_NEXT bigint NOT NULL)");
    stmt.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
    stmt.execute("CREATE TABLE HIVE_LOCKS (" + " HL_LOCK_EXT_ID bigint NOT NULL," + " HL_LOCK_INT_ID bigint NOT NULL," + " HL_TXNID bigint," + " HL_DB varchar(128) NOT NULL," + " HL_TABLE varchar(128)," + " HL_PARTITION varchar(767)," + " HL_LOCK_STATE char(1) NOT NULL," + " HL_LOCK_TYPE char(1) NOT NULL," + " HL_LAST_HEARTBEAT bigint NOT NULL," + " HL_ACQUIRED_AT bigint," + " HL_USER varchar(128) NOT NULL," + " HL_HOST varchar(128) NOT NULL," + " HL_HEARTBEAT_COUNT integer," + " HL_AGENT_INFO varchar(128)," + " HL_BLOCKEDBY_EXT_ID bigint," + " HL_BLOCKEDBY_INT_ID bigint," + " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
    stmt.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
    stmt.execute("CREATE TABLE NEXT_LOCK_ID (" + " NL_NEXT bigint NOT NULL)");
    stmt.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
    stmt.execute("CREATE TABLE COMPACTION_QUEUE (" + " CQ_ID bigint PRIMARY KEY," + " CQ_DATABASE varchar(128) NOT NULL," + " CQ_TABLE varchar(128) NOT NULL," + " CQ_PARTITION varchar(767)," + " CQ_STATE char(1) NOT NULL," + " CQ_TYPE char(1) NOT NULL," + " CQ_TBLPROPERTIES varchar(2048)," + " CQ_WORKER_ID varchar(128)," + " CQ_START bigint," + " CQ_RUN_AS varchar(128)," + " CQ_HIGHEST_TXN_ID bigint," + " CQ_META_INFO varchar(2048) for bit data," + " CQ_HADOOP_JOB_ID varchar(32))");
    stmt.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
    stmt.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
    stmt.execute("CREATE TABLE COMPLETED_COMPACTIONS (" + " CC_ID bigint PRIMARY KEY," + " CC_DATABASE varchar(128) NOT NULL," + " CC_TABLE varchar(128) NOT NULL," + " CC_PARTITION varchar(767)," + " CC_STATE char(1) NOT NULL," + " CC_TYPE char(1) NOT NULL," + " CC_TBLPROPERTIES varchar(2048)," + " CC_WORKER_ID varchar(128)," + " CC_START bigint," + " CC_END bigint," + " CC_RUN_AS varchar(128)," + " CC_HIGHEST_TXN_ID bigint," + " CC_META_INFO varchar(2048) for bit data," + " CC_HADOOP_JOB_ID varchar(32))");
    stmt.execute("CREATE TABLE AUX_TABLE (" + " MT_KEY1 varchar(128) NOT NULL," + " MT_KEY2 bigint NOT NULL," + " MT_COMMENT varchar(255)," + " PRIMARY KEY(MT_KEY1, MT_KEY2))");
    stmt.execute("CREATE TABLE WRITE_SET (" + " WS_DATABASE varchar(128) NOT NULL," + " WS_TABLE varchar(128) NOT NULL," + " WS_PARTITION varchar(767)," + " WS_TXNID bigint NOT NULL," + " WS_COMMIT_ID bigint NOT NULL," + " WS_OPERATION_TYPE char(1) NOT NULL)");
  } catch (SQLException e) {
    // conn is still null if getConnection() itself failed
    if (conn != null) {
      try {
        conn.rollback();
      } catch (SQLException re) {
        LOG.error("Error rolling back: " + re.getMessage());
      }
    }
    // This might be a deadlock; if so, retry.
    if (e instanceof SQLTransactionRollbackException && deadlockCnt++ < 5) {
      LOG.warn("Caught deadlock, retrying db creation");
      prepDb();
    } else {
      throw e;
    }
  } finally {
    deadlockCnt = 0;
    closeResources(conn, stmt, null);
  }
}
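Per the JDBC specification, drivers throw SQLTransactionRollbackException when the failure's SQLState class is "40" (transaction rollback); Derby, which this utility targets, reports deadlocks as SQLState 40001. When a driver does not perform that mapping, an equivalent check can inspect the SQLState directly. A small sketch:

// Fallback check for drivers that surface deadlocks as a plain SQLException:
// SQLState class "40" denotes a transaction rollback (40001 = deadlock on Derby).
static boolean isTransactionRollback(java.sql.SQLException e) {
    String state = e.getSQLState();
    return e instanceof java.sql.SQLTransactionRollbackException
            || (state != null && state.startsWith("40"));
}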