Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse in project hive by apache.
In class TestTxnHandler, method testRecoverManyTimeouts:
@Test
public void testRecoverManyTimeouts() throws Exception {
  long timeout = txnHandler.setTimeout(1);
  try {
    txnHandler.openTxns(new OpenTxnRequest(503, "me", "localhost"));
    Thread.sleep(1000);
    txnHandler.performTimeOuts();
    GetOpenTxnsInfoResponse rsp = txnHandler.getOpenTxnsInfo();
    int numAborted = 0;
    for (TxnInfo txnInfo : rsp.getOpen_txns()) {
      assertEquals(TxnState.ABORTED, txnInfo.getState());
      numAborted++;
    }
    assertEquals(503, numAborted);
  } finally {
    txnHandler.setTimeout(timeout);
  }
}
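The test above tallies transaction states inline. The same check can be factored into a small helper built only on the getters already used here (getOpen_txns() and getState()). This is a minimal sketch; the class and method names are hypothetical and not part of Hive:

import java.util.EnumMap;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
import org.apache.hadoop.hive.metastore.api.TxnInfo;
import org.apache.hadoop.hive.metastore.api.TxnState;

public class TxnStateCounter {
  /**
   * Tallies the transactions in each state reported by getOpenTxnsInfo().
   * Note that the "open" list actually contains every transaction still
   * tracked by the metastore, i.e. both OPEN and ABORTED entries, which is
   * what the tests on this page rely on.
   */
  public static Map<TxnState, Integer> countByState(GetOpenTxnsInfoResponse rsp) {
    Map<TxnState, Integer> counts = new EnumMap<>(TxnState.class);
    for (TxnInfo txnInfo : rsp.getOpen_txns()) {
      counts.merge(txnInfo.getState(), 1, Integer::sum);
    }
    return counts;
  }
}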
Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse in project hive by apache.
In class TestTxnHandler, method testReplTimeouts:
@Test
public void testReplTimeouts() throws Exception {
  createDatabaseForReplTests("default", MetaStoreUtils.getDefaultCatalog(conf));
  long timeout = txnHandler.setTimeout(1);
  try {
    OpenTxnRequest request = new OpenTxnRequest(3, "me", "localhost");
    OpenTxnsResponse response = txnHandler.openTxns(request);
    request.setReplPolicy("default.*");
    request.setReplSrcTxnIds(response.getTxn_ids());
    request.setTxn_type(TxnType.REPL_CREATED);
    OpenTxnsResponse responseRepl = txnHandler.openTxns(request);
    Thread.sleep(1000);
    txnHandler.performTimeOuts();
    GetOpenTxnsInfoResponse rsp = txnHandler.getOpenTxnsInfo();
    int numAborted = 0;
    int numOpen = 0;
    for (TxnInfo txnInfo : rsp.getOpen_txns()) {
      if (TxnState.ABORTED == txnInfo.getState()) {
        assertTrue(response.getTxn_ids().contains(txnInfo.getId()));
        numAborted++;
      }
      if (TxnState.OPEN == txnInfo.getState()) {
        assertTrue(responseRepl.getTxn_ids().contains(txnInfo.getId()));
        numOpen++;
      }
    }
    assertEquals(3, numAborted);
    assertEquals(3, numOpen);
  } finally {
    txnHandler.setTimeout(timeout);
  }
}
Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse in project hive by apache.
In class TestDbTxnManager, method testExceptions:
@Test
public void testExceptions() throws Exception {
  addPartitionOutput(newTable(true), WriteEntity.WriteType.INSERT);
  QueryPlan qp = new MockQueryPlan(this, HiveOperation.QUERY);
  ((DbTxnManager) txnMgr).openTxn(ctx, "NicholasII", TxnType.DEFAULT, HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS) * 2);
  Thread.sleep(HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS));
  runReaper();
  LockException exception = null;
  try {
    txnMgr.commitTxn();
  } catch (LockException ex) {
    exception = ex;
  }
  Assert.assertNotNull("Expected exception1", exception);
  Assert.assertEquals("Wrong Exception1", ErrorMsg.TXN_ABORTED, exception.getCanonicalErrorMsg());
  exception = null;
  ((DbTxnManager) txnMgr).openTxn(ctx, "AlexanderIII", TxnType.DEFAULT, HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS) * 2);
  Thread.sleep(HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS));
  // this will abort the txn
  runReaper();
  TxnStore txnHandler = TxnUtils.getTxnStore(conf);
  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(2, txnsInfo.getTxn_high_water_mark());
  assertEquals(2, txnsInfo.getOpen_txns().size());
  Assert.assertEquals(TxnState.ABORTED, txnsInfo.getOpen_txns().get(1).getState());
  // this is idempotent
  txnMgr.rollbackTxn();
}
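The test above shows the behaviour a caller has to cope with: once the reaper has aborted a transaction, commitTxn() fails with a LockException whose canonical error is TXN_ABORTED, while rollbackTxn() remains safe to call on the already-aborted transaction. Below is a minimal, hypothetical sketch of client-side handling built on exactly those calls; the helper class and method names are assumptions, not Hive API:

import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;

public class CommitHelper {
  /**
   * Commits the current transaction; if the commit fails because the
   * transaction was already aborted (e.g. by the timeout reaper, as in the
   * test above), falls back to rollbackTxn(), which the test shows is
   * idempotent in that state. Any other failure is rethrown.
   */
  public static void commitOrRollback(HiveTxnManager txnMgr) throws LockException {
    try {
      txnMgr.commitTxn();
    } catch (LockException ex) {
      if (ex.getCanonicalErrorMsg() == ErrorMsg.TXN_ABORTED) {
        txnMgr.rollbackTxn(); // safe on an already-aborted transaction
      } else {
        throw ex;
      }
    }
  }
}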
Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse in project hive by apache.
In class TestStreaming, method testErrorHandling:
@Test
public void testErrorHandling() throws Exception {
  String agentInfo = "UT_" + Thread.currentThread().getName();
  runCmdOnDriver("create database testErrors");
  runCmdOnDriver("use testErrors");
  runCmdOnDriver("create table T(a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
  HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, "testErrors", "T", null);
  StreamingConnection connection = endPt.newConnection(false, agentInfo);
  DelimitedInputWriter innerWriter = new DelimitedInputWriter("a,b".split(","), ",", endPt, connection);
  FaultyWriter writer = new FaultyWriter(innerWriter);
  TransactionBatch txnBatch = connection.fetchTransactionBatch(2, writer);
  txnBatch.close();
  // this is a no-op on a closed batch
  txnBatch.heartbeat();
  // ditto
  txnBatch.abort();
  GetOpenTxnsInfoResponse r = msClient.showTxns();
  Assert.assertEquals("HWM didn't match", 17, r.getTxn_high_water_mark());
  List<TxnInfo> ti = r.getOpen_txns();
  Assert.assertEquals("wrong status ti(0)", TxnState.ABORTED, ti.get(0).getState());
  Assert.assertEquals("wrong status ti(1)", TxnState.ABORTED, ti.get(1).getState());
  Exception expectedEx = null;
  try {
    txnBatch.beginNextTransaction();
  } catch (IllegalStateException ex) {
    expectedEx = ex;
  }
  Assert.assertTrue("beginNextTransaction() should have failed", expectedEx != null && expectedEx.getMessage().contains("has been closed()"));
  expectedEx = null;
  try {
    txnBatch.write("name0,1,Hello streaming".getBytes());
  } catch (IllegalStateException ex) {
    expectedEx = ex;
  }
  Assert.assertTrue("write() should have failed", expectedEx != null && expectedEx.getMessage().contains("has been closed()"));
  expectedEx = null;
  try {
    txnBatch.commit();
  } catch (IllegalStateException ex) {
    expectedEx = ex;
  }
  Assert.assertTrue("commit() should have failed", expectedEx != null && expectedEx.getMessage().contains("has been closed()"));
  txnBatch = connection.fetchTransactionBatch(2, writer);
  txnBatch.beginNextTransaction();
  txnBatch.write("name2,2,Welcome to streaming".getBytes());
  txnBatch.write("name4,2,more Streaming unlimited".getBytes());
  txnBatch.write("name5,2,even more Streaming unlimited".getBytes());
  txnBatch.commit();
  // test toString()
  String s = txnBatch.toString();
  Assert.assertTrue("Actual: " + s, s.contains("LastUsed " + JavaUtils.txnIdToString(txnBatch.getCurrentTxnId())));
  Assert.assertTrue("Actual: " + s, s.contains("TxnStatus[CO]"));
  expectedEx = null;
  txnBatch.beginNextTransaction();
  writer.enableErrors();
  try {
    txnBatch.write("name6,2,Doh!".getBytes());
  } catch (StreamingIOFailure ex) {
    expectedEx = ex;
    txnBatch.getCurrentTransactionState();
    // check that it doesn't throw ArrayIndexOutOfBounds...
    txnBatch.getCurrentTxnId();
  }
  Assert.assertTrue("Wrong exception: " + (expectedEx != null ? expectedEx.getMessage() : "?"), expectedEx != null && expectedEx.getMessage().contains("Simulated fault occurred"));
  expectedEx = null;
  try {
    txnBatch.commit();
  } catch (IllegalStateException ex) {
    expectedEx = ex;
  }
  Assert.assertTrue("commit() should have failed", expectedEx != null && expectedEx.getMessage().contains("has been closed()"));
  // test toString()
  s = txnBatch.toString();
  Assert.assertTrue("Actual: " + s, s.contains("LastUsed " + JavaUtils.txnIdToString(txnBatch.getCurrentTxnId())));
  Assert.assertTrue("Actual: " + s, s.contains("TxnStatus[CA]"));
  r = msClient.showTxns();
  Assert.assertEquals("HWM didn't match", 19, r.getTxn_high_water_mark());
  ti = r.getOpen_txns();
  Assert.assertEquals("wrong status ti(0)", TxnState.ABORTED, ti.get(0).getState());
  Assert.assertEquals("wrong status ti(1)", TxnState.ABORTED, ti.get(1).getState());
  // txnid 3 was committed and thus not open
  Assert.assertEquals("wrong status ti(2)", TxnState.ABORTED, ti.get(2).getState());
  writer.disableErrors();
  txnBatch = connection.fetchTransactionBatch(2, writer);
  txnBatch.beginNextTransaction();
  txnBatch.write("name2,2,Welcome to streaming".getBytes());
  writer.enableErrors();
  expectedEx = null;
  try {
    txnBatch.commit();
  } catch (StreamingIOFailure ex) {
    expectedEx = ex;
  }
  Assert.assertTrue("Wrong exception: " + (expectedEx != null ? expectedEx.getMessage() : "?"), expectedEx != null && expectedEx.getMessage().contains("Simulated fault occurred"));
  r = msClient.showTxns();
  Assert.assertEquals("HWM didn't match", 21, r.getTxn_high_water_mark());
  ti = r.getOpen_txns();
  Assert.assertEquals("wrong status ti(3)", TxnState.ABORTED, ti.get(3).getState());
  Assert.assertEquals("wrong status ti(4)", TxnState.ABORTED, ti.get(4).getState());
  txnBatch.abort();
}
Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse in project hive by apache.
In class TxnHandler, method getOpenTxnsInfo:
@Override
@RetrySemantics.ReadOnly
public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException {
  try {
    // We need to figure out the current transaction number and the list of
    // open transactions. To avoid needing a transaction on the underlying
    // database we'll look at the current transaction number first. If it
    // subsequently shows up in the open list that's ok.
    Connection dbConn = null;
    Statement stmt = null;
    ResultSet rs = null;
    try {
      /**
       * This method can run at READ_COMMITTED as long as
       * {@link #openTxns(org.apache.hadoop.hive.metastore.api.OpenTxnRequest)} is atomic.
       * More specifically, as long as advancing TransactionID in NEXT_TXN_ID is atomic with
       * adding corresponding entries into TXNS. The reason is that any txnid below HWM
       * is either in TXNS and thus considered open (Open/Aborted) or it's considered Committed.
       */
      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      stmt = dbConn.createStatement();
      String s = "select ntxn_next - 1 from NEXT_TXN_ID";
      LOG.debug("Going to execute query <" + s + ">");
      rs = stmt.executeQuery(s);
      if (!rs.next()) {
        throw new MetaException("Transaction tables not properly " + "initialized, no record found in next_txn_id");
      }
      long hwm = rs.getLong(1);
      if (rs.wasNull()) {
        throw new MetaException("Transaction tables not properly " + "initialized, null record found in next_txn_id");
      }
      close(rs);
      List<TxnInfo> txnInfos = new ArrayList<>();
      // need the WHERE clause below to ensure consistent results with READ_COMMITTED
      s = "select txn_id, txn_state, txn_user, txn_host, txn_started, txn_last_heartbeat from " + "TXNS where txn_id <= " + hwm;
      LOG.debug("Going to execute query <" + s + ">");
      rs = stmt.executeQuery(s);
      while (rs.next()) {
        char c = rs.getString(2).charAt(0);
        TxnState state;
        switch (c) {
          case TXN_ABORTED:
            state = TxnState.ABORTED;
            break;
          case TXN_OPEN:
            state = TxnState.OPEN;
            break;
          default:
            throw new MetaException("Unexpected transaction state " + c + " found in txns table");
        }
        TxnInfo txnInfo = new TxnInfo(rs.getLong(1), state, rs.getString(3), rs.getString(4));
        txnInfo.setStartedTime(rs.getLong(5));
        txnInfo.setLastHeartbeatTime(rs.getLong(6));
        txnInfos.add(txnInfo);
      }
      LOG.debug("Going to rollback");
      dbConn.rollback();
      return new GetOpenTxnsInfoResponse(hwm, txnInfos);
    } catch (SQLException e) {
      LOG.debug("Going to rollback");
      rollbackDBConn(dbConn);
      checkRetryable(dbConn, e, "getOpenTxnsInfo");
      throw new MetaException("Unable to select from transaction database: " + getMessage(e) + StringUtils.stringifyException(e));
    } finally {
      close(rs, stmt, dbConn);
    }
  } catch (RetryException e) {
    return getOpenTxnsInfo();
  }
}
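The streaming test earlier on this page reaches this implementation through the metastore client's showTxns() call. Below is a minimal sketch of consuming the response that way, using only the getters already exercised above; the class and method names in the sketch are hypothetical:

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
import org.apache.hadoop.hive.metastore.api.TxnInfo;
import org.apache.thrift.TException;

public class ShowTxnsExample {
  /**
   * Prints the result of getOpenTxnsInfo() much like SHOW TRANSACTIONS does:
   * the high-water mark, then one line per transaction still tracked in TXNS
   * (open or aborted but not yet cleaned up).
   */
  public static void printOpenTxns(IMetaStoreClient msClient) throws TException {
    GetOpenTxnsInfoResponse rsp = msClient.showTxns();
    System.out.println("High-water mark: " + rsp.getTxn_high_water_mark());
    for (TxnInfo txnInfo : rsp.getOpen_txns()) {
      System.out.println("txn " + txnInfo.getId() + " state=" + txnInfo.getState());
    }
  }
}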