Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
In the class ReplTxnTask, the method execute:
@Override
public int execute() {
  String replPolicy = work.getReplPolicy();
  String tableName = work.getTableName();
  ReplicationSpec replicationSpec = work.getReplicationSpec();
  if ((tableName != null) && (replicationSpec != null)) {
    try {
      Database database = Hive.get().getDatabase(work.getDbName());
      if (!replicationSpec.allowReplacementInto(database.getParameters())) {
        // If the event is already replayed, there is no need to replay it again.
        LOG.debug("ReplTxnTask: Event is skipped as it is already replayed. Event Id: "
            + replicationSpec.getReplicationState() + " Event Type: " + work.getOperationType());
        return 0;
      }
    } catch (HiveException e1) {
      LOG.error("Get database failed with exception " + e1.getMessage());
      return 1;
    }
  }
  try {
    HiveTxnManager txnManager = context.getHiveTxnManager();
    String user = UserGroupInformation.getCurrentUser().getUserName();
    switch (work.getOperationType()) {
      case REPL_OPEN_TXN:
        List<Long> txnIds = txnManager.replOpenTxn(replPolicy, work.getTxnIds(), user);
        assert txnIds.size() == work.getTxnIds().size();
        LOG.info("Replayed OpenTxn Event for policy " + replPolicy + " with srcTxn "
            + work.getTxnIds().toString() + " and target txn id " + txnIds.toString());
        return 0;
      case REPL_ABORT_TXN:
        for (long txnId : work.getTxnIds()) {
          txnManager.replRollbackTxn(replPolicy, txnId);
          LOG.info("Replayed AbortTxn Event for policy " + replPolicy + " with srcTxn " + txnId);
        }
        return 0;
      case REPL_COMMIT_TXN:
        // Currently only one commit txn per event is supported.
        assert (work.getTxnIds().size() == 1);
        long txnId = work.getTxnIds().get(0);
        CommitTxnRequest commitTxnRequest = new CommitTxnRequest(txnId);
        commitTxnRequest.setReplPolicy(work.getReplPolicy());
        commitTxnRequest.setWriteEventInfos(work.getWriteEventInfos());
        commitTxnRequest.setTxn_type(TxnType.REPL_CREATED);
        txnManager.replCommitTxn(commitTxnRequest);
        LOG.info("Replayed CommitTxn Event for replPolicy: " + replPolicy + " with srcTxn: " + txnId
            + " WriteEventInfos: " + work.getWriteEventInfos());
        return 0;
      case REPL_ALLOC_WRITE_ID:
        assert work.getTxnToWriteIdList() != null;
        String dbName = work.getDbName();
        List<TxnToWriteId> txnToWriteIdList = work.getTxnToWriteIdList();
        txnManager.replAllocateTableWriteIdsBatch(dbName, tableName, replPolicy, txnToWriteIdList);
        LOG.info("Replayed AllocWriteId Event for repl policy: " + replPolicy + " db name: " + dbName
            + " txnToWriteIdList: " + txnToWriteIdList.toString() + " table name: " + tableName);
        return 0;
      case REPL_WRITEID_STATE:
        txnManager.replTableWriteIdState(work.getValidWriteIdList(), work.getDbName(), tableName, work.getPartNames());
        LOG.info("Replicated WriteId state for DbName: " + work.getDbName() + " TableName: " + tableName
            + " ValidWriteIdList: " + work.getValidWriteIdList());
        return 0;
      default:
        LOG.error("Operation Type " + work.getOperationType() + " is not supported");
        return 1;
    }
  } catch (Exception e) {
    console.printError("Failed with exception " + e.getMessage(), "\n" + StringUtils.stringifyException(e));
    setException(e);
    LOG.error("ReplTxnTask failed", e);
    return ReplUtils.handleException(true, e, work.getDumpDirectory(), work.getMetricCollector(), getName(), conf);
  }
}
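For reference, here is a minimal standalone sketch of issuing a replication-created commit, mirroring the REPL_COMMIT_TXN branch above but going directly through the metastore transaction store instead of HiveTxnManager. The CommitTxnRequest setters are the ones used in the snippet; the Configuration contents, the policy string "mydb.*", and the source txn id are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
import org.apache.hadoop.hive.metastore.api.TxnType;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class ReplCommitSketch {
  public static void main(String[] args) throws Exception {
    // Assumes metastore connection/txn properties are already set in the Configuration.
    Configuration conf = new Configuration();
    TxnStore txnStore = TxnUtils.getTxnStore(conf);
    long srcTxnId = 42L; // hypothetical transaction id from the source cluster
    CommitTxnRequest rqst = new CommitTxnRequest(srcTxnId);
    // The repl policy lets the metastore map the source txn id to the txn it
    // opened on the target for this policy; "mydb.*" is a placeholder.
    rqst.setReplPolicy("mydb.*");
    rqst.setTxn_type(TxnType.REPL_CREATED);
    txnStore.commitTxn(rqst);
  }
}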
Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
In the class TestTxnHandler, the method testAbortTxn:
@Test
public void testAbortTxn() throws Exception {
  OpenTxnsResponse openedTxns = txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost"));
  List<Long> txnList = openedTxns.getTxn_ids();
  long first = txnList.get(0);
  assertEquals(1L, first);
  long second = txnList.get(1);
  assertEquals(2L, second);
  txnHandler.abortTxn(new AbortTxnRequest(1));
  List<String> parts = new ArrayList<String>();
  parts.add("p=1");
  AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest("default", "T");
  rqst.setTxnIds(Collections.singletonList(3L));
  AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst);
  long writeId = writeIds.getTxnToWriteIds().get(0).getWriteId();
  assertEquals(3, writeIds.getTxnToWriteIds().get(0).getTxnId());
  assertEquals(1, writeId);
  AddDynamicPartitions adp = new AddDynamicPartitions(3, writeId, "default", "T", parts);
  adp.setOperationType(DataOperationType.INSERT);
  txnHandler.addDynamicPartitions(adp);
  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(3, txnsInfo.getTxn_high_water_mark());
  assertEquals(3, txnsInfo.getOpen_txns().size());
  assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
  assertEquals(TxnState.ABORTED, txnsInfo.getOpen_txns().get(0).getState());
  assertEquals(2L, txnsInfo.getOpen_txns().get(1).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());
  assertEquals(3, txnsInfo.getOpen_txns().get(2).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(2).getState());
  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(3, txns.getTxn_high_water_mark());
  assertEquals(3, txns.getOpen_txns().size());
  boolean[] saw = new boolean[4];
  for (int i = 0; i < saw.length; i++) saw[i] = false;
  for (Long tid : txns.getOpen_txns()) {
    saw[tid.intValue()] = true;
  }
  for (int i = 1; i < saw.length; i++) assertTrue(saw[i]);
  txnHandler.commitTxn(new CommitTxnRequest(2));
  // this succeeds because abortTxn is idempotent
  txnHandler.abortTxn(new AbortTxnRequest(1));
  boolean gotException = false;
  try {
    txnHandler.abortTxn(new AbortTxnRequest(2));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // txn 2 is the last committed txn, so it is still in the TXNS table
    Assert.assertEquals("Transaction " + JavaUtils.txnIdToString(2) + " is already committed.", ex.getMessage());
  }
  Assert.assertTrue(gotException);
  gotException = false;
  txnHandler.commitTxn(new CommitTxnRequest(3));
  try {
    txnHandler.abortTxn(new AbortTxnRequest(3));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // txn 3 is not an empty txn, so we get a better message
    Assert.assertEquals("Transaction " + JavaUtils.txnIdToString(3) + " is already committed.", ex.getMessage());
  }
  Assert.assertTrue(gotException);
  txnHandler.setOpenTxnTimeOutMillis(1);
  txnHandler.cleanEmptyAbortedAndCommittedTxns();
  txnHandler.setOpenTxnTimeOutMillis(1000);
  gotException = false;
  try {
    txnHandler.abortTxn(new AbortTxnRequest(2));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // now the second transaction is cleared and, since it was empty, we no longer recognize it
    Assert.assertEquals("No such transaction " + JavaUtils.txnIdToString(2), ex.getMessage());
  }
  Assert.assertTrue(gotException);
  gotException = false;
  try {
    txnHandler.abortTxn(new AbortTxnRequest(4));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    Assert.assertEquals("No such transaction " + JavaUtils.txnIdToString(4), ex.getMessage());
  }
  Assert.assertTrue(gotException);
}
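The abort/commit contract exercised above can be distilled into a short sketch: aborting an already-aborted transaction is a no-op, while aborting a committed one raises NoSuchTxnException. This is a hedged sketch against the TxnStore interface backing the test; the wrapper class, method name, and the way the store is obtained are assumptions.

import java.util.List;
import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

class AbortSemanticsSketch {
  // txnStore is assumed to be configured elsewhere, e.g. via TxnUtils.getTxnStore(conf).
  static void demo(TxnStore txnStore) throws Exception {
    List<Long> ids = txnStore.openTxns(new OpenTxnRequest(2, "me", "localhost")).getTxn_ids();
    long aborted = ids.get(0);
    long committed = ids.get(1);
    txnStore.abortTxn(new AbortTxnRequest(aborted));
    txnStore.abortTxn(new AbortTxnRequest(aborted)); // idempotent: the second abort succeeds
    txnStore.commitTxn(new CommitTxnRequest(committed));
    try {
      txnStore.abortTxn(new AbortTxnRequest(committed)); // a committed txn cannot be aborted
    } catch (NoSuchTxnException expected) {
      // "... is already committed." while the txn is still in the TXNS table
    }
  }
}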
Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
In the class TestTxnHandler, the method heartbeatTxnRangeOneCommitted:
@Test
public void heartbeatTxnRangeOneCommitted() throws Exception {
long txnid = openTxn();
assertEquals(1, txnid);
txnHandler.commitTxn(new CommitTxnRequest(1));
txnid = openTxn();
txnid = openTxn();
HeartbeatTxnRangeResponse rsp = txnHandler.heartbeatTxnRange(new HeartbeatTxnRangeRequest(1, 3));
assertEquals(1, rsp.getNosuchSize());
Long txn = rsp.getNosuch().iterator().next();
assertEquals(1L, (long) txn);
assertEquals(0, rsp.getAborted().size());
}
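As a companion to the test, here is a hedged sketch of how a client might consume a HeartbeatTxnRangeResponse. The request/response types and their accessors come from the test above; the wrapper class and method are illustrative.

import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

class HeartbeatSketch {
  // Heartbeats txns min..max in one call; committed or unknown ids come back in
  // getNosuch(), aborted ones in getAborted(), everything else was renewed.
  static void heartbeatRange(TxnStore txnStore, long min, long max) throws Exception {
    HeartbeatTxnRangeResponse rsp = txnStore.heartbeatTxnRange(new HeartbeatTxnRangeRequest(min, max));
    for (long gone : rsp.getNosuch()) {
      System.out.println("txn " + gone + " no longer exists (e.g. already committed)");
    }
    for (long dead : rsp.getAborted()) {
      System.out.println("txn " + dead + " was aborted; its work must be retried");
    }
  }
}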
use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
the class TestCompactionTxnHandler method testFindPotentialCompactions.
@Test
public void testFindPotentialCompactions() throws Exception {
  // Test that committing unlocks
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("yourtable");
  comp.setPartitionname("mypartition=myvalue");
  comp.setOperationType(DataOperationType.UPDATE);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));
  assertEquals(0, txnHandler.numLocksInLockTable());
  Set<CompactionInfo> potentials = txnHandler.findPotentialCompactions(100, -1L);
  assertEquals(2, potentials.size());
  boolean sawMyTable = false, sawYourTable = false;
  for (CompactionInfo ci : potentials) {
    sawMyTable |= (ci.dbname.equals("mydb") && ci.tableName.equals("mytable") && ci.partName == null);
    sawYourTable |= (ci.dbname.equals("mydb") && ci.tableName.equals("yourtable")
        && ci.partName.equals("mypartition=myvalue"));
  }
  assertTrue(sawMyTable);
  assertTrue(sawYourTable);
  potentials = txnHandler.findPotentialCompactions(100, -1, 1);
  assertEquals(2, potentials.size());
  // simulate the auto-compaction check interval passing
  TimeUnit.SECONDS.sleep(2);
  potentials = txnHandler.findPotentialCompactions(100, -1, 1);
  assertEquals(0, potentials.size());
  // simulate a previously failed compaction
  CompactionRequest rqst = new CompactionRequest("mydb", "mytable", CompactionType.MINOR);
  txnHandler.compact(rqst);
  CompactionInfo ci = txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION));
  txnHandler.markFailed(ci);
  potentials = txnHandler.findPotentialCompactions(100, -1, 1);
  assertEquals(1, potentials.size());
}
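For context, here is a hedged sketch of how an Initiator-style loop might consume findPotentialCompactions. The two-argument overload and the public CompactionInfo fields are the ones exercised above; the wrapper class and method are illustrative.

import java.util.Set;
import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

class CompactionScanSketch {
  // Lists db/table/partition combinations that recent commits or aborts have
  // made candidates for compaction; the first argument caps aborted-txn scanning.
  static void scan(TxnStore txnStore) throws Exception {
    Set<CompactionInfo> potentials = txnStore.findPotentialCompactions(100, -1L);
    for (CompactionInfo ci : potentials) {
      String target = ci.dbname + "." + ci.tableName + (ci.partName == null ? "" : "/" + ci.partName);
      System.out.println("compaction candidate: " + target);
    }
  }
}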
Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
In the class TestCompactionTxnHandler, the method addDynamicPartitions:
@Test
public void addDynamicPartitions() throws Exception {
  String dbName = "default";
  String tableName = "adp_table";
  OpenTxnsResponse openTxns = txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost"));
  long txnId = openTxns.getTxn_ids().get(0);
  AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest(dbName, tableName);
  rqst.setTxnIds(openTxns.getTxn_ids());
  AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst);
  long writeId = writeIds.getTxnToWriteIds().get(0).getWriteId();
  assertEquals(txnId, writeIds.getTxnToWriteIds().get(0).getTxnId());
  assertEquals(1, writeId);
  // lock a table, as in dynamic partitions
  LockComponent lc = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, dbName);
  lc.setIsDynamicPartitionWrite(true);
  lc.setTablename(tableName);
  DataOperationType dop = DataOperationType.UPDATE;
  lc.setOperationType(dop);
  LockRequest lr = new LockRequest(Arrays.asList(lc), "me", "localhost");
  lr.setTxnid(txnId);
  LockResponse lock = txnHandler.lock(lr);
  assertEquals(LockState.ACQUIRED, lock.getState());
  AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeId, dbName, tableName,
      Arrays.asList("ds=yesterday", "ds=today"));
  adp.setOperationType(dop);
  txnHandler.addDynamicPartitions(adp);
  txnHandler.commitTxn(new CommitTxnRequest(txnId));
  Set<CompactionInfo> potentials = txnHandler.findPotentialCompactions(1000, -1L);
  assertEquals(2, potentials.size());
  SortedSet<CompactionInfo> sorted = new TreeSet<CompactionInfo>(potentials);
  int i = 0;
  for (CompactionInfo ci : sorted) {
    assertEquals(dbName, ci.dbname);
    assertEquals(tableName, ci.tableName);
    switch (i++) {
      case 0:
        assertEquals("ds=today", ci.partName);
        break;
      case 1:
        assertEquals("ds=yesterday", ci.partName);
        break;
      default:
        throw new RuntimeException("What?");
    }
  }
}
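Once a write id has been allocated and the lock acquired, the core of the flow above is the AddDynamicPartitions call followed by the commit. A condensed, hedged sketch (the wrapper class and method are illustrative; the db, table, and partition names mirror the test):

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

class DynamicPartitionCommitSketch {
  // Registers the partitions a dynamic-partition write actually touched, then
  // commits; the metastore then tracks one compaction candidate per partition.
  static void commitDynamicPartitions(TxnStore txnStore, long txnId, long writeId) throws Exception {
    AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeId, "default", "adp_table",
        Arrays.asList("ds=yesterday", "ds=today"));
    adp.setOperationType(DataOperationType.UPDATE);
    txnStore.addDynamicPartitions(adp);
    txnStore.commitTxn(new CommitTxnRequest(txnId));
  }
}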