Use of org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest in project hive by apache.
From class TestTxnHandler, method testAbortTxn.
@Test
public void testAbortTxn() throws Exception {
  OpenTxnsResponse openedTxns = txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost"));
  List<Long> txnList = openedTxns.getTxn_ids();
  long first = txnList.get(0);
  assertEquals(1L, first);
  long second = txnList.get(1);
  assertEquals(2L, second);
  txnHandler.abortTxn(new AbortTxnRequest(1));
  List<String> parts = new ArrayList<String>();
  parts.add("p=1");
  AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest("default", "T");
  rqst.setTxnIds(Collections.singletonList(3L));
  AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst);
  long writeId = writeIds.getTxnToWriteIds().get(0).getWriteId();
  assertEquals(3, writeIds.getTxnToWriteIds().get(0).getTxnId());
  assertEquals(1, writeId);
  AddDynamicPartitions adp = new AddDynamicPartitions(3, writeId, "default", "T", parts);
  adp.setOperationType(DataOperationType.INSERT);
  txnHandler.addDynamicPartitions(adp);
  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(3, txnsInfo.getTxn_high_water_mark());
  assertEquals(3, txnsInfo.getOpen_txns().size());
  assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
  assertEquals(TxnState.ABORTED, txnsInfo.getOpen_txns().get(0).getState());
  assertEquals(2L, txnsInfo.getOpen_txns().get(1).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());
  assertEquals(3, txnsInfo.getOpen_txns().get(2).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(2).getState());
  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(3, txns.getTxn_high_water_mark());
  assertEquals(3, txns.getOpen_txns().size());
  boolean[] saw = new boolean[4];
  for (int i = 0; i < saw.length; i++) saw[i] = false;
  for (Long tid : txns.getOpen_txns()) {
    saw[tid.intValue()] = true;
  }
  for (int i = 1; i < saw.length; i++) assertTrue(saw[i]);
  txnHandler.commitTxn(new CommitTxnRequest(2));
  // this succeeds because abortTxn is idempotent
  txnHandler.abortTxn(new AbortTxnRequest(1));
  boolean gotException = false;
  try {
    txnHandler.abortTxn(new AbortTxnRequest(2));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // txn 2 is the last committed txn, so it is still present in the TXNS table
    Assert.assertEquals("Transaction " + JavaUtils.txnIdToString(2) + " is already committed.", ex.getMessage());
  }
  Assert.assertTrue(gotException);
  gotException = false;
  txnHandler.commitTxn(new CommitTxnRequest(3));
  try {
    txnHandler.abortTxn(new AbortTxnRequest(3));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // txn 3 is not an empty txn, so we get a more specific message
    Assert.assertEquals("Transaction " + JavaUtils.txnIdToString(3) + " is already committed.", ex.getMessage());
  }
  Assert.assertTrue(gotException);
  txnHandler.setOpenTxnTimeOutMillis(1);
  txnHandler.cleanEmptyAbortedAndCommittedTxns();
  txnHandler.setOpenTxnTimeOutMillis(1000);
  gotException = false;
  try {
    txnHandler.abortTxn(new AbortTxnRequest(2));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // the empty committed txn 2 has now been cleaned up, so it is no longer recognized
    Assert.assertEquals("No such transaction " + JavaUtils.txnIdToString(2), ex.getMessage());
  }
  Assert.assertTrue(gotException);
  gotException = false;
  try {
    // txn 4 was never opened
    txnHandler.abortTxn(new AbortTxnRequest(4));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    Assert.assertEquals("No such transaction " + JavaUtils.txnIdToString(4), ex.getMessage());
  }
  Assert.assertTrue(gotException);
}
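For comparison, the same open / allocate / abort lifecycle can be driven through the public IMetaStoreClient API rather than TxnHandler directly. The following is a minimal sketch; the helper name openAllocateAbort is hypothetical, and it assumes an already-connected client.

// Hypothetical helper: the open -> allocate -> abort flow via IMetaStoreClient.
static long openAllocateAbort(IMetaStoreClient client) throws Exception {
  long txnId = client.openTxn("me");                                 // open a single transaction
  long writeId = client.allocateTableWriteId(txnId, "default", "T"); // first allocation for a table yields write id 1
  client.rollbackTxn(txnId);                                         // abort; as the test above shows, re-aborting is tolerated
  return writeId;
}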
Use of org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest in project hive by apache.
From class TestTxnHandler, method testReplAllocWriteId.
@Test
public void testReplAllocWriteId() throws Exception {
  int numTxn = 2;
  String[] output = TestTxnDbUtil.queryToString(conf, "SELECT MAX(\"TXN_ID\") + 1 FROM \"TXNS\"").split("\n");
  long startTxnId = Long.parseLong(output[1].trim());
  List<Long> srcTxnIdList = LongStream.rangeClosed(startTxnId, numTxn + startTxnId - 1).boxed().collect(Collectors.toList());
  List<Long> targetTxnList = replOpenTxnForTest(startTxnId, numTxn, "destdb.*");
  assert (targetTxnList.size() == numTxn);
  List<TxnToWriteId> srcTxnToWriteId;
  List<TxnToWriteId> targetTxnToWriteId;
  srcTxnToWriteId = new ArrayList<>();
  for (int idx = 0; idx < numTxn; idx++) {
    srcTxnToWriteId.add(new TxnToWriteId(startTxnId + idx, idx + 1));
  }
  AllocateTableWriteIdsRequest allocMsg = new AllocateTableWriteIdsRequest("destdb", "tbl1");
  allocMsg.setReplPolicy("destdb.*");
  allocMsg.setSrcTxnToWriteIdList(srcTxnToWriteId);
  targetTxnToWriteId = txnHandler.allocateTableWriteIds(allocMsg).getTxnToWriteIds();
  for (int idx = 0; idx < targetTxnList.size(); idx++) {
    assertEquals(targetTxnToWriteId.get(idx).getWriteId(), srcTxnToWriteId.get(idx).getWriteId());
    assertEquals(Long.valueOf(targetTxnToWriteId.get(idx).getTxnId()), targetTxnList.get(idx));
  }
  // idempotent case: repeating the allocation for destdb returns the same mapping
  targetTxnToWriteId = txnHandler.allocateTableWriteIds(allocMsg).getTxnToWriteIds();
  for (int idx = 0; idx < targetTxnList.size(); idx++) {
    assertEquals(targetTxnToWriteId.get(idx).getWriteId(), srcTxnToWriteId.get(idx).getWriteId());
    assertEquals(Long.valueOf(targetTxnToWriteId.get(idx).getTxnId()), targetTxnList.get(idx));
  }
  // request a write id (2 * numTxn + 1) that is not the next write id for tbl2;
  // the allocation is still expected to succeed without throwing
  boolean failed = false;
  srcTxnToWriteId = new ArrayList<>();
  srcTxnToWriteId.add(new TxnToWriteId(startTxnId, 2 * numTxn + 1));
  allocMsg = new AllocateTableWriteIdsRequest("destdb", "tbl2");
  allocMsg.setReplPolicy("destdb.*");
  allocMsg.setSrcTxnToWriteIdList(srcTxnToWriteId);
  try {
    txnHandler.allocateTableWriteIds(allocMsg).getTxnToWriteIds();
  } catch (IllegalStateException e) {
    failed = true;
  }
  assertFalse(failed);
  replAbortTxnForTest(srcTxnIdList, "destdb.*");
  // Test for aborted transactions: idempotent case where allocating a write id
  // when the txn is already aborted should do nothing.
  failed = false;
  try {
    txnHandler.allocateTableWriteIds(allocMsg).getTxnToWriteIds();
  } catch (RuntimeException e) {
    failed = true;
  }
  assertFalse(failed);
}
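Condensed from the test above, the replication-side allocation boils down to attaching the source cluster's txn-to-write-id mapping to the request; srcTxnId1 and srcTxnId2 below are placeholders for source transaction ids.

// Sketch: allocate write ids on the replica from the source cluster's mapping.
List<TxnToWriteId> srcMapping = Arrays.asList(
    new TxnToWriteId(srcTxnId1, 1L),    // source txn id -> write id assigned on the source
    new TxnToWriteId(srcTxnId2, 2L));
AllocateTableWriteIdsRequest req = new AllocateTableWriteIdsRequest("destdb", "tbl1");
req.setReplPolicy("destdb.*");          // same policy used when the target txns were opened
req.setSrcTxnToWriteIdList(srcMapping);
List<TxnToWriteId> targetMapping = txnHandler.allocateTableWriteIds(req).getTxnToWriteIds();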
Use of org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest in project hive by apache.
From class TestCompactionTxnHandler, method allocateTableWriteIds.
private long allocateTableWriteIds(String dbName, String tblName, long txnid) throws Exception {
  AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest(dbName, tblName);
  rqst.setTxnIds(Collections.singletonList(txnid));
  AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst);
  return writeIds.getTxnToWriteIds().get(0).getWriteId();
}
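A typical call site (database and table names below are hypothetical) pairs this helper with a freshly opened transaction:

// Hypothetical usage: allocate the first write id for a table inside an open txn.
long txnId = txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost")).getTxn_ids().get(0);
long writeId = allocateTableWriteIds("mydb", "mytable", txnId); // returns 1 for the table's first allocation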
Use of org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest in project hive by apache.
From class TestCompactionTxnHandler, method addDynamicPartitions.
@Test
public void addDynamicPartitions() throws Exception {
  String dbName = "default";
  String tableName = "adp_table";
  OpenTxnsResponse openTxns = txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost"));
  long txnId = openTxns.getTxn_ids().get(0);
  AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest(dbName, tableName);
  rqst.setTxnIds(openTxns.getTxn_ids());
  AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst);
  long writeId = writeIds.getTxnToWriteIds().get(0).getWriteId();
  assertEquals(txnId, writeIds.getTxnToWriteIds().get(0).getTxnId());
  assertEquals(1, writeId);
  // lock the table, as is done for dynamic-partition writes
  LockComponent lc = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, dbName);
  lc.setIsDynamicPartitionWrite(true);
  lc.setTablename(tableName);
  DataOperationType dop = DataOperationType.UPDATE;
  lc.setOperationType(dop);
  LockRequest lr = new LockRequest(Arrays.asList(lc), "me", "localhost");
  lr.setTxnid(txnId);
  LockResponse lock = txnHandler.lock(lr);
  assertEquals(LockState.ACQUIRED, lock.getState());
  AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeId, dbName, tableName, Arrays.asList("ds=yesterday", "ds=today"));
  adp.setOperationType(dop);
  txnHandler.addDynamicPartitions(adp);
  txnHandler.commitTxn(new CommitTxnRequest(txnId));
  Set<CompactionInfo> potentials = txnHandler.findPotentialCompactions(1000, -1L);
  assertEquals(2, potentials.size());
  SortedSet<CompactionInfo> sorted = new TreeSet<CompactionInfo>(potentials);
  int i = 0;
  for (CompactionInfo ci : sorted) {
    assertEquals(dbName, ci.dbname);
    assertEquals(tableName, ci.tableName);
    switch (i++) {
      case 0:
        assertEquals("ds=today", ci.partName);
        break;
      case 1:
        assertEquals("ds=yesterday", ci.partName);
        break;
      default:
        throw new RuntimeException("What?");
    }
  }
}
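After the dynamic partitions are registered and the txn committed, each touched partition becomes a compaction candidate. A hedged sketch of queueing and inspecting a compaction for one of them follows; the calls are per the TxnStore interface, and since the return type of compact() has varied across Hive versions, its result is ignored here.

// Sketch: request a minor compaction for one partition and list the queue.
CompactionRequest creq = new CompactionRequest("default", "adp_table", CompactionType.MINOR);
creq.setPartitionname("ds=today");
txnHandler.compact(creq);                 // enqueue the compaction request
ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
// resp.getCompacts() should now include an "initiated" entry for ds=today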
Use of org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest in project hive by apache.
From class HMSClient, method allocateTableWriteIds.
boolean allocateTableWriteIds(String dbName, String tableName, List<Long> openTxns) throws TException {
  AllocateTableWriteIdsRequest awiRqst = new AllocateTableWriteIdsRequest(dbName, tableName);
  openTxns.forEach(awiRqst::addToTxnIds);
  client.allocate_table_write_ids(awiRqst);
  return true;
}
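A call site might look like the following; the transaction ids are hypothetical and assumed to come from a prior open-transactions call on the same raw thrift client.

// Hypothetical call site: allocate write ids for two already-open transactions.
List<Long> txns = Arrays.asList(10L, 11L);   // ids returned when the txns were opened
boolean ok = allocateTableWriteIds("default", "tbl1", txns);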