use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in project hive by apache.
the class TestTxnCommands2 method execDDLOpAndCompactionConcurrently.
private void execDDLOpAndCompactionConcurrently(String opType, boolean isPartitioned) throws Exception {
  String tblName = "hive12352";
  String partName = "test";
  runStatementOnDriver("DROP TABLE if exists " + tblName);
  runStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " +
      (isPartitioned ? "PARTITIONED BY (p STRING) " : "") +
      "STORED AS ORC TBLPROPERTIES ( 'transactional'='true' )");
  // create some data
  runStatementOnDriver("INSERT INTO " + tblName + (isPartitioned ? " PARTITION (p='" + partName + "')" : "") +
      " VALUES (1, 'foo'),(2, 'bar'),(3, 'baz')");
  runStatementOnDriver("UPDATE " + tblName + " SET b = 'blah' WHERE a = 3");
  // run Worker to execute compaction
  CompactionRequest req = new CompactionRequest("default", tblName, CompactionType.MAJOR);
  if (isPartitioned) {
    req.setPartitionname("p=" + partName);
  }
  txnHandler.compact(req);
  // swap in a slow mapper so the DDL below runs while compaction is still in progress
  CompactorMR compactorMr = Mockito.spy(new CompactorMR());
  Mockito.doAnswer((Answer<JobConf>) invocationOnMock -> {
    JobConf job = (JobConf) invocationOnMock.callRealMethod();
    job.setMapperClass(SlowCompactorMap.class);
    return job;
  }).when(compactorMr).createBaseJobConf(any(), any(), any(), any(), any(), any());
  Worker worker = Mockito.spy(new Worker());
  worker.setConf(hiveConf);
  worker.init(new AtomicBoolean(true));
  Mockito.doReturn(compactorMr).when(worker).getMrCompactor();
  CompletableFuture<Void> compactionJob = CompletableFuture.runAsync(worker);
  Thread.sleep(1000);
  int compHistory = 0;
  switch (opType) {
    case "DROP_TABLE":
      runStatementOnDriver("DROP TABLE " + tblName);
      runStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) STORED AS ORC TBLPROPERTIES ( 'transactional'='true' )");
      break;
    case "TRUNCATE_TABLE":
      runStatementOnDriver("TRUNCATE TABLE " + tblName);
      compHistory = 1;
      break;
    case "DROP_PARTITION": {
      runStatementOnDriver("ALTER TABLE " + tblName + " DROP PARTITION (p='" + partName + "')");
      runStatementOnDriver("ALTER TABLE " + tblName + " ADD PARTITION (p='" + partName + "')");
      break;
    }
    case "TRUNCATE_PARTITION": {
      runStatementOnDriver("TRUNCATE TABLE " + tblName + " PARTITION (p='" + partName + "')");
      compHistory = 1;
      break;
    }
  }
  compactionJob.join();
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", compHistory, resp.getCompactsSize());
  if (compHistory != 0) {
    Assert.assertEquals("Unexpected 0th compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
  }
  GetOpenTxnsResponse openResp = txnHandler.getOpenTxns();
  Assert.assertEquals(openResp.toString(), 0, openResp.getOpen_txnsSize());
  FileSystem fs = FileSystem.get(hiveConf);
  FileStatus[] status = fs.listStatus(new Path(getWarehouseDir() + "/" + tblName +
      (isPartitioned ? "/p=" + partName : "")), FileUtils.HIDDEN_FILES_PATH_FILTER);
  Assert.assertEquals(0, status.length);
}
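For reference, a minimal sketch of the end-state check this test finishes with, factored into a standalone helper. assertCompactionLeftNoResidue is a hypothetical name, and txnHandler/hiveConf are assumed to be the same test fixtures used above (the FileUtils import reflects the hive.common class used in ql tests, also an assumption here):
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
import org.junit.Assert;

// Hypothetical helper: the concurrent DDL must leave no open txns behind
// and no visible files under the table (or partition) directory.
private void assertCompactionLeftNoResidue(Path dir) throws Exception {
  // getOpen_txnsSize() is the length of the open_txns list in the response
  GetOpenTxnsResponse openResp = txnHandler.getOpenTxns();
  Assert.assertEquals(openResp.toString(), 0, openResp.getOpen_txnsSize());
  // HIDDEN_FILES_PATH_FILTER skips entries whose names start with "." or "_"
  FileSystem fs = FileSystem.get(hiveConf);
  FileStatus[] status = fs.listStatus(dir, FileUtils.HIDDEN_FILES_PATH_FILTER);
  Assert.assertEquals(0, status.length);
}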
use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in project hive by apache.
the class TestCompactionTxnHandler method testMarkCleanedCleansTxnsAndTxnComponents.
// TODO test changes to mark cleaned to clean txns and txn_components
@Test
public void testMarkCleanedCleansTxnsAndTxnComponents() throws Exception {
  // Txn 1: write to mydb.mytable, then abort.
  long txnid = openTxn();
  long mytableWriteId = allocateTableWriteIds("mydb", "mytable", txnid);
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setOperationType(DataOperationType.INSERT);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  txnHandler.abortTxn(new AbortTxnRequest(txnid));
  // Txn 2: delete from mydb.yourtable, then abort.
  txnid = openTxn();
  comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("yourtable");
  comp.setOperationType(DataOperationType.DELETE);
  components = new ArrayList<LockComponent>(1);
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  txnHandler.abortTxn(new AbortTxnRequest(txnid));
  // Txn 3: update two partitions of mydb.foo, then abort.
  txnid = openTxn();
  long fooWriteId = allocateTableWriteIds("mydb", "foo", txnid);
  comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("foo");
  comp.setPartitionname("bar=compact");
  comp.setOperationType(DataOperationType.UPDATE);
  components = new ArrayList<LockComponent>(1);
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("foo");
  comp.setPartitionname("baz=compact");
  comp.setOperationType(DataOperationType.UPDATE);
  components = new ArrayList<LockComponent>(1);
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  txnHandler.abortTxn(new AbortTxnRequest(txnid));
  CompactionInfo ci;
  // Now clean them and check that they are removed from the count.
  CompactionRequest rqst = new CompactionRequest("mydb", "mytable", CompactionType.MAJOR);
  txnHandler.compact(rqst);
  assertEquals(0, txnHandler.findReadyToClean(0, 0).size());
  ci = txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION));
  assertNotNull(ci);
  ci.highestWriteId = mytableWriteId;
  txnHandler.updateCompactorState(ci, 0);
  txnHandler.markCompacted(ci);
  Thread.sleep(txnHandler.getOpenTxnTimeOutMillis());
  List<CompactionInfo> toClean = txnHandler.findReadyToClean(0, 0);
  assertEquals(1, toClean.size());
  txnHandler.markCleaned(ci);
  // Check that we are cleaning up the empty aborted transactions
  GetOpenTxnsResponse txnList = txnHandler.getOpenTxns();
  assertEquals(3, txnList.getOpen_txnsSize());
  // Create one aborted txn for the low water mark
  txnid = openTxn();
  txnHandler.abortTxn(new AbortTxnRequest(txnid));
  Thread.sleep(txnHandler.getOpenTxnTimeOutMillis());
  txnHandler.cleanEmptyAbortedAndCommittedTxns();
  txnList = txnHandler.getOpenTxns();
  assertEquals(3, txnList.getOpen_txnsSize());
  rqst = new CompactionRequest("mydb", "foo", CompactionType.MAJOR);
  rqst.setPartitionname("bar");
  txnHandler.compact(rqst);
  assertEquals(0, txnHandler.findReadyToClean(0, 0).size());
  ci = txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION));
  assertNotNull(ci);
  ci.highestWriteId = fooWriteId;
  txnHandler.updateCompactorState(ci, 0);
  txnHandler.markCompacted(ci);
  toClean = txnHandler.findReadyToClean(0, 0);
  assertEquals(1, toClean.size());
  txnHandler.markCleaned(ci);
  txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost"));
  // The open txn will become the low water mark
  Thread.sleep(txnHandler.getOpenTxnTimeOutMillis());
  txnHandler.setOpenTxnTimeOutMillis(1);
  txnHandler.cleanEmptyAbortedAndCommittedTxns();
  txnList = txnHandler.getOpenTxns();
  assertEquals(3, txnList.getOpen_txnsSize());
  txnHandler.setOpenTxnTimeOutMillis(1000);
}
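The SHARED_WRITE lock block recurs four times above with only the table, partition, and operation type changing; a hedged sketch of the same pattern as a helper (lockSharedWrite is an illustrative name, not part of the test class; the calls themselves are the ones the test already makes):
import java.util.Collections;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.LockComponent;
import org.apache.hadoop.hive.metastore.api.LockLevel;
import org.apache.hadoop.hive.metastore.api.LockRequest;
import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.LockType;

// Illustrative helper: acquire a SHARED_WRITE lock on one table (and
// optionally one partition) inside an already-open transaction.
private void lockSharedWrite(long txnid, String db, String table,
    String partition, DataOperationType opType) throws Exception {
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, db);
  comp.setTablename(table);
  if (partition != null) {
    comp.setPartitionname(partition);
  }
  comp.setOperationType(opType);
  LockRequest req = new LockRequest(Collections.singletonList(comp), "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
}
With this helper, each of the three aborted transactions above reduces to openTxn(), one or two lockSharedWrite(...) calls, and abortTxn(...).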
use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in project hive by apache.
the class TestTxnHandlerNoConnectionPool method testOpenTxn.
@Test
public void testOpenTxn() throws Exception {
  long first = openTxn();
  assertEquals(1L, first);
  long second = openTxn();
  assertEquals(2L, second);
  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(2L, txnsInfo.getTxn_high_water_mark());
  assertEquals(2, txnsInfo.getOpen_txns().size());
  assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(0).getState());
  assertEquals(2L, txnsInfo.getOpen_txns().get(1).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());
  assertEquals("me", txnsInfo.getOpen_txns().get(1).getUser());
  assertEquals("localhost", txnsInfo.getOpen_txns().get(1).getHostname());
  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(2L, txns.getTxn_high_water_mark());
  assertEquals(2, txns.getOpen_txns().size());
  boolean[] saw = new boolean[3];
  for (int i = 0; i < saw.length; i++) saw[i] = false;
  for (Long tid : txns.getOpen_txns()) {
    saw[tid.intValue()] = true;
  }
  for (int i = 1; i < saw.length; i++) assertTrue(saw[i]);
}
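The final membership check indexes a boolean array by txn id, which works only because the ids are known to be 1 and 2; a sketch of the same assertion for arbitrary ids, using nothing beyond java.util:
import java.util.HashSet;
import java.util.Set;

// Collect the reported open txn ids and check membership directly.
Set<Long> open = new HashSet<>(txns.getOpen_txns());
assertTrue(open.contains(first));
assertTrue(open.contains(second));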
use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in project hive by apache.
the class TestTxnHandler method testValidTxnsNoneOpen.
@Test
public void testValidTxnsNoneOpen() throws Exception {
  txnHandler.openTxns(new OpenTxnRequest(2, "me", "localhost"));
  txnHandler.commitTxn(new CommitTxnRequest(1));
  txnHandler.commitTxn(new CommitTxnRequest(2));
  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(2L, txnsInfo.getTxn_high_water_mark());
  assertEquals(0, txnsInfo.getOpen_txns().size());
  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(2L, txns.getTxn_high_water_mark());
  assertEquals(0, txns.getOpen_txns().size());
}
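The two assertions capture the core semantics of GetOpenTxnsResponse: the high water mark is the highest txn id ever allocated and does not retreat when txns commit, while open_txns lists only txns still open or aborted. A hedged sketch of deriving a ValidTxnList-style snapshot string from such a response (the "hwm:excludedIds" layout is illustrative, not Hive's exact serialization):
GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
StringBuilder excluded = new StringBuilder();
for (long id : txns.getOpen_txns()) {
  if (excluded.length() > 0) {
    excluded.append(',');
  }
  excluded.append(id);
}
// Readers may see anything at or below the high water mark except the excluded ids.
String snapshot = txns.getTxn_high_water_mark() + ":" + excluded;
// With both txns committed, as in this test, snapshot is "2:".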
use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in project hive by apache.
the class TestTxnHandler method testOpenTxn.
@Test
public void testOpenTxn() throws Exception {
  long first = openTxn();
  assertEquals(1L, first);
  long second = openTxn();
  assertEquals(2L, second);
  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(2L, txnsInfo.getTxn_high_water_mark());
  assertEquals(2, txnsInfo.getOpen_txns().size());
  assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(0).getState());
  assertEquals(2L, txnsInfo.getOpen_txns().get(1).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());
  assertEquals("me", txnsInfo.getOpen_txns().get(1).getUser());
  assertEquals("localhost", txnsInfo.getOpen_txns().get(1).getHostname());
  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(2L, txns.getTxn_high_water_mark());
  assertEquals(2, txns.getOpen_txns().size());
  boolean[] saw = new boolean[3];
  for (int i = 0; i < saw.length; i++) saw[i] = false;
  for (Long tid : txns.getOpen_txns()) {
    saw[tid.intValue()] = true;
  }
  for (int i = 1; i < saw.length; i++) assertTrue(saw[i]);
}
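This test mirrors the no-connection-pool variant above; one field neither test exercises is abortedBits, a bitmap in the response marking which open_txns entries are aborted rather than merely open. A hedged sketch of decoding it (BitSet.valueOf over the raw bytes follows how Hive's own txn utilities read the field, though getter shapes may differ across versions):
import java.util.BitSet;

GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
// Bit i is set exactly when open_txns.get(i) refers to an aborted txn.
BitSet aborted = BitSet.valueOf(txns.getAbortedBits());
for (int i = 0; i < txns.getOpen_txnsSize(); i++) {
  long id = txns.getOpen_txns().get(i);
  System.out.println(id + (aborted.get(i) ? " aborted" : " open"));
}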