Search in sources:

Example 11 with GetOpenTxnsResponse

Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in the Apache Hive project.

From the class TestTxnCommands2, method execDDLOpAndCompactionConcurrently.

/**
 * Runs the given DDL operation concurrently with a deliberately slowed-down MAJOR
 * compaction on the same table, then verifies the compaction history, that no
 * transactions remain open, and that the table/partition directory ends up empty.
 *
 * @param opType      one of DROP_TABLE, TRUNCATE_TABLE, DROP_PARTITION, TRUNCATE_PARTITION
 * @param isPartioned whether to create and operate on a partitioned table
 */
private void execDDLOpAndCompactionConcurrently(String opType, boolean isPartioned) throws Exception {
    String tblName = "hive12352";
    String partName = "test";
    runStatementOnDriver("DROP TABLE if exists " + tblName);
    runStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING)" + (isPartioned ? "partitioned by (p STRING)" : "") + " STORED AS ORC  TBLPROPERTIES ( 'transactional'='true' )");
    // create some data
    runStatementOnDriver("INSERT INTO " + tblName + (isPartioned ? " PARTITION (p='" + partName + "')" : "") + " VALUES (1, 'foo'),(2, 'bar'),(3, 'baz')");
    runStatementOnDriver("UPDATE " + tblName + " SET b = 'blah' WHERE a = 3");
    // run Worker to execute compaction
    CompactionRequest req = new CompactionRequest("default", tblName, CompactionType.MAJOR);
    if (isPartioned) {
        req.setPartitionname("p=" + partName);
    }
    txnHandler.compact(req);
    // Spy on the MR compactor so the submitted job uses SlowCompactorMap as its
    // mapper, keeping the compaction running long enough to overlap the DDL below.
    CompactorMR compactorMr = Mockito.spy(new CompactorMR());
    Mockito.doAnswer((Answer<JobConf>) invocationOnMock -> {
        JobConf job = (JobConf) invocationOnMock.callRealMethod();
        job.setMapperClass(SlowCompactorMap.class);
        return job;
    }).when(compactorMr).createBaseJobConf(any(), any(), any(), any(), any(), any());
    Worker worker = Mockito.spy(new Worker());
    worker.setConf(hiveConf);
    worker.init(new AtomicBoolean(true));
    Mockito.doReturn(compactorMr).when(worker).getMrCompactor();
    // Launch the compaction asynchronously so the DDL can run while it is in flight.
    CompletableFuture<Void> compactionJob = CompletableFuture.runAsync(worker);
    // NOTE(review): fixed sleep to give the worker a head start before the DDL —
    // presumably long enough for the job to begin; timing-sensitive, confirm on slow hosts.
    Thread.sleep(1000);
    // Expected number of entries left in compaction history: the code below sets it
    // to 1 only for the TRUNCATE cases; the DROP cases expect the record to be gone.
    int compHistory = 0;
    switch(opType) {
        case "DROP_TABLE":
            runStatementOnDriver("DROP TABLE " + tblName);
            runStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " + " STORED AS ORC  TBLPROPERTIES ( 'transactional'='true' )");
            break;
        case "TRUNCATE_TABLE":
            runStatementOnDriver("TRUNCATE TABLE " + tblName);
            compHistory = 1;
            break;
        case "DROP_PARTITION":
            {
                runStatementOnDriver("ALTER TABLE " + tblName + " DROP PARTITION (p='" + partName + "')");
                runStatementOnDriver("ALTER TABLE " + tblName + " ADD PARTITION (p='" + partName + "')");
                break;
            }
        case "TRUNCATE_PARTITION":
            {
                runStatementOnDriver("TRUNCATE TABLE " + tblName + " PARTITION (p='" + partName + "')");
                compHistory = 1;
                break;
            }
    }
    // Wait for the concurrent compaction to finish before checking its outcome.
    compactionJob.join();
    ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
    Assert.assertEquals("Unexpected number of compactions in history", compHistory, resp.getCompactsSize());
    if (compHistory != 0) {
        Assert.assertEquals("Unexpected 0th compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
    }
    // No transaction should be left open after the DDL and the compaction complete.
    GetOpenTxnsResponse openResp = txnHandler.getOpenTxns();
    Assert.assertEquals(openResp.toString(), 0, openResp.getOpen_txnsSize());
    // The (re-created or truncated) table/partition directory must contain no visible files.
    FileSystem fs = FileSystem.get(hiveConf);
    FileStatus[] status = fs.listStatus(new Path(getWarehouseDir() + "/" + tblName + (isPartioned ? "/p=" + partName : "")), FileUtils.HIDDEN_FILES_PATH_FILTER);
    Assert.assertEquals(0, status.length);
}
Also used : CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) Arrays(java.util.Arrays) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) TestTxnDbUtil(org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil) FileSystem(org.apache.hadoop.fs.FileSystem) LoggerFactory(org.slf4j.LoggerFactory) OpenTxnsResponse(org.apache.hadoop.hive.metastore.api.OpenTxnsResponse) Writable(org.apache.hadoop.io.Writable) FileStatus(org.apache.hadoop.fs.FileStatus) CompactionType(org.apache.hadoop.hive.metastore.api.CompactionType) BucketCodec(org.apache.hadoop.hive.ql.io.BucketCodec) CommitTxnRequest(org.apache.hadoop.hive.metastore.api.CommitTxnRequest) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) Map(java.util.Map) GetOpenTxnsResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse) Path(org.apache.hadoop.fs.Path) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) OpenTxnRequest(org.apache.hadoop.hive.metastore.api.OpenTxnRequest) TxnStore(org.apache.hadoop.hive.metastore.txn.TxnStore) Set(java.util.Set) CompactorMR(org.apache.hadoop.hive.ql.txn.compactor.CompactorMR) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) List(java.util.List) MetastoreConf(org.apache.hadoop.hive.metastore.conf.MetastoreConf) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) CommandProcessorException(org.apache.hadoop.hive.ql.processors.CommandProcessorException) FileUtils(org.apache.hadoop.hive.common.FileUtils) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) OrcFile(org.apache.orc.OrcFile) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) AcidOpenTxnsCounterService(org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService) 
Reader(org.apache.orc.Reader) Answer(org.mockito.stubbing.Answer) AcidHouseKeeperService(org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService) ExpectedException(org.junit.rules.ExpectedException) TxnManagerFactory(org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory) Logger(org.slf4j.Logger) HiveConf(org.apache.hadoop.hive.conf.HiveConf) AcidOutputFormat(org.apache.hadoop.hive.ql.io.AcidOutputFormat) TypeDescription(org.apache.orc.TypeDescription) IOException(java.io.IOException) Test(org.junit.Test) Field(java.lang.reflect.Field) Worker(org.apache.hadoop.hive.ql.txn.compactor.Worker) File(java.io.File) TimeUnit(java.util.concurrent.TimeUnit) JobConf(org.apache.hadoop.mapred.JobConf) Mockito(org.mockito.Mockito) ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList) Rule(org.junit.Rule) Ignore(org.junit.Ignore) Assert(org.junit.Assert) MetastoreTaskThread(org.apache.hadoop.hive.metastore.MetastoreTaskThread) Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) CompactorMR(org.apache.hadoop.hive.ql.txn.compactor.CompactorMR) FileSystem(org.apache.hadoop.fs.FileSystem) Worker(org.apache.hadoop.hive.ql.txn.compactor.Worker) GetOpenTxnsResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) JobConf(org.apache.hadoop.mapred.JobConf)

Example 12 with GetOpenTxnsResponse

Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in the Apache Hive project.

From the class TestCompactionTxnHandler, method testMarkCleanedCleansTxnsAndTxnComponents.

// TODO test changes to mark cleaned to clean txns and txn_components
/**
 * Verifies that markCleaned() removes the metadata of aborted transactions whose
 * components were covered by the compaction, and that
 * cleanEmptyAbortedAndCommittedTxns() respects the open-transaction low water mark.
 */
@Test
public void testMarkCleanedCleansTxnsAndTxnComponents() throws Exception {
    // Txn 1: aborted INSERT into mydb.mytable (with an allocated table write id).
    long txnid = openTxn();
    long mytableWriteId = allocateTableWriteIds("mydb", "mytable", txnid);
    assertTrue(lockSharedWrite(txnid, "mytable", null, DataOperationType.INSERT).getState() == LockState.ACQUIRED);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
    // Txn 2: aborted DELETE on mydb.yourtable.
    txnid = openTxn();
    assertTrue(lockSharedWrite(txnid, "yourtable", null, DataOperationType.DELETE).getState() == LockState.ACQUIRED);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
    // Txn 3: aborted UPDATE touching two partitions of mydb.foo.
    txnid = openTxn();
    long fooWriteId = allocateTableWriteIds("mydb", "foo", txnid);
    assertTrue(lockSharedWrite(txnid, "foo", "bar=compact", DataOperationType.UPDATE).getState() == LockState.ACQUIRED);
    assertTrue(lockSharedWrite(txnid, "foo", "baz=compact", DataOperationType.UPDATE).getState() == LockState.ACQUIRED);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
    CompactionInfo ci;
    // Now clean them and check that they are removed from the count.
    CompactionRequest rqst = new CompactionRequest("mydb", "mytable", CompactionType.MAJOR);
    txnHandler.compact(rqst);
    assertEquals(0, txnHandler.findReadyToClean(0, 0).size());
    ci = txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION));
    assertNotNull(ci);
    ci.highestWriteId = mytableWriteId;
    txnHandler.updateCompactorState(ci, 0);
    txnHandler.markCompacted(ci);
    // Let the open-txn timeout elapse so the compaction becomes ready to clean.
    Thread.sleep(txnHandler.getOpenTxnTimeOutMillis());
    List<CompactionInfo> toClean = txnHandler.findReadyToClean(0, 0);
    assertEquals(1, toClean.size());
    txnHandler.markCleaned(ci);
    // Check that we are cleaning up the empty aborted transactions
    GetOpenTxnsResponse txnList = txnHandler.getOpenTxns();
    assertEquals(3, txnList.getOpen_txnsSize());
    // Create one aborted for low water mark
    txnid = openTxn();
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
    Thread.sleep(txnHandler.getOpenTxnTimeOutMillis());
    txnHandler.cleanEmptyAbortedAndCommittedTxns();
    txnList = txnHandler.getOpenTxns();
    assertEquals(3, txnList.getOpen_txnsSize());
    // Compact and clean the partitioned table as well.
    rqst = new CompactionRequest("mydb", "foo", CompactionType.MAJOR);
    rqst.setPartitionname("bar");
    txnHandler.compact(rqst);
    assertEquals(0, txnHandler.findReadyToClean(0, 0).size());
    ci = txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION));
    assertNotNull(ci);
    ci.highestWriteId = fooWriteId;
    txnHandler.updateCompactorState(ci, 0);
    txnHandler.markCompacted(ci);
    toClean = txnHandler.findReadyToClean(0, 0);
    assertEquals(1, toClean.size());
    txnHandler.markCleaned(ci);
    txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost"));
    // The open txn will became the low water mark
    Thread.sleep(txnHandler.getOpenTxnTimeOutMillis());
    txnHandler.setOpenTxnTimeOutMillis(1);
    txnHandler.cleanEmptyAbortedAndCommittedTxns();
    txnList = txnHandler.getOpenTxns();
    assertEquals(3, txnList.getOpen_txnsSize());
    txnHandler.setOpenTxnTimeOutMillis(1000);
}

/**
 * Builds and submits a SHARED_WRITE lock request on {@code mydb.<table>}
 * (optionally scoped to a partition) within the given transaction.
 *
 * @param txnid         transaction to lock in
 * @param table         table name within "mydb"
 * @param partition     partition name, or {@code null} for a table-level component
 * @param operationType DML operation type recorded on the lock component
 * @return the lock response from the txn handler
 */
private LockResponse lockSharedWrite(long txnid, String table, String partition, DataOperationType operationType) throws Exception {
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
    comp.setTablename(table);
    if (partition != null) {
        comp.setPartitionname(partition);
    }
    comp.setOperationType(operationType);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    return txnHandler.lock(req);
}
Also used : LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent) ArrayList(java.util.ArrayList) AbortTxnRequest(org.apache.hadoop.hive.metastore.api.AbortTxnRequest) LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse) GetOpenTxnsResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse) OpenTxnRequest(org.apache.hadoop.hive.metastore.api.OpenTxnRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest) Test(org.junit.Test)

Example 13 with GetOpenTxnsResponse

Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in the Apache Hive project.

From the class TestTxnHandlerNoConnectionPool, method testOpenTxn.

/**
 * Opens two transactions and verifies that both getOpenTxnsInfo() and
 * getOpenTxns() report them as open, with the correct high water mark,
 * ids, state, user and hostname.
 */
@Test
public void testOpenTxn() throws Exception {
    long first = openTxn();
    assertEquals(1L, first);
    long second = openTxn();
    assertEquals(2L, second);
    GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
    assertEquals(2L, txnsInfo.getTxn_high_water_mark());
    assertEquals(2, txnsInfo.getOpen_txns().size());
    assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
    assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(0).getState());
    assertEquals(2L, txnsInfo.getOpen_txns().get(1).getId());
    assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());
    assertEquals("me", txnsInfo.getOpen_txns().get(1).getUser());
    assertEquals("localhost", txnsInfo.getOpen_txns().get(1).getHostname());
    GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
    assertEquals(2L, txns.getTxn_high_water_mark());
    assertEquals(2, txns.getOpen_txns().size());
    // Java boolean arrays are zero-initialized, so no explicit reset loop is needed.
    boolean[] saw = new boolean[3];
    for (Long tid : txns.getOpen_txns()) {
        saw[tid.intValue()] = true;
    }
    for (int i = 1; i < saw.length; i++) {
        assertTrue("txn " + i + " was not reported as open", saw[i]);
    }
}
Also used : GetOpenTxnsResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse) GetOpenTxnsInfoResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse) Test(org.junit.Test)

Example 14 with GetOpenTxnsResponse

Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in the Apache Hive project.

From the class TestTxnHandler, method testValidTxnsNoneOpen.

/**
 * Verifies that after every opened transaction has been committed, both
 * getOpenTxnsInfo() and getOpenTxns() report an empty open-txn list while
 * still carrying the high water mark of 2.
 */
@Test
public void testValidTxnsNoneOpen() throws Exception {
    // Open two transactions, then commit both so none remain open.
    txnHandler.openTxns(new OpenTxnRequest(2, "me", "localhost"));
    for (long id = 1; id <= 2; id++) {
        txnHandler.commitTxn(new CommitTxnRequest(id));
    }
    GetOpenTxnsInfoResponse infoResponse = txnHandler.getOpenTxnsInfo();
    assertEquals(2L, infoResponse.getTxn_high_water_mark());
    assertEquals(0, infoResponse.getOpen_txns().size());
    GetOpenTxnsResponse openResponse = txnHandler.getOpenTxns();
    assertEquals(2L, openResponse.getTxn_high_water_mark());
    assertEquals(0, openResponse.getOpen_txns().size());
}
Also used : CommitTxnRequest(org.apache.hadoop.hive.metastore.api.CommitTxnRequest) GetOpenTxnsResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse) OpenTxnRequest(org.apache.hadoop.hive.metastore.api.OpenTxnRequest) GetOpenTxnsInfoResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse) Test(org.junit.Test)

Example 15 with GetOpenTxnsResponse

Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in the Apache Hive project.

From the class TestTxnHandler, method testOpenTxn.

/**
 * Opens two transactions and verifies that both getOpenTxnsInfo() and
 * getOpenTxns() report them as open, with the correct high water mark,
 * ids, state, user and hostname.
 */
@Test
public void testOpenTxn() throws Exception {
    long first = openTxn();
    assertEquals(1L, first);
    long second = openTxn();
    assertEquals(2L, second);
    GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
    assertEquals(2L, txnsInfo.getTxn_high_water_mark());
    assertEquals(2, txnsInfo.getOpen_txns().size());
    assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
    assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(0).getState());
    assertEquals(2L, txnsInfo.getOpen_txns().get(1).getId());
    assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());
    assertEquals("me", txnsInfo.getOpen_txns().get(1).getUser());
    assertEquals("localhost", txnsInfo.getOpen_txns().get(1).getHostname());
    GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
    assertEquals(2L, txns.getTxn_high_water_mark());
    assertEquals(2, txns.getOpen_txns().size());
    // Java boolean arrays are zero-initialized, so no explicit reset loop is needed.
    boolean[] saw = new boolean[3];
    for (Long tid : txns.getOpen_txns()) {
        saw[tid.intValue()] = true;
    }
    for (int i = 1; i < saw.length; i++) {
        assertTrue("txn " + i + " was not reported as open", saw[i]);
    }
}
Also used : GetOpenTxnsResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse) GetOpenTxnsInfoResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse) Test(org.junit.Test)

Aggregations

GetOpenTxnsResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse)20 Test (org.junit.Test)17 OpenTxnRequest (org.apache.hadoop.hive.metastore.api.OpenTxnRequest)11 ArrayList (java.util.ArrayList)7 CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest)6 GetOpenTxnsInfoResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse)6 BitSet (java.util.BitSet)5 AbortTxnRequest (org.apache.hadoop.hive.metastore.api.AbortTxnRequest)4 OpenTxnsResponse (org.apache.hadoop.hive.metastore.api.OpenTxnsResponse)4 File (java.io.File)3 HashSet (java.util.HashSet)3 List (java.util.List)3 Set (java.util.Set)3 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)3 FileStatus (org.apache.hadoop.fs.FileStatus)3 FileSystem (org.apache.hadoop.fs.FileSystem)3 Path (org.apache.hadoop.fs.Path)3 FileUtils (org.apache.hadoop.hive.common.FileUtils)3 HiveConf (org.apache.hadoop.hive.conf.HiveConf)3 ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest)3