Example 26 with ShowCompactResponseElement

Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.

From the class TestCompactionMetrics, method testUpdateCompactionMetrics.

@Test
public void testUpdateCompactionMetrics() {
    ShowCompactResponse scr = new ShowCompactResponse();
    List<ShowCompactResponseElement> elements = new ArrayList<>();
    elements.add(generateElement(1, "db", "tb", null, CompactionType.MAJOR, TxnStore.FAILED_RESPONSE));
    // Check for overwrite
    elements.add(generateElement(2, "db", "tb", null, CompactionType.MAJOR, TxnStore.INITIATED_RESPONSE));
    elements.add(generateElement(3, "db", "tb2", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE));
    elements.add(generateElement(5, "db", "tb3", "p1", CompactionType.MINOR, TxnStore.DID_NOT_INITIATE_RESPONSE));
    // Check for overwrite where the order is different
    elements.add(generateElement(4, "db", "tb3", "p1", CompactionType.MINOR, TxnStore.FAILED_RESPONSE));
    elements.add(generateElement(6, "db1", "tb", null, CompactionType.MINOR, TxnStore.FAILED_RESPONSE, System.currentTimeMillis(), true, "4.0.0", "4.0.0", 10));
    elements.add(generateElement(7, "db1", "tb2", null, CompactionType.MINOR, TxnStore.FAILED_RESPONSE));
    elements.add(generateElement(8, "db1", "tb3", null, CompactionType.MINOR, TxnStore.FAILED_RESPONSE));
    elements.add(generateElement(9, "db2", "tb", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE));
    elements.add(generateElement(10, "db2", "tb2", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE));
    elements.add(generateElement(11, "db2", "tb3", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE));
    elements.add(generateElement(12, "db2", "tb4", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE));
    elements.add(generateElement(13, "db3", "tb3", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE));
    // test null initiator version and worker version
    elements.add(generateElement(14, "db3", "tb4", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, System.currentTimeMillis(), false, null, null, 20));
    elements.add(generateElement(15, "db3", "tb5", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, System.currentTimeMillis(), true, "4.0.0", "4.0.0", 30));
    elements.add(generateElement(16, "db3", "tb6", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE));
    elements.add(generateElement(17, "db3", "tb7", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, System.currentTimeMillis(), true, "4.0.0", "4.0.0", 40));
    scr.setCompacts(elements);
    AcidMetricService.updateMetricsFromShowCompact(scr);
    Assert.assertEquals(1, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_STATUS_PREFIX + replaceWhitespace(TxnStore.DID_NOT_INITIATE_RESPONSE)).intValue());
    Assert.assertEquals(2, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_STATUS_PREFIX + TxnStore.INITIATED_RESPONSE).intValue());
    Assert.assertEquals(3, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_STATUS_PREFIX + TxnStore.FAILED_RESPONSE).intValue());
    Assert.assertEquals(4, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_STATUS_PREFIX + TxnStore.SUCCEEDED_RESPONSE).intValue());
    Assert.assertEquals(5, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_STATUS_PREFIX + TxnStore.WORKING_RESPONSE).intValue());
    Assert.assertEquals(0, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_STATUS_PREFIX + replaceWhitespace(TxnStore.CLEANING_RESPONSE)).intValue());
    Assert.assertEquals(1, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_NUM_INITIATORS).intValue());
    Assert.assertEquals(1, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_NUM_WORKERS).intValue());
    Assert.assertEquals(1, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_NUM_INITIATOR_VERSIONS).intValue());
    Assert.assertEquals(1, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_NUM_WORKER_VERSIONS).intValue());
}
Also used : ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) ArrayList(java.util.ArrayList) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)
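
The generateElement helper is defined elsewhere in TestCompactionMetrics and is not part of this snippet. As a rough sketch of what the short overload might look like (the constructor arguments and the enqueue-time setter are assumptions based on the Thrift-generated ShowCompactResponseElement API, not the actual helper):

private static ShowCompactResponseElement generateElement(long id, String db, String table,
        String partition, CompactionType type, String state) {
    // The Thrift constructor takes the required fields: dbname, tablename, type, state.
    ShowCompactResponseElement element = new ShowCompactResponseElement(db, table, type, state);
    element.setId(id);
    if (partition != null) {
        element.setPartitionname(partition);
    }
    // Assumed: the element carries an enqueue time, which the longer overloads
    // used above override together with the initiator/worker versions.
    element.setEnqueueTime(System.currentTimeMillis());
    return element;
}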

Example 27 with ShowCompactResponseElement

Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.

From the class TestCompactionMetrics, method testInitiatedAgeMetrics.

@Test
public void testInitiatedAgeMetrics() {
    ShowCompactResponse scr = new ShowCompactResponse();
    long start = System.currentTimeMillis() - 1000L;
    List<ShowCompactResponseElement> elements = ImmutableList.of(generateElement(15, "db3", "tb5", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE, start));
    scr.setCompacts(elements);
    AcidMetricService.updateMetricsFromShowCompact(scr);
    long diff = (System.currentTimeMillis() - start) / 1000;
    // Check that the compaction age is at least 1s, but no more than the elapsed time
    int age = Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_OLDEST_ENQUEUE_AGE).intValue();
    Assert.assertTrue(age <= diff);
    Assert.assertTrue(age >= 1);
}
Also used : ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)
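
The age gauge asserted here is, conceptually, the elapsed time since the oldest still-initiated compaction was enqueued. A rough sketch of that derivation (the real computation lives in AcidMetricService.updateMetricsFromShowCompact; the isSetEnqueueTime/getEnqueueTime accessors are assumptions based on the Thrift fields):

// Sketch only: derive the oldest-enqueue age in seconds from a ShowCompactResponse.
long oldestEnqueueTime = Long.MAX_VALUE;
for (ShowCompactResponseElement e : scr.getCompacts()) {
    if (TxnStore.INITIATED_RESPONSE.equals(e.getState()) && e.isSetEnqueueTime()) {
        oldestEnqueueTime = Math.min(oldestEnqueueTime, e.getEnqueueTime());
    }
}
int ageSeconds = (oldestEnqueueTime == Long.MAX_VALUE)
        ? 0
        : (int) ((System.currentTimeMillis() - oldestEnqueueTime) / 1000);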

Example 28 with ShowCompactResponseElement

Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.

From the class TestWorker, method majorTableLegacy.

@Test
public void majorTableLegacy() throws Exception {
    LOG.debug("Starting majorTableLegacy");
    Table t = newTable("default", "matl", false);
    addLegacyFile(t, null, 20);
    addDeltaFile(t, null, 21L, 22L, 2);
    addDeltaFile(t, null, 23L, 24L, 2);
    burnThroughTransactions("default", "matl", 25);
    CompactionRequest rqst = new CompactionRequest("default", "matl", CompactionType.MAJOR);
    txnHandler.compact(rqst);
    startWorker();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
    // There should now be 5 directories in the location
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
    // Assert.assertEquals(4, stat.length);
    // Find the new delta file and make sure it has the right contents
    boolean sawNewBase = false;
    for (int i = 0; i < stat.length; i++) {
        if (stat[i].getPath().getName().equals("base_0000024_v0000026")) {
            sawNewBase = true;
            FileStatus[] buckets = fs.listStatus(stat[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
            Assert.assertEquals(2, buckets.length);
            Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
            Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
            Assert.assertEquals(624L, buckets[0].getLen());
            Assert.assertEquals(624L, buckets[1].getLen());
        } else {
            LOG.debug("This is not the file you are looking for " + stat[i].getPath().getName());
        }
    }
    Assert.assertTrue(toString(stat), sawNewBase);
}
Also used : Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hive.metastore.api.Table) FileStatus(org.apache.hadoop.fs.FileStatus) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) FileSystem(org.apache.hadoop.fs.FileSystem) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)
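
The directory name checked above, base_0000024_v0000026, follows the ACID naming scheme: a compacted base carries the highest write id it covers (24) and a visibility suffix with the compactor transaction id (26), each zero-padded to seven digits. An illustrative formatter, assuming that convention (Hive's real helpers live in AcidUtils):

static String compactedBaseDirName(long highestWriteId, long visibilityTxnId) {
    // e.g. compactedBaseDirName(24, 26) -> "base_0000024_v0000026"
    return String.format("base_%07d_v%07d", highestWriteId, visibilityTxnId);
}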

Example 29 with ShowCompactResponseElement

Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.

From the class TestWorker, method minorPartitionWithBase.

@Test
public void minorPartitionWithBase() throws Exception {
    Table t = newTable("default", "mpwb", true);
    Partition p = newPartition(t, "today");
    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 22L, 2);
    addDeltaFile(t, p, 23L, 24L, 2);
    burnThroughTransactions("default", "mpwb", 25);
    CompactionRequest rqst = new CompactionRequest("default", "mpwb", CompactionType.MINOR);
    rqst.setPartitionname("ds=today");
    txnHandler.compact(rqst);
    // This will create delta_21_24 and delete_delta_21_24. See MockRawReader
    startWorker();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
    // There should still be five directories in the location.
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] stat = fs.listStatus(new Path(p.getSd().getLocation()));
    Assert.assertEquals(5, stat.length);
    // Find the new delta file and make sure it has the right contents
    boolean sawNewDelta = false;
    for (int i = 0; i < stat.length; i++) {
        if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24) + "_v0000026")) {
            sawNewDelta = true;
            FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter);
            Assert.assertEquals(2, buckets.length);
            Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
            Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
            Assert.assertEquals(104L, buckets[0].getLen());
            Assert.assertEquals(104L, buckets[1].getLen());
        }
        if (stat[i].getPath().getName().equals(makeDeleteDeltaDirNameCompacted(21, 24))) {
            sawNewDelta = true;
            FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter);
            Assert.assertEquals(2, buckets.length);
            Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
            Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
            Assert.assertEquals(104L, buckets[0].getLen());
            Assert.assertEquals(104L, buckets[1].getLen());
        } else {
            LOG.debug("This is not the delta file you are looking for " + stat[i].getPath().getName());
        }
    }
    Assert.assertTrue(toString(stat), sawNewDelta);
}
Also used : Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) FileStatus(org.apache.hadoop.fs.FileStatus) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) FileSystem(org.apache.hadoop.fs.FileSystem) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)
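
The makeDeltaDirNameCompacted and makeDeleteDeltaDirNameCompacted helpers are defined in the compactor test hierarchy and are not shown here. Assuming the same seven-digit zero-padded convention as the base directories, they plausibly reduce to the following sketch (the visibility suffix, e.g. _v0000026, is appended separately by the test):

static String makeDeltaDirNameCompacted(long minWriteId, long maxWriteId) {
    // e.g. makeDeltaDirNameCompacted(21, 24) -> "delta_0000021_0000024"
    return String.format("delta_%07d_%07d", minWriteId, maxWriteId);
}

static String makeDeleteDeltaDirNameCompacted(long minWriteId, long maxWriteId) {
    // e.g. makeDeleteDeltaDirNameCompacted(21, 24) -> "delete_delta_0000021_0000024"
    return String.format("delete_delta_%07d_%07d", minWriteId, maxWriteId);
}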

Example 30 with ShowCompactResponseElement

Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.

From the class TestCleaner, method delayedCleanupAfterMinorAndMajorCompaction.

@Test
public void delayedCleanupAfterMinorAndMajorCompaction() throws Exception {
    Table t = newTable("default", "dcamimcop", true);
    Partition p = newPartition(t, "today");
    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 21L, 1);
    addDeltaFile(t, p, 22L, 22L, 1);
    burnThroughTransactions("default", "dcamimcop", 22);
    CompactionRequest rqst = new CompactionRequest("default", "dcamimcop", CompactionType.MINOR);
    rqst.setPartitionname("ds=today");
    compactInTxn(rqst);
    addDeltaFile(t, p, 21L, 22L, 2);
    // one more delta after compact
    addDeltaFile(t, p, 23L, 23L, 1);
    burnThroughTransactions("default", "dcamimcop", 1);
    conf.setBoolVar(HIVE_COMPACTOR_DELAYED_CLEANUP_ENABLED, true);
    conf.setTimeVar(HIVE_COMPACTOR_CLEANER_RETENTION_TIME, 5, TimeUnit.SECONDS);
    // putting current thread to sleep to get past the retention time
    Thread.sleep(conf.getTimeVar(HIVE_COMPACTOR_CLEANER_RETENTION_TIME, TimeUnit.MILLISECONDS));
    rqst = new CompactionRequest("default", "dcamimcop", CompactionType.MAJOR);
    rqst.setPartitionname("ds=today");
    long compactTxn = compactInTxn(rqst);
    addBaseFile(t, p, 23L, 23, compactTxn);
    // This should clean up after the minor compaction and leave the major, so delta_23 remains
    startCleaner();
    // Check the compaction requests: the minor has succeeded, the major is still cleaning.
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    Assert.assertEquals(2, rsp.getCompactsSize());
    for (ShowCompactResponseElement c : rsp.getCompacts()) {
        if (c.getType() == CompactionType.MAJOR) {
            Assert.assertEquals(TxnStore.CLEANING_RESPONSE, c.getState());
        } else {
            Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, c.getState());
        }
    }
    // Check that the files are removed
    List<Path> paths = getDirectories(conf, t, p);
    // base_20, minor delta, delta_23 and base_23
    Assert.assertEquals(4, paths.size());
    // putting current thread to sleep to get past the retention time
    Thread.sleep(conf.getTimeVar(HIVE_COMPACTOR_CLEANER_RETENTION_TIME, TimeUnit.MILLISECONDS));
    startCleaner();
    // Check that both compaction requests are now in the succeeded state.
    rsp = txnHandler.showCompact(new ShowCompactRequest());
    Assert.assertEquals(2, rsp.getCompactsSize());
    Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
    Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(1).getState());
    // Check that the files are removed
    paths = getDirectories(conf, t, p);
    Assert.assertEquals(1, paths.size());
    Assert.assertEquals("base_23_v25", paths.get(0).getName());
}
Also used : Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)
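
The delayed-cleanup behavior exercised above hinges on two settings that the test imports statically from HiveConf.ConfVars. A minimal, self-contained sketch of enabling it:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hive.conf.HiveConf;

public class DelayedCleanupConfigSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Hold obsolete files for 5 seconds after compaction before the
        // cleaner is allowed to delete them.
        conf.setBoolVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELAYED_CLEANUP_ENABLED, true);
        conf.setTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_CLEANER_RETENTION_TIME, 5, TimeUnit.SECONDS);
    }
}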

Aggregations

ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement): 100 usages
ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse): 87 usages
Test (org.junit.Test): 81 usages
ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest): 79 usages
Table (org.apache.hadoop.hive.metastore.api.Table): 58 usages
CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest): 46 usages
ArrayList (java.util.ArrayList): 42 usages
Partition (org.apache.hadoop.hive.metastore.api.Partition): 27 usages
LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent): 26 usages
LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest): 26 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 25 usages
LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse): 25 usages
Path (org.apache.hadoop.fs.Path): 24 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 20 usages
CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest): 17 usages
TxnStore (org.apache.hadoop.hive.metastore.txn.TxnStore): 16 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 11 usages
CompactionInfo (org.apache.hadoop.hive.metastore.txn.CompactionInfo): 11 usages
IOException (java.io.IOException): 9 usages
List (java.util.List): 9 usages