
Example 1 with ShowCompactResponseElement

Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.

From class DDLTask, method showCompactions.

private int showCompactions(Hive db, ShowCompactionsDesc desc) throws HiveException {
    // Call the metastore to get the status of all known compactions (completed ones are eventually purged)
    ShowCompactResponse rsp = db.showCompactions();
    // Write the results into the file
    final String noVal = " --- ";
    DataOutputStream os = getOutputStream(desc.getResFile());
    try {
        // Write a header
        os.writeBytes("CompactionId");
        os.write(separator);
        os.writeBytes("Database");
        os.write(separator);
        os.writeBytes("Table");
        os.write(separator);
        os.writeBytes("Partition");
        os.write(separator);
        os.writeBytes("Type");
        os.write(separator);
        os.writeBytes("State");
        os.write(separator);
        os.writeBytes("Worker");
        os.write(separator);
        os.writeBytes("Start Time");
        os.write(separator);
        os.writeBytes("Duration(ms)");
        os.write(separator);
        os.writeBytes("HadoopJobId");
        os.write(terminator);
        if (rsp.getCompacts() != null) {
            for (ShowCompactResponseElement e : rsp.getCompacts()) {
                os.writeBytes(Long.toString(e.getId()));
                os.write(separator);
                os.writeBytes(e.getDbname());
                os.write(separator);
                os.writeBytes(e.getTablename());
                os.write(separator);
                String part = e.getPartitionname();
                os.writeBytes(part == null ? noVal : part);
                os.write(separator);
                os.writeBytes(e.getType().toString());
                os.write(separator);
                os.writeBytes(e.getState());
                os.write(separator);
                String wid = e.getWorkerid();
                os.writeBytes(wid == null ? noVal : wid);
                os.write(separator);
                os.writeBytes(e.isSetStart() ? Long.toString(e.getStart()) : noVal);
                os.write(separator);
                os.writeBytes(e.isSetEndTime() ? Long.toString(e.getEndTime() - e.getStart()) : noVal);
                os.write(separator);
                os.writeBytes(e.isSetHadoopJobId() ? e.getHadoopJobId() : noVal);
                os.write(terminator);
            }
        }
    } catch (IOException e) {
        LOG.warn("show compactions: ", e);
        return 1;
    } finally {
        IOUtils.closeStream(os);
    }
    return 0;
}
Also used : ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) DataOutputStream(java.io.DataOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement)
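
If only the response objects are needed rather than DDLTask's file-writing plumbing, the same metastore call can be wrapped in a few lines. Below is a minimal sketch that filters the response by table; it assumes an initialized Hive session object, and the names CompactionLister and printCompactionsFor are hypothetical:

import java.util.List;

import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class CompactionLister {

    // Print id, type and state for every known compaction of one table.
    // Mirrors the iteration in DDLTask.showCompactions above, minus the file output.
    static void printCompactionsFor(Hive db, String dbName, String tblName) throws HiveException {
        ShowCompactResponse rsp = db.showCompactions();
        List<ShowCompactResponseElement> compacts = rsp.getCompacts();
        if (compacts == null) {
            return; // the response may carry no list at all, as the null check above shows
        }
        for (ShowCompactResponseElement e : compacts) {
            if (dbName.equals(e.getDbname()) && tblName.equals(e.getTablename())) {
                System.out.println(e.getId() + "\t" + e.getType() + "\t" + e.getState());
            }
        }
    }
}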

Example 2 with ShowCompactResponseElement

Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.

From class DDLTask, method compact.

private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException {
    Table tbl = db.getTable(desc.getTableName());
    if (!AcidUtils.isTransactionalTable(tbl)) {
        throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, tbl.getDbName(), tbl.getTableName());
    }
    String partName = null;
    if (desc.getPartSpec() == null) {
        // Compaction can only be done on the whole table if the table is non-partitioned.
        if (tbl.isPartitioned()) {
            throw new HiveException(ErrorMsg.NO_COMPACTION_PARTITION);
        }
    } else {
        Map<String, String> partSpec = desc.getPartSpec();
        List<Partition> partitions = db.getPartitions(tbl, partSpec);
        if (partitions.size() > 1) {
            throw new HiveException(ErrorMsg.TOO_MANY_COMPACTION_PARTITIONS);
        } else if (partitions.size() == 0) {
            throw new HiveException(ErrorMsg.INVALID_PARTITION_SPEC);
        }
        partName = partitions.get(0).getName();
    }
    CompactionResponse resp = db.compact2(tbl.getDbName(), tbl.getTableName(), partName, desc.getCompactionType(), desc.getProps());
    if (resp.isAccepted()) {
        console.printInfo("Compaction enqueued with id " + resp.getId());
    } else {
        console.printInfo("Compaction already enqueued with id " + resp.getId() + "; State is " + resp.getState());
    }
    if (desc.isBlocking() && resp.isAccepted()) {
        StringBuilder progressDots = new StringBuilder();
        long waitTimeMs = 1000;
        wait: while (true) {
            // Double the wait time on each iteration, capped at 5 minutes.
            waitTimeMs = waitTimeMs * 2;
            waitTimeMs = waitTimeMs < 5 * 60 * 1000 ? waitTimeMs : 5 * 60 * 1000;
            try {
                Thread.sleep(waitTimeMs);
            } catch (InterruptedException ex) {
                console.printInfo("Interrupted while waiting for compaction with id=" + resp.getId());
                break;
            }
            // this could be expensive when there are a lot of compactions....
            // todo: update to search by ID once HIVE-13353 is done
            ShowCompactResponse allCompactions = db.showCompactions();
            for (ShowCompactResponseElement compaction : allCompactions.getCompacts()) {
                if (resp.getId() != compaction.getId()) {
                    continue;
                }
                switch(compaction.getState()) {
                    case TxnStore.WORKING_RESPONSE:
                    case TxnStore.INITIATED_RESPONSE:
                        // still working
                        console.printInfo(progressDots.toString());
                        progressDots.append(".");
                        continue wait;
                    default:
                        // done
                        console.printInfo("Compaction with id " + resp.getId() + " finished with status: " + compaction.getState());
                        break wait;
                }
            }
        }
    }
    return 0;
}
Also used : Partition(org.apache.hadoop.hive.ql.metadata.Partition) AlterTableExchangePartition(org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition) TextMetaDataTable(org.apache.hadoop.hive.ql.metadata.formatting.TextMetaDataTable) Table(org.apache.hadoop.hive.ql.metadata.Table) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) CompactionResponse(org.apache.hadoop.hive.metastore.api.CompactionResponse) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement)
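
Until the search-by-id from HIVE-13353 is available, every poll has to scan the full compaction list the way the loop above does. A minimal sketch that extracts that scan into a reusable helper; CompactionPoller, stateOf and stillRunning are hypothetical names:

import java.util.List;

import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class CompactionPoller {

    // Linear scan for one compaction id; returns its state, or null if unknown.
    static String stateOf(Hive db, long compactionId) throws HiveException {
        List<ShowCompactResponseElement> compacts = db.showCompactions().getCompacts();
        if (compacts == null) {
            return null; // Example 1 shows the list may be absent
        }
        for (ShowCompactResponseElement e : compacts) {
            if (e.getId() == compactionId) {
                return e.getState();
            }
        }
        return null;
    }

    // "initiated" and "working" are the two states the blocking loop above keeps waiting on.
    // Typical poll: while (stillRunning(stateOf(db, id))) Thread.sleep(backoffMs);
    static boolean stillRunning(String state) {
        return TxnStore.INITIATED_RESPONSE.equals(state) || TxnStore.WORKING_RESPONSE.equals(state);
    }
}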

Example 3 with ShowCompactResponseElement

Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.

From class TestCompactor, method testCompactionOnDataLoadedInPath.

/**
 * Tests compaction of tables that were populated by LOAD DATA INPATH statements.
 *
 * In this scenario the original ORC files are structured in the following way:
 * comp3
 * |--delta_0000001_0000001_0000
 *    |--000000_0
 * |--delta_0000002_0000002_0000
 *    |--000000_0
 *    |--000001_0
 *
 * ...where the comp3 table is not bucketed.
 *
 * @throws Exception
 */
@Test
public void testCompactionOnDataLoadedInPath() throws Exception {
    // Setup of LOAD INPATH scenario.
    executeStatementOnDriver("drop table if exists comp0", driver);
    executeStatementOnDriver("drop table if exists comp1", driver);
    executeStatementOnDriver("drop table if exists comp3", driver);
    executeStatementOnDriver("create external table comp0 (a string)", driver);
    executeStatementOnDriver("insert into comp0 values ('1111111111111')", driver);
    executeStatementOnDriver("insert into comp0 values ('2222222222222')", driver);
    executeStatementOnDriver("insert into comp0 values ('3333333333333')", driver);
    executeStatementOnDriver("create external table comp1 stored as orc as select * from comp0", driver);
    executeStatementOnDriver("create table comp3 (a string) stored as orc " + "TBLPROPERTIES ('transactional'='true')", driver);
    IMetaStoreClient hmsClient = new HiveMetaStoreClient(conf);
    Table table = hmsClient.getTable("default", "comp1");
    FileSystem fs = FileSystem.get(conf);
    Path path000 = fs.listStatus(new Path(table.getSd().getLocation()))[0].getPath();
    Path path001 = new Path(path000.toString().replace("000000", "000001"));
    Path path002 = new Path(path000.toString().replace("000000", "000002"));
    fs.copyFromLocalFile(path000, path001);
    fs.copyFromLocalFile(path000, path002);
    executeStatementOnDriver("load data inpath '" + path002.toString() + "' into table comp3", driver);
    executeStatementOnDriver("load data inpath '" + path002.getParent().toString() + "' into table comp3", driver);
    // Run compaction.
    TxnStore txnHandler = TxnUtils.getTxnStore(conf);
    CompactionRequest rqst = new CompactionRequest("default", "comp3", CompactionType.MAJOR);
    txnHandler.compact(rqst);
    runWorker(conf);
    ShowCompactRequest scRqst = new ShowCompactRequest();
    List<ShowCompactResponseElement> compacts = txnHandler.showCompact(scRqst).getCompacts();
    assertEquals(1, compacts.size());
    assertEquals(TxnStore.CLEANING_RESPONSE, compacts.get(0).getState());
    runCleaner(conf);
    compacts = txnHandler.showCompact(scRqst).getCompacts();
    assertEquals(1, compacts.size());
    assertEquals(TxnStore.SUCCEEDED_RESPONSE, compacts.get(0).getState());
    // Check compacted content and file structure.
    table = hmsClient.getTable("default", "comp3");
    List<String> rs = execSelectAndDumpData("select * from comp3", driver, "select");
    assertEquals(9, rs.size());
    assertEquals(3, rs.stream().filter(p -> "1111111111111".equals(p)).count());
    assertEquals(3, rs.stream().filter(p -> "2222222222222".equals(p)).count());
    assertEquals(3, rs.stream().filter(p -> "3333333333333".equals(p)).count());
    FileStatus[] files = fs.listStatus(new Path(table.getSd().getLocation()));
    // base dir
    assertEquals(1, files.length);
    assertEquals("base_0000002_v0000012", files[0].getPath().getName());
    files = fs.listStatus(files[0].getPath(), AcidUtils.bucketFileFilter);
    // files
    assertEquals(2, files.length);
    // Each expected bucket file is present exactly once.
    assertEquals(1, Arrays.stream(files).filter(p -> "bucket_00000".equals(p.getPath().getName())).count());
    assertEquals(1, Arrays.stream(files).filter(p -> "bucket_00001".equals(p.getPath().getName())).count());
    // Another insert into the newly compacted table.
    executeStatementOnDriver("insert into comp3 values ('4444444444444')", driver);
    // Compact with extra row too.
    txnHandler.compact(rqst);
    runWorker(conf);
    compacts = txnHandler.showCompact(scRqst).getCompacts();
    assertEquals(2, compacts.size());
    assertEquals(TxnStore.CLEANING_RESPONSE, compacts.get(0).getState());
    runCleaner(conf);
    compacts = txnHandler.showCompact(scRqst).getCompacts();
    assertEquals(2, compacts.size());
    assertEquals(TxnStore.SUCCEEDED_RESPONSE, compacts.get(0).getState());
    // Check compacted content and file structure.
    rs = execSelectAndDumpData("select * from comp3", driver, "select");
    assertEquals(10, rs.size());
    assertEquals(3, rs.stream().filter(p -> "1111111111111".equals(p)).count());
    assertEquals(3, rs.stream().filter(p -> "2222222222222".equals(p)).count());
    assertEquals(3, rs.stream().filter(p -> "3333333333333".equals(p)).count());
    assertEquals(1, rs.stream().filter(p -> "4444444444444".equals(p)).count());
    files = fs.listStatus(new Path(table.getSd().getLocation()));
    // base dir
    assertEquals(1, files.length);
    assertEquals("base_0000004_v0000016", files[0].getPath().getName());
    files = fs.listStatus(files[0].getPath(), AcidUtils.bucketFileFilter);
    // files
    assertEquals(2, files.length);
    // Each expected bucket file is present exactly once.
    assertEquals(1, Arrays.stream(files).filter(p -> "bucket_00000".equals(p.getPath().getName())).count());
    assertEquals(1, Arrays.stream(files).filter(p -> "bucket_00001".equals(p.getPath().getName())).count());
}
Also used : Path(org.apache.hadoop.fs.Path) HiveMetaStoreClient(org.apache.hadoop.hive.metastore.HiveMetaStoreClient) Table(org.apache.hadoop.hive.metastore.api.Table) FileStatus(org.apache.hadoop.fs.FileStatus) IMetaStoreClient(org.apache.hadoop.hive.metastore.IMetaStoreClient) FileSystem(org.apache.hadoop.fs.FileSystem) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) TxnStore(org.apache.hadoop.hive.metastore.txn.TxnStore) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)
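
The test pins down the standard compaction lifecycle: the Worker moves a queue entry to the "ready for cleaning" state and the Cleaner moves it to "succeeded". A minimal sketch of the queue-and-inspect step, assuming a test HiveConf and an empty compaction queue; CompactionStates and queueAndInspect are hypothetical names:

import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.CompactionRequest;
import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class CompactionStates {

    // Queue a MAJOR compaction and report the state of the first queue entry,
    // as the test above does with compacts.get(0). Assumes the queue was empty
    // before the call, so index 0 is the request we just made.
    static String queueAndInspect(HiveConf conf, String dbName, String tblName) throws Exception {
        TxnStore txnHandler = TxnUtils.getTxnStore(conf);
        txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MAJOR));
        List<ShowCompactResponseElement> compacts =
                txnHandler.showCompact(new ShowCompactRequest()).getCompacts();
        return compacts.get(0).getState(); // "initiated" until a Worker picks it up
    }
}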

Example 4 with ShowCompactResponseElement

Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.

From class TestCompactor, method schemaEvolutionAddColDynamicPartitioningInsert.

/**
 * Simple schema evolution: add columns, with dynamic partitioning.
 *
 * @throws Exception
 */
@Test
public void schemaEvolutionAddColDynamicPartitioningInsert() throws Exception {
    String tblName = "dpct";
    executeStatementOnDriver("drop table if exists " + tblName, driver);
    executeStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " + " PARTITIONED BY(ds string)" + // currently ACID requires table to be bucketed
    " CLUSTERED BY(a) INTO 2 BUCKETS" + " STORED AS ORC TBLPROPERTIES ('transactional'='true')", driver);
    // First INSERT round.
    executeStatementOnDriver("insert into " + tblName + " partition (ds) values (1, 'fred', " + "'today'), (2, 'wilma', 'yesterday')", driver);
    // ALTER TABLE ... ADD COLUMNS
    executeStatementOnDriver("ALTER TABLE " + tblName + " ADD COLUMNS(c int)", driver);
    // Validate there is an added NULL for column c.
    executeStatementOnDriver("SELECT * FROM " + tblName + " ORDER BY a", driver);
    ArrayList<String> valuesReadFromHiveDriver = new ArrayList<String>();
    driver.getResults(valuesReadFromHiveDriver);
    Assert.assertEquals(2, valuesReadFromHiveDriver.size());
    Assert.assertEquals("1\tfred\tNULL\ttoday", valuesReadFromHiveDriver.get(0));
    Assert.assertEquals("2\twilma\tNULL\tyesterday", valuesReadFromHiveDriver.get(1));
    // Second INSERT round with new inserts into previously existing partition 'yesterday'.
    executeStatementOnDriver("insert into " + tblName + " partition (ds) values " + "(3, 'mark', 1900, 'soon'), (4, 'douglas', 1901, 'last_century'), " + "(5, 'doc', 1902, 'yesterday')", driver);
    // Validate the new insertions for column c.
    executeStatementOnDriver("SELECT * FROM " + tblName + " ORDER BY a", driver);
    valuesReadFromHiveDriver = new ArrayList<String>();
    driver.getResults(valuesReadFromHiveDriver);
    Assert.assertEquals(5, valuesReadFromHiveDriver.size());
    Assert.assertEquals("1\tfred\tNULL\ttoday", valuesReadFromHiveDriver.get(0));
    Assert.assertEquals("2\twilma\tNULL\tyesterday", valuesReadFromHiveDriver.get(1));
    Assert.assertEquals("3\tmark\t1900\tsoon", valuesReadFromHiveDriver.get(2));
    Assert.assertEquals("4\tdouglas\t1901\tlast_century", valuesReadFromHiveDriver.get(3));
    Assert.assertEquals("5\tdoc\t1902\tyesterday", valuesReadFromHiveDriver.get(4));
    conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 0);
    runInitiator(conf);
    TxnStore txnHandler = TxnUtils.getTxnStore(conf);
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(4, compacts.size());
    SortedSet<String> partNames = new TreeSet<String>();
    verifyCompactions(compacts, partNames, tblName);
    List<String> names = new ArrayList<String>(partNames);
    Assert.assertEquals("ds=last_century", names.get(0));
    Assert.assertEquals("ds=soon", names.get(1));
    Assert.assertEquals("ds=today", names.get(2));
    Assert.assertEquals("ds=yesterday", names.get(3));
    // Validate after compaction.
    executeStatementOnDriver("SELECT * FROM " + tblName + " ORDER BY a", driver);
    valuesReadFromHiveDriver = new ArrayList<String>();
    driver.getResults(valuesReadFromHiveDriver);
    Assert.assertEquals(5, valuesReadFromHiveDriver.size());
    Assert.assertEquals("1\tfred\tNULL\ttoday", valuesReadFromHiveDriver.get(0));
    Assert.assertEquals("2\twilma\tNULL\tyesterday", valuesReadFromHiveDriver.get(1));
    Assert.assertEquals("3\tmark\t1900\tsoon", valuesReadFromHiveDriver.get(2));
    Assert.assertEquals("4\tdouglas\t1901\tlast_century", valuesReadFromHiveDriver.get(3));
    Assert.assertEquals("5\tdoc\t1902\tyesterday", valuesReadFromHiveDriver.get(4));
}
Also used : ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) TxnStore(org.apache.hadoop.hive.metastore.txn.TxnStore) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)
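
The trigger worth noting is HIVE_COMPACTOR_DELTA_NUM_THRESHOLD set to 0: any partition with at least one delta becomes eligible, so a single Initiator pass queues one compaction per dynamic partition. A minimal sketch of that trigger using the same test harness helpers; InitiatorTrigger and queueAllEligible are hypothetical names:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;
import static org.apache.hadoop.hive.ql.TestTxnCommands2.runInitiator;

public class InitiatorTrigger {

    // Lower the delta threshold to zero and run one Initiator cycle, then
    // report how many compactions ended up in the queue (4 in the test above,
    // one per dynamic partition).
    static int queueAllEligible(HiveConf conf) throws Exception {
        conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 0);
        runInitiator(conf);
        TxnStore txnHandler = TxnUtils.getTxnStore(conf);
        return txnHandler.showCompact(new ShowCompactRequest()).getCompacts().size();
    }
}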

Example 5 with ShowCompactResponseElement

Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.

From class TestCompactor, method verifyCompactions.

private void verifyCompactions(List<ShowCompactResponseElement> compacts, SortedSet<String> partNames, String tblName) {
    for (ShowCompactResponseElement compact : compacts) {
        Assert.assertEquals("default", compact.getDbname());
        Assert.assertEquals(tblName, compact.getTablename());
        Assert.assertEquals("initiated", compact.getState());
        partNames.add(compact.getPartitionname());
    }
}
Also used : ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement)
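
Since verifyCompactions only checks database, table and state while harvesting partition names, callers still assert the collected names themselves, as Example 4 does. A hypothetical self-contained variant that fetches and verifies in one step; VerifyCompactionsUsage and initiatedPartitions are invented names:

import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.junit.Assert;

public class VerifyCompactionsUsage {

    // Fetch all compactions, assert each targets tblName in "initiated" state,
    // and return the sorted partition names for follow-up assertions.
    static SortedSet<String> initiatedPartitions(TxnStore txnHandler, String tblName) throws Exception {
        List<ShowCompactResponseElement> compacts =
                txnHandler.showCompact(new ShowCompactRequest()).getCompacts();
        SortedSet<String> partNames = new TreeSet<String>();
        for (ShowCompactResponseElement compact : compacts) {
            Assert.assertEquals("default", compact.getDbname());
            Assert.assertEquals(tblName, compact.getTablename());
            Assert.assertEquals("initiated", compact.getState());
            partNames.add(compact.getPartitionname());
        }
        return partNames;
    }
}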

Aggregations

ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement): 100 usages
ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse): 87 usages
Test (org.junit.Test): 81 usages
ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest): 79 usages
Table (org.apache.hadoop.hive.metastore.api.Table): 58 usages
CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest): 46 usages
ArrayList (java.util.ArrayList): 42 usages
Partition (org.apache.hadoop.hive.metastore.api.Partition): 27 usages
LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent): 26 usages
LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest): 26 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 25 usages
LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse): 25 usages
Path (org.apache.hadoop.fs.Path): 24 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 20 usages
CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest): 17 usages
TxnStore (org.apache.hadoop.hive.metastore.txn.TxnStore): 16 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 11 usages
CompactionInfo (org.apache.hadoop.hive.metastore.txn.CompactionInfo): 11 usages
IOException (java.io.IOException): 9 usages
List (java.util.List): 9 usages