Example 46 with CompactionRequest

Use of org.apache.hadoop.hive.metastore.api.CompactionRequest in project hive by apache.

The class TestWorker, method majorWithOpenInMiddle.

@Test
public void majorWithOpenInMiddle() throws Exception {
    LOG.debug("Starting majorWithOpenInMiddle");
    Table t = newTable("default", "mtwb", false);
    addBaseFile(t, null, 20L, 20);
    addDeltaFile(t, null, 21L, 22L, 2);
    addDeltaFile(t, null, 23L, 25L, 3);
    addLengthFile(t, null, 23L, 25L, 3);
    addDeltaFile(t, null, 26L, 27L, 2);
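    // Leave transaction 23 open; the major compaction can then only cover transactions up to 22, producing base_0000022.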
    burnThroughTransactions(27, new HashSet<Long>(Arrays.asList(23L)), null);
    CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MAJOR);
    txnHandler.compact(rqst);
    startWorker();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
    // There should now be 5 directories in the location
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
    Assert.assertEquals(5, stat.length);
    // Sort the directories and verify that the new base and the original directories are all present
    Arrays.sort(stat);
    Assert.assertEquals("base_0000022", stat[0].getPath().getName());
    Assert.assertEquals("base_20", stat[1].getPath().getName());
    Assert.assertEquals(makeDeltaDirName(21, 22), stat[2].getPath().getName());
    Assert.assertEquals(makeDeltaDirName(23, 25), stat[3].getPath().getName());
    Assert.assertEquals(makeDeltaDirName(26, 27), stat[4].getPath().getName());
}
Also used : Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hive.metastore.api.Table) FileStatus(org.apache.hadoop.fs.FileStatus) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) FileSystem(org.apache.hadoop.fs.FileSystem) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)

Example 47 with CompactionRequest

Use of org.apache.hadoop.hive.metastore.api.CompactionRequest in project hive by apache.

The class TestWorker, method droppedPartition.

@Test
public void droppedPartition() throws Exception {
    Table t = newTable("default", "dp", true);
    Partition p = newPartition(t, "today");
    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 22L, 2);
    addDeltaFile(t, p, 23L, 24L, 2);
    burnThroughTransactions(25);
    CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MINOR);
    rqst.setPartitionname("ds=today");
    txnHandler.compact(rqst);
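    // Drop the partition before the worker runs; the compaction should then be reported as succeeded.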
    ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
    startWorker();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState()));
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)

Example 48 with CompactionRequest

Use of org.apache.hadoop.hive.metastore.api.CompactionRequest in project hive by apache.

The class TestWorker, method majorTableNoBase.

@Test
public void majorTableNoBase() throws Exception {
    LOG.debug("Starting majorTableNoBase");
    Table t = newTable("default", "matnb", false);
    addDeltaFile(t, null, 1L, 2L, 2);
    addDeltaFile(t, null, 3L, 4L, 2);
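    // No base file is added, only deltas; the major compaction should produce base_0000004.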
    burnThroughTransactions(4);
    CompactionRequest rqst = new CompactionRequest("default", "matnb", CompactionType.MAJOR);
    txnHandler.compact(rqst);
    startWorker();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
    // There should now be 3 directories in the location
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
    Assert.assertEquals(3, stat.length);
    // Find the new base file and make sure it has the right contents
    boolean sawNewBase = false;
    for (int i = 0; i < stat.length; i++) {
        if (stat[i].getPath().getName().equals("base_0000004")) {
            sawNewBase = true;
            FileStatus[] buckets = fs.listStatus(stat[i].getPath());
            Assert.assertEquals(2, buckets.length);
            Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
            Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
            Assert.assertEquals(104L, buckets[0].getLen());
            Assert.assertEquals(104L, buckets[1].getLen());
        } else {
            LOG.debug("This is not the file you are looking for " + stat[i].getPath().getName());
        }
    }
    Assert.assertTrue(sawNewBase);
}
Also used : Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hive.metastore.api.Table) FileStatus(org.apache.hadoop.fs.FileStatus) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) FileSystem(org.apache.hadoop.fs.FileSystem) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)

Example 49 with CompactionRequest

Use of org.apache.hadoop.hive.metastore.api.CompactionRequest in project hive by apache.

The class TestWorker, method droppedTable.

@Test
public void droppedTable() throws Exception {
    Table t = newTable("default", "dt", false);
    addDeltaFile(t, null, 1L, 2L, 2);
    addDeltaFile(t, null, 3L, 4L, 2);
    burnThroughTransactions(4);
    CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MAJOR);
    txnHandler.compact(rqst);
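    // Drop the table before the worker runs; the compaction should then be reported as succeeded.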
    ms.dropTable("default", "dt");
    startWorker();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(compacts.get(0).getState()));
}
Also used : Table(org.apache.hadoop.hive.metastore.api.Table) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)

Example 50 with CompactionRequest

Use of org.apache.hadoop.hive.metastore.api.CompactionRequest in project hive by apache.

The class TestWorker, method minorWithOpenInMiddle.

/**
   * todo: fix https://issues.apache.org/jira/browse/HIVE-9995
   * @throws Exception
   */
@Test
public void minorWithOpenInMiddle() throws Exception {
    LOG.debug("Starting minorWithOpenInMiddle");
    Table t = newTable("default", "mtwb", false);
    addBaseFile(t, null, 20L, 20);
    addDeltaFile(t, null, 21L, 22L, 2);
    addDeltaFile(t, null, 23L, 25L, 3);
    addLengthFile(t, null, 23L, 25L, 3);
    addDeltaFile(t, null, 26L, 27L, 2);
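    // Leave transaction 23 open; the minor compaction can then only compact the deltas below it (21-22), leaving the later deltas untouched.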
    burnThroughTransactions(27, new HashSet<Long>(Arrays.asList(23L)), null);
    CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR);
    txnHandler.compact(rqst);
    startWorker();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
    // There should now be 4 directories in the location
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
    Assert.assertEquals(4, stat.length);
    // Sort the directories and verify that the compacted delta and the remaining directories are present
    Arrays.sort(stat);
    Assert.assertEquals("base_20", stat[0].getPath().getName());
    Assert.assertEquals(makeDeltaDirNameCompacted(21, 22), stat[1].getPath().getName());
    Assert.assertEquals(makeDeltaDirName(23, 25), stat[2].getPath().getName());
    Assert.assertEquals(makeDeltaDirName(26, 27), stat[3].getPath().getName());
}
Also used : Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hive.metastore.api.Table) FileStatus(org.apache.hadoop.fs.FileStatus) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) FileSystem(org.apache.hadoop.fs.FileSystem) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)
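All five examples follow the same request-and-verify pattern: build a CompactionRequest, submit it through the transaction handler, run a Worker, then read the state back through ShowCompactRequest / ShowCompactResponse. Below is a minimal sketch of that pattern, assuming a TxnStore handle like the tests' txnHandler is already available; the helper name requestAndCheck is illustrative only and is not part of TestWorker or the Hive API.

import java.util.List;

import org.apache.hadoop.hive.metastore.api.CompactionRequest;
import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

// Illustrative helper, not part of the Hive API: queue a compaction and return the state reported for the table.
static String requestAndCheck(TxnStore txnHandler, String db, String table,
        String partition, CompactionType type) throws Exception {
    CompactionRequest rqst = new CompactionRequest(db, table, type);
    if (partition != null) {
        // Partitioned tables must name the partition, e.g. "ds=today" as in droppedPartition().
        rqst.setPartitionname(partition);
    }
    // Enqueue the request; in the tests a Worker is then started to act on it.
    txnHandler.compact(rqst);
    // SHOW COMPACTIONS reports the state, e.g. "ready for cleaning" or TxnStore.SUCCEEDED_RESPONSE.
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    for (ShowCompactResponseElement e : compacts) {
        if (table.equals(e.getTablename())) {
            return e.getState();
        }
    }
    return null;
}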

Aggregations

CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest) 56
Test (org.junit.Test) 52
ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest) 41
ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse) 41
Table (org.apache.hadoop.hive.metastore.api.Table) 40
ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) 33
Path (org.apache.hadoop.fs.Path) 29
FileStatus (org.apache.hadoop.fs.FileStatus) 24
FileSystem (org.apache.hadoop.fs.FileSystem) 24
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 13
Partition (org.apache.hadoop.hive.metastore.api.Partition) 12
CompactionInfo (org.apache.hadoop.hive.metastore.txn.CompactionInfo) 12
TxnStore (org.apache.hadoop.hive.metastore.txn.TxnStore) 12
ArrayList (java.util.ArrayList) 10
HiveEndPoint (org.apache.hive.hcatalog.streaming.HiveEndPoint) 9
HiveMetaStoreClient (org.apache.hadoop.hive.metastore.HiveMetaStoreClient) 8
IMetaStoreClient (org.apache.hadoop.hive.metastore.IMetaStoreClient) 8
DelimitedInputWriter (org.apache.hive.hcatalog.streaming.DelimitedInputWriter) 7
StreamingConnection (org.apache.hive.hcatalog.streaming.StreamingConnection) 7
LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent) 6