Use of org.apache.hadoop.hive.metastore.api.ShowCompactRequest in project hive by apache: class TestInitiator, method noCompactTableNotEnoughDeltas.
@Test
public void noCompactTableNotEnoughDeltas() throws Exception {
  Table t = newTable("default", "nctned", false);
  addBaseFile(t, null, 200L, 200);
  addDeltaFile(t, null, 201L, 205L, 5);
  addDeltaFile(t, null, 206L, 211L, 6);
  burnThroughTransactions("default", "nctned", 210);
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
  comp.setTablename("nctned");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  long writeid = allocateWriteId("default", "nctned", txnid);
  Assert.assertEquals(211, writeid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));
  startInitiator();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(0, rsp.getCompactsSize());
}
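Outside of the test harness, the same compaction listing that backs these assertions can be fetched through the metastore client. A minimal sketch, assuming a reachable metastore and the showCompactions() call on HiveMetaStoreClient (the class and helper names here are illustrative, not part of the test above):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;

public class ListCompactions {
  public static void main(String[] args) throws Exception {
    // Assumes metastore connection details are available via the HiveConf on the classpath.
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      ShowCompactResponse rsp = client.showCompactions();
      for (ShowCompactResponseElement e : rsp.getCompacts()) {
        System.out.println(e.getDbname() + "." + e.getTablename()
            + (e.isSetPartitionname() ? "/" + e.getPartitionname() : "")
            + " type=" + e.getType() + " state=" + e.getState());
      }
    } finally {
      client.close();
    }
  }
}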
Use of org.apache.hadoop.hive.metastore.api.ShowCompactRequest in project hive by apache: class TestInitiator, method enoughDeltasNoBase.
@Test
public void enoughDeltasNoBase() throws Exception {
  Table t = newTable("default", "ednb", true);
  Partition p = newPartition(t, "today");
  addDeltaFile(t, p, 1L, 201L, 200);
  addDeltaFile(t, p, 202L, 202L, 1);
  addDeltaFile(t, p, 203L, 203L, 1);
  addDeltaFile(t, p, 204L, 204L, 1);
  addDeltaFile(t, p, 205L, 205L, 1);
  addDeltaFile(t, p, 206L, 206L, 1);
  addDeltaFile(t, p, 207L, 207L, 1);
  addDeltaFile(t, p, 208L, 208L, 1);
  addDeltaFile(t, p, 209L, 209L, 1);
  addDeltaFile(t, p, 210L, 210L, 1);
  addDeltaFile(t, p, 211L, 211L, 1);
  burnThroughTransactions("default", "ednb", 210);
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
  comp.setTablename("ednb");
  comp.setPartitionname("ds=today");
  comp.setOperationType(DataOperationType.DELETE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  long writeid = allocateWriteId("default", "ednb", txnid);
  Assert.assertEquals(211, writeid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));
  startInitiator();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("ednb", compacts.get(0).getTablename());
  Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
  Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
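Eleven single-write deltas with no base are what push this partition over the Initiator's delta-count threshold and get a MAJOR compaction queued. A minimal configuration sketch, assuming the standard hive.compactor.* properties (the helper name is hypothetical):

import org.apache.hadoop.hive.conf.HiveConf;

public class CompactorThresholds {
  // Hypothetical helper that makes the relevant Initiator thresholds explicit.
  // With the default delta count threshold of 10, the 11 deltas created above are
  // enough to trigger a compaction even though the partition has no base file.
  static HiveConf withCompactorThresholds() {
    HiveConf conf = new HiveConf();
    conf.setInt("hive.compactor.delta.num.threshold", 10);     // deltas needed before compacting
    conf.setFloat("hive.compactor.delta.pct.threshold", 0.1f); // delta/base size ratio for a major
    return conf;
  }
}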
Use of org.apache.hadoop.hive.metastore.api.ShowCompactRequest in project hive by apache: class TestInitiator, method chooseMajorOverMinorWhenBothValid.
@Test
public void chooseMajorOverMinorWhenBothValid() throws Exception {
  Table t = newTable("default", "cmomwbv", false);
  addBaseFile(t, null, 200L, 200);
  addDeltaFile(t, null, 201L, 211L, 11);
  addDeltaFile(t, null, 212L, 222L, 11);
  addDeltaFile(t, null, 223L, 233L, 11);
  addDeltaFile(t, null, 234L, 244L, 11);
  addDeltaFile(t, null, 245L, 255L, 11);
  addDeltaFile(t, null, 256L, 266L, 11);
  addDeltaFile(t, null, 267L, 277L, 11);
  addDeltaFile(t, null, 278L, 288L, 11);
  addDeltaFile(t, null, 289L, 299L, 11);
  addDeltaFile(t, null, 300L, 310L, 11);
  addDeltaFile(t, null, 311L, 321L, 11);
  burnThroughTransactions("default", "cmomwbv", 320);
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
  comp.setTablename("cmomwbv");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  long writeid = allocateWriteId("default", "cmomwbv", txnid);
  Assert.assertEquals(321, writeid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));
  startInitiator();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("cmomwbv", compacts.get(0).getTablename());
  Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
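Here both a minor compaction (more than ten deltas) and a major compaction (accumulated delta data well above ten percent of the base) would be valid, and the Initiator picks the major. A rough sketch of the size-ratio rule the test exercises (hypothetical helper, not the Initiator's actual code; the test's record counts stand in for file sizes):

public class CompactionChoice {
  // Hypothetical sketch: prefer a MAJOR compaction when the accumulated delta data
  // exceeds the configured fraction (hive.compactor.delta.pct.threshold, default 0.1)
  // of the base; otherwise the delta count alone can still justify a MINOR.
  static boolean preferMajor(long baseSize, long totalDeltaSize, float pctThreshold) {
    if (baseSize == 0) {
      return false; // nothing to rewrite; the real Initiator handles the no-base case separately
    }
    return (float) totalDeltaSize / baseSize > pctThreshold;
  }

  public static void main(String[] args) {
    // Figures loosely based on the test above: a 200-record base and 11 deltas of 11 records each.
    System.out.println(preferMajor(200L, 11 * 11L, 0.1f)); // 121/200 = 0.605 > 0.1 -> true
  }
}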
Use of org.apache.hadoop.hive.metastore.api.ShowCompactRequest in project hive by apache: class TestWorker, method droppedPartition.
@Test
public void droppedPartition() throws Exception {
  Table t = newTable("default", "dp", true);
  Partition p = newPartition(t, "today");
  addBaseFile(t, p, 20L, 20);
  addDeltaFile(t, p, 21L, 22L, 2);
  addDeltaFile(t, p, 23L, 24L, 2);
  burnThroughTransactions("default", "dp", 25);
  CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MINOR);
  rqst.setPartitionname("ds=today");
  txnHandler.compact(rqst);
  // Dropping the partition removes its entry from the compaction queue, so the worker has nothing to do.
  ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
  startWorker();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(0, compacts.size());
}
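The point of this test is that dropping the partition also cleans its entry out of the compaction queue, which is what the final assertion checks. A small sketch of that check as a reusable helper, assuming txnHandler implements the TxnStore interface as in recent versions of the test base class (the helper itself is hypothetical):

import java.util.List;

import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

public class CompactionQueueChecks {
  // Hypothetical helper mirroring the assertion above: after the partition is dropped,
  // no compaction entry for the table should remain visible via ShowCompactRequest.
  static boolean hasQueuedCompaction(TxnStore txnHandler, String db, String table) throws Exception {
    List<ShowCompactResponseElement> compacts =
        txnHandler.showCompact(new ShowCompactRequest()).getCompacts();
    return compacts.stream()
        .anyMatch(c -> db.equals(c.getDbname()) && table.equals(c.getTablename()));
  }
}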
Use of org.apache.hadoop.hive.metastore.api.ShowCompactRequest in project hive by apache: class TestWorker, method minorTableNoBase.
@Test
public void minorTableNoBase() throws Exception {
  LOG.debug("Starting minorTableNoBase");
  Table t = newTable("default", "mtnb", false);
  addDeltaFile(t, null, 1L, 2L, 2);
  addDeltaFile(t, null, 3L, 4L, 2);
  burnThroughTransactions("default", "mtnb", 5);
  CompactionRequest rqst = new CompactionRequest("default", "mtnb", CompactionType.MINOR);
  txnHandler.compact(rqst);
  startWorker();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
  // There should now be 4 directories in the location: the two original deltas plus the compacted delta and delete delta.
  FileSystem fs = FileSystem.get(conf);
  FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
  Assert.assertEquals(4, stat.length);
  // Find the new delta files and make sure they have the right contents
  boolean sawNewDelta = false;
  for (int i = 0; i < stat.length; i++) {
    if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(1, 4))) {
      sawNewDelta = true;
      FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter);
      Assert.assertEquals(2, buckets.length);
      Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
      Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
      Assert.assertEquals(104L, buckets[0].getLen());
      Assert.assertEquals(104L, buckets[1].getLen());
    }
    if (stat[i].getPath().getName().equals(makeDeleteDeltaDirNameCompacted(1, 4))) {
      sawNewDelta = true;
      FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter);
      Assert.assertEquals(2, buckets.length);
      Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
      Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
      Assert.assertEquals(104L, buckets[0].getLen());
      Assert.assertEquals(104L, buckets[1].getLen());
    } else {
      LOG.debug("This is not the delta file you are looking for " + stat[i].getPath().getName());
    }
  }
  Assert.assertTrue(sawNewDelta);
}
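The directory names asserted above follow the ACID naming convention for compacted deltas. A minimal sketch of that convention (hypothetical helpers; the test's makeDeltaDirNameCompacted and makeDeleteDeltaDirNameCompacted should produce the same strings):

public class AcidDirNames {
  // Sketch of the compacted-delta naming convention: write ids are zero-padded to
  // seven digits, so a minor compaction of write ids 1..4 lands in delta_0000001_0000004
  // and its delete events in delete_delta_0000001_0000004.
  static String compactedDeltaDir(long minWriteId, long maxWriteId) {
    return String.format("delta_%07d_%07d", minWriteId, maxWriteId);
  }

  static String compactedDeleteDeltaDir(long minWriteId, long maxWriteId) {
    return "delete_" + compactedDeltaDir(minWriteId, maxWriteId);
  }

  public static void main(String[] args) {
    System.out.println(compactedDeltaDir(1, 4));        // delta_0000001_0000004
    System.out.println(compactedDeleteDeltaDir(1, 4));  // delete_delta_0000001_0000004
  }
}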