Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in project hive by apache.
From the class TestCompactionTxnHandler, method testMarkCompacted.
@Test
public void testMarkCompacted() throws Exception {
  CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR);
  rqst.setPartitionname("ds=today");
  txnHandler.compact(rqst);
  CompactionInfo ci = txnHandler.findNextToCompact("fred");
  assertNotNull(ci);
  // Once marked compacted, the entry is no longer offered to workers.
  txnHandler.markCompacted(ci);
  assertNull(txnHandler.findNextToCompact("fred"));
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  assertEquals(1, compacts.size());
  ShowCompactResponseElement c = compacts.get(0);
  assertEquals("foo", c.getDbname());
  assertEquals("bar", c.getTablename());
  assertEquals("ds=today", c.getPartitionname());
  assertEquals(CompactionType.MINOR, c.getType());
  assertEquals("ready for cleaning", c.getState());
  assertNull(c.getWorkerid());
}
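The claim-and-complete pattern this test exercises mirrors the compactor worker loop. A minimal sketch, assuming the TxnStore API used above (workerId and runCompactionJob are hypothetical stand-ins):

// Illustrative worker loop: claim the next queued compaction, do the work,
// then hand the entry over to the cleaner by marking it compacted.
CompactionInfo next;
while ((next = txnHandler.findNextToCompact(workerId)) != null) {
  runCompactionJob(next); // hypothetical: runs the actual compaction job
  txnHandler.markCompacted(next); // state becomes "ready for cleaning"
}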
Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in project hive by apache.
From the class TestCompactionTxnHandler, method testFindNextToClean.
@Test
public void testFindNextToClean() throws Exception {
  CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR);
  rqst.setPartitionname("ds=today");
  txnHandler.compact(rqst);
  // Nothing is ready to clean until a worker has claimed and finished the job.
  assertEquals(0, txnHandler.findReadyToClean().size());
  CompactionInfo ci = txnHandler.findNextToCompact("fred");
  assertNotNull(ci);
  assertEquals(0, txnHandler.findReadyToClean().size());
  txnHandler.markCompacted(ci);
  assertNull(txnHandler.findNextToCompact("fred"));
  List<CompactionInfo> toClean = txnHandler.findReadyToClean();
  assertEquals(1, toClean.size());
  assertNull(txnHandler.findNextToCompact("fred"));
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  assertEquals(1, compacts.size());
  ShowCompactResponseElement c = compacts.get(0);
  assertEquals("foo", c.getDbname());
  assertEquals("bar", c.getTablename());
  assertEquals("ds=today", c.getPartitionname());
  assertEquals(CompactionType.MINOR, c.getType());
  assertEquals("ready for cleaning", c.getState());
  assertNull(c.getWorkerid());
}
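The cleaner side follows the same pattern. A sketch under the same assumptions (removeObsoleteFiles is a hypothetical stand-in; markCleaned is the TxnStore call that removes the finished entry):

// Illustrative cleaner loop: pick up entries in "ready for cleaning",
// delete the files made obsolete by the compaction, then drop the entry.
for (CompactionInfo readyToClean : txnHandler.findReadyToClean()) {
  removeObsoleteFiles(readyToClean); // hypothetical cleanup step
  txnHandler.markCleaned(readyToClean);
}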
Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in project hive by apache.
From the class TestCompactionTxnHandler, method testFindNextToCompact2.
@Test
public void testFindNextToCompact2() throws Exception {
  CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR);
  rqst.setPartitionname("ds=today");
  txnHandler.compact(rqst);
  rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR);
  rqst.setPartitionname("ds=yesterday");
  txnHandler.compact(rqst);
  long now = System.currentTimeMillis();
  boolean expectToday = false;
  CompactionInfo ci = txnHandler.findNextToCompact("fred");
  assertNotNull(ci);
  assertEquals("foo", ci.dbname);
  assertEquals("bar", ci.tableName);
  // The two queued requests can come back in either order.
  if ("ds=today".equals(ci.partName)) {
    expectToday = false;
  } else if ("ds=yesterday".equals(ci.partName)) {
    expectToday = true;
  } else {
    fail("partition name should have been today or yesterday but was " + ci.partName);
  }
  assertEquals(CompactionType.MINOR, ci.type);
  ci = txnHandler.findNextToCompact("fred");
  assertNotNull(ci);
  assertEquals("foo", ci.dbname);
  assertEquals("bar", ci.tableName);
  if (expectToday) {
    assertEquals("ds=today", ci.partName);
  } else {
    assertEquals("ds=yesterday", ci.partName);
  }
  assertEquals(CompactionType.MINOR, ci.type);
  assertNull(txnHandler.findNextToCompact("fred"));
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  assertEquals(2, compacts.size());
  for (ShowCompactResponseElement e : compacts) {
    assertEquals("working", e.getState());
    assertTrue(e.getStart() - 5000 < now && e.getStart() + 5000 > now);
    assertEquals("fred", e.getWorkerid());
  }
}
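The literal state strings asserted in these tests correspond to response constants on TxnStore, which testCompactWhenAlreadyCompacting below uses directly; comparing against the constants is equivalent, for example:

// Equivalent assertion using the TxnStore constant instead of the literal:
// TxnStore.WORKING_RESPONSE is the string "working".
assertEquals(TxnStore.WORKING_RESPONSE, e.getState());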
Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in project hive by apache.
From the class TestTxnHandler, method testCompactWhenAlreadyCompacting.
/**
 * Once a compaction for a given resource is scheduled or working, we should not
 * schedule another one, so as to prevent concurrent compactions of the same resource.
 * @throws Exception
 */
@Test
public void testCompactWhenAlreadyCompacting() throws Exception {
  CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MAJOR);
  rqst.setPartitionname("ds=today");
  CompactionResponse resp = txnHandler.compact(rqst);
  Assert.assertEquals(new CompactionResponse(1, TxnStore.INITIATED_RESPONSE, true), resp);
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  assertEquals(1, compacts.size());
  // A second request for the same resource is not accepted...
  rqst.setType(CompactionType.MINOR);
  resp = txnHandler.compact(rqst);
  Assert.assertEquals(new CompactionResponse(1, TxnStore.INITIATED_RESPONSE, false), resp);
  // ...and the queue still holds only the original MAJOR request.
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  compacts = rsp.getCompacts();
  assertEquals(1, compacts.size());
  ShowCompactResponseElement c = compacts.get(0);
  assertEquals("foo", c.getDbname());
  assertEquals("bar", c.getTablename());
  assertEquals("ds=today", c.getPartitionname());
  assertEquals(CompactionType.MAJOR, c.getType());
  assertEquals("initiated", c.getState());
  assertEquals(0L, c.getStart());
}
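A caller can detect this deduplication through the accepted flag on the thrift-generated CompactionResponse. A minimal sketch (the log line is illustrative):

// Illustrative caller-side check: a request for a resource that is already
// initiated or working comes back with accepted == false.
CompactionResponse response = txnHandler.compact(rqst);
if (!response.isAccepted()) {
  System.out.println("Compaction already queued under id " + response.getId());
}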
Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in project hive by apache.
From the class TestWorker, method compactNoBaseLotsOfDeltas.
private void compactNoBaseLotsOfDeltas(CompactionType type) throws Exception {
  conf.setIntVar(HiveConf.ConfVars.COMPACTOR_MAX_NUM_DELTA, 2);
  Table t = newTable("default", "mapwb", true);
  Partition p = newPartition(t, "today");
  // addBaseFile(t, p, 20L, 20);
  addDeltaFile(t, p, 21L, 21L, 2);
  addDeltaFile(t, p, 23L, 23L, 2);
  // make it look like a streaming API use case
  addDeltaFile(t, p, 25L, 29L, 2);
  addDeltaFile(t, p, 31L, 32L, 3);
  // make it look like 31-32 has been compacted, but not cleaned
  addDeltaFile(t, p, 31L, 33L, 5);
  addDeltaFile(t, p, 35L, 35L, 1);
  /* Since COMPACTOR_MAX_NUM_DELTA=2, we expect:
   * files 1 and 2 to be minor compacted by one job to produce delta_21_23;
   * files 3 and 5 to be minor compacted by a second job to produce delta_25_33
   * (file 4, delta_31_32, is obsolete and skipped);
   * and then the 'requested' minor compaction to combine delta_21_23,
   * delta_25_33 and delta_35_35 into delta_21_35, or major compaction to
   * create base_35. */
  burnThroughTransactions(35);
  CompactionRequest rqst = new CompactionRequest("default", "mapwb", type);
  rqst.setPartitionname("ds=today");
  txnHandler.compact(rqst);
  startWorker();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
  FileSystem fs = FileSystem.get(conf);
  FileStatus[] stat = fs.listStatus(new Path(p.getSd().getLocation()));
  Assert.assertEquals(9, stat.length);
  // Find the new delta file and make sure it has the right contents
  BitSet matchesFound = new BitSet(9);
  for (int i = 0; i < stat.length; i++) {
    if (stat[i].getPath().getName().equals(makeDeltaDirName(21, 21))) {
      matchesFound.set(0);
    } else if (stat[i].getPath().getName().equals(makeDeltaDirName(23, 23))) {
      matchesFound.set(1);
    } else if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(25, 29))) {
      matchesFound.set(2);
    } else if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(31, 32))) {
      matchesFound.set(3);
    } else if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(31, 33))) {
      matchesFound.set(4);
    } else if (stat[i].getPath().getName().equals(makeDeltaDirName(35, 35))) {
      matchesFound.set(5);
    } else if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 23))) {
      matchesFound.set(6);
    } else if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(25, 33))) {
      matchesFound.set(7);
    }
    switch (type) {
      // yes, both do set(8)
      case MINOR:
        if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 35))) {
          matchesFound.set(8);
        }
        break;
      case MAJOR:
        if (stat[i].getPath().getName().equals(AcidUtils.baseDir(35))) {
          matchesFound.set(8);
        }
        break;
      default:
        throw new IllegalStateException();
    }
  }
  StringBuilder sb = null;
  for (int i = 0; i < stat.length; i++) {
    if (!matchesFound.get(i)) {
      if (sb == null) {
        sb = new StringBuilder("Some files are missing at index: ");
      }
      sb.append(i).append(",");
    }
  }
  if (sb != null) {
    Assert.fail(sb.toString());
  }
}
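For reference, the directory names asserted above follow the ACID naming scheme delta_<minTxn>_<maxTxn> with zero-padded transaction ids (AcidUtils formats them with %07d). An illustrative helper, not the test base class's actual implementation:

// Illustrative only: builds a delta directory name such as
// delta_0000021_0000023, the layout the assertions above rely on.
private static String deltaDirName(long minTxnId, long maxTxnId) {
  return String.format("delta_%07d_%07d", minTxnId, maxTxnId);
}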