Use of org.apache.hadoop.hive.metastore.api.CompactionRequest in project hive by apache: the class TestInitiator, method noCompactWhenCompactAlreadyScheduled.
@Test
public void noCompactWhenCompactAlreadyScheduled() throws Exception {
  Table t = newTable("default", "ncwcas", false);
  HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
  // Abort 11 transactions on the table to push it past the aborted-txn threshold,
  // which would normally make the Initiator schedule a compaction.
  for (int i = 0; i < 11; i++) {
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("ncwcas");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
  }
  // Manually queue a major compaction before the Initiator runs.
  CompactionRequest rqst = new CompactionRequest("default", "ncwcas", CompactionType.MAJOR);
  txnHandler.compact(rqst);
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("ncwcas", compacts.get(0).getTablename());
  startInitiator();
  // The Initiator must not queue a second compaction while one is already pending.
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("ncwcas", compacts.get(0).getTablename());
  Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
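Distilled from the tests above, the CompactionRequest pattern is small: construct the request with database, table, and compaction type, optionally name a partition, and hand it to the transaction handler. A minimal sketch, assuming an initialized TxnStore as in the test fixtures; requestCompaction is a hypothetical helper, not a Hive API:

import org.apache.hadoop.hive.metastore.api.CompactionRequest;
import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

public class CompactionRequestSketch {
  // Queue a compaction for a table, or for a single partition when
  // partitionName (e.g. "ds=today") is non-null.
  static void requestCompaction(TxnStore txnHandler, String db, String table,
      String partitionName, CompactionType type) throws Exception {
    CompactionRequest rqst = new CompactionRequest(db, table, type);
    if (partitionName != null) {
      rqst.setPartitionname(partitionName);
    }
    txnHandler.compact(rqst);  // the request shows up as "initiated" in SHOW COMPACTIONS
  }
}

The tests above inline this same sequence, e.g. with ("default", "ncwcas", null, CompactionType.MAJOR).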
Use of org.apache.hadoop.hive.metastore.api.CompactionRequest in project hive by apache: the class TestWorker, method sortedTable.
@Test
public void sortedTable() throws Exception {
  List<Order> sortCols = new ArrayList<Order>(1);
  sortCols.add(new Order("b", 1));
  Table t = newTable("default", "st", false, new HashMap<String, String>(), sortCols, false);
  addBaseFile(t, null, 20L, 20);
  addDeltaFile(t, null, 21L, 22L, 2);
  addDeltaFile(t, null, 23L, 24L, 2);
  addDeltaFile(t, null, 21L, 24L, 4);
  burnThroughTransactions(25);
  CompactionRequest rqst = new CompactionRequest("default", "st", CompactionType.MINOR);
  txnHandler.compact(rqst);
  startWorker();
  // Compaction of sorted tables is not supported, so the Worker should leave
  // all four directories (one base, three deltas) in place.
  FileSystem fs = FileSystem.get(conf);
  FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
  Assert.assertEquals(4, stat.length);
}
Use of org.apache.hadoop.hive.metastore.api.CompactionRequest in project hive by apache: the class TestCleaner, method cleanupAfterMinorTableCompaction.
@Test
public void cleanupAfterMinorTableCompaction() throws Exception {
  Table t = newTable("default", "camitc", false);
  addBaseFile(t, null, 20L, 20);
  addDeltaFile(t, null, 21L, 22L, 2);
  addDeltaFile(t, null, 23L, 24L, 2);
  addDeltaFile(t, null, 21L, 24L, 4);
  burnThroughTransactions(25);
  CompactionRequest rqst = new CompactionRequest("default", "camitc", CompactionType.MINOR);
  txnHandler.compact(rqst);
  // Simulate a Worker claiming the request and finishing the compaction.
  CompactionInfo ci = txnHandler.findNextToCompact("fred");
  txnHandler.markCompacted(ci);
  txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
  startCleaner();
  // The single compaction request should now be marked as succeeded.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(1, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
  // Check that the obsolete files were removed: only the original base and the
  // compacted delta should remain.
  List<Path> paths = getDirectories(conf, t, null);
  Assert.assertEquals(2, paths.size());
  boolean sawBase = false, sawDelta = false;
  for (Path p : paths) {
    if (p.getName().equals("base_20")) {
      sawBase = true;
    } else if (p.getName().equals(makeDeltaDirName(21, 24))) {
      sawDelta = true;
    } else {
      Assert.fail("Unexpected file " + p.getName());
    }
  }
  Assert.assertTrue(sawBase);
  Assert.assertTrue(sawDelta);
}
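Taken together, the handler calls in this test walk the compaction state machine by hand instead of running a real Worker. A minimal sketch of that lifecycle, with the SHOW COMPACTIONS state after each step noted in comments; the table name is hypothetical and txnHandler is an initialized TxnStore as in the fixtures:

import org.apache.hadoop.hive.metastore.api.CompactionRequest;
import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

public class CompactionLifecycleSketch {
  static void driveLifecycle(TxnStore txnHandler) throws Exception {
    CompactionRequest rqst = new CompactionRequest("default", "mytable", CompactionType.MINOR);
    txnHandler.compact(rqst);                                     // state: "initiated"
    CompactionInfo ci = txnHandler.findNextToCompact("worker1");  // a Worker claims it: "working"
    txnHandler.markCompacted(ci);                                 // Worker done: "ready for cleaning"
    txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
    // Running the Cleaner at this point deletes the obsolete base/delta
    // directories and moves the entry to "succeeded".
  }
}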
Use of org.apache.hadoop.hive.metastore.api.CompactionRequest in project hive by apache: the class TestCleaner, method droppedPartition.
@Test
public void droppedPartition() throws Exception {
  Table t = newTable("default", "dp", true);
  Partition p = newPartition(t, "today");
  addDeltaFile(t, p, 1L, 22L, 22);
  addDeltaFile(t, p, 23L, 24L, 2);
  addBaseFile(t, p, 25L, 25);
  burnThroughTransactions(25);
  CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MAJOR);
  rqst.setPartitionname("ds=today");
  txnHandler.compact(rqst);
  CompactionInfo ci = txnHandler.findNextToCompact("fred");
  txnHandler.markCompacted(ci);
  txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
  // Drop the partition before the Cleaner gets a chance to run.
  ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
  startCleaner();
  // The Cleaner should tolerate the missing partition and still mark the
  // compaction as succeeded.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(1, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
}
Use of org.apache.hadoop.hive.metastore.api.CompactionRequest in project hive by apache: the class TestTxnCommands2, method updateDeletePartitioned.
/**
 * Test an update that hits multiple partitions (i.e. requires a dynamic
 * partition insert to process).
 * @throws Exception
 */
@Test
public void updateDeletePartitioned() throws Exception {
  int[][] tableData = { { 1, 2 }, { 3, 4 }, { 5, 6 } };
  runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=1) (a,b) " + makeValuesClause(tableData));
  runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=2) (a,b) " + makeValuesClause(tableData));
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  // Major-compact, update one row in each partition, then compact again.
  txnHandler.compact(new CompactionRequest("default", Table.ACIDTBLPART.name(), CompactionType.MAJOR));
  runWorker(hiveConf);
  runCleaner(hiveConf);
  runStatementOnDriver("update " + Table.ACIDTBLPART + " set b = b + 1 where a = 3");
  txnHandler.compact(new CompactionRequest("default", Table.ACIDTBLPART.toString(), CompactionType.MAJOR));
  runWorker(hiveConf);
  runCleaner(hiveConf);
  List<String> rs = runStatementOnDriver("select p,a,b from " + Table.ACIDTBLPART + " order by p, a, b");
  int[][] expectedData = { { 1, 1, 2 }, { 1, 3, 5 }, { 1, 5, 6 }, { 2, 1, 2 }, { 2, 3, 5 }, { 2, 5, 6 } };
  Assert.assertEquals("Update " + Table.ACIDTBLPART + " didn't match:", stringifyValues(expectedData), rs);
}
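After an end-to-end run like this one, success can be confirmed through the same showCompact path the metastore tests use. A minimal sketch, assuming a TxnStore obtained via TxnUtils.getTxnStore(conf) as above; allCompactionsSucceeded is a hypothetical helper:

import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

public class CompactionStatusSketch {
  // Returns true once every compaction visible to SHOW COMPACTIONS has succeeded.
  static boolean allCompactionsSucceeded(TxnStore txnHandler) throws Exception {
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    for (ShowCompactResponseElement e : rsp.getCompacts()) {
      if (!TxnStore.SUCCEEDED_RESPONSE.equals(e.getState())) {
        return false;
      }
    }
    return true;
  }
}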