Example usage of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in the Apache Hive project, taken from the assertCleanerActions method of the TestCleanerWithReplication class.
/**
 * Runs the Cleaner and verifies that, with replication's change-management enabled,
 * the files removed by cleaning are preserved under the cm root directory rather
 * than being deleted outright.
 *
 * @param expectedNumOfCleanedFiles number of files expected under the cm root after cleaning
 * @throws Exception if a metastore or filesystem call fails
 */
private void assertCleanerActions(int expectedNumOfCleanedFiles) throws Exception {
// Before the cleaner runs, nothing should have been moved into the cm root yet.
assertEquals("there should be no deleted files in cm root", 0, fs.listStatus(cmRootDirectory).length);
startCleaner();
// The single queued compaction must have been marked as succeeded by the cleaner.
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
Assert.assertEquals(1, rsp.getCompactsSize());
// assertEquals (not assertTrue+equals) so a failure reports both expected and actual state.
Assert.assertEquals("unexpected state", TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
// The "deleted" files must now appear in the cm root (String.valueOf was redundant here;
// int concatenates directly).
assertEquals("there should be " + expectedNumOfCleanedFiles + " deleted files in cm root",
    expectedNumOfCleanedFiles, fs.listStatus(cmRootDirectory).length);
}
Example usage of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in the Apache Hive project, taken from the testVersioning method of the TestTxnCommands class.
/**
 * Verifies that ORC ACID writes stamp both the data files and their enclosing
 * delta directories with the expected acid version marker, and that files and
 * the base directory produced by a major compaction carry the same marker.
 */
@Test
public void testVersioning() throws Exception {
hiveConf.set(MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID.getVarname(), "true");
runStatementOnDriver("drop table if exists T");
runStatementOnDriver("create table T (a int, b int) stored as orc");
int[][] data = { { 1, 2 } };
// create 1 delta file bucket_00000
runStatementOnDriver("insert into T" + makeValuesClause(data));
// Locate the data file written by the insert above.
// NOTE(review): the original comment here claimed bucket files are deleted,
// but no deletion happens anywhere in this test — it was a stale copy-paste.
List<String> rs = runStatementOnDriver("select distinct INPUT__FILE__NAME from T");
FileSystem fs = FileSystem.get(hiveConf);
// Exactly one file is expected, and it must live inside a delta_ directory.
Assert.assertTrue(rs != null && rs.size() == 1 && rs.get(0).contains(AcidUtils.DELTA_PREFIX));
Path filePath = new Path(rs.get(0));
int version = AcidUtils.OrcAcidVersion.getAcidVersionFromDataFile(filePath, fs);
// check it has expected version marker
Assert.assertEquals("Unexpected version marker in " + filePath, AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, version);
// check that delta dir has a version file with expected value
filePath = filePath.getParent();
Assert.assertTrue(filePath.getName().startsWith(AcidUtils.DELTA_PREFIX));
int versionFromMetaFile = AcidUtils.OrcAcidVersion.getAcidVersionFromMetaFile(filePath, fs);
Assert.assertEquals("Unexpected version marker in " + filePath, AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, versionFromMetaFile);
// Add a second delta and compact, so we can check version markers on compacted output too.
runStatementOnDriver("insert into T" + makeValuesClause(data));
runStatementOnDriver("alter table T compact 'major'");
TestTxnCommands2.runWorker(hiveConf);
// check status of compaction job
TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize());
Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
Assert.assertTrue(resp.getCompacts().get(0).getHadoopJobId().startsWith("job_local"));
// After compaction the data lives in a base_ directory instead of a delta_.
rs = runStatementOnDriver("select distinct INPUT__FILE__NAME from T");
Assert.assertTrue(rs != null && rs.size() == 1 && rs.get(0).contains(AcidUtils.BASE_PREFIX));
filePath = new Path(rs.get(0));
version = AcidUtils.OrcAcidVersion.getAcidVersionFromDataFile(filePath, fs);
// check that files produced by compaction still have the version marker
Assert.assertEquals("Unexpected version marker in " + filePath, AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, version);
// check that compacted base dir has a version file with expected value
filePath = filePath.getParent();
Assert.assertTrue(filePath.getName().startsWith(AcidUtils.BASE_PREFIX));
versionFromMetaFile = AcidUtils.OrcAcidVersion.getAcidVersionFromMetaFile(filePath, fs);
Assert.assertEquals("Unexpected version marker in " + filePath, AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, versionFromMetaFile);
}
Example usage of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in the Apache Hive project, taken from the testCompactWithDelete method of the TestTxnCommands2 class.
/**
 * Runs a MAJOR compaction, then issues delete/update statements followed by a
 * MINOR compaction, and verifies both compactions end up in the CLEANING state.
 */
@Test
public void testCompactWithDelete() throws Exception {
int[][] tableData = { { 1, 2 }, { 3, 4 } };
runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData));
runStatementOnDriver("alter table " + Table.ACIDTBL + " compact 'MAJOR'");
// Drive the compaction worker synchronously on this thread rather than starting it.
Worker worker = new Worker();
worker.setThreadId((int) worker.getId());
worker.setConf(hiveConf);
// stop=true makes the worker process the queue once and return instead of looping.
AtomicBoolean stopFlag = new AtomicBoolean(true);
AtomicBoolean loopedFlag = new AtomicBoolean();
worker.init(stopFlag, loopedFlag);
worker.run();
// Produce delete and update deltas, then compact them with a MINOR compaction.
runStatementOnDriver("delete from " + Table.ACIDTBL + " where b = 4");
runStatementOnDriver("update " + Table.ACIDTBL + " set b = -2 where b = 2");
runStatementOnDriver("alter table " + Table.ACIDTBL + " compact 'MINOR'");
worker.run();
// Both compactions should have completed and now be awaiting the cleaner.
TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
Assert.assertEquals("Unexpected number of compactions in history", 2, resp.getCompactsSize());
Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
Assert.assertEquals("Unexpected 1 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(1).getState());
}
Example usage of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in the Apache Hive project, taken from the testEmptyInTblproperties method of the TestTxnCommands2 class.
/**
 * Regression test: a transactional table created with an empty-string value in
 * TBLPROPERTIES ('serialization.null.format'='') must still compact successfully.
 * https://issues.apache.org/jira/browse/HIVE-17391
 */
@Test
public void testEmptyInTblproperties() throws Exception {
runStatementOnDriver("create table t1 " + "(a int, b int) stored as orc TBLPROPERTIES ('serialization.null.format'='', 'transactional'='true')");
runStatementOnDriver("insert into t1 " + "(a,b) values(1,7),(3,7)");
// NOTE(review): both inserted rows have b=7, so this predicate matches no rows;
// confirm whether the update was meant to hit a row (e.g. "where b = 7").
runStatementOnDriver("update t1" + " set b = -2 where b = 2");
runStatementOnDriver("alter table t1 " + " compact 'MAJOR'");
// Run the compaction worker synchronously, then verify the compaction reached
// the CLEANING state and executed as a local Hadoop job.
runWorker(hiveConf);
TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize());
Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
Assert.assertTrue(resp.getCompacts().get(0).getHadoopJobId().startsWith("job_local"));
}
Example usage of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in the Apache Hive project, taken from the testEmptyCompactionResult method of the TestTxnNoBuckets class.
/**
 * see HIVE-18429: a major compaction over deltas whose bucket files were removed
 * (so the compaction result is empty) must not fail, and a subsequent compaction
 * must tolerate the leftover empty directories.
 */
@Test
public void testEmptyCompactionResult() throws Exception {
hiveConf.set(MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID.getVarname(), "true");
runStatementOnDriver("drop table if exists T");
runStatementOnDriver("create table T (a int, b int) stored as orc");
int[][] rows = { { 1, 2 }, { 3, 4 } };
runStatementOnDriver("insert into T" + makeValuesClause(rows));
runStatementOnDriver("insert into T" + makeValuesClause(rows));
// delete the bucket files so now we have empty delta dirs
List<String> bucketFiles = runStatementOnDriver("select distinct INPUT__FILE__NAME from T");
FileSystem fileSystem = FileSystem.get(hiveConf);
for (String bucketFile : bucketFiles) {
fileSystem.delete(new Path(bucketFile), true);
}
runStatementOnDriver("alter table T compact 'major'");
TestTxnCommands2.runWorker(hiveConf);
// check status of compaction job
TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
ShowCompactResponse response = txnHandler.showCompact(new ShowCompactRequest());
Assert.assertEquals("Unexpected number of compactions in history", 1, response.getCompactsSize());
Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, response.getCompacts().get(0).getState());
Assert.assertTrue(response.getCompacts().get(0).getHadoopJobId().startsWith("job_local"));
// now run another compaction make sure empty dirs don't cause issues
runStatementOnDriver("insert into T" + makeValuesClause(rows));
runStatementOnDriver("alter table T compact 'major'");
TestTxnCommands2.runWorker(hiveConf);
// check status of compaction job
response = txnHandler.showCompact(new ShowCompactRequest());
Assert.assertEquals("Unexpected number of compactions in history", 2, response.getCompactsSize());
for (int idx = 0; idx < 2; idx++) {
Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, response.getCompacts().get(idx).getState());
Assert.assertTrue(response.getCompacts().get(idx).getHadoopJobId().startsWith("job_local"));
}
// finally, the data must still be readable and intact
List<String> queryResults = runStatementOnDriver("select a, b from T order by a, b");
Assert.assertEquals(stringifyValues(rows), queryResults);
}
End of aggregated ShowCompactResponse usage examples.