Use of org.apache.hadoop.hive.metastore.txn.TxnStore in project hive by apache.
Class TestTxnCommands, method testDropWithBaseMultiplePartitions.
@Test
public void testDropWithBaseMultiplePartitions() throws Exception {
  runStatementOnDriver("insert into " + Table.ACIDTBLNESTEDPART + " partition (p1='a', p2='a', p3='a') values (1,1),(2,2)");
  runStatementOnDriver("insert into " + Table.ACIDTBLNESTEDPART + " partition (p1='a', p2='a', p3='b') values (3,3),(4,4)");
  runStatementOnDriver("insert into " + Table.ACIDTBLNESTEDPART + " partition (p1='a', p2='b', p3='c') values (7,7),(8,8)");

  // With drop-with-base enabled, dropping on the partial spec (p2='a') matches two
  // leaf partitions; each gets a base marker instead of having its data deleted inline.
  HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_DROP_PARTITION_USE_BASE, true);
  runStatementOnDriver("alter table " + Table.ACIDTBLNESTEDPART + " drop partition (p2='a')");

  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 2, resp.getCompactsSize());

  FileSystem fs = FileSystem.get(hiveConf);
  FileStatus[] stat;
  for (char p : Arrays.asList('a', 'b')) {
    String partName = "p1=a/p2=a/p3=" + p;
    Assert.assertTrue(resp.getCompacts().stream().anyMatch(
        ci -> TxnStore.CLEANING_RESPONSE.equals(ci.getState()) && partName.equals(ci.getPartitionname())));
    stat = fs.listStatus(new Path(getWarehouseDir(), Table.ACIDTBLNESTEDPART.toString().toLowerCase() + "/" + partName),
        AcidUtils.baseFileFilter);
    if (1 != stat.length) {
      Assert.fail("Expecting 1 base and found " + stat.length + " files " + Arrays.toString(stat));
    }
    String name = stat[0].getPath().getName();
    Assert.assertEquals("base_0000004", name);
  }

  // The partition that did not match the drop spec must have no base marker.
  stat = fs.listStatus(new Path(getWarehouseDir(), Table.ACIDTBLNESTEDPART.toString().toLowerCase() + "/p1=a/p2=b/p3=c"),
      AcidUtils.baseFileFilter);
  if (0 != stat.length) {
    Assert.fail("Expecting no base and found " + stat.length + " files " + Arrays.toString(stat));
  }

  // Only the rows from the surviving partition remain visible.
  List<String> r = runStatementOnDriver("select * from " + Table.ACIDTBLNESTEDPART);
  Assert.assertEquals(2, r.size());

  // The Cleaner physically removes the dropped partitions' directories.
  runCleaner(hiveConf);
  for (char p : Arrays.asList('a', 'b')) {
    stat = fs.listStatus(new Path(getWarehouseDir(), Table.ACIDTBLNESTEDPART.toString().toLowerCase() + "/p1=a/p2=a"),
        path -> path.getName().equals("p3=" + p));
    if (0 != stat.length) {
      Assert.fail("Expecting partition data to be removed from FS");
    }
  }
}
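The partition/state check inside the first loop is a reusable idiom. A minimal helper sketch, using only the showCompact API already exercised above; the helper name is hypothetical, not part of the Hive test class:

// Hypothetical helper: true if compaction history has an entry for the given
// partition in the given state (e.g. TxnStore.CLEANING_RESPONSE).
private static boolean hasCompactionInState(TxnStore store, String partName, String state) throws Exception {
  ShowCompactResponse resp = store.showCompact(new ShowCompactRequest());
  return resp.getCompacts().stream()
      .anyMatch(ci -> state.equals(ci.getState()) && partName.equals(ci.getPartitionname()));
}

With it, the assertion in the loop reduces to Assert.assertTrue(hasCompactionInState(txnHandler, partName, TxnStore.CLEANING_RESPONSE)).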
Use of org.apache.hadoop.hive.metastore.txn.TxnStore in project hive by apache.
Class TestTxnCommands, method testDropTableWithoutSuffix.
@Test
public void testDropTableWithoutSuffix() throws Exception {
  String tableName = "tab_acid";
  runStatementOnDriver("drop table if exists " + tableName);

  for (boolean enabled : Arrays.asList(false, true)) {
    // Create the table under one suffix setting, then flip the setting before the
    // drop: the drop must succeed regardless of whether the suffix flag still
    // matches the value in effect at create time.
    HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_CREATE_TABLE_USE_SUFFIX, enabled);
    runStatementOnDriver("create table " + tableName + "(a int, b int) stored as orc TBLPROPERTIES ('transactional'='true')");
    runStatementOnDriver("insert into " + tableName + " values(1,2),(3,4)");
    HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_CREATE_TABLE_USE_SUFFIX, !enabled);
    runStatementOnDriver("drop table " + tableName);

    // The drop should leave no write-id mappings and no table directory behind.
    int count = TestTxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_TO_WRITE_ID where T2W_TABLE = '" + tableName + "'");
    Assert.assertEquals(0, count);
    FileSystem fs = FileSystem.get(hiveConf);
    FileStatus[] stat = fs.listStatus(new Path(getWarehouseDir()), t -> t.getName().equals(tableName));
    Assert.assertEquals(0, stat.length);

    try {
      runStatementOnDriver("select * from " + tableName);
      Assert.fail("Expected select on the dropped table to fail");
    } catch (Exception ex) {
      Assert.assertTrue(ex.getMessage().contains(ErrorMsg.INVALID_TABLE.getMsg(StringUtils.wrap(tableName, "'"))));
    }

    // Check status of compaction job: a plain drop should enqueue no compaction.
    TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
    ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
    Assert.assertEquals("Unexpected number of compactions in history", 0, resp.getCompactsSize());
  }
}
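The expect-failure block above can also be written with JUnit 4.13's Assert.assertThrows; a sketch, assuming that JUnit version is on the classpath (the original test uses the try/catch form):

// Equivalent expect-failure check via assertThrows (JUnit 4.13+).
Exception ex = Assert.assertThrows(Exception.class,
    () -> runStatementOnDriver("select * from " + tableName));
Assert.assertTrue(ex.getMessage().contains(
    ErrorMsg.INVALID_TABLE.getMsg(StringUtils.wrap(tableName, "'"))));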
Use of org.apache.hadoop.hive.metastore.txn.TxnStore in project hive by apache.
Class TestTxnCommands3, method testAcidMetaColumsDecode.
/**
* HIVE-19985
*/
@Test
public void testAcidMetaColumsDecode() throws Exception {
  // This only applies in vectorized mode.
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, true);
  hiveConf.set(MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID.getVarname(), "true");
  runStatementOnDriver("drop table if exists T");
  runStatementOnDriver("create table T (a int, b int) stored as orc");

  int[][] data1 = { { 1, 2 }, { 3, 4 } };
  runStatementOnDriver("insert into T" + makeValuesClause(data1));
  int[][] data2 = { { 5, 6 }, { 7, 8 } };
  runStatementOnDriver("insert into T" + makeValuesClause(data2));
  int[][] dataAll = { { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 } };

  // Reads must return the same rows whether or not the ACID meta-column
  // optimization is enabled.
  hiveConf.setBoolVar(HiveConf.ConfVars.OPTIMIZE_ACID_META_COLUMNS, true);
  List<String> rs = runStatementOnDriver("select a, b from T order by a, b");
  Assert.assertEquals(stringifyValues(dataAll), rs);
  hiveConf.setBoolVar(HiveConf.ConfVars.OPTIMIZE_ACID_META_COLUMNS, false);
  rs = runStatementOnDriver("select a, b from T order by a, b");
  Assert.assertEquals(stringifyValues(dataAll), rs);

  runStatementOnDriver("alter table T compact 'major'");
  runWorker(hiveConf);

  // Check status of compaction job.
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize());
  Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
  Assert.assertTrue(resp.getCompacts().get(0).getHadoopJobId().startsWith("job_local"));

  // Repeat the same checks after the major compaction.
  hiveConf.setBoolVar(HiveConf.ConfVars.OPTIMIZE_ACID_META_COLUMNS, true);
  rs = runStatementOnDriver("select a, b from T order by a, b");
  Assert.assertEquals(stringifyValues(dataAll), rs);
  hiveConf.setBoolVar(HiveConf.ConfVars.OPTIMIZE_ACID_META_COLUMNS, false);
  rs = runStatementOnDriver("select a, b from T order by a, b");
  Assert.assertEquals(stringifyValues(dataAll), rs);
}
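The paired read-back checks before and after the compaction could each be collapsed into a loop over the flag; a sketch using only names from the test:

// Verify identical results with the ACID meta-column optimization on and off.
for (boolean optimize : new boolean[] { true, false }) {
  hiveConf.setBoolVar(HiveConf.ConfVars.OPTIMIZE_ACID_META_COLUMNS, optimize);
  List<String> rows = runStatementOnDriver("select a, b from T order by a, b");
  Assert.assertEquals(stringifyValues(dataAll), rows);
}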
Use of org.apache.hadoop.hive.metastore.txn.TxnStore in project hive by apache.
Class TestTxnConcatenate, method testConcatenatePart.
@Test
public void testConcatenatePart() throws Exception {
  runStatementOnDriver("insert into " + Table.ACIDTBLPART + " values(1,2,'p1'),(4,5,'p2')");
  runStatementOnDriver("update " + Table.ACIDTBLPART + " set b = 4 where p='p1'");
  runStatementOnDriver("insert into " + Table.ACIDTBLPART + " values(5,6,'p1'),(8,8,'p2')");

  String testQuery = "select ROW__ID, a, b, INPUT__FILE__NAME from " + Table.ACIDTBLPART + " order by a, b";
  String[][] expected = new String[][] {
      { "{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\t4", "acidtblpart/p=p1/delta_0000002_0000002_0000/bucket_00001_0" },
      { "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t4\t5", "acidtblpart/p=p2/delta_0000001_0000001_0000/bucket_00001_0" },
      { "{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t5\t6", "acidtblpart/p=p1/delta_0000003_0000003_0000/bucket_00001_0" },
      { "{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "acidtblpart/p=p2/delta_0000003_0000003_0000/bucket_00001_0" } };
  checkResult(expected, testQuery, false, "check data", LOG);

  /* In UTs there is no standalone HMS running to kick off compaction, so it is
     done via runWorker(); in normal usage 'concatenate' is blocking. */
  hiveConf.setBoolVar(HiveConf.ConfVars.TRANSACTIONAL_CONCATENATE_NOBLOCK, true);
  runStatementOnDriver("alter table " + Table.ACIDTBLPART + " PARTITION(p='p1') concatenate");

  TxnStore txnStore = TxnUtils.getTxnStore(hiveConf);
  ShowCompactResponse rsp = txnStore.showCompact(new ShowCompactRequest());
  Assert.assertEquals(1, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.INITIATED_RESPONSE, rsp.getCompacts().get(0).getState());

  runWorker(hiveConf);
  rsp = txnStore.showCompact(new ShowCompactRequest());
  Assert.assertEquals(1, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(0).getState());

  // After the worker runs, p=p1 reads from the new base; p=p2 is untouched.
  String[][] expected2 = new String[][] {
      { "{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\t4", "acidtblpart/p=p1/base_0000003_v0000021/bucket_00001" },
      { "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t4\t5", "acidtblpart/p=p2/delta_0000001_0000001_0000/bucket_00001_0" },
      { "{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t5\t6", "acidtblpart/p=p1/base_0000003_v0000021/bucket_00001" },
      { "{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "acidtblpart/p=p2/delta_0000003_0000003_0000/bucket_00001_0" } };
  checkResult(expected2, testQuery, false, "check data after concatenate", LOG);
}
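The single-entry state assertion appears twice (INITIATED before runWorker, CLEANING after); a hedged helper sketch, with a hypothetical name not found in the test class:

// Hypothetical helper: history holds exactly one compaction, in the expected state.
private static void assertSingleCompaction(TxnStore store, String expectedState) throws Exception {
  ShowCompactResponse rsp = store.showCompact(new ShowCompactRequest());
  Assert.assertEquals(1, rsp.getCompactsSize());
  Assert.assertEquals(expectedState, rsp.getCompacts().get(0).getState());
}

Usage would mirror the test: assertSingleCompaction(txnStore, TxnStore.INITIATED_RESPONSE), then runWorker(hiveConf), then assertSingleCompaction(txnStore, TxnStore.CLEANING_RESPONSE).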
Use of org.apache.hadoop.hive.metastore.txn.TxnStore in project hive by apache.
Class TestTxnNoBuckets, method testEmptyCompactionResult.
/**
* see HIVE-18429
*/
@Test
public void testEmptyCompactionResult() throws Exception {
  hiveConf.set(MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID.getVarname(), "true");
  runStatementOnDriver("drop table if exists T");
  runStatementOnDriver("create table T (a int, b int) stored as orc");
  int[][] data = { { 1, 2 }, { 3, 4 } };
  runStatementOnDriver("insert into T" + makeValuesClause(data));
  runStatementOnDriver("insert into T" + makeValuesClause(data));

  // Delete the bucket files so we are left with empty delta dirs.
  List<String> rs = runStatementOnDriver("select distinct INPUT__FILE__NAME from T");
  FileSystem fs = FileSystem.get(hiveConf);
  for (String path : rs) {
    fs.delete(new Path(path), true);
  }

  runStatementOnDriver("alter table T compact 'major'");
  TestTxnCommands2.runWorker(hiveConf);

  // Check status of compaction job.
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize());
  Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
  Assert.assertTrue(resp.getCompacts().get(0).getHadoopJobId().startsWith("job_local"));

  // Now run another compaction; make sure the empty dirs don't cause issues.
  runStatementOnDriver("insert into T" + makeValuesClause(data));
  runStatementOnDriver("alter table T compact 'major'");
  TestTxnCommands2.runWorker(hiveConf);

  // Check status of both compaction jobs.
  resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 2, resp.getCompactsSize());
  for (int i = 0; i < 2; i++) {
    Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(i).getState());
    Assert.assertTrue(resp.getCompacts().get(i).getHadoopJobId().startsWith("job_local"));
  }

  rs = runStatementOnDriver("select a, b from T order by a, b");
  Assert.assertEquals(stringifyValues(data), rs);
}
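The history check runs twice here with different expected sizes; a hedged sketch of a shared assertion (hypothetical helper; ShowCompactResponseElement is the element type returned by getCompacts()):

// Hypothetical helper: every compaction in history reached CLEANING and ran as a
// local Hadoop job, matching the per-entry assertions in the test above.
private static void assertAllCleaning(TxnStore store, int expectedSize) throws Exception {
  ShowCompactResponse resp = store.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", expectedSize, resp.getCompactsSize());
  for (ShowCompactResponseElement e : resp.getCompacts()) {
    Assert.assertEquals(TxnStore.CLEANING_RESPONSE, e.getState());
    Assert.assertTrue(e.getHadoopJobId().startsWith("job_local"));
  }
}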