Use of org.apache.hadoop.hive.metastore.api.ShowCompactRequest in project hive by apache.
From the class TestCleaner, method droppedPartition.
@Test
public void droppedPartition() throws Exception {
  Table t = newTable("default", "dp", true);
  Partition p = newPartition(t, "today");
  addDeltaFile(t, p, 1L, 22L, 22);
  addDeltaFile(t, p, 23L, 24L, 2);
  addBaseFile(t, p, 25L, 25);
  burnThroughTransactions(25);
  CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MAJOR);
  rqst.setPartitionname("ds=today");
  txnHandler.compact(rqst);
  CompactionInfo ci = txnHandler.findNextToCompact("fred");
  txnHandler.markCompacted(ci);
  txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
  ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
  startCleaner();
  // The request should remain in the history, marked as succeeded: the cleaner
  // has nothing to do for a dropped partition, so it closes the compaction out.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(1, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
}
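The assertion tail above — issue an empty ShowCompactRequest to pull the full compaction history, then check the state of the single entry — recurs in every example on this page. A minimal helper sketch (helper name hypothetical; import paths assumed from the metastore API and txn packages used in these tests):

  import java.util.List;
  import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
  import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
  import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
  import org.apache.hadoop.hive.metastore.txn.TxnStore;
  import org.junit.Assert;

  // Hypothetical helper: assert the history holds exactly one entry in the expected state.
  static void assertSingleCompactState(TxnStore txnHandler, String expectedState) throws Exception {
    // An empty ShowCompactRequest returns the entire compaction history.
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals(expectedState, compacts.get(0).getState());
  }

With it, the last three lines of the test collapse to assertSingleCompactState(txnHandler, TxnStore.SUCCEEDED_RESPONSE).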
Use of org.apache.hadoop.hive.metastore.api.ShowCompactRequest in project hive by apache.
From the class TestInitiator, method compactPartitionHighDeltaPct.
@Test
public void compactPartitionHighDeltaPct() throws Exception {
  Table t = newTable("default", "cphdp", true);
  Partition p = newPartition(t, "today");
  addBaseFile(t, p, 20L, 20);
  addDeltaFile(t, p, 21L, 22L, 2);
  addDeltaFile(t, p, 23L, 24L, 2);
  burnThroughTransactions(23);
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
  comp.setTablename("cphdp");
  comp.setPartitionname("ds=today");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));
  startInitiator();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("cphdp", compacts.get(0).getTablename());
  Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
  Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
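The Initiator picks a MAJOR compaction here because the two deltas total 4 records against a 20-record base, i.e. 20%, which is above the default delta-percentage threshold of 10%. A minimal sketch of adjusting that threshold in a test, assuming the HiveConf variable HIVE_COMPACTOR_DELTA_PCT_THRESHOLD (which backs hive.compactor.delta.pct.threshold):

  import org.apache.hadoop.hive.conf.HiveConf;

  // Assumption: HIVE_COMPACTOR_DELTA_PCT_THRESHOLD backs hive.compactor.delta.pct.threshold,
  // the ratio of delta size to base size above which a MAJOR compaction is initiated.
  HiveConf conf = new HiveConf();
  // Require deltas to reach 50% of the base before a MAJOR compaction is queued.
  HiveConf.setFloatVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_PCT_THRESHOLD, 0.5f);

With that setting, the 20% scenario above would no longer trigger a MAJOR compaction.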
Use of org.apache.hadoop.hive.metastore.api.ShowCompactRequest in project hive by apache.
From the class TestInitiator, method noCompactWhenNoCompactSet.
@Test
public void noCompactWhenNoCompactSet() throws Exception {
  Map<String, String> parameters = new HashMap<String, String>(1);
  parameters.put("NO_AUTO_COMPACTION", "true");
  Table t = newTable("default", "ncwncs", false, parameters);
  HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
  for (int i = 0; i < 11; i++) {
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("ncwncs");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
  }
  startInitiator();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(0, rsp.getCompactsSize());
}
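Here the eleven aborted transactions push the table past the HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD of 10, which would normally make the Initiator queue a compaction; the NO_AUTO_COMPACTION table property suppresses it. The same property can also be flipped on an existing table with plain DDL; a sketch using the runStatementOnDriver-style helper that appears in TestTxnCommands2 below (table name reused from this test for illustration):

  // Suppress automatic compaction for this table; the Initiator will skip it
  // regardless of how many aborted transactions or deltas accumulate.
  runStatementOnDriver("alter table ncwncs set tblproperties ('NO_AUTO_COMPACTION'='true')");
  // Re-enable automatic compaction later.
  runStatementOnDriver("alter table ncwncs set tblproperties ('NO_AUTO_COMPACTION'='false')");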
Use of org.apache.hadoop.hive.metastore.api.ShowCompactRequest in project hive by apache.
From the class TestTxnCommands2, method testOriginalFileReaderWhenNonAcidConvertedToAcid.
@Test
public void testOriginalFileReaderWhenNonAcidConvertedToAcid() throws Exception {
  // 1. Insert five rows into the non-ACID table.
  runStatementOnDriver("insert into " + Table.NONACIDORCTBL + "(a,b) values(1,2),(3,4),(5,6),(7,8),(9,10)");
  // 2. Convert NONACIDORCTBL to an ACID table and modify it.
  runStatementOnDriver("alter table " + Table.NONACIDORCTBL + " SET TBLPROPERTIES ('transactional'='true')");
  runStatementOnDriver("update " + Table.NONACIDORCTBL + " set b = b*2 where b in (4,10)");
  runStatementOnDriver("delete from " + Table.NONACIDORCTBL + " where a = 7");
  List<String> rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL + " order by a,b");
  int[][] resultData = new int[][] { { 1, 2 }, { 3, 8 }, { 5, 6 }, { 9, 20 } };
  Assert.assertEquals(stringifyValues(resultData), rs);
  // 3. Perform a major compaction.
  runStatementOnDriver("alter table " + Table.NONACIDORCTBL + " compact 'MAJOR'");
  runWorker(hiveConf);
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize());
  Assert.assertEquals("Unexpected state for compaction 0", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
  Assert.assertTrue(resp.getCompacts().get(0).getHadoopJobId().startsWith("job_local"));
  // 4. Perform a delete.
  runStatementOnDriver("delete from " + Table.NONACIDORCTBL + " where a = 1");
  rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL + " order by a,b");
  resultData = new int[][] { { 3, 8 }, { 5, 6 }, { 9, 20 } };
  Assert.assertEquals(stringifyValues(resultData), rs);
}
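runWorker performs the compaction itself, so the history entry sits in TxnStore.CLEANING_RESPONSE ("ready for cleaning") until a Cleaner pass removes the obsolete files and moves it to "succeeded". A test that needs the terminal state can poll the history; a minimal sketch, assuming TxnStore also exposes INITIATED_RESPONSE and WORKING_RESPONSE alongside the CLEANING_RESPONSE and SUCCEEDED_RESPONSE constants used on this page (helper name hypothetical):

  // Hypothetical helper: poll until compaction 0 leaves the initiated/working states,
  // or fail after the given number of attempts.
  static String waitForCompactionState(TxnStore txnHandler, int attempts) throws Exception {
    for (int i = 0; i < attempts; i++) {
      ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
      String state = rsp.getCompacts().get(0).getState();
      if (!TxnStore.INITIATED_RESPONSE.equals(state) && !TxnStore.WORKING_RESPONSE.equals(state)) {
        // e.g. "ready for cleaning", "succeeded", or a failure state
        return state;
      }
      Thread.sleep(100);
    }
    throw new AssertionError("compaction did not progress past initiated/working");
  }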
Use of org.apache.hadoop.hive.metastore.api.ShowCompactRequest in project hive by apache.
From the class TestCompactor, method schemaEvolutionAddColDynamicPartitioningInsert.
/**
 * Simple schema evolution: add columns, with dynamic partitioning.
 * @throws Exception
 */
@Test
public void schemaEvolutionAddColDynamicPartitioningInsert() throws Exception {
  String tblName = "dpct";
  List<String> colNames = Arrays.asList("a", "b");
  executeStatementOnDriver("drop table if exists " + tblName, driver);
  executeStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " +
      " PARTITIONED BY(ds string)" +
      // Currently, ACID requires the table to be bucketed.
      " CLUSTERED BY(a) INTO 2 BUCKETS" +
      " STORED AS ORC TBLPROPERTIES ('transactional'='true')", driver);
  // First INSERT round.
  executeStatementOnDriver("insert into " + tblName + " partition (ds) values (1, 'fred', " +
      "'today'), (2, 'wilma', 'yesterday')", driver);
  // ALTER TABLE ... ADD COLUMNS
  executeStatementOnDriver("ALTER TABLE " + tblName + " ADD COLUMNS(c int)", driver);
  // Validate that column c reads back as NULL for the pre-evolution rows.
  executeStatementOnDriver("SELECT * FROM " + tblName + " ORDER BY a", driver);
  ArrayList<String> valuesReadFromHiveDriver = new ArrayList<String>();
  driver.getResults(valuesReadFromHiveDriver);
  Assert.assertEquals(2, valuesReadFromHiveDriver.size());
  Assert.assertEquals("1\tfred\tNULL\ttoday", valuesReadFromHiveDriver.get(0));
  Assert.assertEquals("2\twilma\tNULL\tyesterday", valuesReadFromHiveDriver.get(1));
  // Second INSERT round, with new rows in the previously existing partition 'yesterday'.
  executeStatementOnDriver("insert into " + tblName + " partition (ds) values " +
      "(3, 'mark', 1900, 'soon'), (4, 'douglas', 1901, 'last_century'), " +
      "(5, 'doc', 1902, 'yesterday')", driver);
  // Validate the new insertions for column c.
  executeStatementOnDriver("SELECT * FROM " + tblName + " ORDER BY a", driver);
  valuesReadFromHiveDriver = new ArrayList<String>();
  driver.getResults(valuesReadFromHiveDriver);
  Assert.assertEquals(5, valuesReadFromHiveDriver.size());
  Assert.assertEquals("1\tfred\tNULL\ttoday", valuesReadFromHiveDriver.get(0));
  Assert.assertEquals("2\twilma\tNULL\tyesterday", valuesReadFromHiveDriver.get(1));
  Assert.assertEquals("3\tmark\t1900\tsoon", valuesReadFromHiveDriver.get(2));
  Assert.assertEquals("4\tdouglas\t1901\tlast_century", valuesReadFromHiveDriver.get(3));
  Assert.assertEquals("5\tdoc\t1902\tyesterday", valuesReadFromHiveDriver.get(4));
  Initiator initiator = new Initiator();
  initiator.setThreadId((int) initiator.getId());
  // A zero threshold makes the Initiator queue a compaction for every partition with deltas.
  conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 0);
  initiator.setHiveConf(conf);
  AtomicBoolean stop = new AtomicBoolean();
  stop.set(true);
  initiator.init(stop, new AtomicBoolean());
  initiator.run();
  TxnStore txnHandler = TxnUtils.getTxnStore(conf);
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(4, compacts.size());
  SortedSet<String> partNames = new TreeSet<String>();
  for (int i = 0; i < compacts.size(); i++) {
    Assert.assertEquals("default", compacts.get(i).getDbname());
    Assert.assertEquals(tblName, compacts.get(i).getTablename());
    Assert.assertEquals("initiated", compacts.get(i).getState());
    partNames.add(compacts.get(i).getPartitionname());
  }
  List<String> names = new ArrayList<String>(partNames);
  Assert.assertEquals("ds=last_century", names.get(0));
  Assert.assertEquals("ds=soon", names.get(1));
  Assert.assertEquals("ds=today", names.get(2));
  Assert.assertEquals("ds=yesterday", names.get(3));
  // Validate after compaction.
  executeStatementOnDriver("SELECT * FROM " + tblName + " ORDER BY a", driver);
  valuesReadFromHiveDriver = new ArrayList<String>();
  driver.getResults(valuesReadFromHiveDriver);
  Assert.assertEquals(5, valuesReadFromHiveDriver.size());
  Assert.assertEquals("1\tfred\tNULL\ttoday", valuesReadFromHiveDriver.get(0));
  Assert.assertEquals("2\twilma\tNULL\tyesterday", valuesReadFromHiveDriver.get(1));
  Assert.assertEquals("3\tmark\t1900\tsoon", valuesReadFromHiveDriver.get(2));
  Assert.assertEquals("4\tdouglas\t1901\tlast_century", valuesReadFromHiveDriver.get(3));
  Assert.assertEquals("5\tdoc\t1902\tyesterday", valuesReadFromHiveDriver.get(4));
}
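All five examples share the same query shape: an empty ShowCompactRequest returns the entire compaction history, and the test inspects the resulting ShowCompactResponseElement entries. When several tests share one metastore, filtering the response by database and table keeps assertions independent; a minimal sketch (helper name hypothetical, getters as used in the loop above):

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
  import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
  import org.apache.hadoop.hive.metastore.txn.TxnStore;

  // Hypothetical helper: return only the history entries for one table.
  static List<ShowCompactResponseElement> compactsFor(TxnStore txnHandler, String db, String table) throws Exception {
    List<ShowCompactResponseElement> result = new ArrayList<ShowCompactResponseElement>();
    for (ShowCompactResponseElement e : txnHandler.showCompact(new ShowCompactRequest()).getCompacts()) {
      if (db.equals(e.getDbname()) && table.equals(e.getTablename())) {
        result.add(e);
      }
    }
    return result;
  }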