Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache: class TestInitiator, method compactTableTooManyDeltas.
@Test
public void compactTableTooManyDeltas() throws Exception {
  Table t = newTable("default", "cttmd", false);
  addBaseFile(t, null, 200L, 200);
  // Eleven single-record delta files on top of the base -- enough to exceed
  // the Initiator's delta-count threshold.
  addDeltaFile(t, null, 201L, 201L, 1);
  addDeltaFile(t, null, 202L, 202L, 1);
  addDeltaFile(t, null, 203L, 203L, 1);
  addDeltaFile(t, null, 204L, 204L, 1);
  addDeltaFile(t, null, 205L, 205L, 1);
  addDeltaFile(t, null, 206L, 206L, 1);
  addDeltaFile(t, null, 207L, 207L, 1);
  addDeltaFile(t, null, 208L, 208L, 1);
  addDeltaFile(t, null, 209L, 209L, 1);
  addDeltaFile(t, null, 210L, 210L, 1);
  addDeltaFile(t, null, 211L, 211L, 1);
  burnThroughTransactions("default", "cttmd", 210);

  // Open a transaction, take a shared-write table lock, and commit,
  // so the Initiator sees recent write activity on the table.
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
  comp.setTablename("cttmd");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  long writeid = allocateWriteId("default", "cttmd", txnid);
  Assert.assertEquals(211, writeid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));

  startInitiator();

  // Exactly one compaction request should have been queued, and it should be minor.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("cttmd", compacts.get(0).getTablename());
  Assert.assertEquals(CompactionType.MINOR, compacts.get(0).getType());
}
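The minor-compaction decision above is driven purely by delta-file count: eleven deltas exceed the default hive.compactor.delta.num.threshold of 10. A minimal sketch of that check, assuming the standard HiveConf property; the helper name and the deltas parameter are hypothetical, not the Initiator's actual code:

  // Sketch only -- approximates the delta-count trigger, not Hive's implementation.
  // `deltas` stands in for the parsed delta directories of one table/partition.
  private static CompactionType checkDeltaCount(HiveConf conf, List<Path> deltas) {
    int numThreshold = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD);
    // In this test: 11 deltas > default threshold of 10 -> request a MINOR compaction.
    return deltas.size() > numThreshold ? CompactionType.MINOR : null;
  }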
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache: class TestInitiator, method compactTableHighDeltaPct.
@Test
public void compactTableHighDeltaPct() throws Exception {
  Table t = newTable("default", "cthdp", false);
  addBaseFile(t, null, 20L, 20);
  // Two deltas totalling 4 records on a 20-record base: a high delta-to-base ratio.
  addDeltaFile(t, null, 21L, 22L, 2);
  addDeltaFile(t, null, 23L, 24L, 2);
  burnThroughTransactions("default", "cthdp", 23);

  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
  comp.setTablename("cthdp");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  long writeid = allocateWriteId("default", "cthdp", txnid);
  Assert.assertEquals(24, writeid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));

  startInitiator();

  // The high delta percentage should trigger a major compaction.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("cthdp", compacts.get(0).getTablename());
  Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
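Here the trigger is the delta-to-base size ratio: 4 delta records over a 20-record base is 0.2, above the default hive.compactor.delta.pct.threshold of 0.1, so a major compaction is chosen. A hedged sketch of that comparison (again a hypothetical helper, not the real Initiator code):

  // Sketch only -- approximates the delta-percentage trigger.
  private static boolean deltaPctTooHigh(HiveConf conf, long deltaSize, long baseSize) {
    float pctThreshold = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_PCT_THRESHOLD);
    // In this test: 4.0 / 20 = 0.2 > 0.1 -> MAJOR compaction.
    return baseSize > 0 && (float) deltaSize / baseSize > pctThreshold;
  }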
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache: class TestInitiator, method noCompactTableDeltaPctNotHighEnough.
@Test
public void noCompactTableDeltaPctNotHighEnough() throws Exception {
  Table t = newTable("default", "nctdpnhe", false);
  addBaseFile(t, null, 50L, 50);
  // The same 4 delta records as above, but on a 50-record base, keep the
  // delta-to-base ratio below the compaction threshold.
  addDeltaFile(t, null, 21L, 22L, 2);
  addDeltaFile(t, null, 23L, 24L, 2);
  burnThroughTransactions("default", "nctdpnhe", 53);

  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
  comp.setTablename("nctdpnhe");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  long writeid = allocateWriteId("default", "nctdpnhe", txnid);
  Assert.assertEquals(54, writeid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));

  startInitiator();

  // No compaction should have been initiated.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(0, rsp.getCompactsSize());
}
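This is the counterpart of compactTableHighDeltaPct: the same two deltas (4 records) now sit on a 50-record base, so the ratio is 4/50 = 0.08, below the default 0.1 threshold, and the Initiator correctly queues nothing, which is what the zero-size assertion verifies.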
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache: class TestInitiator, method testMetaCache.
@Test
public void testMetaCache() throws Exception {
  String dbname = "default";
  String tableName = "tmc";
  Table t = newTable(dbname, tableName, true);
  List<LockComponent> components = new ArrayList<>();
  // Two partitions, each with enough delta data to become a compaction candidate.
  for (int i = 0; i < 2; i++) {
    Partition p = newPartition(t, "part" + (i + 1));
    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 22L, 2);
    addDeltaFile(t, p, 23L, 24L, 2);

    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, dbname);
    comp.setTablename(tableName);
    comp.setPartitionname("ds=part" + (i + 1));
    comp.setOperationType(DataOperationType.UPDATE);
    components.add(comp);
  }
  burnThroughTransactions(dbname, tableName, 23);

  long txnid = openTxn();
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  Assert.assertEquals(LockState.ACQUIRED, res.getState());
  long writeid = allocateWriteId(dbname, tableName, txnid);
  Assert.assertEquals(24, writeid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));

  conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_REQUEST_QUEUE, 3);
  // Spy on the Initiator so metastore lookups can be counted.
  Initiator initiator = Mockito.spy(new Initiator());
  initiator.setThreadId((int) t.getId());
  initiator.setConf(conf);
  initiator.init(new AtomicBoolean(true));
  initiator.run();

  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(2, compacts.size());
  // Both partitions belong to the same table, so it should be resolved only once.
  Mockito.verify(initiator, times(1)).resolveTable(Mockito.any());
}
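The times(1) verification is the heart of this test: both partition candidates belong to the same table, so the Table object should be fetched from the metastore once and reused for the second candidate. A minimal sketch of such a per-cycle cache, assuming a hypothetical map keyed by qualified table name (Hive's actual caching may be implemented differently):

  // Hypothetical per-cycle cache, for illustration only -- not Hive's actual code.
  private final Map<String, Table> tableCache = new HashMap<>();

  private Table resolveTableCached(CompactionInfo ci) throws MetaException {
    String key = ci.dbname + "." + ci.tableName;
    Table cached = tableCache.get(key);
    if (cached == null) {
      cached = resolveTable(ci);  // metastore round trip; once per table per cycle
      tableCache.put(key, cached);
    }
    return cached;
  }

An explicit get/put is used rather than computeIfAbsent because the underlying lookup throws a checked exception.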
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache: class TestInitiator, method compactTableWithMultipleBase.
@Test
public void compactTableWithMultipleBase() throws Exception {
  Table t = newTable("default", "nctdpnhe", false);
  // Two base files; the older base_50 is superseded by base_100.
  addBaseFile(t, null, 50L, 50);
  addBaseFile(t, null, 100L, 50);
  burnThroughTransactions("default", "nctdpnhe", 102);

  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
  comp.setTablename("nctdpnhe");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  long writeid = allocateWriteId("default", "nctdpnhe", txnid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));

  startInitiator();

  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(1, rsp.getCompactsSize());
  Assert.assertEquals("initiated", rsp.getCompacts().get(0).getState());

  startWorker();
  Thread.sleep(1L);

  // After the Worker finishes, the request is handed off to the Cleaner.
  ShowCompactResponse response = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("ready for cleaning", response.getCompacts().get(0).getState());
}
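The state progression here is the standard compaction lifecycle: the Initiator queues the request as "initiated", the Worker compacts and marks it "ready for cleaning", and the Cleaner later removes the superseded base and delta directories. The fixed 1 ms sleep makes the final assertion timing-sensitive if the Worker were to run asynchronously; a polling helper is a more robust pattern. A sketch, using only calls already present in the test (the helper itself is hypothetical):

  // Sketch: poll for an expected compaction state instead of sleeping once.
  private void waitForState(String expected) throws Exception {
    for (int i = 0; i < 100; i++) {
      ShowCompactResponse r = txnHandler.showCompact(new ShowCompactRequest());
      if (r.getCompacts() != null && !r.getCompacts().isEmpty()
          && expected.equals(r.getCompacts().get(0).getState())) {
        return;
      }
      Thread.sleep(100L);
    }
    Assert.fail("Compaction never reached state: " + expected);
  }

Calling waitForState("ready for cleaning") would then replace the sleep-plus-assert pair at the end of the test.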