Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.
From the class TestCompactionMetrics, the method testInitiatorDurationMeasuredCorrectly:
@Test
public void testInitiatorDurationMeasuredCorrectly() throws Exception {
  final String DEFAULT_DB = "default";
  final String TABLE_NAME = "x_table";
  final String PARTITION_NAME = "part";
  List<LockComponent> components = new ArrayList<>();
  Table table = newTable(DEFAULT_DB, TABLE_NAME, true);
  // Create 10 partitions, each with a base file and three deltas, plus a matching lock component.
  for (int i = 0; i < 10; i++) {
    String partitionName = PARTITION_NAME + i;
    Partition p = newPartition(table, partitionName);
    addBaseFile(table, p, 20L, 20);
    addDeltaFile(table, p, 21L, 22L, 2);
    addDeltaFile(table, p, 23L, 24L, 2);
    addDeltaFile(table, p, 21L, 24L, 4);
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, DEFAULT_DB);
    comp.setTablename(TABLE_NAME);
    comp.setPartitionname("ds=" + partitionName);
    comp.setOperationType(DataOperationType.UPDATE);
    components.add(comp);
  }
  burnThroughTransactions(DEFAULT_DB, TABLE_NAME, 25);
  // Open a transaction, acquire the shared-write locks, then commit so the Initiator has work to find.
  long txnId = openTxn();
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnId);
  LockResponse res = txnHandler.lock(req);
  Assert.assertEquals(LockState.ACQUIRED, res.getState());
  allocateWriteId(DEFAULT_DB, TABLE_NAME, txnId);
  txnHandler.commitTxn(new CommitTxnRequest(txnId));
  long initiatorStart = System.currentTimeMillis();
  startInitiator();
  long durationUpperLimit = System.currentTimeMillis() - initiatorStart;
  // The duration gauge must fall between 0 and the wall-clock time spent in startInitiator().
  int initiatorDurationFromMetric =
      Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_INITIATOR_CYCLE_DURATION).intValue();
  Assert.assertTrue("Initiator duration must be within the limits",
      (0 < initiatorDurationFromMetric) && (initiatorDurationFromMetric <= durationUpperLimit));
}
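In every example on this page, txnHandler.lock(req) returns a LockResponse whose state is asserted to be ACQUIRED. When a conflicting lock is held, the response can instead come back as WAITING; a minimal sketch of re-polling with the returned lock id, assuming checkLock/CheckLockRequest are available on the same metastore transaction handler (they are not used in the test above), might look like this:

// Sketch only, not part of the original test: re-poll a WAITING lock using the lock id
// carried by the LockResponse. Reuses txnHandler and req from the example above.
LockResponse res = txnHandler.lock(req);
while (res.getState() == LockState.WAITING) {
  Thread.sleep(100);
  res = txnHandler.checkLock(new CheckLockRequest(res.getLockid()));
}
Assert.assertEquals(LockState.ACQUIRED, res.getState());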
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.
From the class TestCompactionMetrics, the method testInitiatorPerfMetricsEnabled:
@Test
public void testInitiatorPerfMetricsEnabled() throws Exception {
  Metrics.getOrCreateGauge(INITIATED_METRICS_KEY).set(0);
  long initiatorCycles = Objects.requireNonNull(Metrics.getOrCreateTimer(INITIATOR_CYCLE_KEY)).getCount();
  Table t = newTable("default", "ime", true);
  List<LockComponent> components = new ArrayList<>();
  // Create 10 partitions, each with a base file and two deltas, plus a matching lock component.
  for (int i = 0; i < 10; i++) {
    Partition p = newPartition(t, "part" + (i + 1));
    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 22L, 2);
    addDeltaFile(t, p, 23L, 24L, 2);
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
    comp.setTablename("ime");
    comp.setPartitionname("ds=part" + (i + 1));
    comp.setOperationType(DataOperationType.UPDATE);
    components.add(comp);
  }
  burnThroughTransactions("default", "ime", 23);
  long txnid = openTxn();
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  Assert.assertEquals(LockState.ACQUIRED, res.getState());
  long writeid = allocateWriteId("default", "ime", txnid);
  Assert.assertEquals(24, writeid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));
  startInitiator();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(10, compacts.size());
  // Exactly one initiator cycle ran, so the cycle timer count increased by one.
  Assert.assertEquals(initiatorCycles + 1, Objects.requireNonNull(Metrics.getOrCreateTimer(INITIATOR_CYCLE_KEY)).getCount());
  runAcidMetricService();
  // The metric service reports the 10 newly initiated compactions on the gauge.
  Assert.assertEquals(10, Metrics.getOrCreateGauge(INITIATED_METRICS_KEY).intValue());
}
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.
From the class TestInitiator, the method majorCompactOnPartitionTooManyAborts:
@Test
public void majorCompactOnPartitionTooManyAborts() throws Exception {
  Table t = newTable("default", "mcoptma", true);
  Partition p = newPartition(t, "today");
  // Threshold of 10 aborted transactions; the loop below aborts 11 against the same partition.
  HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
  for (int i = 0; i < 11; i++) {
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("mcoptma");
    comp.setPartitionname("ds=today");
    comp.setOperationType(DataOperationType.DELETE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
  }
  startInitiator();
  // Exceeding the aborted-transaction threshold makes the Initiator queue a MAJOR compaction for the partition.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("mcoptma", compacts.get(0).getTablename());
  Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
  Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.
From the class TestInitiator, the method noCompactTableNotEnoughDeltas:
@Test
public void noCompactTableNotEnoughDeltas() throws Exception {
  Table t = newTable("default", "nctned", false);
  addBaseFile(t, null, 200L, 200);
  addDeltaFile(t, null, 201L, 205L, 5);
  addDeltaFile(t, null, 206L, 211L, 6);
  burnThroughTransactions("default", "nctned", 210);
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
  comp.setTablename("nctned");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  long writeid = allocateWriteId("default", "nctned", txnid);
  Assert.assertEquals(211, writeid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));
  startInitiator();
  // Only two small deltas against a large base: below the compaction thresholds, so nothing is initiated.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(0, rsp.getCompactsSize());
}
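The layout above (one large base plus only two deltas) stays below the Initiator's delta thresholds, which is why no compaction is queued. As an illustrative assumption only, lowering the delta-count threshold, commonly exposed through the hive.compactor.delta.num.threshold property, in a variant of this test should make the same table eligible:

// Illustrative assumption, not part of the original test: with the delta-count threshold
// lowered to 1, the two deltas above would be enough for the Initiator to queue a compaction.
conf.set("hive.compactor.delta.num.threshold", "1");
startInitiator();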
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.
From the class TestDeltaFilesMetrics, the method testDeltaFileMetricUnpartitionedTable:
@Test
public void testDeltaFileMetricUnpartitionedTable() throws Exception {
  String dbName = "default";
  String tblName = "dp";
  Table t = newTable(dbName, tblName, false);
  List<LockComponent> components = new ArrayList<>();
  addBaseFile(t, null, 20L, 20);
  addDeltaFile(t, null, 21L, 22L, 2);
  addDeltaFile(t, null, 23L, 24L, 20);
  components.add(createLockComponent(dbName, tblName, null));
  burnThroughTransactions(dbName, tblName, 24);
  long txnId = openTxn();
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnId);
  LockResponse res = txnHandler.lock(req);
  Assert.assertEquals(LockState.ACQUIRED, res.getState());
  long writeId = allocateWriteId(dbName, tblName, txnId);
  Assert.assertEquals(25, writeId);
  txnHandler.commitTxn(new CommitTxnRequest(txnId));
  startInitiator();
  TimeUnit.SECONDS.sleep(2);
  // After the Initiator: 2 active deltas, 1 small delta, 0 obsolete deltas.
  verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName, 2), MetricsConstants.COMPACTION_NUM_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName, 1), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
  startWorker();
  TimeUnit.SECONDS.sleep(2);
  // After the Worker compacts: 0 active deltas, 0 small deltas, 2 obsolete deltas.
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName, 2), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
  startCleaner();
  TimeUnit.SECONDS.sleep(2);
  // After the Cleaner removes the obsolete files: 0 active, 0 small, 0 obsolete deltas.
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
}
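The createLockComponent(dbName, tblName, null) call above is a helper defined elsewhere in TestDeltaFilesMetrics whose body is not shown here. A hypothetical reconstruction, inferred from the inline LockComponent construction in the earlier examples (table-level lock when no partition name is passed), might look like this:

// Hypothetical sketch of the helper used above; the real implementation may differ.
private static LockComponent createLockComponent(String dbName, String tblName, String partName) {
  LockLevel level = (partName == null) ? LockLevel.TABLE : LockLevel.PARTITION;
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, level, dbName);
  comp.setTablename(tblName);
  if (partName != null) {
    comp.setPartitionname(partName);
  }
  comp.setOperationType(DataOperationType.UPDATE);
  return comp;
}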