use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.
the class TestWorker method compactNoBaseLotsOfDeltas.
/**
* These tests are starting to be a hack. The files written by addDeltaFile() are not proper
* Acid files and the {@link CompactorTest.MockRawReader} performs no merging of delta files and
* fakes isDelete() as a shortcut. As a result, the files created on disk are not representative of
* what they would look like in a real system.
* Making {@link org.apache.hadoop.hive.ql.txn.compactor.CompactorTest.MockRawReader} do proper
* delete event handling would duplicate either OrcRawRecordMerger or VectorizedOrcAcidRowBatchReader.
* @param type the type of compaction to request (minor or major)
* @throws Exception
*/
private void compactNoBaseLotsOfDeltas(CompactionType type) throws Exception {
conf.setIntVar(HiveConf.ConfVars.COMPACTOR_MAX_NUM_DELTA, 2);
Table t = newTable("default", "mapwb", true);
Partition p = newPartition(t, "today");
// addBaseFile(t, p, 20L, 20);
addDeltaFile(t, p, 21L, 21L, 2);
addDeltaFile(t, p, 23L, 23L, 2);
// make it look like streaming API use case
addDeltaFile(t, p, 25L, 29L, 2);
addDeltaFile(t, p, 31L, 32L, 3);
// make it look like 31-32 has been compacted, but not cleaned
addDeltaFile(t, p, 31L, 33L, 5);
addDeltaFile(t, p, 35L, 35L, 1);
/* Since COMPACTOR_MAX_NUM_DELTA=2,
 * we expect files 1,2 to be minor compacted by the 1st job to produce delta_21_23,
 * and files 3,5 to be minor compacted by the 2nd job to produce delta_25_33 (file 4 is
 * obsolete because delta_31_33 subsumes it, so it is skipped).
 *
 * The 'requested' minor compaction then combines delta_21_23, delta_25_33 and delta_35_35
 * to make delta_21_35, or major compaction creates base_35. */
burnThroughTransactions("default", "mapwb", 35);
CompactionRequest rqst = new CompactionRequest("default", "mapwb", type);
rqst.setPartitionname("ds=today");
txnHandler.compact(rqst);
startWorker();
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
List<ShowCompactResponseElement> compacts = rsp.getCompacts();
Assert.assertEquals(1, compacts.size());
Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
FileSystem fs = FileSystem.get(conf);
FileStatus[] stat = fs.listStatus(new Path(p.getSd().getLocation()));
/* includes delete_delta_21_23 and delete_delta_25_33, which are created as a result of compacting */
int numFilesExpected = 11 + (type == CompactionType.MINOR ? 1 : 0);
Assert.assertEquals(numFilesExpected, stat.length);
// Find the new delta file and make sure it has the right contents
List<String> matchesNotFound = new ArrayList<>(numFilesExpected);
matchesNotFound.add(makeDeleteDeltaDirNameCompacted(21, 23) + VISIBILITY_PATTERN);
matchesNotFound.add(makeDeleteDeltaDirNameCompacted(25, 33) + VISIBILITY_PATTERN);
matchesNotFound.add(makeDeltaDirName(21, 21));
matchesNotFound.add(makeDeltaDirName(23, 23));
// streaming ingest
matchesNotFound.add(makeDeltaDirNameCompacted(25, 29));
// streaming ingest
matchesNotFound.add(makeDeltaDirNameCompacted(31, 32));
// todo: this should have some _vXXXX suffix but addDeltaFile() doesn't support it
matchesNotFound.add(makeDeltaDirNameCompacted(31, 33));
matchesNotFound.add(makeDeltaDirName(35, 35));
matchesNotFound.add(makeDeltaDirNameCompacted(21, 23) + VISIBILITY_PATTERN);
matchesNotFound.add(makeDeltaDirNameCompacted(25, 33) + VISIBILITY_PATTERN);
if (type == CompactionType.MINOR) {
matchesNotFound.add(makeDeltaDirNameCompacted(21, 35) + VISIBILITY_PATTERN);
matchesNotFound.add(makeDeleteDeltaDirNameCompacted(21, 35) + VISIBILITY_PATTERN);
}
if (type == CompactionType.MAJOR) {
matchesNotFound.add(AcidUtils.baseDir(35) + VISIBILITY_PATTERN);
}
for (FileStatus f : stat) {
for (int j = 0; j < matchesNotFound.size(); j++) {
if (f.getPath().getName().matches(matchesNotFound.get(j))) {
matchesNotFound.remove(j);
break;
}
}
}
if (!matchesNotFound.isEmpty()) {
Assert.fail("Files remaining: " + matchesNotFound + "; " + toString(stat));
}
}
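For context, each ShowCompactResponseElement is one row of the compaction queue. A minimal sketch of reading the queue back, using the Thrift-generated getters that pair with the setters shown in generateElement below (the println formatting is illustrative only, not part of the test above):

// Sketch: iterate the compaction queue entries returned by the metastore.
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
for (ShowCompactResponseElement e : rsp.getCompacts()) {
  // e.g. "default.mapwb partition ds=today [MAJOR] state=ready for cleaning"
  System.out.println(e.getDbname() + "." + e.getTablename()
      + (e.isSetPartitionname() ? " partition " + e.getPartitionname() : "")
      + " [" + e.getType() + "] state=" + e.getState());
}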
use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.
the class TestCompactionMetrics method testAgeMetricsNotSet.
@Test
public void testAgeMetricsNotSet() {
List<ShowCompactResponseElement> elements = ImmutableList.of(
    generateElement(1, "db", "tb", null, CompactionType.MAJOR, TxnStore.FAILED_RESPONSE, 1L),
    generateElement(5, "db", "tb3", "p1", CompactionType.MINOR, TxnStore.DID_NOT_INITIATE_RESPONSE, 2L),
    generateElement(9, "db2", "tb", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE, 3L));
ShowCompactResponse scr = new ShowCompactResponse();
scr.setCompacts(elements);
AcidMetricService.updateMetricsFromShowCompact(scr);
// Check that it is not set
Assert.assertEquals(0, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_OLDEST_ENQUEUE_AGE).intValue());
Assert.assertEquals(0, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_OLDEST_WORKING_AGE).intValue());
Assert.assertEquals(0, Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_OLDEST_CLEANING_AGE).intValue());
}
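The age gauges only get populated when an element is in the matching state: none of the terminal states above (failed, did not initiate, succeeded) contribute. A hedged sketch of the positive case, assuming TxnStore.INITIATED_RESPONSE marks a still-queued compaction (as the TxnStore state constants used above suggest) and that enqueue age is derived from the enqueue timestamp; the timing values are illustrative:

// Sketch: one element still in the initiated state should presumably drive
// the oldest-enqueue-age gauge above zero.
long enqueued = System.currentTimeMillis() - 15_000L; // queued 15s ago (illustrative)
ShowCompactResponse scr = new ShowCompactResponse();
scr.setCompacts(ImmutableList.of(
    generateElement(2, "db", "tb", null, CompactionType.MAJOR, TxnStore.INITIATED_RESPONSE, enqueued)));
AcidMetricService.updateMetricsFromShowCompact(scr);
Assert.assertTrue(
    Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_OLDEST_ENQUEUE_AGE).intValue() >= 15);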
use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.
the class TestCompactionMetrics method testInitiatorPerfMetricsDisabled.
@Test
public void testInitiatorPerfMetricsDisabled() throws Exception {
MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, false);
Metrics.initialize(conf);
int originalValue = Metrics.getOrCreateGauge(INITIATED_METRICS_KEY).intValue();
long initiatorCycles = Objects.requireNonNull(Metrics.getOrCreateTimer(INITIATOR_CYCLE_KEY)).getCount();
Table t = newTable("default", "imd", true);
List<LockComponent> components = new ArrayList<>();
for (int i = 0; i < 10; i++) {
Partition p = newPartition(t, "part" + (i + 1));
addBaseFile(t, p, 20L, 20);
addDeltaFile(t, p, 21L, 22L, 2);
addDeltaFile(t, p, 23L, 24L, 2);
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
comp.setTablename("imd");
comp.setPartitionname("ds=part" + (i + 1));
comp.setOperationType(DataOperationType.UPDATE);
components.add(comp);
}
burnThroughTransactions("default", "imd", 23);
long txnid = openTxn();
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
Assert.assertEquals(LockState.ACQUIRED, res.getState());
long writeid = allocateWriteId("default", "imd", txnid);
Assert.assertEquals(24, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
List<ShowCompactResponseElement> compacts = rsp.getCompacts();
Assert.assertEquals(10, compacts.size());
Assert.assertEquals(initiatorCycles, Objects.requireNonNull(Metrics.getOrCreateTimer(INITIATOR_CYCLE_KEY)).getCount());
runAcidMetricService();
Assert.assertEquals(originalValue, Metrics.getOrCreateGauge(INITIATED_METRICS_KEY).intValue());
}
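An enabled counterpart of this test would flip the same flag and assert that the counters do move. A hedged sketch over the same setup (the expectation that one initiator pass advances the cycle timer is an assumption about the metrics wiring, not taken from the test above):

// Sketch: with metrics enabled, one initiator pass over the same ten eligible
// partitions should bump the cycle timer.
MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true);
Metrics.initialize(conf);
long cyclesBefore = Objects.requireNonNull(Metrics.getOrCreateTimer(INITIATOR_CYCLE_KEY)).getCount();
startInitiator();
runAcidMetricService();
Assert.assertTrue(
    Objects.requireNonNull(Metrics.getOrCreateTimer(INITIATOR_CYCLE_KEY)).getCount() > cyclesBefore);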
use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.
the class TestCompactionMetrics method generateElement.
private ShowCompactResponseElement generateElement(long id, String db, String table, String partition,
    CompactionType type, String state, long enqueueTime, boolean manuallyInitiatedCompaction,
    String initiatorVersion, String workerVersion, long startTime, long cleanerStartTime) {
ShowCompactResponseElement element = new ShowCompactResponseElement(db, table, type, state);
element.setId(id);
element.setPartitionname(partition);
element.setEnqueueTime(enqueueTime);
String runtimeId;
if (manuallyInitiatedCompaction) {
runtimeId = "hs2-host-" + ThreadLocalRandom.current().nextInt(999) + "-" + HiveMetaStoreClient.MANUALLY_INITIATED_COMPACTION;
} else {
runtimeId = ServerUtils.hostname() + "-" + ThreadLocalRandom.current().nextInt(999);
}
String workerId = "hs2-host-" + ThreadLocalRandom.current().nextInt(999);
element.setInitiatorId(runtimeId);
element.setWorkerid(workerId);
element.setInitiatorVersion(initiatorVersion);
element.setWorkerVersion(workerVersion);
element.setStart(startTime);
element.setCleanerStart(cleanerStartTime);
return element;
}
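Note that the calls in testAgeMetricsNotSet and testWorkingAgeMetrics pass fewer arguments than this 12-parameter signature, so the full test class presumably defines shorter convenience overloads that fill in defaults. A plausible sketch of the 7-argument one (the chosen defaults are assumptions, not the actual Hive source):

// Sketch of an assumed convenience overload: default to an automatically
// initiated compaction with unknown versions and zeroed timestamps.
private ShowCompactResponseElement generateElement(long id, String db, String table, String partition,
    CompactionType type, String state, long enqueueTime) {
  return generateElement(id, db, table, partition, type, state, enqueueTime,
      false, null, null, 0L, 0L);
}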
use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.
the class TestCompactionMetrics method testWorkingAgeMetrics.
@Test
public void testWorkingAgeMetrics() {
ShowCompactResponse scr = new ShowCompactResponse();
long start = System.currentTimeMillis() - 1000L;
List<ShowCompactResponseElement> elements = ImmutableList.of(
    generateElement(17, "db3", "tb7", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE,
        System.currentTimeMillis(), true, "4.0.0", "4.0.0", start));
scr.setCompacts(elements);
AcidMetricService.updateMetricsFromShowCompact(scr);
long diff = (System.currentTimeMillis() - start) / 1000;
// Check that we have at least 1s old compaction age, but not more than expected
int age = Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_OLDEST_WORKING_AGE).intValue();
Assert.assertTrue(age <= diff);
Assert.assertTrue(age >= 1);
}
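The bounds in these assertions follow from how the working age is presumably computed: for each element in the working state, take now minus getStart() in seconds and keep the maximum. A hedged reconstruction of that computation over a ShowCompactResponse scr (a sketch of the presumed logic, not the actual AcidMetricService code):

// Sketch: the oldest-working-age gauge is presumably the largest
// (now - start) across elements currently in the working state.
long now = System.currentTimeMillis();
int oldestWorkingAgeSec = 0;
for (ShowCompactResponseElement e : scr.getCompacts()) {
  if (TxnStore.WORKING_RESPONSE.equals(e.getState()) && e.isSetStart()) {
    oldestWorkingAgeSec = Math.max(oldestWorkingAgeSec, (int) ((now - e.getStart()) / 1000));
  }
}
Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_OLDEST_WORKING_AGE).set(oldestWorkingAgeSec);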