Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
In the class TestCompactionMetrics, the method testDBMetrics:
@Test
public void testDBMetrics() throws Exception {
  String dbName = "default";
  String tblName = "dcamc";
  Table t = newTable(dbName, tblName, false);
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TXN_USE_MIN_HISTORY_LEVEL, false);
  long start = System.currentTimeMillis();
  // Run 24 transactions, leaving txns 22, 23 and 24 open
  burnThroughTransactions(t.getDbName(), t.getTableName(), 24, new HashSet<>(Arrays.asList(22L, 23L, 24L)), null);
  // Txn 25 is a replication-created transaction
  openTxn(TxnType.REPL_CREATED);

  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, t.getDbName());
  comp.setTablename(t.getTableName());
  comp.setOperationType(DataOperationType.UPDATE);
  LockRequest req = new LockRequest(Lists.newArrayList(comp), "me", "localhost");
  req.setTxnid(22);
  LockResponse res = txnHandler.lock(req);
  Assert.assertEquals(LockState.ACQUIRED, res.getState());
  // Commit txn 22; txns 23, 24 (non-repl) and 25 (repl) remain open
  txnHandler.commitTxn(new CommitTxnRequest(22));

  req.setTxnid(23);
  res = txnHandler.lock(req);
  Assert.assertEquals(LockState.ACQUIRED, res.getState());

  Thread.sleep(1000);
  runAcidMetricService();
  long diff = (System.currentTimeMillis() - start) / 1000;

  Assert.assertEquals(24, Metrics.getOrCreateGauge(MetricsConstants.NUM_TXN_TO_WRITEID).intValue());
  Assert.assertEquals(1, Metrics.getOrCreateGauge(MetricsConstants.NUM_COMPLETED_TXN_COMPONENTS).intValue());
  Assert.assertEquals(2, Metrics.getOrCreateGauge(MetricsConstants.NUM_OPEN_NON_REPL_TXNS).intValue());
  Assert.assertEquals(1, Metrics.getOrCreateGauge(MetricsConstants.NUM_OPEN_REPL_TXNS).intValue());
  Assert.assertEquals(23, Metrics.getOrCreateGauge(MetricsConstants.OLDEST_OPEN_NON_REPL_TXN_ID).longValue());
  Assert.assertTrue(Metrics.getOrCreateGauge(MetricsConstants.OLDEST_OPEN_NON_REPL_TXN_AGE).intValue() <= diff);
  Assert.assertTrue(Metrics.getOrCreateGauge(MetricsConstants.OLDEST_OPEN_NON_REPL_TXN_AGE).intValue() >= 1);
  Assert.assertEquals(25, Metrics.getOrCreateGauge(MetricsConstants.OLDEST_OPEN_REPL_TXN_ID).longValue());
  Assert.assertTrue(Metrics.getOrCreateGauge(MetricsConstants.OLDEST_OPEN_REPL_TXN_AGE).intValue() <= diff);
  Assert.assertTrue(Metrics.getOrCreateGauge(MetricsConstants.OLDEST_OPEN_REPL_TXN_AGE).intValue() >= 1);
  Assert.assertEquals(1, Metrics.getOrCreateGauge(MetricsConstants.NUM_LOCKS).intValue());
  Assert.assertTrue(Metrics.getOrCreateGauge(MetricsConstants.OLDEST_LOCK_AGE).intValue() <= diff);
  Assert.assertTrue(Metrics.getOrCreateGauge(MetricsConstants.OLDEST_LOCK_AGE).intValue() >= 1);

  txnHandler.cleanTxnToWriteIdTable();
  runAcidMetricService();
  Assert.assertEquals(3, Metrics.getOrCreateGauge(MetricsConstants.NUM_TXN_TO_WRITEID).intValue());

  start = System.currentTimeMillis();
  // Run 3 more transactions, aborting txns 26 and 28
  burnThroughTransactions(dbName, tblName, 3, null, new HashSet<>(Arrays.asList(26L, 28L)));
  Thread.sleep(1000);
  runAcidMetricService();
  diff = (System.currentTimeMillis() - start) / 1000;
  Assert.assertTrue(Metrics.getOrCreateGauge(MetricsConstants.OLDEST_ABORTED_TXN_AGE).intValue() <= diff);
  Assert.assertTrue(Metrics.getOrCreateGauge(MetricsConstants.OLDEST_ABORTED_TXN_AGE).intValue() >= 1);
  Assert.assertEquals(26, Metrics.getOrCreateGauge(MetricsConstants.OLDEST_ABORTED_TXN_ID).longValue());
  Assert.assertEquals(2, Metrics.getOrCreateGauge(MetricsConstants.NUM_ABORTED_TXNS).intValue());
}
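Distilled from the test above, a minimal sketch of the commit path: open a transaction, take a write lock on it, and commit it by id with CommitTxnRequest. It assumes an initialized TxnStore (txnHandler); the database and table names are placeholders, not from the test.

  // Minimal open/lock/commit lifecycle (sketch; assumes txnHandler is an initialized TxnStore)
  long txnId = txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost")).getTxn_ids().get(0);
  LockComponent lc = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "some_db"); // placeholder names
  lc.setTablename("some_table");
  lc.setOperationType(DataOperationType.UPDATE);
  LockRequest lock = new LockRequest(Collections.singletonList(lc), "me", "localhost");
  lock.setTxnid(txnId);
  txnHandler.lock(lock);
  // CommitTxnRequest only requires the transaction id; optional fields (for example
  // the replication policy used with REPL_CREATED txns) go through the generated setters.
  txnHandler.commitTxn(new CommitTxnRequest(txnId));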
Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
In the class TestCompactionMetrics, the method testInitiatorPerfMetricsDisabled:
@Test
public void testInitiatorPerfMetricsDisabled() throws Exception {
  // Re-initialize metrics with METRICS_ENABLED=false, then verify an Initiator
  // run leaves the perf counters untouched
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, false);
  Metrics.initialize(conf);
  int originalValue = Metrics.getOrCreateGauge(INITIATED_METRICS_KEY).intValue();
  long initiatorCycles = Objects.requireNonNull(Metrics.getOrCreateTimer(INITIATOR_CYCLE_KEY)).getCount();
  Table t = newTable("default", "imd", true);
  List<LockComponent> components = new ArrayList<>();
  for (int i = 0; i < 10; i++) {
    Partition p = newPartition(t, "part" + (i + 1));
    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 22L, 2);
    addDeltaFile(t, p, 23L, 24L, 2);
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
    comp.setTablename("imd");
    comp.setPartitionname("ds=part" + (i + 1));
    comp.setOperationType(DataOperationType.UPDATE);
    components.add(comp);
  }
  burnThroughTransactions("default", "imd", 23);

  long txnid = openTxn();
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  Assert.assertEquals(LockState.ACQUIRED, res.getState());
  long writeid = allocateWriteId("default", "imd", txnid);
  Assert.assertEquals(24, writeid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));

  startInitiator();

  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(10, compacts.size());
  // Compactions were initiated, but with metrics disabled neither the cycle timer
  // nor the initiated gauge should have moved
  Assert.assertEquals(initiatorCycles, Objects.requireNonNull(Metrics.getOrCreateTimer(INITIATOR_CYCLE_KEY)).getCount());
  runAcidMetricService();
  Assert.assertEquals(originalValue, Metrics.getOrCreateGauge(INITIATED_METRICS_KEY).intValue());
}
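The behavior the test relies on is a configuration check before any metric is recorded. A hedged sketch of that gating pattern (illustrative, not the exact Initiator internals):

  // Sketch of config-gated metric recording; assumes the same conf/Metrics as above
  long cycleStart = System.currentTimeMillis();
  // ... one initiator cycle runs here ...
  if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED)) {
    // Only touch the registry when metrics are on; with METRICS_ENABLED=false the
    // timer keeps its previous count, which is what the test asserts
    Metrics.getOrCreateTimer(INITIATOR_CYCLE_KEY)
        .update(System.currentTimeMillis() - cycleStart, TimeUnit.MILLISECONDS);
  }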
Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
In the class TestCompactionMetrics, the method testInitiatorFailuresCountedCorrectly:
@Test
public void testInitiatorFailuresCountedCorrectly() throws Exception {
  final String DEFAULT_DB = "default";
  final String SUCCESS_TABLE_NAME = "success_table";
  final String FAILING_TABLE_NAME = "failing_table";
  final String PARTITION_NAME = "part";
  final long EXPECTED_SUCCESS_COUNT = 10;
  final long EXPECTED_FAIL_COUNT = 6;
  // Use a TxnStore implementation that fails every request against the failing table
  ControlledFailingTxHandler.failedTableName = FAILING_TABLE_NAME;
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TXN_STORE_IMPL,
      "org.apache.hadoop.hive.ql.txn.compactor.TestCompactionMetrics$ControlledFailingTxHandler");
  Table failedTable = newTable(DEFAULT_DB, FAILING_TABLE_NAME, true);
  Table succeededTable = newTable(DEFAULT_DB, SUCCESS_TABLE_NAME, true);
  for (Table table : new Table[] { succeededTable, failedTable }) {
    List<LockComponent> components = new ArrayList<>();
    String tableName = table.getTableName();
    long partitionCount = FAILING_TABLE_NAME.equals(tableName) ? EXPECTED_FAIL_COUNT : EXPECTED_SUCCESS_COUNT;
    for (int i = 0; i < partitionCount; i++) {
      String partitionName = PARTITION_NAME + i;
      Partition p = newPartition(table, partitionName);
      addBaseFile(table, p, 20L, 20);
      addDeltaFile(table, p, 21L, 22L, 2);
      addDeltaFile(table, p, 23L, 24L, 2);
      addDeltaFile(table, p, 21L, 24L, 4);
      LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, DEFAULT_DB);
      comp.setTablename(tableName);
      comp.setPartitionname("ds=" + partitionName);
      comp.setOperationType(DataOperationType.UPDATE);
      components.add(comp);
    }
    burnThroughTransactions(DEFAULT_DB, tableName, 25);
    long txnid = openTxn();
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    Assert.assertEquals(LockState.ACQUIRED, res.getState());
    long writeid = allocateWriteId(DEFAULT_DB, tableName, txnid);
    Assert.assertEquals(26, writeid);
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
  }
  conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_REQUEST_QUEUE, 5);
  startInitiator();
  // Check that all the compactions have been initiated
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(EXPECTED_FAIL_COUNT + EXPECTED_SUCCESS_COUNT, rsp.getCompactsSize());
  Assert.assertEquals(EXPECTED_FAIL_COUNT,
      Metrics.getOrCreateCounter(MetricsConstants.COMPACTION_INITIATOR_FAILURE_COUNTER).getCount());
}
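The failure accounting the test verifies follows the usual counter-in-catch pattern: one increment per candidate whose compaction request throws. A minimal sketch, assuming a codahale-style Counter from the same Metrics registry; the candidates collection and requestCompaction helper are hypothetical:

  // Sketch: count one failure per compaction request that throws (illustrative helpers)
  Counter failures = Metrics.getOrCreateCounter(MetricsConstants.COMPACTION_INITIATOR_FAILURE_COUNTER);
  for (CompactionInfo candidate : candidates) {
    try {
      requestCompaction(candidate);  // hypothetical helper submitting one request
    } catch (Exception e) {
      failures.inc();  // one increment per failed table/partition, matching EXPECTED_FAIL_COUNT
    }
  }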
Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
In the class TestCleaner, the method cleanupAfterMajorTableCompactionWithLongRunningQuery:
@Test
public void cleanupAfterMajorTableCompactionWithLongRunningQuery() throws Exception {
  Table t = newTable("default", "camtc", false);
  addBaseFile(t, null, 20L, 20);
  addDeltaFile(t, null, 21L, 22L, 2);
  addDeltaFile(t, null, 23L, 24L, 2);
  addBaseFile(t, null, 25L, 25, 26);
  burnThroughTransactions("default", "camtc", 25);

  CompactionRequest rqst = new CompactionRequest("default", "camtc", CompactionType.MAJOR);
  txnHandler.compact(rqst);
  FindNextCompactRequest findNextCompactRequest = new FindNextCompactRequest();
  findNextCompactRequest.setWorkerId("fred");
  findNextCompactRequest.setWorkerVersion("4.0.0");
  CompactionInfo ci = txnHandler.findNextToCompact(findNextCompactRequest);
  ci.runAs = System.getProperty("user.name");
  long compactTxn = openTxn(TxnType.COMPACTION);
  ValidTxnList validTxnList = TxnCommonUtils.createValidReadTxnList(
      txnHandler.getOpenTxns(Collections.singletonList(TxnType.READ_ONLY)), compactTxn);
  GetValidWriteIdsRequest validWriteIdsRqst = new GetValidWriteIdsRequest(Collections.singletonList(ci.getFullTableName()));
  validWriteIdsRqst.setValidTxnList(validTxnList.writeToString());
  ValidCompactorWriteIdList tblValidWriteIds = TxnUtils.createValidCompactWriteIdList(
      txnHandler.getValidWriteIds(validWriteIdsRqst).getTblValidWriteIds().get(0));
  ci.highestWriteId = tblValidWriteIds.getHighWatermark();
  txnHandler.updateCompactorState(ci, compactTxn);
  txnHandler.markCompacted(ci);

  // Open a query during compaction
  long longQuery = openTxn();
  txnHandler.commitTxn(new CommitTxnRequest(compactTxn));

  startCleaner();

  // The long-running query should prevent the cleanup
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(1, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(0).getState());

  // Check that the files are not removed
  List<Path> paths = getDirectories(conf, t, null);
  Assert.assertEquals(4, paths.size());

  // After the commit, cleaning can proceed
  txnHandler.commitTxn(new CommitTxnRequest(longQuery));
  Thread.sleep(MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.TXN_OPENTXN_TIMEOUT, TimeUnit.MILLISECONDS));
  startCleaner();
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(1, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());

  // Check that the files are removed
  paths = getDirectories(conf, t, null);
  Assert.assertEquals(1, paths.size());
  Assert.assertEquals("base_25_v26", paths.get(0).getName());
}
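The blocking behavior comes from the Cleaner comparing each candidate against the oldest open transaction: directories made obsolete by a compaction cannot be deleted while a query that predates the compaction's commit might still read them. A simplified sketch of that guard, with illustrative names rather than the exact TxnHandler internals:

  // Simplified cleaner guard (illustrative): skip cleanup while any txn opened
  // before the compaction committed is still running
  long minOpenTxnId = getMinOpenTxnId();        // hypothetical lookup into the open-txn table
  if (minOpenTxnId <= compactionCommitTxnId) {  // e.g. the "longQuery" txn above
    return;  // stays in "ready for cleaning"; retried on the next cleaner cycle
  }
  removeObsoleteDirectories(ci);                // hypothetical; deletes the old base/delta dirs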
Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
In the class TestInitiator, the method compactPartitionHighDeltaPct:
@Test
public void compactPartitionHighDeltaPct() throws Exception {
  Table t = newTable("default", "cphdp", true);
  Partition p = newPartition(t, "today");
  addBaseFile(t, p, 20L, 20);
  addDeltaFile(t, p, 21L, 22L, 2);
  addDeltaFile(t, p, 23L, 24L, 2);
  burnThroughTransactions("default", "cphdp", 23);

  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
  comp.setTablename("cphdp");
  comp.setPartitionname("ds=today");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  long writeid = allocateWriteId("default", "cphdp", txnid);
  Assert.assertEquals(24, writeid);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));

  startInitiator();

  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("cphdp", compacts.get(0).getTablename());
  Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
  Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
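The MAJOR compaction asserted here is triggered by the delta-to-base size ratio crossing hive.compactor.delta.pct.threshold (0.1 by default). A minimal sketch of that decision under simplified size accounting; the real Initiator also weighs delta counts, aborted writes, and other heuristics, and requestCompaction is a hypothetical helper:

  // Sketch of the initiator's ratio check (simplified)
  float threshold = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_PCT_THRESHOLD);  // 0.1 by default
  long baseSize = 20;   // records in the base file added above
  long deltaSize = 4;   // 2 records in each of the two delta files
  if (baseSize > 0 && (float) deltaSize / baseSize > threshold) {
    // 4 / 20 = 0.2 > 0.1, so a MAJOR compaction is requested for ds=today
    requestCompaction(t, p, CompactionType.MAJOR);  // hypothetical helper
  }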