Use of org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage in project hive by apache.
From the class TestReplicationMetricSink, method testSuccessBootstrapDumpMetrics:
@Test
public void testSuccessBootstrapDumpMetrics() throws Exception {
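  // Drive a full bootstrap dump cycle: stage start, per-metric progress, stage end, overall end.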
  ReplicationMetricCollector bootstrapDumpMetricCollector = new BootstrapDumpMetricCollector(
      "testAcidTablesReplLoadBootstrapIncr_1592205875387",
      "hdfs://localhost:65158/tmp/org_apache_hadoop_hive_ql_parse_TestReplicationScenarios_245261428230295"
          + "/hrepl0/dGVzdGFjaWR0YWJsZXNyZXBsbG9hZGJvb3RzdHJhcGluY3JfMTU5MjIwNTg3NTM4Nw==/0/hive", conf);
  Map<String, Long> metricMap = new HashMap<>();
  metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
  metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
  bootstrapDumpMetricCollector.reportStageStart("dump", metricMap);
  bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1);
  bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2);
  bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1);
  bootstrapDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10,
      new SnapshotUtils.ReplSnapshotCount(), new ReplStatsTracker(0));
  bootstrapDumpMetricCollector.reportEnd(Status.SUCCESS);
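  // Build the metric we expect the sink to have persisted to the metastore.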
  Metadata expectedMetadata = new Metadata("testAcidTablesReplLoadBootstrapIncr_1592205875387",
      Metadata.ReplicationType.BOOTSTRAP, "dummyDir");
  expectedMetadata.setLastReplId(10);
  Progress expectedProgress = new Progress();
  expectedProgress.setStatus(Status.SUCCESS);
  Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
  dumpStage.setEndTime(0);
  Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10);
  expectedTableMetric.setCurrentCount(3);
  Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1);
  expectedFuncMetric.setCurrentCount(1);
  dumpStage.addMetric(expectedTableMetric);
  dumpStage.addMetric(expectedFuncMetric);
  expectedProgress.addStage(dumpStage);
  ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 0, expectedMetadata);
  expectedMetric.setProgress(expectedProgress);
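  // The metrics sink persists to the metastore on a periodic background flush; the sleep below
  // gives it time to write before we query the metrics back.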
  Thread.sleep(1000 * 20);
  GetReplicationMetricsRequest metricsRequest = new GetReplicationMetricsRequest();
  metricsRequest.setPolicy("repl");
  ReplicationMetricList actualReplicationMetrics = Hive.get(conf).getMSC().getReplicationMetrics(metricsRequest);
  ReplicationMetrics actualThriftMetric = actualReplicationMetrics.getReplicationMetricList().get(0);
  ObjectMapper mapper = new ObjectMapper();
  ReplicationMetric actualMetric = new ReplicationMetric(actualThriftMetric.getScheduledExecutionId(),
      actualThriftMetric.getPolicy(), actualThriftMetric.getDumpExecutionId(),
      mapper.readValue(actualThriftMetric.getMetadata(), Metadata.class));
  actualMetric.setMessageFormat(actualThriftMetric.getMessageFormat());
  ProgressMapper progressMapper = mapper.readValue(deSerialize(actualThriftMetric.getProgress()), ProgressMapper.class);
  Progress progress = new Progress();
  progress.setStatus(progressMapper.getStatus());
  for (StageMapper stageMapper : progressMapper.getStages()) {
    Stage stage = new Stage();
    stage.setName(stageMapper.getName());
    stage.setStatus(stageMapper.getStatus());
    stage.setStartTime(stageMapper.getStartTime());
    stage.setEndTime(stageMapper.getEndTime());
    for (Metric metric : stageMapper.getMetrics()) {
      stage.addMetric(metric);
    }
    progress.addStage(stage);
  }
  actualMetric.setProgress(progress);
  checkSuccess(actualMetric, expectedMetric, "dump",
      Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name()));
  // Incremental
  conf.set(Constants.SCHEDULED_QUERY_EXECUTIONID, "2");
  ReplicationMetricCollector incrementDumpMetricCollector = new IncrementalDumpMetricCollector(
      "testAcidTablesReplLoadBootstrapIncr_1592205875387",
      "hdfs://localhost:65158/tmp/org_apache_hadoop_hive_ql_parse_TestReplicationScenarios_245261428230295"
          + "/hrepl0/dGVzdGFjaWR0YWJsZXNyZXBsbG9hZGJvb3RzdHJhcGluY3JfMTU5MjIwNTg3NTM4Nw==/0/hive", conf);
  metricMap = new HashMap<>();
  metricMap.put(ReplUtils.MetricName.EVENTS.name(), (long) 10);
  incrementDumpMetricCollector.reportStageStart("dump", metricMap);
  incrementDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.EVENTS.name(), 10);
  incrementDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10,
      new SnapshotUtils.ReplSnapshotCount(), new ReplStatsTracker(0));
  incrementDumpMetricCollector.reportEnd(Status.SUCCESS);
  expectedMetadata = new Metadata("testAcidTablesReplLoadBootstrapIncr_1592205875387",
      Metadata.ReplicationType.INCREMENTAL, "dummyDir");
  expectedMetadata.setLastReplId(10);
  expectedProgress = new Progress();
  expectedProgress.setStatus(Status.SUCCESS);
  dumpStage = new Stage("dump", Status.SUCCESS, 0);
  dumpStage.setEndTime(0);
  Metric expectedEventsMetric = new Metric(ReplUtils.MetricName.EVENTS.name(), 10);
  expectedEventsMetric.setCurrentCount(10);
  dumpStage.addMetric(expectedEventsMetric);
  expectedProgress.addStage(dumpStage);
  expectedMetric = new ReplicationMetric(2, "repl", 0, expectedMetadata);
  expectedMetric.setProgress(expectedProgress);
  Thread.sleep(1000 * 20);
  metricsRequest = new GetReplicationMetricsRequest();
  metricsRequest.setPolicy("repl");
  actualReplicationMetrics = Hive.get(conf).getMSC().getReplicationMetrics(metricsRequest);
  Assert.assertEquals(2, actualReplicationMetrics.getReplicationMetricListSize());
  actualThriftMetric = actualReplicationMetrics.getReplicationMetricList().get(0);
  mapper = new ObjectMapper();
  actualMetric = new ReplicationMetric(actualThriftMetric.getScheduledExecutionId(),
      actualThriftMetric.getPolicy(), actualThriftMetric.getDumpExecutionId(),
      mapper.readValue(actualThriftMetric.getMetadata(), Metadata.class));
  actualMetric.setMessageFormat(actualThriftMetric.getMessageFormat());
  progressMapper = mapper.readValue(deSerialize(actualThriftMetric.getProgress()), ProgressMapper.class);
  progress = new Progress();
  progress.setStatus(progressMapper.getStatus());
  for (StageMapper stageMapper : progressMapper.getStages()) {
    Stage stage = new Stage();
    stage.setName(stageMapper.getName());
    stage.setStatus(stageMapper.getStatus());
    stage.setStartTime(stageMapper.getStartTime());
    stage.setEndTime(stageMapper.getEndTime());
    for (Metric metric : stageMapper.getMetrics()) {
      stage.addMetric(metric);
    }
    progress.addStage(stage);
  }
  actualMetric.setProgress(progress);
  checkSuccessIncremental(actualMetric, expectedMetric, "dump",
      Arrays.asList(ReplUtils.MetricName.EVENTS.name()));
  // Failover Metrics Sink
  Mockito.when(fmd.getFailoverEventId()).thenReturn(100L);
  Mockito.when(fmd.getFilePath()).thenReturn(
      "hdfs://localhost:65158/tmp/org_apache_hadoop_hive_ql_parse_TestReplicationScenarios_245261428230295"
          + "/hrepl0/dGVzdGFjaWR0YWJsZXNyZXBsbG9hZGJvb3RzdHJhcGluY3JfMTU5MjIwNTg3NTM4Nw==/0/hive/");
  conf.set(Constants.SCHEDULED_QUERY_EXECUTIONID, "3");
  String stagingDir = "hdfs://localhost:65158/tmp/org_apache_hadoop_hive_ql_parse_TestReplicationScenarios_245261428230295"
      + "/hrepl0/dGVzdGFjaWR0YWJsZXNyZXBsbG9hZGJvb3RzdHJhcGluY3JfMTU5MjIwNTg3NTM4Nw==/0/hive/";
  ReplicationMetricCollector failoverDumpMetricCollector = new IncrementalDumpMetricCollector(
      "testAcidTablesReplLoadBootstrapIncr_1592205875387", stagingDir, conf);
  metricMap = new HashMap<String, Long>() {
    {
      put(ReplUtils.MetricName.EVENTS.name(), (long) 10);
    }
  };
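  // reportFailoverStart records the failover event id and metadata file location from fmd
  // (the mocked FailoverMetaData stubbed above) in addition to starting the "dump" stage.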
  failoverDumpMetricCollector.reportFailoverStart("dump", metricMap, fmd);
  failoverDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.EVENTS.name(), 10);
  failoverDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10,
      new SnapshotUtils.ReplSnapshotCount(), new ReplStatsTracker(0));
  failoverDumpMetricCollector.reportEnd(Status.FAILOVER_READY);
  expectedMetadata = new Metadata("testAcidTablesReplLoadBootstrapIncr_1592205875387",
      Metadata.ReplicationType.INCREMENTAL, "dummyDir");
  expectedMetadata.setLastReplId(10);
  expectedMetadata.setFailoverEventId(100);
  expectedMetadata.setFailoverMetadataLoc(stagingDir + FailoverMetaData.FAILOVER_METADATA);
  expectedProgress = new Progress();
  expectedProgress.setStatus(Status.FAILOVER_READY);
  dumpStage = new Stage("dump", Status.SUCCESS, 0);
  dumpStage.setEndTime(0);
  expectedEventsMetric = new Metric(ReplUtils.MetricName.EVENTS.name(), 10);
  expectedEventsMetric.setCurrentCount(10);
  dumpStage.addMetric(expectedEventsMetric);
  expectedProgress.addStage(dumpStage);
  expectedMetric = new ReplicationMetric(3, "repl", 0, expectedMetadata);
  expectedMetric.setProgress(expectedProgress);
  Thread.sleep(1000 * 20);
  metricsRequest = new GetReplicationMetricsRequest();
  metricsRequest.setPolicy("repl");
  actualReplicationMetrics = Hive.get(conf).getMSC().getReplicationMetrics(metricsRequest);
  Assert.assertEquals(3, actualReplicationMetrics.getReplicationMetricListSize());
  actualThriftMetric = actualReplicationMetrics.getReplicationMetricList().get(0);
  mapper = new ObjectMapper();
  actualMetric = new ReplicationMetric(actualThriftMetric.getScheduledExecutionId(),
      actualThriftMetric.getPolicy(), actualThriftMetric.getDumpExecutionId(),
      mapper.readValue(actualThriftMetric.getMetadata(), Metadata.class));
  actualMetric.setMessageFormat(actualThriftMetric.getMessageFormat());
  progressMapper = mapper.readValue(deSerialize(actualThriftMetric.getProgress()), ProgressMapper.class);
  progress = new Progress();
  progress.setStatus(progressMapper.getStatus());
  for (StageMapper stageMapper : progressMapper.getStages()) {
    Stage stage = new Stage();
    stage.setName(stageMapper.getName());
    stage.setStatus(stageMapper.getStatus());
    stage.setStartTime(stageMapper.getStartTime());
    stage.setEndTime(stageMapper.getEndTime());
    for (Metric metric : stageMapper.getMetrics()) {
      stage.addMetric(metric);
    }
    progress.addStage(stage);
  }
  actualMetric.setProgress(progress);
  checkSuccessIncremental(actualMetric, expectedMetric, "dump",
      Arrays.asList(ReplUtils.MetricName.EVENTS.name()));
}
Use of org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage in project hive by apache.
From the class TestReplicationMetricCollector, method testSuccessIncrDumpMetrics:
@Test
public void testSuccessIncrDumpMetrics() throws Exception {
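  // This test reads metrics straight from the in-process MetricCollector singleton,
  // so no sink flush or metastore round trip appears to be needed here.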
  ReplicationMetricCollector incrDumpMetricCollector = new IncrementalDumpMetricCollector("db", "dummyDir", conf);
  Map<String, Long> metricMap = new HashMap<>();
  metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
  metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
  incrDumpMetricCollector.reportStageStart("dump", metricMap);
  incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1);
  List<ReplicationMetric> actualMetrics = MetricCollector.getInstance().getMetrics();
  Assert.assertEquals(1, actualMetrics.size());
  incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2);
  incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1);
  actualMetrics = MetricCollector.getInstance().getMetrics();
  Assert.assertEquals(1, actualMetrics.size());
  incrDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10,
      new SnapshotUtils.ReplSnapshotCount(), new ReplStatsTracker(0));
  incrDumpMetricCollector.reportEnd(Status.SUCCESS);
  actualMetrics = MetricCollector.getInstance().getMetrics();
  Assert.assertEquals(1, actualMetrics.size());
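  // Expected counters: TABLES progressed 1 + 2 = 3 of 10, FUNCTIONS 1 of 1.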
  Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.INCREMENTAL, "dummyDir");
  expectedMetadata.setLastReplId(10);
  Progress expectedProgress = new Progress();
  expectedProgress.setStatus(Status.SUCCESS);
  Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
  dumpStage.setEndTime(0);
  Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10);
  expectedTableMetric.setCurrentCount(3);
  Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1);
  expectedFuncMetric.setCurrentCount(1);
  dumpStage.addMetric(expectedTableMetric);
  dumpStage.addMetric(expectedFuncMetric);
  expectedProgress.addStage(dumpStage);
  ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 0, expectedMetadata);
  expectedMetric.setProgress(expectedProgress);
  checkSuccess(actualMetrics.get(0), expectedMetric, "dump",
      Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name()));
}
Use of org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage in project hive by apache.
From the class TestReplicationMetricCollector, method testSuccessBootstrapLoadMetrics:
@Test
public void testSuccessBootstrapLoadMetrics() throws Exception {
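  // The third constructor argument (1) is the dump execution id; it reappears in the
  // expected ReplicationMetric built below.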
  ReplicationMetricCollector bootstrapLoadMetricCollector = new BootstrapLoadMetricCollector("db", "dummyDir", 1, conf);
  Map<String, Long> metricMap = new HashMap<>();
  metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
  metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
  bootstrapLoadMetricCollector.reportStageStart("dump", metricMap);
  bootstrapLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1);
  List<ReplicationMetric> actualMetrics = MetricCollector.getInstance().getMetrics();
  Assert.assertEquals(1, actualMetrics.size());
  bootstrapLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2);
  bootstrapLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1);
  actualMetrics = MetricCollector.getInstance().getMetrics();
  Assert.assertEquals(1, actualMetrics.size());
  bootstrapLoadMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10,
      new SnapshotUtils.ReplSnapshotCount(), new ReplStatsTracker(0));
  bootstrapLoadMetricCollector.reportEnd(Status.SUCCESS);
  actualMetrics = MetricCollector.getInstance().getMetrics();
  Assert.assertEquals(1, actualMetrics.size());
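  // Expected counters mirror the reported progress: TABLES at 3 of 10, FUNCTIONS at 1 of 1.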
  Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.BOOTSTRAP, "dummyDir");
  expectedMetadata.setLastReplId(10);
  Progress expectedProgress = new Progress();
  expectedProgress.setStatus(Status.SUCCESS);
  Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
  dumpStage.setEndTime(0);
  Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10);
  expectedTableMetric.setCurrentCount(3);
  Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1);
  expectedFuncMetric.setCurrentCount(1);
  dumpStage.addMetric(expectedTableMetric);
  dumpStage.addMetric(expectedFuncMetric);
  expectedProgress.addStage(dumpStage);
  ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 1, expectedMetadata);
  expectedMetric.setProgress(expectedProgress);
  checkSuccess(actualMetrics.get(0), expectedMetric, "dump",
      Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name()));
}
Use of org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage in project hive by apache.
From the class TestReplicationMetricCollector, method testFailoverReadyDumpMetrics:
@Test
public void testFailoverReadyDumpMetrics() throws Exception {
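  // fmd is assumed to be a mocked FailoverMetaData from the test fixture; its event id (10)
  // and file path ("dummyDir") feed the expected failover metadata below.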
  ReplicationMetricCollector incrDumpMetricCollector = new IncrementalDumpMetricCollector("db", "dummyDir", conf);
  Map<String, Long> metricMap = new HashMap<>();
  metricMap.put(ReplUtils.MetricName.EVENTS.name(), (long) 10);
  incrDumpMetricCollector.reportFailoverStart("dump", metricMap, fmd);
  incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.EVENTS.name(), 2);
  List<ReplicationMetric> actualMetrics = MetricCollector.getInstance().getMetrics();
  Assert.assertEquals(1, actualMetrics.size());
  incrDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10,
      new SnapshotUtils.ReplSnapshotCount(), new ReplStatsTracker(0));
  incrDumpMetricCollector.reportEnd(Status.FAILOVER_READY);
  actualMetrics = MetricCollector.getInstance().getMetrics();
  Assert.assertEquals(1, actualMetrics.size());
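  // A failover-ready dump finishes with overall Status.FAILOVER_READY even though the
  // dump stage itself ends with Status.SUCCESS.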
  Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.INCREMENTAL, "dummyDir");
  expectedMetadata.setLastReplId(10);
  expectedMetadata.setFailoverEventId(10);
  expectedMetadata.setFailoverMetadataLoc("dummyDir");
  Progress expectedProgress = new Progress();
  expectedProgress.setStatus(Status.FAILOVER_READY);
  Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
  dumpStage.setEndTime(0);
  Metric expectedEventMetric = new Metric(ReplUtils.MetricName.EVENTS.name(), 10);
  expectedEventMetric.setCurrentCount(2);
  dumpStage.addMetric(expectedEventMetric);
  expectedProgress.addStage(dumpStage);
  ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 0, expectedMetadata);
  expectedMetric.setProgress(expectedProgress);
  checkSuccess(actualMetrics.get(0), expectedMetric, "dump",
      Arrays.asList(ReplUtils.MetricName.EVENTS.name()));
}
Use of org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage in project hive by apache.
From the class TestReplicationScenarios, method testIncrementalStatisticsMetrics:
@Test
public void testIncrementalStatisticsMetrics() throws Throwable {
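  // Enable metric collection and MBean reporting for this test run.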
  isMetricsEnabledForTests(true);
  ReplLoadWork.setMbeansParamsForTesting(true, false);
  MetricCollector collector = MetricCollector.getInstance();
  String testName = "testIncrementalStatisticsMetrics";
  String dbName = createDB(testName, driver);
  String replDbName = dbName + "_dupe";
  String nameStri = "Hadoop:" + "service=HiveServer2" + "," + "name=" + "Database-" + replDbName + " Policy-pol";
  // Do a bootstrap dump & load.
  Tuple bootstrapDump = bootstrapLoadAndVerify(dbName, replDbName);
  ReplLoadWork.setMbeansParamsForTesting(true, true);
  // Create 10 tables.
  for (int i = 0; i < 10; i++) {
    run("CREATE TABLE " + dbName + ".ptned" + i + "(a string) partitioned by (b int) STORED AS TEXTFILE", driver);
    for (int j = 0; j < 5; j++) {
      // Create 5 partitions per table.
      run("ALTER TABLE " + dbName + ".ptned" + i + " ADD PARTITION(b=" + j + ")", driver);
    }
  }
  verifyRun("SHOW PARTITIONS " + dbName + ".ptned1", new String[] { "b=0", "b=1", "b=2", "b=3", "b=4" }, driver);
  // Do an incremental load & verify the metrics.
  Tuple incrementalDump = incrementalLoadAndVerify(dbName, replDbName);
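  // Each stage's ReplStats string should report per-event-type totals: 10 CREATE_TABLE
  // events and 50 ADD_PARTITION events from the DDL above.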
  String[] events = new String[] { "[[Event Name: EVENT_CREATE_TABLE; " + "Total Number: 10;",
      "[[Event Name: EVENT_ADD_PARTITION; Total Number: 50;" };
  Iterator<ReplicationMetric> itr = collector.getMetrics().iterator();
  while (itr.hasNext()) {
    ReplicationMetric elem = itr.next();
    assertEquals(Metadata.ReplicationType.INCREMENTAL, elem.getMetadata().getReplicationType());
    List<Stage> stages = elem.getProgress().getStages();
    assertTrue(stages.size() != 0);
    for (Stage stage : stages) {
      for (String event : events) {
        assertTrue(stage.getReplStats(), stage.getReplStats().contains(event));
      }
    }
  }
  verifyMBeanStatistics(testName, replDbName, nameStri, events, incrementalDump);
  // Do some drop table/drop partition & rename table operations.
  for (int i = 0; i < 3; i++) {
    // Drop 3 tables.
    run("DROP TABLE " + dbName + ".ptned" + i, driver);
  }
  for (int i = 3; i < 6; i++) {
    // Rename 3 tables.
    run("ALTER TABLE " + dbName + ".ptned" + i + " RENAME TO " + dbName + ".ptned" + i + "_renamed", driver);
  }
  for (int i = 6; i < 10; i++) {
    // Drop partitions from 4 tables.
    run("ALTER TABLE " + dbName + ".ptned" + i + " DROP PARTITION(b=1)", driver);
  }
  for (int i = 10; i < 12; i++) {
    // Create 2 tables.
    run("CREATE TABLE " + dbName + ".ptned" + i + "(a string) partitioned by (b int) STORED AS TEXTFILE", driver);
  }
  incrementalDump = incrementalLoadAndVerify(dbName, replDbName);
  events = new String[] { "[[Event Name: EVENT_CREATE_TABLE; " + "Total Number: 2;",
      "[[Event Name: EVENT_DROP_TABLE; " + "Total Number: 3;",
      "[[Event Name: EVENT_RENAME_TABLE; " + "Total Number: 3;",
      "[[Event Name: EVENT_DROP_PARTITION; Total Number: 4;" };
  itr = collector.getMetrics().iterator();
  while (itr.hasNext()) {
    ReplicationMetric elem = itr.next();
    assertEquals(Metadata.ReplicationType.INCREMENTAL, elem.getMetadata().getReplicationType());
    List<Stage> stages = elem.getProgress().getStages();
    assertTrue(stages.size() != 0);
    for (Stage stage : stages) {
      for (String event : events) {
        assertTrue(stage.getReplStats(), stage.getReplStats().contains(event));
      }
    }
  }
  verifyMBeanStatistics(testName, replDbName, nameStri, events, incrementalDump);
  // Clean up the test setup.
  ReplLoadWork.setMbeansParamsForTesting(false, false);
  MBeans.unregister(ObjectName.getInstance(nameStri));
}