Example 6 with ReplStatsTracker

Use of org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker in project hive by apache.

From class TestReplicationMetricCollector, method testFailoverReadyDumpMetrics:

@Test
public void testFailoverReadyDumpMetrics() throws Exception {
    ReplicationMetricCollector incrDumpMetricCollector = new IncrementalDumpMetricCollector("db", "dummyDir", conf);
    Map<String, Long> metricMap = new HashMap<>();
    metricMap.put(ReplUtils.MetricName.EVENTS.name(), (long) 10);
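    // fmd is a FailoverMetaData instance set up by the test class (not shown in this snippet).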
    incrDumpMetricCollector.reportFailoverStart("dump", metricMap, fmd);
    incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.EVENTS.name(), 2);
    List<ReplicationMetric> actualMetrics = MetricCollector.getInstance().getMetrics();
    Assert.assertEquals(1, actualMetrics.size());
    incrDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10, new SnapshotUtils.ReplSnapshotCount(), new ReplStatsTracker(0));
    incrDumpMetricCollector.reportEnd(Status.FAILOVER_READY);
    actualMetrics = MetricCollector.getInstance().getMetrics();
    Assert.assertEquals(1, actualMetrics.size());
    Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.INCREMENTAL, "dummyDir");
    expectedMetadata.setLastReplId(10);
    expectedMetadata.setFailoverEventId(10);
    expectedMetadata.setFailoverMetadataLoc("dummyDir");
    Progress expectedProgress = new Progress();
    expectedProgress.setStatus(Status.FAILOVER_READY);
    Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
    dumpStage.setEndTime(0);
    Metric expectedEventMetric = new Metric(ReplUtils.MetricName.EVENTS.name(), 10);
    expectedEventMetric.setCurrentCount(2);
    dumpStage.addMetric(expectedEventMetric);
    expectedProgress.addStage(dumpStage);
    ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 0, expectedMetadata);
    expectedMetric.setProgress(expectedProgress);
    checkSuccess(actualMetrics.get(0), expectedMetric, "dump", Arrays.asList(ReplUtils.MetricName.EVENTS.name()));
}
Also used : ReplStatsTracker (org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker), Progress (org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress), HashMap (java.util.HashMap), IncrementalDumpMetricCollector (org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector), Metadata (org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata), ReplicationMetric (org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric), SnapshotUtils (org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils), Stage (org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage), Metric (org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric), Test (org.junit.Test)
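
The reporting sequence exercised above can be summarized in the following sketch. It reuses only the calls shown in the test; conf and failoverMetadata are placeholders for the HiveConf and FailoverMetaData the calling code would supply, and the imports are the same as those listed for the example.

// Sketch of the failover-ready incremental dump reporting flow (placeholders: conf, failoverMetadata).
ReplicationMetricCollector collector = new IncrementalDumpMetricCollector("db", "dummyDir", conf);
Map<String, Long> expectedCounts = new HashMap<>();
expectedCounts.put(ReplUtils.MetricName.EVENTS.name(), 10L);
// Open the "dump" stage and attach the failover metadata for the target cluster.
collector.reportFailoverStart("dump", expectedCounts, failoverMetadata);
// Record progress as events are written out.
collector.reportStageProgress("dump", ReplUtils.MetricName.EVENTS.name(), 2);
// Close the stage with the last replicated event id, then mark the whole dump failover-ready.
collector.reportStageEnd("dump", Status.SUCCESS, 10, new SnapshotUtils.ReplSnapshotCount(), new ReplStatsTracker(0));
collector.reportEnd(Status.FAILOVER_READY);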

Example 7 with ReplStatsTracker

Use of org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker in project hive by apache.

From class TestReplicationMetricCollector, method testReplStatsTracker:

@Test
public void testReplStatsTracker() throws Exception {
    ReplStatsTracker repl = new ReplStatsTracker(5);
    repl.addEntry("EVENT_ADD_PARTITION", "1", 2345);
    repl.addEntry("EVENT_ADD_PARTITION", "2", 23451);
    repl.addEntry("EVENT_ADD_PARTITION", "3", 23451);
    repl.addEntry("EVENT_ADD_DATABASE", "4", 234544);
    repl.addEntry("EVENT_ALTER_PARTITION", "5", 2145);
    repl.addEntry("EVENT_CREATE_TABLE", "6", 2245);
    repl.addEntry("EVENT_ADD_PARTITION", "7", 1245);
    repl.addEntry("EVENT_ADD_PARTITION", "8", 23425);
    repl.addEntry("EVENT_ALTER_PARTITION", "9", 21345);
    repl.addEntry("EVENT_CREATE_TABLE", "10", 1345);
    repl.addEntry("EVENT_ADD_DATABASE", "11", 345);
    repl.addEntry("EVENT_ADD_DATABASE", "12", 12345);
    repl.addEntry("EVENT_ADD_DATABASE", "13", 3345);
    repl.addEntry("EVENT_ALTER_PARTITION", "14", 2645);
    repl.addEntry("EVENT_ALTER_PARTITION", "15", 2555);
    repl.addEntry("EVENT_CREATE_TABLE", "16", 23765);
    repl.addEntry("EVENT_ADD_PARTITION", "17", 23435);
    repl.addEntry("EVENT_DROP_PARTITION", "18", 2205);
    repl.addEntry("EVENT_CREATE_TABLE", "19", 2195);
    repl.addEntry("EVENT_DROP_PARTITION", "20", 2225);
    repl.addEntry("EVENT_DROP_PARTITION", "21", 2225);
    repl.addEntry("EVENT_DROP_PARTITION", "22", 23485);
    repl.addEntry("EVENT_CREATE_TABLE", "23", 2385);
    repl.addEntry("EVENT_DROP_PARTITION", "24", 234250);
    repl.addEntry("EVENT_DROP_PARTITION", "25", 15);
    repl.addEntry("EVENT_CREATE_TABLE", "26", 23425);
    repl.addEntry("EVENT_CREATE_TABLE", "27", 23445);
    // Check that the number of event types tracked in TopKEvents equals the number of distinct event types fed in (5).
    assertEquals(5, repl.getTopKEvents().size());
    // Check the timing & number of events for ADD_PARTITION
    assertArrayEquals(repl.getTopKEvents().get("EVENT_ADD_PARTITION").valueList().toString(), new Long[] { 23451L, 23451L, 23435L, 23425L, 2345L }, repl.getTopKEvents().get("EVENT_ADD_PARTITION").valueList().toArray());
    assertEquals(6, repl.getDescMap().get("EVENT_ADD_PARTITION").getN());
    // Check the timing & number of events for DROP_PARTITION
    assertArrayEquals(repl.getTopKEvents().get("EVENT_DROP_PARTITION").valueList().toString(), new Long[] { 234250L, 23485L, 2225L, 2225L, 2205L }, repl.getTopKEvents().get("EVENT_DROP_PARTITION").valueList().toArray());
    assertEquals(6, repl.getDescMap().get("EVENT_DROP_PARTITION").getN());
    // Check the timing & number of events for CREATE_TABLE
    assertArrayEquals(repl.getTopKEvents().get("EVENT_CREATE_TABLE").valueList().toString(), new Long[] { 23765L, 23445L, 23425L, 2385L, 2245L }, repl.getTopKEvents().get("EVENT_CREATE_TABLE").valueList().toArray());
    assertEquals(7, repl.getDescMap().get("EVENT_CREATE_TABLE").getN());
    // Check the timing & number of events for ALTER_PARTITION
    assertArrayEquals(repl.getTopKEvents().get("EVENT_ALTER_PARTITION").valueList().toString(), new Long[] { 21345L, 2645L, 2555L, 2145L }, repl.getTopKEvents().get("EVENT_ALTER_PARTITION").valueList().toArray());
    assertEquals(4, repl.getDescMap().get("EVENT_ALTER_PARTITION").getN());
    // Check the timing & number of events for ADD_DATABASE
    assertArrayEquals(repl.getTopKEvents().get("EVENT_ADD_DATABASE").valueList().toString(), new Long[] { 234544L, 12345L, 3345L, 345L }, repl.getTopKEvents().get("EVENT_ADD_DATABASE").valueList().toArray());
    assertEquals(4, repl.getDescMap().get("EVENT_ADD_DATABASE").getN());
}
Also used : ReplStatsTracker (org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker), Test (org.junit.Test)
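
To make the retention rule explicit, here is a minimal sketch assuming the same API used in the test above (addEntry(event, eventId, timeTaken), getTopKEvents(), getDescMap()) but a smaller k; the expected outputs follow the descending-order pattern asserted in the test.

// Sketch: ReplStatsTracker(k) keeps only the k largest timings per event type,
// while the per-event descriptive statistics still count every entry.
ReplStatsTracker tracker = new ReplStatsTracker(2);
tracker.addEntry("EVENT_ADD_PARTITION", "100", 50);
tracker.addEntry("EVENT_ADD_PARTITION", "101", 500);
tracker.addEntry("EVENT_ADD_PARTITION", "102", 5);
// Top-K holds the two largest timings, largest first (expected: [500, 50]).
System.out.println(tracker.getTopKEvents().get("EVENT_ADD_PARTITION").valueList());
// The descriptive statistics still reflect all three entries (expected: 3).
System.out.println(tracker.getDescMap().get("EVENT_ADD_PARTITION").getN());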

Example 8 with ReplStatsTracker

Use of org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker in project hive by apache.

From class TestReplicationMetricCollector, method testSuccessIncrLoadMetrics:

@Test
public void testSuccessIncrLoadMetrics() throws Exception {
    ReplicationMetricCollector incrLoadMetricCollector = new IncrementalLoadMetricCollector("db", "dummyDir", 1, conf);
    Map<String, Long> metricMap = new HashMap<>();
    metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
    metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
    incrLoadMetricCollector.reportStageStart("dump", metricMap);
    incrLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1);
    List<ReplicationMetric> actualMetrics = MetricCollector.getInstance().getMetrics();
    Assert.assertEquals(1, actualMetrics.size());
    incrLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2);
    incrLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1);
    actualMetrics = MetricCollector.getInstance().getMetrics();
    Assert.assertEquals(1, actualMetrics.size());
    incrLoadMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10, new SnapshotUtils.ReplSnapshotCount(), new ReplStatsTracker(0));
    incrLoadMetricCollector.reportEnd(Status.SUCCESS);
    actualMetrics = MetricCollector.getInstance().getMetrics();
    Assert.assertEquals(1, actualMetrics.size());
    Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.INCREMENTAL, "dummyDir");
    expectedMetadata.setLastReplId(10);
    Progress expectedProgress = new Progress();
    expectedProgress.setStatus(Status.SUCCESS);
    Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
    dumpStage.setEndTime(0);
    Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10);
    expectedTableMetric.setCurrentCount(3);
    Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1);
    expectedFuncMetric.setCurrentCount(1);
    dumpStage.addMetric(expectedTableMetric);
    dumpStage.addMetric(expectedFuncMetric);
    expectedProgress.addStage(dumpStage);
    ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 1, expectedMetadata);
    expectedMetric.setProgress(expectedProgress);
    checkSuccess(actualMetrics.get(0), expectedMetric, "dump", Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name()));
}
Also used : ReplStatsTracker (org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker), Progress (org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress), HashMap (java.util.HashMap), Metadata (org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata), ReplicationMetric (org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric), SnapshotUtils (org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils), Stage (org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage), Metric (org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric), IncrementalLoadMetricCollector (org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector), Test (org.junit.Test)
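
Stripped of the assertions, the lifecycle this test (and the bootstrap dump test in example 9 below) walks through is the usual four-step sequence. The sketch below reuses only the calls shown in these examples; conf is a placeholder for the HiveConf the caller already has, and the imports match those listed for the example.

// Sketch of the standard metric-reporting lifecycle (placeholder: conf).
ReplicationMetricCollector collector = new IncrementalLoadMetricCollector("db", "dummyDir", 1, conf);
Map<String, Long> expectedCounts = new HashMap<>();
expectedCounts.put(ReplUtils.MetricName.TABLES.name(), 10L);
expectedCounts.put(ReplUtils.MetricName.FUNCTIONS.name(), 1L);
// 1. Declare the stage and the work expected in it.
collector.reportStageStart("dump", expectedCounts);
// 2. Report progress incrementally as items are processed.
collector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1);
// 3. Close the stage with its status, last replication id, snapshot counts and event stats.
collector.reportStageEnd("dump", Status.SUCCESS, 10, new SnapshotUtils.ReplSnapshotCount(), new ReplStatsTracker(0));
// 4. Close out the whole policy run.
collector.reportEnd(Status.SUCCESS);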

Example 9 with ReplStatsTracker

Use of org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker in project hive by apache.

From class TestReplicationMetricCollector, method testSuccessBootstrapDumpMetrics:

@Test
public void testSuccessBootstrapDumpMetrics() throws Exception {
    ReplicationMetricCollector bootstrapDumpMetricCollector = new BootstrapDumpMetricCollector("db", "dummyDir", conf);
    Map<String, Long> metricMap = new HashMap<>();
    metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
    metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
    bootstrapDumpMetricCollector.reportStageStart("dump", metricMap);
    bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1);
    List<ReplicationMetric> actualMetrics = MetricCollector.getInstance().getMetrics();
    Assert.assertEquals(1, actualMetrics.size());
    bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2);
    bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1);
    actualMetrics = MetricCollector.getInstance().getMetrics();
    Assert.assertEquals(1, actualMetrics.size());
    bootstrapDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10, new SnapshotUtils.ReplSnapshotCount(), new ReplStatsTracker(0));
    bootstrapDumpMetricCollector.reportEnd(Status.SUCCESS);
    actualMetrics = MetricCollector.getInstance().getMetrics();
    Assert.assertEquals(1, actualMetrics.size());
    Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.BOOTSTRAP, "dummyDir");
    expectedMetadata.setLastReplId(10);
    Progress expectedProgress = new Progress();
    expectedProgress.setStatus(Status.SUCCESS);
    Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
    dumpStage.setEndTime(0);
    Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10);
    expectedTableMetric.setCurrentCount(3);
    Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1);
    expectedFuncMetric.setCurrentCount(1);
    dumpStage.addMetric(expectedTableMetric);
    dumpStage.addMetric(expectedFuncMetric);
    expectedProgress.addStage(dumpStage);
    ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 0, expectedMetadata);
    expectedMetric.setProgress(expectedProgress);
    checkSuccess(actualMetrics.get(0), expectedMetric, "dump", Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name()));
}
Also used : BootstrapDumpMetricCollector (org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector), ReplStatsTracker (org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker), Progress (org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress), HashMap (java.util.HashMap), Metadata (org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata), ReplicationMetric (org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric), SnapshotUtils (org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils), Stage (org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage), Metric (org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric), Test (org.junit.Test)

Aggregations

ReplStatsTracker (org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker): 9
Test (org.junit.Test): 9
HashMap (java.util.HashMap): 7
SnapshotUtils (org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils): 7
Metadata (org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata): 6
Metric (org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric): 6
Progress (org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress): 6
ReplicationMetric (org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric): 6
Stage (org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage): 6
IncrementalDumpMetricCollector (org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector): 4
GetReplicationMetricsRequest (org.apache.hadoop.hive.metastore.api.GetReplicationMetricsRequest): 2
ReplicationMetricList (org.apache.hadoop.hive.metastore.api.ReplicationMetricList): 2
BootstrapDumpMetricCollector (org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector): 2
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 1
ReplicationMetrics (org.apache.hadoop.hive.metastore.api.ReplicationMetrics): 1
MessageSerializer (org.apache.hadoop.hive.metastore.messaging.MessageSerializer): 1
BootstrapLoadMetricCollector (org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector): 1
IncrementalLoadMetricCollector (org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector): 1
ProgressMapper (org.apache.hadoop.hive.ql.parse.repl.metric.event.ProgressMapper): 1
StageMapper (org.apache.hadoop.hive.ql.parse.repl.metric.event.StageMapper): 1