Example 81 with Counters

Use of org.apache.hadoop.mapreduce.Counters in the project java-bigtable-hbase by googleapis.

From the class TestSnapshotMapper, method testRowExceedingMaxCells: verifies that a row containing more cells than SnapshotMapper.MAX_CELLS is split across multiple output Puts while preserving every cell and keeping the scan counters accurate.

@Test
public void testRowExceedingMaxCells() throws IOException {
    int cellCount = SnapshotMapper.MAX_CELLS + 100; // exceed the mapper's per-Put cell limit
    byte[] row = Bytes.toBytes("row");
    ImmutableBytesWritable key = new ImmutableBytesWritable(row);
    long ts = 123L;
    List<Cell> cellList = new ArrayList<>();
    for (int i = 0; i < cellCount; i++) {
        KeyValue kv = new KeyValue(row, Bytes.toBytes("f"), Bytes.toBytes("q"), ts, Bytes.toBytes("v"));
        cellList.add(kv);
    }
    Result res = Result.create(cellList);
    Pair<ImmutableBytesWritable, Result> input = new Pair<>(key, res);
    mapDriver.addInput(input);
    List<Pair<ImmutableBytesWritable, Put>> resultList = mapDriver.run();
    // the oversized row should be split across two output Puts
    Assert.assertEquals(2, resultList.size());
    int cellResCount = 0;
    for (Pair<ImmutableBytesWritable, Put> r : resultList) {
        CellScanner s = r.getSecond().cellScanner();
        while (s.advance()) {
            cellResCount++;
        }
    }
    Assert.assertEquals(cellCount, cellResCount);
    // verify counters
    Counters counters = mapDriver.getCounters();
    long numRows = counters.findCounter(ScanCounter.NUM_ROWS).getValue();
    long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue();
    Assert.assertEquals(1, numRows);
    Assert.assertEquals(cellCount, numCells);
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), KeyValue (org.apache.hadoop.hbase.KeyValue), ArrayList (java.util.ArrayList), CellScanner (org.apache.hadoop.hbase.CellScanner), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), Counters (org.apache.hadoop.mapreduce.Counters), Cell (org.apache.hadoop.hbase.Cell), Pair (org.apache.hadoop.mrunit.types.Pair), Test (org.junit.Test)
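Note that mapDriver is never defined in the snippet; MRUnit tests like this one conventionally wire the driver to the mapper under test in a @Before method. A minimal sketch of that setup, assuming SnapshotMapper's input/output types from the test body (the field wiring shown here is an assumption, not code from the project):

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mrunit.mapreduce.MapDriver;
import org.junit.Before;

public class TestSnapshotMapper {
    private MapDriver<ImmutableBytesWritable, Result, ImmutableBytesWritable, Put> mapDriver;

    @Before
    public void setup() {
        // newMapDriver binds a mapper instance to the MRUnit driver;
        // inputs added with addInput() are fed through SnapshotMapper.map()
        mapDriver = MapDriver.newMapDriver(new SnapshotMapper());
    }
}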

Example 82 with Counters

Use of org.apache.hadoop.mapreduce.Counters in the project java-bigtable-hbase by googleapis.

From the class TestSnapshotMapper, method testSnapshotMapper: verifies that each input row yields one Put whose cells match the input row, family, qualifier, value, and timestamp, and that the NUM_ROWS and NUM_CELLS counters match the input volume.

@Test
public void testSnapshotMapper() throws IOException {
    int rowCount = 20;
    int cellCount = 10;
    byte[] row = Bytes.toBytes("row");
    byte[] columnFamily = Bytes.toBytes("f");
    byte[] qualifier = Bytes.toBytes("q");
    byte[] value = Bytes.toBytes("v");
    long ts = 123L;
    ImmutableBytesWritable key = new ImmutableBytesWritable(row);
    for (int h = 0; h < rowCount; h++) {
        List<Cell> cellList = new ArrayList<>();
        for (int i = 0; i < cellCount; i++) {
            KeyValue kv = new KeyValue(row, columnFamily, qualifier, ts, value);
            cellList.add(kv);
        }
        Result res = Result.create(cellList);
        mapDriver.addInput(new Pair<>(key, res));
    }
    List<Pair<ImmutableBytesWritable, Put>> resultList = mapDriver.run();
    Assert.assertEquals(rowCount, resultList.size());
    int cellResCount = 0;
    for (Pair<ImmutableBytesWritable, Put> r : resultList) {
        CellScanner s = r.getSecond().cellScanner();
        while (s.advance()) {
            Cell resultCell = s.current();
            Assert.assertTrue(CellUtil.matchingRow(resultCell, row));
            Assert.assertTrue(CellUtil.matchingFamily(resultCell, columnFamily));
            Assert.assertTrue(CellUtil.matchingQualifier(resultCell, qualifier));
            Assert.assertTrue(CellUtil.matchingValue(resultCell, value));
            Assert.assertEquals(ts, resultCell.getTimestamp());
            cellResCount++;
        }
    }
    Assert.assertEquals((rowCount * cellCount), cellResCount);
    // verify counters
    Counters counters = mapDriver.getCounters();
    long numRows = counters.findCounter(ScanCounter.NUM_ROWS).getValue();
    long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue();
    Assert.assertEquals(rowCount, numRows);
    Assert.assertEquals((rowCount * cellCount), numCells);
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), KeyValue (org.apache.hadoop.hbase.KeyValue), ArrayList (java.util.ArrayList), CellScanner (org.apache.hadoop.hbase.CellScanner), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), Counters (org.apache.hadoop.mapreduce.Counters), Cell (org.apache.hadoop.hbase.Cell), Pair (org.apache.hadoop.mrunit.types.Pair), Test (org.junit.Test)
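As a usage note: MRUnit also supports a fluent style in which expected outputs are declared up front and verified by runTest(), instead of inspecting the list returned by run(). A sketch, where expectedPut stands in for a hypothetical hand-built Put:

    // withInput/withOutput return the driver, so calls chain;
    // runTest() fails the test if actual outputs differ from the expected ones
    mapDriver
        .withInput(key, res)
        .withOutput(key, expectedPut)
        .runTest();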

Example 83 with Counters

Use of org.apache.hadoop.mapreduce.Counters in the project hadoop by apache.

From the class TestTaskImpl, method testCountersWithSpeculation: verifies that when a speculative attempt succeeds, the task reports the successful attempt's counters rather than the base attempt's.

@Test
public void testCountersWithSpeculation() {
    mockTask = new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(), remoteJobConfFile, conf, taskAttemptListener, jobToken, credentials, clock, startCount, metrics, appContext, TaskType.MAP) {

        @Override
        protected int getMaxAttempts() {
            return 1;
        }
    };
    TaskId taskId = getNewTaskID();
    scheduleTaskAttempt(taskId);
    launchTaskAttempt(getLastAttempt().getAttemptId());
    updateLastAttemptState(TaskAttemptState.RUNNING);
    MockTaskAttemptImpl baseAttempt = getLastAttempt();
    // add a speculative attempt
    mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(), TaskEventType.T_ADD_SPEC_ATTEMPT));
    launchTaskAttempt(getLastAttempt().getAttemptId());
    updateLastAttemptState(TaskAttemptState.RUNNING);
    MockTaskAttemptImpl specAttempt = getLastAttempt();
    assertEquals(2, taskAttempts.size());
    Counters specAttemptCounters = new Counters();
    Counter cpuCounter = specAttemptCounters.findCounter(TaskCounter.CPU_MILLISECONDS);
    cpuCounter.setValue(1000);
    specAttempt.setCounters(specAttemptCounters);
    // have the speculative attempt succeed, with the base attempt also reaching 1.0 progress
    commitTaskAttempt(specAttempt.getAttemptId());
    specAttempt.setProgress(1.0f);
    specAttempt.setState(TaskAttemptState.SUCCEEDED);
    mockTask.handle(new TaskTAttemptEvent(specAttempt.getAttemptId(), TaskEventType.T_ATTEMPT_SUCCEEDED));
    assertEquals(TaskState.SUCCEEDED, mockTask.getState());
    baseAttempt.setProgress(1.0f);
    Counters taskCounters = mockTask.getCounters();
    assertEquals("wrong counters for task", specAttemptCounters, taskCounters);
}
Also used: TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), Counter (org.apache.hadoop.mapreduce.Counter), TaskCounter (org.apache.hadoop.mapreduce.TaskCounter), TaskTAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent), Counters (org.apache.hadoop.mapreduce.Counters), Test (org.junit.Test)
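The final assertEquals relies on Counters implementing value equality across all groups and counter values, inherited from AbstractCounters. A minimal, self-contained illustration (the class name is ours):

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

public class CountersEqualityDemo {
    public static void main(String[] args) {
        // findCounter(enum) lazily creates the counter on first access
        Counters a = new Counters();
        a.findCounter(TaskCounter.CPU_MILLISECONDS).setValue(1000);

        Counters b = new Counters();
        b.findCounter(TaskCounter.CPU_MILLISECONDS).setValue(1000);

        // equals compares every group and counter value
        System.out.println(a.equals(b)); // prints true
    }
}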

Example 84 with Counters

Use of org.apache.hadoop.mapreduce.Counters in the project hadoop by apache.

From the class TaskAttemptImpl, method updateProgressSplits: records wall-clock time, CPU time, and virtual/physical memory usage from the reported counters into the task's progress-split blocks as progress advances.

private void updateProgressSplits() {
    double newProgress = reportedStatus.progress;
    newProgress = Math.max(Math.min(newProgress, 1.0D), 0.0D);
    Counters counters = reportedStatus.counters;
    if (counters == null)
        return;
    WrappedProgressSplitsBlock splitsBlock = getProgressSplitBlock();
    if (splitsBlock != null) {
        long now = clock.getTime();
        // TODO Ensure not 0
        long start = getLaunchTime();
        if (start != 0 && now - start <= Integer.MAX_VALUE) {
            splitsBlock.getProgressWallclockTime().extend(newProgress, (int) (now - start));
        }
        Counter cpuCounter = counters.findCounter(TaskCounter.CPU_MILLISECONDS);
        if (cpuCounter != null && cpuCounter.getValue() <= Integer.MAX_VALUE) {
            // long to int? TODO: FIX. Same below
            splitsBlock.getProgressCPUTime().extend(newProgress, (int) cpuCounter.getValue());
        }
        Counter virtualBytes = counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES);
        if (virtualBytes != null) {
            splitsBlock.getProgressVirtualMemoryKbytes().extend(newProgress, (int) (virtualBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
        }
        Counter physicalBytes = counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES);
        if (physicalBytes != null) {
            splitsBlock.getProgressPhysicalMemoryKbytes().extend(newProgress, (int) (physicalBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
        }
    }
}
Also used: JobCounter (org.apache.hadoop.mapreduce.JobCounter), Counter (org.apache.hadoop.mapreduce.Counter), TaskCounter (org.apache.hadoop.mapreduce.TaskCounter), Counters (org.apache.hadoop.mapreduce.Counters), WrappedProgressSplitsBlock (org.apache.hadoop.mapred.WrappedProgressSplitsBlock)
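The inline TODO flags an unchecked long-to-int narrowing for the memory counters (only the CPU counter is guarded by the <= Integer.MAX_VALUE check). One way to address it would be a saturating cast; this helper is a sketch of ours, not the project's actual fix:

    // Clamp a long into the int range instead of silently truncating;
    // values beyond the range saturate at the nearest int bound.
    private static int clampToInt(long value) {
        if (value > Integer.MAX_VALUE) {
            return Integer.MAX_VALUE;
        }
        if (value < Integer.MIN_VALUE) {
            return Integer.MIN_VALUE;
        }
        return (int) value;
    }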

Example 85 with Counters

Use of org.apache.hadoop.mapreduce.Counters in the project hadoop by apache.

From the class TaskAttemptImpl, method initTaskAttemptStatus: initializes a fresh TaskAttemptStatus with zero progress, the STARTING phase, the NEW state, and empty counters.

private void initTaskAttemptStatus(TaskAttemptStatus result) {
    result.progress = 0.0f;
    result.phase = Phase.STARTING;
    result.stateString = "NEW";
    result.taskState = TaskAttemptState.NEW;
    Counters counters = EMPTY_COUNTERS;
    result.counters = counters;
}
Also used: Counters (org.apache.hadoop.mapreduce.Counters)
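EMPTY_COUNTERS is not shown in this snippet; presumably it is a shared default instance declared once on TaskAttemptImpl so that every new attempt status starts with zeroed counters without a per-attempt allocation, along the lines of:

    // assumed declaration; a single shared Counters with no groups or values
    private static final Counters EMPTY_COUNTERS = new Counters();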

Aggregations

Counters (org.apache.hadoop.mapreduce.Counters): 111
Job (org.apache.hadoop.mapreduce.Job): 37
Test (org.junit.Test): 37
Configuration (org.apache.hadoop.conf.Configuration): 23
IOException (java.io.IOException): 21
Path (org.apache.hadoop.fs.Path): 21
Counter (org.apache.hadoop.mapreduce.Counter): 18
IndexScrutinyMapperForTest (org.apache.phoenix.mapreduce.index.IndexScrutinyMapperForTest): 14
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 9
Connection (java.sql.Connection): 8
CounterGroup (org.apache.hadoop.mapreduce.CounterGroup): 8
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 7
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 7
PreparedStatement (java.sql.PreparedStatement): 6
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 6
ArrayList (java.util.ArrayList): 5
Value (org.apache.accumulo.core.data.Value): 5
Text (org.apache.hadoop.io.Text): 5
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 5
FileNotFoundException (java.io.FileNotFoundException): 4