Example 31 with JobContext

use of org.apache.hadoop.mapreduce.JobContext in project hbase by apache.

the class TestRoundRobinTableInputFormat method testConfigureUnconfigure.

/**
 * Test that configure/unconfigure set and properly undo the HBASE_REGIONSIZECALCULATOR_ENABLE
 * configuration.
 */
@Test
public void testConfigureUnconfigure() {
    Configuration configuration = HBaseConfiguration.create();
    RoundRobinTableInputFormat rrtif = new RoundRobinTableInputFormat();
    rrtif.setConf(configuration);
    JobContext jobContext = Mockito.mock(JobContext.class);
    Mockito.when(jobContext.getConfiguration()).thenReturn(configuration);
    // Assert when done, HBASE_REGIONSIZECALCULATOR_ENABLE is still unset.
    configuration.unset(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE);
    rrtif.configure();
    rrtif.unconfigure();
    String value = configuration.get(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE);
    assertNull(value);
    // Assert HBASE_REGIONSIZECALCULATOR_ENABLE is still false when done.
    checkRetainsBooleanValue(jobContext, rrtif, false);
    // Assert HBASE_REGIONSIZECALCULATOR_ENABLE is still true when done.
    checkRetainsBooleanValue(jobContext, rrtif, true);
}
Also used: HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), Configuration (org.apache.hadoop.conf.Configuration), JobContext (org.apache.hadoop.mapreduce.JobContext), Test (org.junit.Test)
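
The checkRetainsBooleanValue helper is not shown on this page. Based on the inline comments above, a plausible reconstruction is the following (set the flag, run configure/unconfigure, assert the value survives); the actual HBase helper may differ:

// Hypothetical reconstruction of the helper referenced above, not verbatim HBase code.
private void checkRetainsBooleanValue(JobContext jobContext, RoundRobinTableInputFormat rrtif,
        final boolean b) {
    Configuration configuration = jobContext.getConfiguration();
    configuration.setBoolean(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE, b);
    rrtif.configure();
    rrtif.unconfigure();
    // The original value must survive the configure/unconfigure round trip.
    String value = configuration.get(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE);
    assertEquals(b, Boolean.parseBoolean(value));
}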

Example 32 with JobContext

use of org.apache.hadoop.mapreduce.JobContext in project beam by apache.

the class HadoopFormatIOReadTest method testComputeSplitsIfGetSplitsReturnsNullValue.

/**
 * This test validates behavior of {@link
 * HadoopInputFormatBoundedSource#computeSplitsIfNecessary() computeSplits()} when Hadoop
 * InputFormat's {@link InputFormat#getSplits(JobContext) getSplits(JobContext)} returns a null
 * value.
 */
@Test
public void testComputeSplitsIfGetSplitsReturnsNullValue() throws Exception {
    InputFormat<Text, Employee> mockInputFormat = Mockito.mock(EmployeeInputFormat.class);
    SerializableSplit mockInputSplit = Mockito.mock(SerializableSplit.class);
    Mockito.when(mockInputFormat.getSplits(Mockito.any(JobContext.class))).thenReturn(null);
    HadoopInputFormatBoundedSource<Text, Employee> hifSource =
        new HadoopInputFormatBoundedSource<>(
            serConf,
            WritableCoder.of(Text.class),
            AvroCoder.of(Employee.class),
            null, // No key translation required.
            null, // No value translation required.
            mockInputSplit,
            false,
            false);
    thrown.expect(IOException.class);
    thrown.expectMessage("Error in computing splits, getSplits() returns null.");
    hifSource.setInputFormatObj(mockInputFormat);
    hifSource.computeSplitsIfNecessary();
}
Also used: HadoopInputFormatBoundedSource (org.apache.beam.sdk.io.hadoop.format.HadoopFormatIO.HadoopInputFormatBoundedSource), SerializableSplit (org.apache.beam.sdk.io.hadoop.format.HadoopFormatIO.SerializableSplit), Text (org.apache.hadoop.io.Text), JobContext (org.apache.hadoop.mapreduce.JobContext), Test (org.junit.Test)
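
The thrown field used by this and the following tests is JUnit 4's ExpectedException rule; the Beam test class presumably declares it along the following lines (the exact modifiers are an assumption):

import org.junit.Rule;
import org.junit.rules.ExpectedException;

@Rule
public final transient ExpectedException thrown = ExpectedException.none();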

Example 33 with JobContext

use of org.apache.hadoop.mapreduce.JobContext in project beam by apache.

the class HadoopFormatIOReadTest method testComputeSplitsIfGetSplitsReturnsListHavingNullValues.

/**
 * This test validates behavior of {@link
 * HadoopInputFormatBoundedSource#computeSplitsIfNecessary() computeSplits()} if Hadoop
 * InputFormat's {@link InputFormat#getSplits(JobContext) getSplits(JobContext)} returns an
 * InputSplit list containing some null values.
 */
@Test
public void testComputeSplitsIfGetSplitsReturnsListHavingNullValues() throws Exception {
    // InputSplit list having null value.
    InputSplit mockInputSplit = Mockito.mock(InputSplit.class, Mockito.withSettings().extraInterfaces(Writable.class));
    List<InputSplit> inputSplitList = new ArrayList<>();
    inputSplitList.add(mockInputSplit);
    inputSplitList.add(null);
    InputFormat<Text, Employee> mockInputFormat = Mockito.mock(EmployeeInputFormat.class);
    Mockito.when(mockInputFormat.getSplits(Mockito.any(JobContext.class))).thenReturn(inputSplitList);
    HadoopInputFormatBoundedSource<Text, Employee> hifSource =
        new HadoopInputFormatBoundedSource<>(
            serConf,
            WritableCoder.of(Text.class),
            AvroCoder.of(Employee.class),
            null, // No key translation required.
            null, // No value translation required.
            new SerializableSplit(),
            false,
            false);
    thrown.expect(IOException.class);
    thrown.expectMessage("Error in computing splits, split is null in InputSplits list populated " + "by getSplits() : ");
    hifSource.setInputFormatObj(mockInputFormat);
    hifSource.computeSplitsIfNecessary();
}
Also used: ArrayList (java.util.ArrayList), HadoopInputFormatBoundedSource (org.apache.beam.sdk.io.hadoop.format.HadoopFormatIO.HadoopInputFormatBoundedSource), Writable (org.apache.hadoop.io.Writable), LongWritable (org.apache.hadoop.io.LongWritable), SerializableSplit (org.apache.beam.sdk.io.hadoop.format.HadoopFormatIO.SerializableSplit), Text (org.apache.hadoop.io.Text), JobContext (org.apache.hadoop.mapreduce.JobContext), NewObjectsEmployeeInputSplit (org.apache.beam.sdk.io.hadoop.format.EmployeeInputFormat.NewObjectsEmployeeInputSplit), InputSplit (org.apache.hadoop.mapreduce.InputSplit), Test (org.junit.Test)
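
The withSettings().extraInterfaces(...) call above is worth noting: it makes the mock implement Writable in addition to InputSplit, which matters because Hadoop serializes and ships splits through the Writable contract. A self-contained illustration of that Mockito feature (variable names here are illustrative):

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.mockito.Mockito;

// Mock a class while also implementing an extra interface.
InputSplit split = Mockito.mock(InputSplit.class,
    Mockito.withSettings().extraInterfaces(Writable.class));
// This cast succeeds only because of extraInterfaces.
Writable asWritable = (Writable) split;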

Example 34 with JobContext

use of org.apache.hadoop.mapreduce.JobContext in project beam by apache.

the class HadoopFormatIOReadTest method testComputeSplitsIfGetSplitsReturnsEmptyList.

/**
 * This test validates behavior of {@link
 * HadoopInputFormatBoundedSource#computeSplitsIfNecessary() computeSplits()} when Hadoop
 * InputFormat's {@link InputFormat#getSplits(JobContext)} returns an empty list.
 */
@Test
public void testComputeSplitsIfGetSplitsReturnsEmptyList() throws Exception {
    InputFormat<?, ?> mockInputFormat = Mockito.mock(EmployeeInputFormat.class);
    SerializableSplit mockInputSplit = Mockito.mock(SerializableSplit.class);
    Mockito.when(mockInputFormat.getSplits(Mockito.any(JobContext.class))).thenReturn(new ArrayList<>());
    HadoopInputFormatBoundedSource<Text, Employee> hifSource =
        new HadoopInputFormatBoundedSource<>(
            serConf,
            WritableCoder.of(Text.class),
            AvroCoder.of(Employee.class),
            null, // No key translation required.
            null, // No value translation required.
            mockInputSplit,
            false,
            false);
    thrown.expect(IOException.class);
    thrown.expectMessage("Error in computing splits, getSplits() returns a empty list");
    hifSource.setInputFormatObj(mockInputFormat);
    hifSource.computeSplitsIfNecessary();
}
Also used: HadoopInputFormatBoundedSource (org.apache.beam.sdk.io.hadoop.format.HadoopFormatIO.HadoopInputFormatBoundedSource), SerializableSplit (org.apache.beam.sdk.io.hadoop.format.HadoopFormatIO.SerializableSplit), Text (org.apache.hadoop.io.Text), JobContext (org.apache.hadoop.mapreduce.JobContext), Test (org.junit.Test)
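
Taken together, Examples 32 through 34 pin down the validation that computeSplitsIfNecessary() performs before accepting splits. A sketch of the guard those three tests imply (validateSplits is a hypothetical name, reconstructed from the expected messages rather than taken from Beam's source):

// Sketch of the validation implied by the three tests above; not Beam's exact code.
// The exception messages are kept verbatim (including "a empty list"), since the
// tests match on those strings.
private void validateSplits(List<InputSplit> splits, InputFormat<?, ?> inputFormat)
        throws IOException {
    if (splits == null) {
        throw new IOException("Error in computing splits, getSplits() returns null.");
    }
    if (splits.isEmpty()) {
        throw new IOException("Error in computing splits, getSplits() returns a empty list");
    }
    for (InputSplit split : splits) {
        if (split == null) {
            throw new IOException(
                "Error in computing splits, split is null in InputSplits list populated by getSplits() : "
                    + inputFormat);
        }
    }
}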

Example 35 with JobContext

use of org.apache.hadoop.mapreduce.JobContext in project druid by druid-io.

the class DatasourceInputFormatTest method testGetSplitsUsingDefaultSupplier.

@Test
public void testGetSplitsUsingDefaultSupplier() throws Exception {
    // Use the built-in supplier, reading from the local filesystem, rather than testFormatter.
    final File tmpFile = temporaryFolder.newFile("something:with:colons");
    Files.write("dummy", tmpFile, StandardCharsets.UTF_8);
    final ImmutableList<WindowedDataSegment> mySegments = ImmutableList.of(
        WindowedDataSegment.of(
            new DataSegment(
                "test1",
                Intervals.of("2000/3000"),
                "ver",
                ImmutableMap.of("type", "local", "path", tmpFile.getPath()),
                ImmutableList.of("host"),
                ImmutableList.of("visited_sum", "unique_hosts"),
                NoneShardSpec.instance(),
                9,
                2)));
    final JobConf myConfig = populateConfiguration(new JobConf(), mySegments, 0L);
    final JobContext myContext = EasyMock.createMock(JobContext.class);
    EasyMock.expect(myContext.getConfiguration()).andReturn(myConfig);
    EasyMock.replay(myContext);
    final List<InputSplit> splits = new DatasourceInputFormat().getSplits(myContext);
    Assert.assertEquals(1, splits.size());
    final DatasourceInputSplit theSplit = (DatasourceInputSplit) Iterables.getOnlyElement(splits);
    Assert.assertEquals(mySegments.get(0).getSegment().getSize(), theSplit.getLength());
    Assert.assertEquals(mySegments, theSplit.getSegments());
    Assert.assertArrayEquals(new String[] { "localhost" }, theSplit.getLocations());
}
Also used: JobContext (org.apache.hadoop.mapreduce.JobContext), File (java.io.File), DataSegment (org.apache.druid.timeline.DataSegment), JobConf (org.apache.hadoop.mapred.JobConf), InputSplit (org.apache.hadoop.mapreduce.InputSplit), Test (org.junit.Test)
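
Unlike the Mockito-based Beam tests above, this Druid test uses EasyMock's record/replay model: expectations are recorded with expect(...).andReturn(...), then armed with replay() before the mock is exercised. A minimal standalone sketch of the same pattern:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobContext;
import org.easymock.EasyMock;

// Record phase: declare what the mock should return when called.
JobContext context = EasyMock.createMock(JobContext.class);
Configuration conf = new Configuration();
EasyMock.expect(context.getConfiguration()).andReturn(conf);

// Replay phase: the mock now answers calls as recorded.
EasyMock.replay(context);
assert context.getConfiguration() == conf;

// Optionally verify that every recorded expectation was consumed.
EasyMock.verify(context);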

Aggregations

JobContext (org.apache.hadoop.mapreduce.JobContext): 85
Configuration (org.apache.hadoop.conf.Configuration): 41
Job (org.apache.hadoop.mapreduce.Job): 35
TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext): 34
Test (org.junit.Test): 31
JobContextImpl (org.apache.hadoop.mapreduce.task.JobContextImpl): 29
InputSplit (org.apache.hadoop.mapreduce.InputSplit): 28
TaskAttemptContextImpl (org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl): 25
Path (org.apache.hadoop.fs.Path): 24
IOException (java.io.IOException): 22
File (java.io.File): 19
TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID): 16
ArrayList (java.util.ArrayList): 13
RecordWriter (org.apache.hadoop.mapreduce.RecordWriter): 11
JobConf (org.apache.hadoop.mapred.JobConf): 10
OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter): 10
LongWritable (org.apache.hadoop.io.LongWritable): 9
MapFile (org.apache.hadoop.io.MapFile): 9
JobID (org.apache.hadoop.mapreduce.JobID): 7
FileSystem (org.apache.hadoop.fs.FileSystem): 6