Use of org.apache.flink.api.java.hadoop.mapreduce.wrapper.HadoopInputSplit in project flink by apache.
In class HCatInputFormatBase, method createInputSplits:
@Override
public HadoopInputSplit[] createInputSplits(int minNumSplits) throws IOException {
    // Pass Flink's minimum split count hint on to the Hadoop configuration.
    configuration.setInt("mapreduce.input.fileinputformat.split.minsize", minNumSplits);
    JobContext jobContext = new JobContextImpl(configuration, new JobID());
    List<InputSplit> splits;
    try {
        splits = this.hCatInputFormat.getSplits(jobContext);
    } catch (InterruptedException e) {
        throw new IOException("Could not get Splits.", e);
    }
    // Wrap each Hadoop split in a Flink HadoopInputSplit, numbered by its position.
    HadoopInputSplit[] hadoopInputSplits = new HadoopInputSplit[splits.size()];
    for (int i = 0; i < hadoopInputSplits.length; i++) {
        hadoopInputSplits[i] = new HadoopInputSplit(i, splits.get(i), jobContext);
    }
    return hadoopInputSplits;
}
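The array returned here is handed to Flink's split assigner, and each HadoopInputSplit later travels to a parallel task that unwraps the Hadoop split again in open(...). Below is a minimal sketch of inspecting such an array, assuming it was produced by the method above; the SplitInspection class and logSplits method are illustrative names, while getSplitNumber() is inherited from Flink's LocatableInputSplit and getHadoopInputSplit() exposes the wrapped Hadoop split.
import org.apache.flink.api.java.hadoop.mapreduce.wrapper.HadoopInputSplit;

public class SplitInspection {
    // Illustrative helper, not part of Flink: shows how each wrapper pairs a
    // split number with the underlying org.apache.hadoop.mapreduce.InputSplit.
    static void logSplits(HadoopInputSplit[] splits) {
        for (HadoopInputSplit split : splits) {
            System.out.println(split.getSplitNumber() + " -> " + split.getHadoopInputSplit());
        }
    }
}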
Use of org.apache.flink.api.java.hadoop.mapreduce.wrapper.HadoopInputSplit in project flink by apache.
In class HadoopInputFormatBase, method createInputSplits:
@Override
public HadoopInputSplit[] createInputSplits(int minNumSplits) throws IOException {
    configuration.setInt("mapreduce.input.fileinputformat.split.minsize", minNumSplits);
    JobContext jobContext = new JobContextImpl(configuration, new JobID());
    // Propagate the format's credentials plus those of the current Hadoop user to the job context.
    jobContext.getCredentials().addAll(this.credentials);
    Credentials currentUserCreds = getCredentialsFromUGI(UserGroupInformation.getCurrentUser());
    if (currentUserCreds != null) {
        jobContext.getCredentials().addAll(currentUserCreds);
    }
    List<org.apache.hadoop.mapreduce.InputSplit> splits;
    try {
        splits = this.mapreduceInputFormat.getSplits(jobContext);
    } catch (InterruptedException e) {
        throw new IOException("Could not get Splits.", e);
    }
    // Wrap each Hadoop split in a Flink HadoopInputSplit, numbered by its position.
    HadoopInputSplit[] hadoopInputSplits = new HadoopInputSplit[splits.size()];
    for (int i = 0; i < hadoopInputSplits.length; i++) {
        hadoopInputSplits[i] = new HadoopInputSplit(i, splits.get(i), jobContext);
    }
    return hadoopInputSplits;
}
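In practice this method is not called directly; it runs when a Hadoop input is wired into a Flink batch job. A minimal sketch of such a job using the DataSet-level Hadoop compatibility wrapper, assuming the Hadoop compatibility classes and a Hadoop client are on the classpath; the HDFS path and the HadoopInputExample class name are illustrative.
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class HadoopInputExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        Job job = Job.getInstance();
        FileInputFormat.addInputPath(job, new Path("hdfs:///tmp/input")); // illustrative path
        // Wrap the mapreduce TextInputFormat; Flink calls createInputSplits(...) as shown above.
        HadoopInputFormat<LongWritable, Text> hadoopIF =
                new HadoopInputFormat<>(new TextInputFormat(), LongWritable.class, Text.class, job);
        DataSet<Tuple2<LongWritable, Text>> lines = env.createInput(hadoopIF);
        lines.first(10).print();
    }
}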
Use of org.apache.flink.api.java.hadoop.mapreduce.wrapper.HadoopInputSplit in project flink by apache.
In class HadoopInputFormatTest, method testOpen:
@Test
public void testOpen() throws Exception {
    DummyInputFormat inputFormat = mock(DummyInputFormat.class);
    when(inputFormat.createRecordReader(nullable(InputSplit.class), any(TaskAttemptContext.class)))
            .thenReturn(new DummyRecordReader());
    HadoopInputSplit inputSplit = mock(HadoopInputSplit.class);
    HadoopInputFormat<String, Long> hadoopInputFormat =
            setupHadoopInputFormat(inputFormat, Job.getInstance(), null);
    hadoopInputFormat.open(inputSplit);
    // open(...) must create exactly one record reader and must not eagerly fetch a record.
    verify(inputFormat, times(1)).createRecordReader(nullable(InputSplit.class), any(TaskAttemptContext.class));
    assertThat(hadoopInputFormat.fetched, is(false));
}
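The test covers only the open(...) step of the input format lifecycle; the fetched flag it asserts on is what lets the first record be pulled lazily on a later call. A sketch of the full read loop a caller (normally Flink's runtime) would drive against a real format and split rather than mocks; the ReadLoopSketch class and readAll method are illustrative names.
import org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat;
import org.apache.flink.api.java.hadoop.mapreduce.wrapper.HadoopInputSplit;
import org.apache.flink.api.java.tuple.Tuple2;

public class ReadLoopSketch {
    // Mirrors the open / reachedEnd / nextRecord / close lifecycle that testOpen exercises.
    static void readAll(HadoopInputFormat<String, Long> format, HadoopInputSplit split) throws Exception {
        format.open(split); // creates the underlying RecordReader
        Tuple2<String, Long> reuse = new Tuple2<>();
        while (!format.reachedEnd()) {
            Tuple2<String, Long> record = format.nextRecord(reuse);
            if (record != null) {
                System.out.println(record);
            }
        }
        format.close();
    }
}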