Use of org.apache.hadoop.mapreduce.InputSplit in project hadoop by apache: the class TestSplitters, method testBigDecimalSplitter.
@Test(timeout = 2000)
public void testBigDecimalSplitter() throws Exception {
  BigDecimalSplitter splitter = new BigDecimalSplitter();
  ResultSet result = mock(ResultSet.class);

  // With no min/max stubbed on the mock ResultSet, the splitter emits a
  // single IS NULL split.
  List<InputSplit> splits = splitter.split(configuration, result, "column");
  assertSplits(new String[] { ".*column IS NULL" }, splits);

  when(result.getString(1)).thenReturn("result1");
  when(result.getString(2)).thenReturn("result2");
  when(result.getBigDecimal(1)).thenReturn(new BigDecimal(10));
  when(result.getBigDecimal(2)).thenReturn(new BigDecimal(12));

  // Min 10 and max 12 are divided into two contiguous ranges.
  splits = splitter.split(configuration, result, "column1");
  assertSplits(new String[] { "column1 >= 10 column1 < 11",
      "column1 >= 11 column1 <= 12" }, splits);
}
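These splitter tests share a configuration field and an assertSplits helper that this page does not show. A minimal sketch of both, inside the TestSplitters class, assuming the fixture asks for two map tasks (which is why each splitter aims for two ranges) and that the expected strings are regular expressions matched against each split's lower and upper bound clauses; DataDrivenDBInputSplit is the nested split type of DataDrivenDBInputFormat:

private Configuration configuration;

@Before
public void setup() {
  configuration = new Configuration();
  // Assumption: two map tasks, so each splitter targets two ranges.
  configuration.setInt(MRJobConfig.NUM_MAPS, 2);
}

private void assertSplits(String[] expectedSplitRE,
    List<InputSplit> splits) throws IOException {
  assertEquals(expectedSplitRE.length, splits.size());
  for (int i = 0; i < expectedSplitRE.length; i++) {
    DataDrivenDBInputSplit split = (DataDrivenDBInputSplit) splits.get(i);
    String actualExpr = split.getLowerClause() + " " + split.getUpperClause();
    // The expected entries are regexes, e.g. ".*column IS NULL".
    assertTrue("Split #" + (i + 1) + " expression is wrong: " + actualExpr,
        Pattern.matches(expectedSplitRE[i], actualExpr));
  }
}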
Use of org.apache.hadoop.mapreduce.InputSplit in project hadoop by apache: the class TestSplitters, method testTextSplitter.
@Test(timeout = 2000)
public void testTextSplitter() throws Exception {
  TextSplitter splitter = new TextSplitter();
  ResultSet result = mock(ResultSet.class);

  // Unstubbed min/max yield a single split whose lower and upper bound
  // clauses are both "column IS NULL".
  List<InputSplit> splits = splitter.split(configuration, result, "column");
  assertSplits(new String[] { "column IS NULL column IS NULL" }, splits);

  when(result.getString(1)).thenReturn("result1");
  when(result.getString(2)).thenReturn("result2");

  // The string range ['result1', 'result2'] is split lexicographically.
  splits = splitter.split(configuration, result, "column1");
  assertSplits(new String[] { "column1 >= 'result1' column1 < 'result1.'",
      "column1 >= 'result1' column1 <= 'result2'" }, splits);
}
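For context on what these clause strings mean: each returned split is a DataDrivenDBInputFormat.DataDrivenDBInputSplit whose lower and upper bound clauses are later ANDed into the per-map query's WHERE condition. A simplified, hypothetical illustration (the table name and the query assembly are invented for the example; the real record reader does this splicing internally):

DataDrivenDBInputSplit split = (DataDrivenDBInputSplit) splits.get(0);
String where = split.getLowerClause() + " AND " + split.getUpperClause();
// e.g. "column1 >= 'result1' AND column1 < 'result1.'"
String query = "SELECT * FROM my_table WHERE " + where; // my_table is made up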
Use of org.apache.hadoop.mapreduce.InputSplit in project hadoop by apache: the class TestSplitters, method testIntegerSplitter.
@Test(timeout = 2000)
public void testIntegerSplitter() throws Exception {
  IntegerSplitter splitter = new IntegerSplitter();
  ResultSet result = mock(ResultSet.class);

  List<InputSplit> splits = splitter.split(configuration, result, "column");
  assertSplits(new String[] { ".*column IS NULL" }, splits);

  when(result.getString(1)).thenReturn("result1");
  when(result.getString(2)).thenReturn("result2");
  when(result.getLong(1)).thenReturn(8L);
  when(result.getLong(2)).thenReturn(19L);

  // The range [8, 19] is walked in integer steps of (19 - 8) / 2 == 5,
  // so three splits come out rather than two.
  splits = splitter.split(configuration, result, "column1");
  assertSplits(new String[] { "column1 >= 8 column1 < 13",
      "column1 >= 13 column1 < 18",
      "column1 >= 18 column1 <= 19" }, splits);
}
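The three expected ranges follow from plain integer arithmetic. A standalone sketch (not the actual IntegerSplitter code) of how the split points fall out for min 8, max 19, and two requested splits:

long min = 8L, max = 19L;
int numSplits = 2;
long step = (max - min) / numSplits;  // 11 / 2 == 5 with integer division
List<Long> points = new ArrayList<>();
for (long p = min; p < max; p += step) {
  points.add(p);                      // 8, 13, 18
}
points.add(max);                      // 19
// Adjacent points form [8, 13), [13, 18), [18, 19]: three splits, as asserted.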
Use of org.apache.hadoop.mapreduce.InputSplit in project hadoop by apache: the class TestSplitters, method testFloatSplitter.
@Test(timeout = 2000)
public void testFloatSplitter() throws Exception {
  FloatSplitter splitter = new FloatSplitter();
  ResultSet results = mock(ResultSet.class);

  List<InputSplit> splits = splitter.split(configuration, results, "column");
  assertSplits(new String[] { ".*column IS NULL" }, splits);

  when(results.getString(1)).thenReturn("result1");
  when(results.getString(2)).thenReturn("result2");
  when(results.getDouble(1)).thenReturn(5.0);
  when(results.getDouble(2)).thenReturn(7.0);

  // Min 5.0 and max 7.0 are divided into two ranges of width 1.0.
  splits = splitter.split(configuration, results, "column1");
  assertSplits(new String[] { "column1 >= 5.0 column1 < 6.0",
      "column1 >= 6.0 column1 <= 7.0" }, splits);
}
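The same arithmetic with doubles explains the float case; a sketch for min 5.0, max 7.0, and two requested splits:

double min = 5.0, max = 7.0;
int numSplits = 2;
double step = (max - min) / numSplits;  // 1.0
// Ranges: [5.0, 6.0) and [6.0, 7.0]; only the final range is closed at both ends.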
Use of org.apache.hadoop.mapreduce.InputSplit in project hadoop by apache: the class TestFileInputFormat, method testNumInputFilesWithoutRecursively.
@Test
public void testNumInputFilesWithoutRecursively() throws Exception {
  Configuration conf = getConfiguration();
  conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
  Job job = Job.getInstance(conf);
  FileInputFormat<?, ?> fileInputFormat = new TextInputFormat();
  List<InputSplit> splits = fileInputFormat.getSplits(job);
  // Without recursive listing, only the direct children of the input
  // directory become splits.
  Assert.assertEquals("Input splits are not correct", 2, splits.size());
  verifySplits(Lists.newArrayList("test:/a1/a2", "test:/a1/file1"), splits);
}
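getConfiguration(), numThreads, and verifySplits() are members of the surrounding TestFileInputFormat class and are not shown on this page. A plausible reconstruction of the configuration helper, assuming a test-local MockFileSystem serves a canned directory tree under the test: scheme:

private Configuration getConfiguration() {
  Configuration conf = new Configuration();
  // Register the mock FileSystem for the "test:" scheme and disable caching
  // so each test gets a fresh instance (MockFileSystem is a test-local class).
  conf.set("fs.test.impl.disable.cache", "true");
  conf.setClass("fs.test.impl", MockFileSystem.class, FileSystem.class);
  // Point the input format at the mock tree rooted at test:///a1.
  conf.set(FileInputFormat.INPUT_DIR, "test:///a1");
  return conf;
}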