Use of `org.apache.hadoop.mapred.SplitLocationInfo` in the Apache `hadoop` project.
From class `TestFileInputFormat`, method `testSplitLocationInfo`:
@Test
public void testSplitLocationInfo() throws Exception {
  // Verify that getLocationInfo() reports the expected disk/memory placement
  // for both hosts serving the first split of test:///a1/a2.
  Configuration conf = getConfiguration();
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      "test:///a1/a2");
  Job job = Job.getInstance(conf);
  TextInputFormat inputFormat = new TextInputFormat();
  InputSplit firstSplit = inputFormat.getSplits(job).get(0);
  String[] hosts = firstSplit.getLocations();
  Assert.assertEquals(2, hosts.length);
  SplitLocationInfo[] infos = firstSplit.getLocationInfo();
  Assert.assertEquals(2, infos.length);
  // Pair each location-info entry with its host so the assertions below
  // do not depend on the order the framework returns them in.
  SplitLocationInfo localInfo = hosts[0].equals("localhost") ? infos[0] : infos[1];
  SplitLocationInfo remoteInfo = hosts[0].equals("otherhost") ? infos[0] : infos[1];
  // localhost replica: cached in memory and on disk; otherhost: disk only.
  Assert.assertTrue(localInfo.isOnDisk());
  Assert.assertTrue(localInfo.isInMemory());
  Assert.assertTrue(remoteInfo.isOnDisk());
  Assert.assertFalse(remoteInfo.isInMemory());
}
Use of `org.apache.hadoop.mapred.SplitLocationInfo` in the Apache `hive` project.
From class `TestLlapInputSplit`, method `testWritable`:
@Test
public void testWritable() throws Exception {
  // Round-trip an LlapInputSplit through write()/readFields() and verify the
  // deserialized copy matches the original and that every byte was consumed.
  int splitNum = 88;
  // Fix: bare getBytes() encodes with the platform-default charset (pre-JDK 18),
  // making the fixture environment-dependent. Pin UTF-8 explicitly.
  byte[] planBytes = "0123456789987654321".getBytes(java.nio.charset.StandardCharsets.UTF_8);
  byte[] fragmentBytes = "abcdefghijklmnopqrstuvwxyz".getBytes(java.nio.charset.StandardCharsets.UTF_8);
  SplitLocationInfo[] locations = { new SplitLocationInfo("location1", false), new SplitLocationInfo("location2", false) };
  LlapDaemonInfo daemonInfo1 = new LlapDaemonInfo("host1", 30004, 15003);
  LlapDaemonInfo daemonInfo2 = new LlapDaemonInfo("host2", 30004, 15003);
  LlapDaemonInfo[] llapDaemonInfos = { daemonInfo1, daemonInfo2 };
  ArrayList<FieldDesc> colDescs = new ArrayList<FieldDesc>();
  colDescs.add(new FieldDesc("col1", TypeInfoFactory.stringTypeInfo));
  colDescs.add(new FieldDesc("col2", TypeInfoFactory.intTypeInfo));
  Schema schema = new Schema(colDescs);
  byte[] tokenBytes = new byte[] { 1 };
  LlapInputSplit split1 = new LlapInputSplit(splitNum, planBytes, fragmentBytes, null, locations, llapDaemonInfos, schema, "hive", tokenBytes, "some-dummy-jwt");
  // Serialize split1 into an in-memory buffer.
  ByteArrayOutputStream byteOutStream = new ByteArrayOutputStream();
  DataOutputStream dataOut = new DataOutputStream(byteOutStream);
  split1.write(dataOut);
  // Deserialize from the same bytes into a fresh instance.
  ByteArrayInputStream byteInStream = new ByteArrayInputStream(byteOutStream.toByteArray());
  DataInputStream dataIn = new DataInputStream(byteInStream);
  LlapInputSplit split2 = new LlapInputSplit();
  split2.readFields(dataIn);
  // Did we read all the data?
  assertEquals(0, byteInStream.available());
  checkLlapSplits(split1, split2);
}
Aggregations