use of org.apache.hadoop.mapred.FileSplit in project hive by apache.
the class TestVectorizedColumnReader method testNullSplitForParquetReader.
@Test
public void testNullSplitForParquetReader() throws Exception {
  Configuration conf = new Configuration();
  conf.set(IOConstants.COLUMNS, "int32_field");
  conf.set(IOConstants.COLUMNS_TYPES, "int");
  conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
  conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
  conf.set(PARQUET_READ_SCHEMA, "message test { required int32 int32_field;}");
  HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, true);
  HiveConf.setVar(conf, HiveConf.ConfVars.PLAN, "//tmp");
  Job vectorJob = new Job(conf, "read vector");
  ParquetInputFormat.setInputPaths(vectorJob, file);
  initialVectorizedRowBatchCtx(conf);
  FileSplit fsplit = getFileSplit(vectorJob);
  JobConf jobConf = new JobConf(conf);
  TestVectorizedParquetRecordReader testReader = new TestVectorizedParquetRecordReader(fsplit, jobConf);
  Assert.assertNull("Test should return null split from getSplit() method", testReader.getSplit(fsplit, jobConf));
}
use of org.apache.hadoop.mapred.FileSplit in project hive by apache.
the class SplitGrouper method estimateBucketSizes.
/**
 * Get the size estimate for each bucket, expressed as a number of tasks. This is
 * used to make sure we allocate the headroom evenly.
 */
private Map<Integer, Integer> estimateBucketSizes(int availableSlots, float waves, Map<Integer, Collection<InputSplit>> bucketSplitMap) {
  // mapping of bucket id to size of all splits in bucket in bytes
  Map<Integer, Long> bucketSizeMap = new HashMap<Integer, Long>();
  // mapping of bucket id to number of required tasks to run
  Map<Integer, Integer> bucketTaskMap = new HashMap<Integer, Integer>();
  // TODO HIVE-12255. Make use of SplitSizeEstimator.
  // The actual task computation needs to be looked at as well.
  // compute the total size per bucket
  long totalSize = 0;
  boolean earlyExit = false;
  for (int bucketId : bucketSplitMap.keySet()) {
    long size = 0;
    for (InputSplit s : bucketSplitMap.get(bucketId)) {
      // The split may not be a FileSplit when TezGroupedSplits are being re-grouped
      // for an SMB join. In that case, exit early and assume each bucket can fill
      // availableSlots * waves (preset to 0.5) for SMB join.
      if (!(s instanceof FileSplit)) {
        bucketTaskMap.put(bucketId, (int) (availableSlots * waves));
        earlyExit = true;
        continue;
      }
      FileSplit fsplit = (FileSplit) s;
      size += fsplit.getLength();
      totalSize += fsplit.getLength();
    }
    bucketSizeMap.put(bucketId, size);
  }
  if (earlyExit) {
    return bucketTaskMap;
  }
  // compute the number of tasks
  for (int bucketId : bucketSizeMap.keySet()) {
    int numEstimatedTasks = 0;
    if (totalSize != 0) {
      // availableSlots * waves => desired slots to fill
      // sizePerBucket/totalSize => weight for particular bucket. weights add
      // up to 1.
      numEstimatedTasks = (int) (availableSlots * waves * bucketSizeMap.get(bucketId) / totalSize);
    }
    LOG.info("Estimated number of tasks: " + numEstimatedTasks + " for bucket " + bucketId);
    if (numEstimatedTasks == 0) {
      numEstimatedTasks = 1;
    }
    bucketTaskMap.put(bucketId, numEstimatedTasks);
  }
  return bucketTaskMap;
}
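To make the proportional allocation above concrete, here is a minimal standalone sketch (not part of the Hive sources; the class and method names are illustrative) of the same arithmetic: availableSlots * waves is the number of slots we want to fill, each bucket gets a share weighted by its total split size, and a bucket never gets fewer than one task.
public final class BucketTaskEstimateDemo {
  // Illustrative helper mirroring the estimate computed in estimateBucketSizes above.
  static int estimateTasks(int availableSlots, float waves, long bucketSize, long totalSize) {
    if (totalSize == 0) {
      return 1;
    }
    int tasks = (int) (availableSlots * waves * bucketSize / totalSize);
    return tasks == 0 ? 1 : tasks;
  }

  public static void main(String[] args) {
    // 100 slots and waves = 0.5 => 50 desired tasks, shared by split size.
    System.out.println(estimateTasks(100, 0.5f, 300L << 20, 1200L << 20)); // 12 tasks for a 300 MB bucket out of 1200 MB
    System.out.println(estimateTasks(100, 0.5f, 1L << 20, 1200L << 20));   // rounds up to 1 task for a tiny bucket
  }
}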
use of org.apache.hadoop.mapred.FileSplit in project hive by apache.
the class SplitGrouper method createTaskLocationHints.
/**
* Create task location hints from a set of input splits
* @param splits the actual splits
* @param consistentLocations whether to re-order locations for each split, if it's a file split
* @return taskLocationHints - 1 per input split specified
* @throws IOException
*/
public List<TaskLocationHint> createTaskLocationHints(InputSplit[] splits, boolean consistentLocations) throws IOException {
  List<TaskLocationHint> locationHints = Lists.newArrayListWithCapacity(splits.length);
  for (InputSplit split : splits) {
    String rack = (split instanceof TezGroupedSplit) ? ((TezGroupedSplit) split).getRack() : null;
    if (rack == null) {
      String[] locations = split.getLocations();
      if (locations != null && locations.length > 0) {
        // Worthwhile only if more than 1 split, consistentGroupingEnabled and is a FileSplit
        if (consistentLocations && locations.length > 1 && split instanceof FileSplit) {
          Arrays.sort(locations);
          FileSplit fileSplit = (FileSplit) split;
          Path path = fileSplit.getPath();
          long startLocation = fileSplit.getStart();
          int hashCode = Objects.hash(path, startLocation);
          int startIndex = hashCode % locations.length;
          LinkedHashSet<String> locationSet = new LinkedHashSet<>(locations.length);
          // Set up the locations starting from startIndex, and wrapping around the sorted array.
          for (int i = 0; i < locations.length; i++) {
            int index = (startIndex + i) % locations.length;
            locationSet.add(locations[index]);
          }
          locationHints.add(TaskLocationHint.createTaskLocationHint(locationSet, null));
        } else {
          locationHints.add(TaskLocationHint.createTaskLocationHint(new LinkedHashSet<String>(Arrays.asList(split.getLocations())), null));
        }
      } else {
        locationHints.add(TaskLocationHint.createTaskLocationHint(null, null));
      }
    } else {
      locationHints.add(TaskLocationHint.createTaskLocationHint(null, Collections.singleton(rack)));
    }
  }
  return locationHints;
}
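The consistent-ordering branch above is easier to see in isolation. The following is a small standalone sketch (illustrative names, not part of Hive) of the same idea: sort the hosts, derive a start offset from a hash of (path, start), and emit the hosts in rotated order so the same split always prefers the same host. The snippet above uses the raw hash value; the demo additionally normalizes a possibly negative hash with Math.floorMod to keep the index in range.
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Objects;
import java.util.Set;

final class ConsistentLocationOrderDemo {
  static Set<String> orderLocations(String[] locations, String path, long start) {
    String[] sorted = locations.clone();
    Arrays.sort(sorted);
    // The hash of (path, start) chooses where the rotation begins.
    int startIndex = Math.floorMod(Objects.hash(path, start), sorted.length);
    LinkedHashSet<String> ordered = new LinkedHashSet<>(sorted.length);
    for (int i = 0; i < sorted.length; i++) {
      ordered.add(sorted[(startIndex + i) % sorted.length]);
    }
    return ordered;
  }

  public static void main(String[] args) {
    String[] hosts = { "node3", "node1", "node2" };
    // The same (path, start) pair always yields the same ordering; a different start rotates it.
    System.out.println(orderLocations(hosts, "/warehouse/t/000000_0", 0L));
    System.out.println(orderLocations(hosts, "/warehouse/t/000000_0", 134217728L));
  }
}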
use of org.apache.hadoop.mapred.FileSplit in project hive by apache.
the class CustomPartitionVertex method getBucketSplitMapForPath.
/*
* This method generates the map of bucket to file splits.
*/
private Multimap<Integer, InputSplit> getBucketSplitMapForPath(String inputName, Map<String, Set<FileSplit>> pathFileSplitsMap) {
  Multimap<Integer, InputSplit> bucketToInitialSplitMap = ArrayListMultimap.create();
  boolean fallback = false;
  Map<Integer, Integer> bucketIds = new HashMap<>();
  for (Map.Entry<String, Set<FileSplit>> entry : pathFileSplitsMap.entrySet()) {
    // Extract the bucketId from pathFileSplitsMap. This is the more accurate method;
    // however, it may not work in certain cases where buckets are named after the
    // files used while loading the data. In such cases, fall back to the older,
    // potentially inaccurate method.
    // The accepted file names look like 000000_0 or 000001_0_copy_1.
    String bucketIdStr = Utilities.getBucketFileNameFromPathSubString(entry.getKey());
    int bucketId = Utilities.getBucketIdFromFile(bucketIdStr);
    if (bucketId == -1) {
      fallback = true;
      LOG.info("Fallback to using older sort based logic to assign buckets to splits.");
      bucketIds.clear();
      break;
    }
    // Make sure the bucketId stays within [0, numBuckets).
    bucketId = bucketId % numBuckets;
    bucketIds.put(bucketId, bucketId);
    for (FileSplit fsplit : entry.getValue()) {
      bucketToInitialSplitMap.put(bucketId, fsplit);
    }
  }
  int bucketNum = 0;
  if (fallback) {
    // Old logic: assume the file names sort in alphanumeric order and map each path
    // to the next bucket number in that order.
    for (Map.Entry<String, Set<FileSplit>> entry : pathFileSplitsMap.entrySet()) {
      int bucketId = bucketNum % numBuckets;
      for (FileSplit fsplit : entry.getValue()) {
        bucketToInitialSplitMap.put(bucketId, fsplit);
      }
      bucketNum++;
    }
  }
  // SMB join case: numBuckets matches the big table, while this (small table) input
  // may have fewer buckets. Splits from a small-table bucket must also feed the
  // corresponding higher big-table buckets, e.g. with 8 big-table and 4 small-table
  // buckets, small-table bucket 0 is sent to big-table buckets 0 and 4 as well.
  if (numInputsAffectingRootInputSpecUpdate != 1) {
    // small table
    if (fallback && bucketNum < numBuckets) {
      // Old logic.
      int loopedBucketId = 0;
      for (; bucketNum < numBuckets; bucketNum++) {
        for (InputSplit fsplit : bucketToInitialSplitMap.get(loopedBucketId)) {
          bucketToInitialSplitMap.put(bucketNum, fsplit);
        }
        loopedBucketId++;
      }
    } else {
      // new logic.
      if (inputToBucketMap.containsKey(inputName)) {
        int inputNumBuckets = inputToBucketMap.get(inputName);
        if (inputNumBuckets < numBuckets) {
          // Need to send the splits to multiple buckets
          for (int i = 1; i < numBuckets / inputNumBuckets; i++) {
            int bucketIdBase = i * inputNumBuckets;
            for (Integer bucketId : bucketIds.keySet()) {
              for (InputSplit fsplit : bucketToInitialSplitMap.get(bucketId)) {
                bucketToInitialSplitMap.put(bucketIdBase + bucketId, fsplit);
              }
            }
          }
        }
      }
    }
  }
  return bucketToInitialSplitMap;
}
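As a hedged illustration of the "new logic" branch (the class name, split labels, and bucket counts below are made up for the example, not taken from Hive), here is how splits from a smaller-bucketed input get replicated to the higher bucket ids of a larger-bucketed table: with numBuckets = 8 and inputNumBuckets = 4, bucket b is also copied to bucket b + 4.
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import java.util.ArrayList;

final class SmallTableBucketRoutingDemo {
  public static void main(String[] args) {
    int numBuckets = 8;       // big table bucket count
    int inputNumBuckets = 4;  // small table bucket count
    Multimap<Integer, String> bucketToSplit = ArrayListMultimap.create();
    for (int b = 0; b < inputNumBuckets; b++) {
      bucketToSplit.put(b, "split-for-bucket-" + b);
    }
    // Replicate every small-table bucket into each higher block of big-table buckets.
    for (int i = 1; i < numBuckets / inputNumBuckets; i++) {
      int bucketIdBase = i * inputNumBuckets;
      for (int b = 0; b < inputNumBuckets; b++) {
        for (String split : new ArrayList<>(bucketToSplit.get(b))) {
          bucketToSplit.put(bucketIdBase + b, split);
        }
      }
    }
    System.out.println(bucketToSplit); // buckets 4..7 now mirror buckets 0..3
  }
}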
use of org.apache.hadoop.mapred.FileSplit in project hive by apache.
the class CustomPartitionVertex method getFileSplitFromEvent.
private FileSplit getFileSplitFromEvent(InputDataInformationEvent event) throws IOException {
  InputSplit inputSplit = null;
  if (event.getDeserializedUserPayload() != null) {
    inputSplit = (InputSplit) event.getDeserializedUserPayload();
  } else {
    MRSplitProto splitProto = MRSplitProto.parseFrom(ByteString.copyFrom(event.getUserPayload()));
    SerializationFactory serializationFactory = new SerializationFactory(new Configuration());
    inputSplit = MRInputHelpers.createOldFormatSplitFromUserPayload(splitProto, serializationFactory);
  }
  if (!(inputSplit instanceof FileSplit)) {
    throw new UnsupportedOperationException("Cannot handle splits other than FileSplit for the moment. Current input split type: " + inputSplit.getClass().getSimpleName());
  }
  return (FileSplit) inputSplit;
}
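For reference, a tiny standalone sketch (illustrative, not part of the Hive sources) of the same defensive cast, applied to a locally constructed org.apache.hadoop.mapred.FileSplit.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;

final class FileSplitCastDemo {
  static FileSplit asFileSplit(InputSplit split) {
    if (!(split instanceof FileSplit)) {
      throw new UnsupportedOperationException(
          "Cannot handle splits other than FileSplit. Got: " + split.getClass().getSimpleName());
    }
    return (FileSplit) split;
  }

  public static void main(String[] args) {
    // A 128 MB split on a hypothetical bucket file, with two preferred hosts.
    InputSplit split = new FileSplit(new Path("/warehouse/t/000000_0"), 0L, 128L * 1024 * 1024,
        new String[] { "node1", "node2" });
    System.out.println(asFileSplit(split).getPath());
  }
}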