
Example 46 with TextInputFormat

use of org.apache.hadoop.mapred.TextInputFormat in project systemml by apache.

the class FrameReaderTextCSVParallel method readCSVFrameFromHDFS.

@Override
protected void readCSVFrameFromHDFS(Path path, JobConf job, FileSystem fs, FrameBlock dest, ValueType[] schema, String[] names, long rlen, long clen) throws IOException {
    int numThreads = OptimizerUtils.getParallelTextReadParallelism();
    TextInputFormat informat = new TextInputFormat();
    informat.configure(job);
    InputSplit[] splits = informat.getSplits(job, numThreads);
    splits = IOUtilFunctions.sortInputSplits(splits);
    ExecutorService pool = CommonThreadPool.get(Math.min(numThreads, splits.length));
    try {
        // pass 1: count rows per split (the first split consumes the header, if any)
        ArrayList<CountRowsTask> tasks = new ArrayList<>();
        for (int i = 0; i < splits.length; i++)
            tasks.add(new CountRowsTask(splits[i], informat, job, _props.hasHeader(), i == 0));
        List<Future<Long>> cret = pool.invokeAll(tasks);
        // compute each split's row offset via a cumulative sum over the counts
        long offset = 0;
        List<Long> offsets = new ArrayList<>();
        for (Future<Long> count : cret) {
            offsets.add(offset);
            offset += count.get();
        }
        // pass 2: read the individual splits into the destination block
        ArrayList<ReadRowsTask> tasks2 = new ArrayList<>();
        for (int i = 0; i < splits.length; i++)
            tasks2.add(new ReadRowsTask(splits[i], informat, job, dest, offsets.get(i).intValue(), i == 0));
        List<Future<Object>> rret = pool.invokeAll(tasks2);
        // surface any read errors
        for (Future<Object> read : rret)
            read.get();
    } catch (Exception e) {
        throw new IOException("Failed parallel read of text csv input.", e);
    } finally {
        // shut the pool down on all paths, not only on success
        pool.shutdown();
    }
}
Also used : ArrayList(java.util.ArrayList) IOException(java.io.IOException) TextInputFormat(org.apache.hadoop.mapred.TextInputFormat) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) InputSplit(org.apache.hadoop.mapred.InputSplit)
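
The pattern above generalizes beyond SystemML: count records per split in a first parallel pass, turn the counts into per-split starting offsets with a cumulative sum, and only then read the splits into their target slices. A minimal, self-contained sketch of that counting pass follows; SplitRowCounter and countAndOffset are invented names, header handling is omitted, and plain Hadoop record readers stand in for SystemML's CountRowsTask.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;

public class SplitRowCounter {
    // Counts rows per split in parallel, then derives each split's starting
    // row via a cumulative sum -- the same two-pass scheme as above.
    public static long[] countAndOffset(JobConf job, String inputPath, int numThreads) throws Exception {
        FileInputFormat.setInputPaths(job, new Path(inputPath));
        TextInputFormat informat = new TextInputFormat();
        informat.configure(job);
        InputSplit[] splits = informat.getSplits(job, numThreads);
        ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, Math.min(numThreads, splits.length)));
        try {
            List<Callable<Long>> tasks = new ArrayList<>();
            for (InputSplit split : splits) {
                tasks.add(() -> {
                    RecordReader<LongWritable, Text> reader = informat.getRecordReader(split, job, Reporter.NULL);
                    LongWritable key = reader.createKey();
                    Text value = reader.createValue();
                    long rows = 0;
                    try {
                        while (reader.next(key, value)) rows++;
                    } finally {
                        reader.close();
                    }
                    return rows;
                });
            }
            List<Future<Long>> counts = pool.invokeAll(tasks);
            // cumulative sum: offsets[i] = number of rows in splits 0..i-1
            long[] offsets = new long[splits.length];
            long offset = 0;
            for (int i = 0; i < splits.length; i++) {
                offsets[i] = offset;
                offset += counts.get(i).get();
            }
            return offsets;
        } finally {
            pool.shutdown();
        }
    }
}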

Example 47 with TextInputFormat

use of org.apache.hadoop.mapred.TextInputFormat in project incubator-gobblin by apache.

the class OldApiHadoopTextInputSource method getFileInputFormat.

@Override
protected FileInputFormat<LongWritable, Text> getFileInputFormat(State state, JobConf jobConf) {
    // instantiate via ReflectionUtils so Hadoop's Configurable/JobConfigurable
    // hooks run, then configure explicitly with the job's settings
    TextInputFormat textInputFormat = ReflectionUtils.newInstance(TextInputFormat.class, jobConf);
    textInputFormat.configure(jobConf);
    return textInputFormat;
}
Also used : TextInputFormat(org.apache.hadoop.mapred.TextInputFormat)
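
For context, a hypothetical caller might use this construction pattern to enumerate splits; the driver class and argument handling below are invented, and only the ReflectionUtils/configure sequence mirrors getFileInputFormat above.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.util.ReflectionUtils;

public class TextSplitLister {
    public static void main(String[] args) throws Exception {
        JobConf jobConf = new JobConf();
        FileInputFormat.setInputPaths(jobConf, new Path(args[0]));
        // same construction pattern as getFileInputFormat above
        TextInputFormat format = ReflectionUtils.newInstance(TextInputFormat.class, jobConf);
        format.configure(jobConf);
        // list the splits the format would hand to map tasks
        for (InputSplit split : format.getSplits(jobConf, 1)) {
            System.out.println(split);
        }
    }
}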

Example 48 with TextInputFormat

use of org.apache.hadoop.mapred.TextInputFormat in project presto by prestodb.

the class StoragePartitionLoader method loadPartition.

@Override
public ListenableFuture<?> loadPartition(HivePartitionMetadata partition, HiveSplitSource hiveSplitSource, boolean stopped) throws IOException {
    String partitionName = partition.getHivePartition().getPartitionId();
    Storage storage = partition.getPartition().map(Partition::getStorage).orElse(table.getStorage());
    Properties schema = getPartitionSchema(table, partition.getPartition());
    String inputFormatName = storage.getStorageFormat().getInputFormat();
    int partitionDataColumnCount = partition.getPartition().map(p -> p.getColumns().size()).orElse(table.getDataColumns().size());
    List<HivePartitionKey> partitionKeys = getPartitionKeys(table, partition.getPartition(), partitionName);
    String location = getPartitionLocation(table, partition.getPartition());
    if (location.isEmpty()) {
        checkState(!shouldCreateFilesForMissingBuckets(table, session), "Empty location is only allowed for empty temporary table when zero-row file is not created");
        return COMPLETED_FUTURE;
    }
    Path path = new Path(location);
    Configuration configuration = hdfsEnvironment.getConfiguration(hdfsContext, path);
    InputFormat<?, ?> inputFormat = getInputFormat(configuration, inputFormatName, false);
    ExtendedFileSystem fs = hdfsEnvironment.getFileSystem(hdfsContext, path);
    boolean s3SelectPushdownEnabled = shouldEnablePushdownForTable(session, table, path.toString(), partition.getPartition());
    if (inputFormat instanceof SymlinkTextInputFormat) {
        if (tableBucketInfo.isPresent()) {
            throw new PrestoException(NOT_SUPPORTED, "Bucketed table in SymlinkTextInputFormat is not yet supported");
        }
        // TODO: This should use an iterator like the HiveFileIterator
        ListenableFuture<?> lastResult = COMPLETED_FUTURE;
        for (Path targetPath : getTargetPathsFromSymlink(fs, path)) {
            // The input should be in TextInputFormat.
            TextInputFormat targetInputFormat = new TextInputFormat();
            // the splits must be generated using the file system for the target path
            // get the configuration for the target path -- it may be a different hdfs instance
            ExtendedFileSystem targetFilesystem = hdfsEnvironment.getFileSystem(hdfsContext, targetPath);
            JobConf targetJob = toJobConf(targetFilesystem.getConf());
            targetJob.setInputFormat(TextInputFormat.class);
            targetInputFormat.configure(targetJob);
            FileInputFormat.setInputPaths(targetJob, targetPath);
            InputSplit[] targetSplits = targetInputFormat.getSplits(targetJob, 0);
            InternalHiveSplitFactory splitFactory = new InternalHiveSplitFactory(targetFilesystem, inputFormat, pathDomain, getNodeSelectionStrategy(session), getMaxInitialSplitSize(session), s3SelectPushdownEnabled, new HiveSplitPartitionInfo(storage, path.toUri(), partitionKeys, partitionName, partitionDataColumnCount, partition.getTableToPartitionMapping(), Optional.empty(), partition.getRedundantColumnDomains()), schedulerUsesHostAddresses, partition.getEncryptionInformation());
            lastResult = addSplitsToSource(targetSplits, splitFactory, hiveSplitSource, stopped);
            if (stopped) {
                return COMPLETED_FUTURE;
            }
        }
        return lastResult;
    }
    Optional<HiveSplit.BucketConversion> bucketConversion = Optional.empty();
    boolean bucketConversionRequiresWorkerParticipation = false;
    if (partition.getPartition().isPresent()) {
        Optional<HiveBucketProperty> partitionBucketProperty = partition.getPartition().get().getStorage().getBucketProperty();
        if (tableBucketInfo.isPresent() && partitionBucketProperty.isPresent()) {
            int tableBucketCount = tableBucketInfo.get().getTableBucketCount();
            int partitionBucketCount = partitionBucketProperty.get().getBucketCount();
            // Determine whether a bucket conversion is needed (table and partition bucket counts differ).
            if (tableBucketCount != partitionBucketCount) {
                bucketConversion = Optional.of(new HiveSplit.BucketConversion(tableBucketCount, partitionBucketCount, tableBucketInfo.get().getBucketColumns()));
                if (tableBucketCount > partitionBucketCount) {
                    bucketConversionRequiresWorkerParticipation = true;
                }
            }
        }
    }
    InternalHiveSplitFactory splitFactory = new InternalHiveSplitFactory(fs, inputFormat, pathDomain, getNodeSelectionStrategy(session), getMaxInitialSplitSize(session), s3SelectPushdownEnabled, new HiveSplitPartitionInfo(storage, path.toUri(), partitionKeys, partitionName, partitionDataColumnCount, partition.getTableToPartitionMapping(), bucketConversionRequiresWorkerParticipation ? bucketConversion : Optional.empty(), partition.getRedundantColumnDomains()), schedulerUsesHostAddresses, partition.getEncryptionInformation());
    if (shouldUseFileSplitsFromInputFormat(inputFormat, configuration, table.getStorage().getLocation())) {
        if (tableBucketInfo.isPresent()) {
            throw new PrestoException(NOT_SUPPORTED, "Presto cannot read bucketed partition in an input format with UseFileSplitsFromInputFormat annotation: " + inputFormat.getClass().getSimpleName());
        }
        JobConf jobConf = toJobConf(configuration);
        FileInputFormat.setInputPaths(jobConf, path);
        // SerDes parameters and Table parameters passing into input format
        fromProperties(schema).forEach(jobConf::set);
        InputSplit[] splits = inputFormat.getSplits(jobConf, 0);
        return addSplitsToSource(splits, splitFactory, hiveSplitSource, stopped);
    }
    PathFilter pathFilter = isHudiParquetInputFormat(inputFormat) ? hoodiePathFilterLoadingCache.getUnchecked(configuration) : path1 -> true;
    // Streaming aggregation, S3 Select pushdown, and partial aggregation pushdown
    // all work at the granularity of individual files (S3 objects for S3 Select),
    // so files must not be split when any of them is enabled.
    // Files with header/footer lines are not splittable, except for the special
    // case skip.header.line.count=1 with no footer
    boolean splittable = isFileSplittable(session) && !isStreamingAggregationEnabled(session) && !s3SelectPushdownEnabled && !partialAggregationsPushedDown && getFooterCount(schema) == 0 && getHeaderCount(schema) <= 1;
    // Bucketed partitions are fully loaded immediately since all files must be loaded to determine the file to bucket mapping
    if (tableBucketInfo.isPresent()) {
        if (tableBucketInfo.get().isVirtuallyBucketed()) {
            // For virtual bucket, bucket conversion must not be present because there is no physical partition bucket count
            checkState(!bucketConversion.isPresent(), "Virtually bucketed table must not have partitions that are physically bucketed");
            checkState(tableBucketInfo.get().getTableBucketCount() == tableBucketInfo.get().getReadBucketCount(), "Table and read bucket count should be the same for virtual bucket");
            return hiveSplitSource.addToQueue(getVirtuallyBucketedSplits(path, fs, splitFactory, tableBucketInfo.get().getReadBucketCount(), splittable, pathFilter));
        }
        return hiveSplitSource.addToQueue(getBucketedSplits(path, fs, splitFactory, tableBucketInfo.get(), bucketConversion, partitionName, splittable, pathFilter));
    }
    fileIterators.addLast(createInternalHiveSplitIterator(path, fs, splitFactory, splittable, pathFilter, partition.getPartition()));
    return COMPLETED_FUTURE;
}
Also used : ArrayListMultimap(com.google.common.collect.ArrayListMultimap) LoadingCache(com.google.common.cache.LoadingCache) ListMultimap(com.google.common.collect.ListMultimap) HiveSessionProperties.isFileSplittable(com.facebook.presto.hive.HiveSessionProperties.isFileSplittable) HiveSessionProperties.isUseListDirectoryCache(com.facebook.presto.hive.HiveSessionProperties.isUseListDirectoryCache) FileStatus(org.apache.hadoop.fs.FileStatus) IntPredicate(java.util.function.IntPredicate) HiveUtil.getHeaderCount(com.facebook.presto.hive.HiveUtil.getHeaderCount) HiveUtil.getInputFormat(com.facebook.presto.hive.HiveUtil.getInputFormat) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) SchemaTableName(com.facebook.presto.spi.SchemaTableName) FileSplit(org.apache.hadoop.mapred.FileSplit) CharStreams(com.google.common.io.CharStreams) Configuration(org.apache.hadoop.conf.Configuration) InputFormat(org.apache.hadoop.mapred.InputFormat) Path(org.apache.hadoop.fs.Path) HiveMetadata.shouldCreateFilesForMissingBuckets(com.facebook.presto.hive.HiveMetadata.shouldCreateFilesForMissingBuckets) HIVE_INVALID_FILE_NAMES(com.facebook.presto.hive.HiveErrorCode.HIVE_INVALID_FILE_NAMES) Function(com.google.common.base.Function) FileInputFormat(org.apache.hadoop.mapred.FileInputFormat) ExtendedFileSystem(com.facebook.presto.hive.filesystem.ExtendedFileSystem) HiveWriterFactory.getBucketNumber(com.facebook.presto.hive.HiveWriterFactory.getBucketNumber) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) HiveSessionProperties.isStreamingAggregationEnabled(com.facebook.presto.hive.HiveSessionProperties.isStreamingAggregationEnabled) S3SelectPushdown.shouldEnablePushdownForTable(com.facebook.presto.hive.S3SelectPushdown.shouldEnablePushdownForTable) StandardCharsets(java.nio.charset.StandardCharsets) String.format(java.lang.String.format) HiveSessionProperties.getMaxInitialSplitSize(com.facebook.presto.hive.HiveSessionProperties.getMaxInitialSplitSize) FAIL(com.facebook.presto.hive.NestedDirectoryPolicy.FAIL) Preconditions.checkState(com.google.common.base.Preconditions.checkState) ConnectorSession(com.facebook.presto.spi.ConnectorSession) CacheLoader(com.google.common.cache.CacheLoader) List(java.util.List) NOT_SUPPORTED(com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED) HIDDEN_FILES_PATH_FILTER(org.apache.hadoop.hive.common.FileUtils.HIDDEN_FILES_PATH_FILTER) Optional(java.util.Optional) Math.max(java.lang.Math.max) CacheBuilder(com.google.common.cache.CacheBuilder) TextInputFormat(org.apache.hadoop.mapred.TextInputFormat) Table(com.facebook.presto.hive.metastore.Table) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) HiveUtil.getFooterCount(com.facebook.presto.hive.HiveUtil.getFooterCount) PathFilter(org.apache.hadoop.fs.PathFilter) MetastoreUtil.getPartitionLocation(com.facebook.presto.hive.metastore.MetastoreUtil.getPartitionLocation) PrestoException(com.facebook.presto.spi.PrestoException) Deque(java.util.Deque) HiveSessionProperties.getNodeSelectionStrategy(com.facebook.presto.hive.HiveSessionProperties.getNodeSelectionStrategy) OptionalInt(java.util.OptionalInt) Iterators(com.google.common.collect.Iterators) Partition(com.facebook.presto.hive.metastore.Partition) SymlinkTextInputFormat(org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat) ArrayList(java.util.ArrayList) IGNORED(com.facebook.presto.hive.NestedDirectoryPolicy.IGNORED) ImmutableList(com.google.common.collect.ImmutableList) 
HiveUtil.shouldUseFileSplitsFromInputFormat(com.facebook.presto.hive.HiveUtil.shouldUseFileSplitsFromInputFormat) ConfigurationUtils.toJobConf(com.facebook.presto.hive.util.ConfigurationUtils.toJobConf) Objects.requireNonNull(java.util.Objects.requireNonNull) HIVE_BAD_DATA(com.facebook.presto.hive.HiveErrorCode.HIVE_BAD_DATA) RECURSE(com.facebook.presto.hive.NestedDirectoryPolicy.RECURSE) HIVE_INVALID_BUCKET_FILES(com.facebook.presto.hive.HiveErrorCode.HIVE_INVALID_BUCKET_FILES) Futures.immediateFuture(com.google.common.util.concurrent.Futures.immediateFuture) Storage(com.facebook.presto.hive.metastore.Storage) Properties(java.util.Properties) Iterator(java.util.Iterator) InternalHiveSplitFactory(com.facebook.presto.hive.util.InternalHiveSplitFactory) HiveColumnHandle.pathColumnHandle(com.facebook.presto.hive.HiveColumnHandle.pathColumnHandle) HoodieROTablePathFilter(org.apache.hudi.hadoop.HoodieROTablePathFilter) IOException(java.io.IOException) HiveFileIterator(com.facebook.presto.hive.util.HiveFileIterator) InputStreamReader(java.io.InputStreamReader) Domain(com.facebook.presto.common.predicate.Domain) JobConf(org.apache.hadoop.mapred.JobConf) Streams.stream(com.google.common.collect.Streams.stream) InputSplit(org.apache.hadoop.mapred.InputSplit) BufferedReader(java.io.BufferedReader) Maps.fromProperties(com.google.common.collect.Maps.fromProperties) Comparator(java.util.Comparator) HiveBucketing.getVirtualBucketNumber(com.facebook.presto.hive.HiveBucketing.getVirtualBucketNumber) HiveUtil.isHudiParquetInputFormat(com.facebook.presto.hive.HiveUtil.isHudiParquetInputFormat) MetastoreUtil.getHiveSchema(com.facebook.presto.hive.metastore.MetastoreUtil.getHiveSchema)
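
Distilled, the SymlinkTextInputFormat branch above does the following for every target path listed in the symlink manifest: obtain the file system that actually holds the target (it may be a different HDFS instance), build a JobConf from that file system's configuration, configure a plain TextInputFormat against it, and generate splits. A stripped-down sketch of just that pattern follows; SymlinkTargetSplitter is an invented name, and Presto's split factory, scheduling, and stop handling are omitted.

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;

public class SymlinkTargetSplitter {
    // Generates TextInputFormat splits for each symlink target using the
    // configuration of the file system that actually holds the target.
    public static List<InputSplit> splitTargets(List<Path> targetPaths, Configuration baseConf) throws IOException {
        List<InputSplit> all = new ArrayList<>();
        for (Path targetPath : targetPaths) {
            FileSystem targetFs = targetPath.getFileSystem(baseConf);
            JobConf targetJob = new JobConf(targetFs.getConf());
            targetJob.setInputFormat(TextInputFormat.class);
            TextInputFormat format = new TextInputFormat();
            format.configure(targetJob);
            FileInputFormat.setInputPaths(targetJob, targetPath);
            // numSplits = 0 lets the format derive the split count from block sizes
            all.addAll(Arrays.asList(format.getSplits(targetJob, 0)));
        }
        return all;
    }
}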

Example 49 with TextInputFormat

use of org.apache.hadoop.mapred.TextInputFormat in project apex-malhar by apache.

the class MapOperator method getSplits.

private InputSplit[] getSplits(JobConf conf, int numSplits, String path) throws Exception {
    FileInputFormat.setInputPaths(conf, new Path(path));
    if (inputFormat == null) {
        inputFormat = inputFormatClass.newInstance();
        String inputFormatClassName = inputFormatClass.getName();
        // these two formats implement JobConfigurable and must be configured before use
        if (inputFormatClassName.equals("org.apache.hadoop.mapred.TextInputFormat")) {
            ((TextInputFormat) inputFormat).configure(conf);
        } else if (inputFormatClassName.equals("org.apache.hadoop.mapred.KeyValueTextInputFormat")) {
            ((KeyValueTextInputFormat) inputFormat).configure(conf);
        }
    }
    return inputFormat.getSplits(conf, numSplits);
}
Also used : Path(org.apache.hadoop.fs.Path) TextInputFormat(org.apache.hadoop.mapred.TextInputFormat) KeyValueTextInputFormat(org.apache.hadoop.mapred.KeyValueTextInputFormat)
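
The string comparison on class names above only covers the two formats it expects. Both implement Hadoop's JobConfigurable interface, so an instanceof check would handle any configurable format without hard-coding names. A hedged alternative sketch, not how apex-malhar actually does it:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;

public class SplitHelper {
    // Generic variant: any InputFormat implementing JobConfigurable
    // (TextInputFormat, KeyValueTextInputFormat, ...) gets configured,
    // without hard-coding class names.
    public static InputSplit[] getSplits(InputFormat<?, ?> inputFormat, JobConf conf, int numSplits, String path) throws Exception {
        FileInputFormat.setInputPaths(conf, new Path(path));
        if (inputFormat instanceof JobConfigurable) {
            ((JobConfigurable) inputFormat).configure(conf);
        }
        return inputFormat.getSplits(conf, numSplits);
    }
}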

Aggregations

TextInputFormat (org.apache.hadoop.mapred.TextInputFormat): 49
InputSplit (org.apache.hadoop.mapred.InputSplit): 39
IOException (java.io.IOException): 26
Path (org.apache.hadoop.fs.Path): 25
JobConf (org.apache.hadoop.mapred.JobConf): 24
LongWritable (org.apache.hadoop.io.LongWritable): 19
Text (org.apache.hadoop.io.Text): 19
ArrayList (java.util.ArrayList): 16
DMLRuntimeException (org.apache.sysml.runtime.DMLRuntimeException): 14
ExecutorService (java.util.concurrent.ExecutorService): 12
Future (java.util.concurrent.Future): 8
FileSystem (org.apache.hadoop.fs.FileSystem): 8
FastStringTokenizer (org.apache.sysml.runtime.util.FastStringTokenizer): 6
Configuration (org.apache.hadoop.conf.Configuration): 4
Pair (org.apache.sysml.runtime.matrix.data.Pair): 4
LinkedList (java.util.LinkedList): 3
Properties (java.util.Properties): 3
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment): 3
HadoopOutputFormat (org.apache.flink.api.java.hadoop.mapred.HadoopOutputFormat): 3
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 3