Search in sources :

Example 1 with BasicMapReduceTaskContext

Use of io.cdap.cdap.internal.app.runtime.batch.BasicMapReduceTaskContext in project cdap by caskdata.

Source: the commitJob method of the DynamicPartitioningOutputCommitter class.

@Override
public void commitJob(JobContext context) throws IOException {
    Configuration conf = context.getConfiguration();
    MapReduceClassLoader loader = MapReduceClassLoader.getFromConfiguration(conf);
    BasicMapReduceTaskContext mrTaskContext = loader.getTaskContextProvider().get(this.taskContext);
    String datasetName = conf.get(Constants.Dataset.Partitioned.HCONF_ATTR_OUTPUT_DATASET);
    outputDataset = mrTaskContext.getDataset(datasetName);
    DynamicPartitioner.PartitionWriteOption writeOption = DynamicPartitioner.PartitionWriteOption.valueOf(conf.get(PartitionedFileSetArguments.DYNAMIC_PARTITIONER_WRITE_OPTION));
    Partitioning partitioning = outputDataset.getPartitioning();
    partitionsToAdd = new HashMap<>();
    // Walk every file each task committed and derive the partition key it belongs to.
    FileStatus[] committedPaths = getAllCommittedTaskPaths(context);
    for (FileStatus committedPath : committedPaths) {
        FileSystem fs = committedPath.getPath().getFileSystem(conf);
        RemoteIterator<LocatedFileStatus> files = fs.listFiles(committedPath.getPath(), true);
        while (files.hasNext()) {
            Path file = files.next().getPath();
            String relPath = getRelative(committedPath.getPath(), file);
            int sepIdx = relPath.lastIndexOf(Path.SEPARATOR);
            if (sepIdx == -1) {
                // A well-formed relative path always carries at least one partition-key
                // directory plus the output file name, so this indicates a stray file.
                LOG.warn("Skipping path '{}'. It's relative path '{}' has fewer than two parts", file, relPath);
                continue;
            }
            // Example: relPath   = "../key1/key2/part-m-00000"
            //          relDir    = "../key1/key2"
            //          file name = "part-m-00000"
            String relDir = relPath.substring(0, sepIdx);
            Path targetDir = new Path(FileOutputFormat.getOutputPath(context), relDir);
            // CREATE semantics forbid clobbering a partition directory that already exists.
            if (writeOption == DynamicPartitioner.PartitionWriteOption.CREATE && fs.exists(targetDir)) {
                throw new FileAlreadyExistsException("Final output path already exists: " + targetDir);
            }
            partitionsToAdd.put(relDir, getPartitionKey(partitioning, relDir));
        }
    }
    // For overwrite semantics, drop pre-existing partitions before the temporary
    // content is moved into place.
    if (writeOption == DynamicPartitioner.PartitionWriteOption.CREATE_OR_OVERWRITE) {
        for (Map.Entry<String, PartitionKey> entry : partitionsToAdd.entrySet()) {
            if (outputDataset.getPartition(entry.getValue()) != null) {
                // Dropping the partition (rather than deleting only its files) allows
                // the existing files to be reinstated on rollback. The trade-off is
                // extra explore operations; deleting files alone would leave stale metadata.
                outputDataset.dropPartition(entry.getValue());
            }
        }
    }
    // The configured outputDir carries a _temporary_jobId suffix, so the merge target
    // is the parent — the FileOutputFormat output path itself.
    Path finalOutput = FileOutputFormat.getOutputPath(context);
    FileContext fc = FileContext.getFileContext(conf);
    // Qualify finalOutput: the 'from' paths carry scheme/authority but finalOutput does not.
    finalOutput = fc.makeQualified(finalOutput);
    for (FileStatus from : getAllCommittedTaskPaths(context)) {
        mergePaths(fc, from, finalOutput);
    }
    // Metadata attached to every partition created by this job.
    Map<String, String> metadata = ConfigurationUtil.getNamedConfigurations(this.taskContext.getConfiguration(), PartitionedFileSetArguments.OUTPUT_PARTITION_METADATA_PREFIX);
    boolean allowAppend = writeOption == DynamicPartitioner.PartitionWriteOption.CREATE_OR_APPEND;
    // Register each collected partition with the dataset.
    for (Map.Entry<String, PartitionKey> entry : partitionsToAdd.entrySet()) {
        outputDataset.addPartition(entry.getValue(), entry.getKey(), metadata, true, allowAppend);
    }
    // Remove the job-specific _temporary directory.
    cleanupJob(context);
    // Drop a _SUCCESS marker into each final partition directory unless disabled (default = true).
    if (conf.getBoolean(SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, true)) {
        for (String relDir : partitionsToAdd.keySet()) {
            Path markerDir = new Path(finalOutput, relDir);
            createOrUpdate(fc, new Path(markerDir, SUCCEEDED_FILE_NAME));
            // When appending, also write a run-scoped _SUCCESS-<RunId> marker.
            if (allowAppend) {
                createOrUpdate(fc, new Path(markerDir, SUCCEEDED_FILE_NAME + "-" + mrTaskContext.getProgramRunId().getRun()));
            }
        }
    }
}
Also used : BasicMapReduceTaskContext(io.cdap.cdap.internal.app.runtime.batch.BasicMapReduceTaskContext) Path(org.apache.hadoop.fs.Path) MapReduceClassLoader(io.cdap.cdap.internal.app.runtime.batch.MapReduceClassLoader) FileAlreadyExistsException(org.apache.hadoop.mapred.FileAlreadyExistsException) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) Configuration(org.apache.hadoop.conf.Configuration) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) Partitioning(io.cdap.cdap.api.dataset.lib.Partitioning) FileSystem(org.apache.hadoop.fs.FileSystem) PartitionKey(io.cdap.cdap.api.dataset.lib.PartitionKey) DynamicPartitioner(io.cdap.cdap.api.dataset.lib.DynamicPartitioner) HashMap(java.util.HashMap) Map(java.util.Map) FileContext(org.apache.hadoop.fs.FileContext)

Aggregations

DynamicPartitioner (io.cdap.cdap.api.dataset.lib.DynamicPartitioner)1 PartitionKey (io.cdap.cdap.api.dataset.lib.PartitionKey)1 Partitioning (io.cdap.cdap.api.dataset.lib.Partitioning)1 BasicMapReduceTaskContext (io.cdap.cdap.internal.app.runtime.batch.BasicMapReduceTaskContext)1 MapReduceClassLoader (io.cdap.cdap.internal.app.runtime.batch.MapReduceClassLoader)1 HashMap (java.util.HashMap)1 Map (java.util.Map)1 Configuration (org.apache.hadoop.conf.Configuration)1 FileContext (org.apache.hadoop.fs.FileContext)1 FileStatus (org.apache.hadoop.fs.FileStatus)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus)1 Path (org.apache.hadoop.fs.Path)1 FileAlreadyExistsException (org.apache.hadoop.mapred.FileAlreadyExistsException)1