Example 11 with FileAttribute

Use of java.nio.file.attribute.FileAttribute in project jena by apache.

From the class AbstractBlankNodeTests, method blank_node_identity_02.

/**
     * Test that starts with two blank nodes in two different files and checks
     * that writing them to a single file does not conflate them
     * 
     * @throws IOException
     * @throws InterruptedException
     */
@Test
public void blank_node_identity_02() throws IOException, InterruptedException {
    Assume.assumeTrue("Requires ParserProfile be respected", this.respectsParserProfile());
    Assume.assumeFalse("Requires that Blank Node identity not be preserved", this.preservesBlankNodeIdentity());
    // Temporary files
    File a = File.createTempFile("bnode_identity", getInitialInputExtension());
    File b = File.createTempFile("bnode_identity", getInitialInputExtension());
    File intermediateOutputDir = Files.createTempDirectory("bnode_identity", new FileAttribute[0]).toFile();
    try {
        // Prepare the input data
        // The same blank node is written to two different files, so the two
        // occurrences must be treated as different blank nodes and not converge
        List<T> tuples = new ArrayList<>();
        Node bnode = NodeFactory.createBlankNode();
        Node pred = NodeFactory.createURI("http://example.org/predicate");
        tuples.add(createTuple(bnode, pred, NodeFactory.createLiteral("first")));
        writeTuples(a, tuples);
        tuples.clear();
        tuples.add(createTuple(bnode, pred, NodeFactory.createLiteral("second")));
        writeTuples(b, tuples);
        // Set up fake job which will process the two files
        Configuration config = new Configuration(true);
        InputFormat<LongWritable, TValue> inputFormat = createInitialInputFormat();
        Job job = Job.getInstance(config);
        job.setInputFormatClass(inputFormat.getClass());
        NLineInputFormat.setNumLinesPerSplit(job, 100);
        FileInputFormat.setInputPaths(job, new Path(a.getAbsolutePath()), new Path(b.getAbsolutePath()));
        FileOutputFormat.setOutputPath(job, new Path(intermediateOutputDir.getAbsolutePath()));
        JobContext context = new JobContextImpl(job.getConfiguration(), job.getJobID());
        // Get the splits
        List<InputSplit> splits = inputFormat.getSplits(context);
        Assert.assertEquals(2, splits.size());
        // Prepare the output writing - putting all output to a single file
        OutputFormat<LongWritable, TValue> outputFormat = createIntermediateOutputFormat();
        TaskAttemptContext outputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), createAttemptID(1, 2, 1));
        RecordWriter<LongWritable, TValue> writer = outputFormat.getRecordWriter(outputTaskContext);
        for (InputSplit split : splits) {
            // Initialize the input reading
            TaskAttemptContext inputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), createAttemptID(1, 1, 1));
            RecordReader<LongWritable, TValue> reader = inputFormat.createRecordReader(split, inputTaskContext);
            reader.initialize(split, inputTaskContext);
            // Copy the input across to the single output file
            while (reader.nextKeyValue()) {
                writer.write(reader.getCurrentKey(), reader.getCurrentValue());
            }
        }
        writer.close(outputTaskContext);
        // Promote outputs from temporary status
        promoteInputs(intermediateOutputDir);
        // Now we need to create a subsequent job that reads the
        // intermediate outputs
        // The blank nodes should have been given separate identities, so we
        // should not be conflating them; this is the opposite of the problem
        // described in JENA-820
        LOGGER.debug("Intermediate output directory is {}", intermediateOutputDir.getAbsolutePath());
        job = Job.getInstance(config);
        inputFormat = createIntermediateInputFormat();
        job.setInputFormatClass(inputFormat.getClass());
        NLineInputFormat.setNumLinesPerSplit(job, 100);
        FileInputFormat.setInputPaths(job, new Path(intermediateOutputDir.getAbsolutePath()));
        context = new JobContextImpl(job.getConfiguration(), job.getJobID());
        // Get the splits
        splits = inputFormat.getSplits(context);
        Assert.assertEquals(1, splits.size());
        // Expect to end up with two distinct blank nodes
        Set<Node> nodes = new HashSet<Node>();
        for (InputSplit split : splits) {
            TaskAttemptContext inputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), new TaskAttemptID());
            RecordReader<LongWritable, TValue> reader = inputFormat.createRecordReader(split, inputTaskContext);
            reader.initialize(split, inputTaskContext);
            while (reader.nextKeyValue()) {
                nodes.add(getSubject(reader.getCurrentValue().get()));
            }
        }
        // Nodes must not have been conflated
        Assert.assertEquals(2, nodes.size());
    } finally {
        a.delete();
        b.delete();
        deleteDirectory(intermediateOutputDir);
    }
}
Also used: Path(org.apache.hadoop.fs.Path), JobContextImpl(org.apache.hadoop.mapreduce.task.JobContextImpl), Configuration(org.apache.hadoop.conf.Configuration), TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID), Node(org.apache.jena.graph.Node), ArrayList(java.util.ArrayList), TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext), TaskAttemptContextImpl(org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl), LongWritable(org.apache.hadoop.io.LongWritable), JobContext(org.apache.hadoop.mapreduce.JobContext), Job(org.apache.hadoop.mapreduce.Job), File(java.io.File), InputSplit(org.apache.hadoop.mapreduce.InputSplit), FileAttribute(java.nio.file.attribute.FileAttribute), HashSet(java.util.HashSet), Test(org.junit.Test)
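A side note on the temp-directory setup above: Files.createTempDirectory declares its attribute parameter as varargs (FileAttribute<?>... attrs), so the explicit empty array is redundant. A minimal equivalent sketch:

// Equivalent to Files.createTempDirectory("bnode_identity", new FileAttribute[0]);
// when no attributes are needed, the varargs parameter can simply be omitted.
File intermediateOutputDir = Files.createTempDirectory("bnode_identity").toFile();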

Example 12 with FileAttribute

Use of java.nio.file.attribute.FileAttribute in project jena by apache.

From the class AbstractBlankNodeTests, method blank_node_divergence_01.

/**
     * Test that starts with two blank nodes with the same identity in a single
     * file, splits them over two files and checks that we can work around
     * JENA-820 successfully by setting the
     * {@link RdfIOConstants#GLOBAL_BNODE_IDENTITY} flag for our subsequent job
     * 
     * @throws IOException
     * @throws InterruptedException
     */
@Test
public final void blank_node_divergence_01() throws IOException, InterruptedException {
    Assume.assumeTrue("Requires ParserProfile be respected", this.respectsParserProfile());
    Assume.assumeFalse("Requires that Blank Node identity not be preserved", this.preservesBlankNodeIdentity());
    // Temporary files
    File a = File.createTempFile("bnode_divergence", getInitialInputExtension());
    File intermediateOutputDir = Files.createTempDirectory("bnode_divergence", new FileAttribute[0]).toFile();
    try {
        // Prepare the input data
        // Two mentions of the same blank node in the same file
        List<T> tuples = new ArrayList<>();
        Node bnode = NodeFactory.createBlankNode();
        Node pred = NodeFactory.createURI("http://example.org/predicate");
        tuples.add(createTuple(bnode, pred, NodeFactory.createLiteral("first")));
        tuples.add(createTuple(bnode, pred, NodeFactory.createLiteral("second")));
        writeTuples(a, tuples);
        // Set up fake job which will process the file as a single split
        Configuration config = new Configuration(true);
        InputFormat<LongWritable, TValue> inputFormat = createInitialInputFormat();
        Job job = Job.getInstance(config);
        job.setInputFormatClass(inputFormat.getClass());
        NLineInputFormat.setNumLinesPerSplit(job, 100);
        FileInputFormat.setInputPaths(job, new Path(a.getAbsolutePath()));
        FileOutputFormat.setOutputPath(job, new Path(intermediateOutputDir.getAbsolutePath()));
        JobContext context = new JobContextImpl(job.getConfiguration(), job.getJobID());
        // Get the splits
        List<InputSplit> splits = inputFormat.getSplits(context);
        Assert.assertEquals(1, splits.size());
        for (InputSplit split : splits) {
            // Initialize the input reading
            TaskAttemptContext inputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), createAttemptID(1, 1, 1));
            RecordReader<LongWritable, TValue> reader = inputFormat.createRecordReader(split, inputTaskContext);
            reader.initialize(split, inputTaskContext);
            // Copy the input to the output - each triple goes to a separate
            // output file
            // This is how we force multiple files to be produced
            int taskID = 1;
            while (reader.nextKeyValue()) {
                // Prepare the output writing
                OutputFormat<LongWritable, TValue> outputFormat = createIntermediateOutputFormat();
                TaskAttemptContext outputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), createAttemptID(1, ++taskID, 1));
                RecordWriter<LongWritable, TValue> writer = outputFormat.getRecordWriter(outputTaskContext);
                writer.write(reader.getCurrentKey(), reader.getCurrentValue());
                writer.close(outputTaskContext);
            }
        }
        // Promote outputs from temporary status
        promoteInputs(intermediateOutputDir);
        // Now we need to create a subsequent job that reads the
        // intermediate outputs
        // As described in JENA-820, at this point the blank nodes are
        // consistent; however, when we read them back from different files
        // they are by default treated as different nodes, so the blank nodes
        // diverge, which is incorrect and undesirable in multi-stage pipelines
        LOGGER.debug("Intermediate output directory is {}", intermediateOutputDir.getAbsolutePath());
        job = Job.getInstance(config);
        inputFormat = createIntermediateInputFormat();
        job.setInputFormatClass(inputFormat.getClass());
        FileInputFormat.setInputPaths(job, new Path(intermediateOutputDir.getAbsolutePath()));
        // Enabling this flag works around the JENA-820 issue
        job.getConfiguration().setBoolean(RdfIOConstants.GLOBAL_BNODE_IDENTITY, true);
        context = new JobContextImpl(job.getConfiguration(), job.getJobID());
        // Get the splits
        splits = inputFormat.getSplits(context);
        Assert.assertEquals(2, splits.size());
        // Expect to end up with a single blank node
        Set<Node> nodes = new HashSet<Node>();
        for (InputSplit split : splits) {
            TaskAttemptContext inputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), new TaskAttemptID());
            RecordReader<LongWritable, TValue> reader = inputFormat.createRecordReader(split, inputTaskContext);
            reader.initialize(split, inputTaskContext);
            while (reader.nextKeyValue()) {
                nodes.add(getSubject(reader.getCurrentValue().get()));
            }
        }
        // Nodes should not have diverged
        Assert.assertEquals(1, nodes.size());
    } finally {
        a.delete();
        deleteDirectory(intermediateOutputDir);
    }
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID), Node(org.apache.jena.graph.Node), ArrayList(java.util.ArrayList), TaskAttemptContextImpl(org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl), LongWritable(org.apache.hadoop.io.LongWritable), JobContext(org.apache.hadoop.mapreduce.JobContext), Job(org.apache.hadoop.mapreduce.Job), InputSplit(org.apache.hadoop.mapreduce.InputSplit), HashSet(java.util.HashSet), Path(org.apache.hadoop.fs.Path), JobContextImpl(org.apache.hadoop.mapreduce.task.JobContextImpl), TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext), File(java.io.File), FileAttribute(java.nio.file.attribute.FileAttribute), Test(org.junit.Test)
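The essential part of the JENA-820 workaround described in the javadoc is a single flag on the downstream job's configuration. A condensed sketch, with the rest of the job setup as in the test above:

Job downstream = Job.getInstance(config);
downstream.setInputFormatClass(createIntermediateInputFormat().getClass());
// With global blank node identity enabled, the same blank node label read from
// different intermediate files maps back to one node instead of diverging.
downstream.getConfiguration().setBoolean(RdfIOConstants.GLOBAL_BNODE_IDENTITY, true);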

Example 13 with FileAttribute

Use of java.nio.file.attribute.FileAttribute in project qpid-broker-j by apache.

From the class AESKeyFileEncrypterFactory, method createEmptyKeyFile.

private void createEmptyKeyFile(File file) throws IOException {
    final Path parentFilePath = file.getAbsoluteFile().getParentFile().toPath();
    if (isPosixFileSystem(file)) {
        Set<PosixFilePermission> ownerOnly = EnumSet.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE);
        Files.createDirectories(parentFilePath, PosixFilePermissions.asFileAttribute(ownerOnly));
        Files.createFile(file.toPath(), PosixFilePermissions.asFileAttribute(EnumSet.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE)));
    } else if (isAclFileSystem(file)) {
        Files.createDirectories(parentFilePath);
        final UserPrincipal owner = Files.getOwner(parentFilePath);
        AclFileAttributeView attributeView = Files.getFileAttributeView(parentFilePath, AclFileAttributeView.class);
        List<AclEntry> acls = new ArrayList<>(attributeView.getAcl());
        ListIterator<AclEntry> iter = acls.listIterator();
        boolean found = false;
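        // Keep only ACL entries belonging to the directory owner, and widen the
        // owner's ALLOW entry so new entries can be created in the key directory.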
        while (iter.hasNext()) {
            AclEntry acl = iter.next();
            if (!owner.equals(acl.principal())) {
                iter.remove();
            } else if (acl.type() == AclEntryType.ALLOW) {
                found = true;
                AclEntry.Builder builder = AclEntry.newBuilder(acl);
                Set<AclEntryPermission> permissions = acl.permissions().isEmpty() ? new HashSet<AclEntryPermission>() : EnumSet.copyOf(acl.permissions());
                permissions.addAll(Arrays.asList(AclEntryPermission.ADD_FILE, AclEntryPermission.ADD_SUBDIRECTORY, AclEntryPermission.LIST_DIRECTORY));
                builder.setPermissions(permissions);
                iter.set(builder.build());
            }
        }
        if (!found) {
            AclEntry.Builder builder = AclEntry.newBuilder();
            builder.setPermissions(AclEntryPermission.ADD_FILE, AclEntryPermission.ADD_SUBDIRECTORY, AclEntryPermission.LIST_DIRECTORY);
            builder.setType(AclEntryType.ALLOW);
            builder.setPrincipal(owner);
            acls.add(builder.build());
        }
        attributeView.setAcl(acls);
        Files.createFile(file.toPath(), new FileAttribute<List<AclEntry>>() {

            @Override
            public String name() {
                return "acl:acl";
            }

            @Override
            public List<AclEntry> value() {
                AclEntry.Builder builder = AclEntry.newBuilder();
                builder.setType(AclEntryType.ALLOW);
                builder.setPermissions(EnumSet.allOf(AclEntryPermission.class));
                builder.setPrincipal(owner);
                return Collections.singletonList(builder.build());
            }
        });
    } else {
        throw new IllegalArgumentException("Unable to determine a mechanism to protect access to the key file on this filesystem");
    }
}
Also used: Path(java.nio.file.Path), AclFileAttributeView(java.nio.file.attribute.AclFileAttributeView), AclEntry(java.nio.file.attribute.AclEntry), AclEntryPermission(java.nio.file.attribute.AclEntryPermission), PosixFilePermission(java.nio.file.attribute.PosixFilePermission), ListIterator(java.util.ListIterator), UserPrincipal(java.nio.file.attribute.UserPrincipal), ArrayList(java.util.ArrayList), List(java.util.List), FileAttribute(java.nio.file.attribute.FileAttribute)
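The POSIX branch above can be exercised in isolation. The following is a minimal sketch of a hypothetical helper (POSIX file systems only), showing how PosixFilePermissions.asFileAttribute turns a permission set into a FileAttribute that is applied atomically at creation time:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.EnumSet;
import java.util.Set;

static void createOwnerOnlyKeyFile(Path keyFile) throws IOException {
    Set<PosixFilePermission> ownerOnlyDir = EnumSet.of(PosixFilePermission.OWNER_READ,
            PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE);
    // asFileAttribute wraps the set as the "posix:permissions" attribute, so the
    // directory is never visible with broader permissions than intended.
    FileAttribute<Set<PosixFilePermission>> dirAttr = PosixFilePermissions.asFileAttribute(ownerOnlyDir);
    Files.createDirectories(keyFile.getParent(), dirAttr);
    Files.createFile(keyFile, PosixFilePermissions.asFileAttribute(
            EnumSet.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE)));
}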

Example 14 with FileAttribute

Use of java.nio.file.attribute.FileAttribute in project vcell by virtualcell.

From the class CopasiOptimizationSolver, method solveLocalPython.

public static OptimizationResultSet solveLocalPython(ParameterEstimationTaskSimulatorIDA parestSimulator, ParameterEstimationTask parameterEstimationTask, CopasiOptSolverCallbacks optSolverCallbacks, MathMappingCallback mathMappingCallback) throws IOException, ExpressionException, OptimizationException {
    File dir = Files.createTempDirectory("parest", new FileAttribute<?>[] {}).toFile();
    try {
        String prefix = "testing_" + new Random().nextInt(10000); // nextInt(bound) is already non-negative
        File optProblemThriftFile = new File(dir, prefix + ".optprob.bin");
        File optRunFile = new File(dir, prefix + ".optrun.bin");
        // 
        // Setup Python COPASI opt problem and write to disk
        // 
        OptProblem optProblem = CopasiServicePython.makeOptProblem(parameterEstimationTask);
        CopasiServicePython.writeOptProblem(optProblemThriftFile, optProblem);
        // 
        // run Python COPASI opt problem
        // 
        CopasiServicePython.runCopasiPython(optProblemThriftFile, optRunFile);
        if (!optRunFile.exists()) {
            throw new RuntimeException("COPASI optimization output file not found:\n" + optRunFile.getAbsolutePath());
        }
        OptRun optRun = CopasiServicePython.readOptRun(optRunFile);
        OptResultSet optResultSet = optRun.getOptResultSet();
        int numFittedParameters = optResultSet.getOptParameterValues().size();
        String[] paramNames = new String[numFittedParameters];
        double[] paramValues = new double[numFittedParameters];
        for (int pIndex = 0; pIndex < numFittedParameters; pIndex++) {
            OptParameterValue optParamValue = optResultSet.getOptParameterValues().get(pIndex);
            paramNames[pIndex] = optParamValue.parameterName;
            paramValues[pIndex] = optParamValue.bestValue;
        }
        OptimizationStatus status = new OptimizationStatus(OptimizationStatus.NORMAL_TERMINATION, optRun.statusMessage);
        OptRunResultSet optRunResultSet = new OptRunResultSet(paramValues, optResultSet.objectiveFunction, optResultSet.numFunctionEvaluations, status);
        OptSolverResultSet copasiOptSolverResultSet = new OptSolverResultSet(paramNames, optRunResultSet);
        RowColumnResultSet copasiRcResultSet = parestSimulator.getRowColumnRestultSetByBestEstimations(parameterEstimationTask, paramNames, paramValues);
        OptimizationResultSet copasiOptimizationResultSet = new OptimizationResultSet(copasiOptSolverResultSet, copasiRcResultSet);
        System.out.println("-----------SOLUTION FROM PYTHON---------------\n" + optResultSet.toString());
        return copasiOptimizationResultSet;
    } catch (Throwable e) {
        e.printStackTrace(System.out);
        throw new OptimizationException(e.getCause() != null ? e.getCause().getMessage() : e.getMessage());
    } finally {
        if (dir != null && dir.exists()) {
            FileUtils.deleteDirectory(dir);
        }
    }
}
Also used: OptimizationException(cbit.vcell.opt.OptimizationException), OptProblem(org.vcell.optimization.thrift.OptProblem), OptimizationResultSet(cbit.vcell.opt.OptimizationResultSet), OptParameterValue(org.vcell.optimization.thrift.OptParameterValue), OptSolverResultSet(cbit.vcell.opt.OptSolverResultSet), Random(java.util.Random), OptRun(org.vcell.optimization.thrift.OptRun), OptResultSet(org.vcell.optimization.thrift.OptResultSet), OptimizationStatus(cbit.vcell.opt.OptimizationStatus), OptRunResultSet(cbit.vcell.opt.OptSolverResultSet.OptRunResultSet), File(java.io.File), FileAttribute(java.nio.file.attribute.FileAttribute), RowColumnResultSet(cbit.vcell.math.RowColumnResultSet)
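As in the Jena examples, the empty FileAttribute array passed to Files.createTempDirectory is optional, and the commons-io FileUtils.deleteDirectory call in the finally block has a pure-NIO equivalent. A minimal sketch (deleteRecursively is a hypothetical helper):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

static void deleteRecursively(Path dir) throws IOException {
    // Sort the walk in reverse order so children are deleted before their parents.
    try (Stream<Path> walk = Files.walk(dir)) {
        walk.sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete);
    }
}

With this, the solver could call Files.createTempDirectory("parest") directly and clean up with deleteRecursively(dir.toPath()) in the finally block.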

Example 15 with FileAttribute

Use of java.nio.file.attribute.FileAttribute in project mycore by MyCoRe-Org.

From the class MCRFileSystemProvider, method newByteChannel.

/* (non-Javadoc)
     * @see java.nio.file.spi.FileSystemProvider#newByteChannel(java.nio.file.Path, java.util.Set, java.nio.file.attribute.FileAttribute[])
     */
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> options, FileAttribute<?>... attrs) throws IOException {
    if (attrs.length > 0) {
        throw new UnsupportedOperationException("Atomically setting file attributes is not supported.");
    }
    MCRPath ifsPath = MCRFileSystemUtils.checkPathAbsolute(path);
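    // CREATE and CREATE_NEW are handled separately via getMCRFile below, so they
    // are stripped from the options handed to the file channel.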
    Set<? extends OpenOption> fileOpenOptions = options.stream().filter(option -> !(option == StandardOpenOption.CREATE || option == StandardOpenOption.CREATE_NEW)).collect(Collectors.toSet());
    boolean create = options.contains(StandardOpenOption.CREATE);
    boolean createNew = options.contains(StandardOpenOption.CREATE_NEW);
    if (create || createNew) {
        for (OpenOption option : fileOpenOptions) {
            // check before we create any file instance
            MCRFile.checkOpenOption(option);
        }
    }
    MCRFile mcrFile = MCRFileSystemUtils.getMCRFile(ifsPath, create, createNew);
    if (mcrFile == null) {
        throw new NoSuchFileException(path.toString());
    }
    return mcrFile.getFileChannel(fileOpenOptions);
}
Also used: NoSuchFileException(java.nio.file.NoSuchFileException), LoadingCache(com.google.common.cache.LoadingCache), FileTime(java.nio.file.attribute.FileTime), MCRFileAttributes(org.mycore.datamodel.niofs.MCRFileAttributes), DirectoryStream(java.nio.file.DirectoryStream), MCRMD5AttributeView(org.mycore.datamodel.niofs.MCRMD5AttributeView), BasicFileAttributeView(java.nio.file.attribute.BasicFileAttributeView), Map(java.util.Map), URI(java.net.URI), Path(java.nio.file.Path), EnumSet(java.util.EnumSet), MCRPath(org.mycore.datamodel.niofs.MCRPath), FileAttributeView(java.nio.file.attribute.FileAttributeView), StandardOpenOption(java.nio.file.StandardOpenOption), Set(java.util.Set), FileAttribute(java.nio.file.attribute.FileAttribute), FileSystem(java.nio.file.FileSystem), Collectors(java.util.stream.Collectors), Sets(com.google.common.collect.Sets), Objects(java.util.Objects), CacheLoader(com.google.common.cache.CacheLoader), SeekableByteChannel(java.nio.channels.SeekableByteChannel), FileSystemAlreadyExistsException(java.nio.file.FileSystemAlreadyExistsException), Logger(org.apache.logging.log4j.Logger), MCRFilesystemNode(org.mycore.datamodel.ifs.MCRFilesystemNode), Optional(java.util.Optional), CacheBuilder(com.google.common.cache.CacheBuilder), AccessDeniedException(java.nio.file.AccessDeniedException), MCRContentStoreFactory(org.mycore.datamodel.ifs.MCRContentStoreFactory), CopyOption(java.nio.file.CopyOption), HashMap(java.util.HashMap), MCRConfiguration(org.mycore.common.config.MCRConfiguration), FileSystemProvider(java.nio.file.spi.FileSystemProvider), MCRAbstractFileSystem(org.mycore.datamodel.niofs.MCRAbstractFileSystem), AccessMode(java.nio.file.AccessMode), StandardCopyOption(java.nio.file.StandardCopyOption), HashSet(java.util.HashSet), LinkOption(java.nio.file.LinkOption), InvalidPathException(java.nio.file.InvalidPathException), MCRDirectory(org.mycore.datamodel.ifs.MCRDirectory), UncheckedExecutionException(com.google.common.util.concurrent.UncheckedExecutionException), Filter(java.nio.file.DirectoryStream.Filter), MCRFile(org.mycore.datamodel.ifs.MCRFile), FileStore(java.nio.file.FileStore), FileSystemNotFoundException(java.nio.file.FileSystemNotFoundException), Files(java.nio.file.Files), OpenOption(java.nio.file.OpenOption), NotDirectoryException(java.nio.file.NotDirectoryException), DirectoryNotEmptyException(java.nio.file.DirectoryNotEmptyException), IOException(java.io.IOException), FileAlreadyExistsException(java.nio.file.FileAlreadyExistsException), BasicFileAttributes(java.nio.file.attribute.BasicFileAttributes), File(java.io.File), AtomicMoveNotSupportedException(java.nio.file.AtomicMoveNotSupportedException), Collections(java.util.Collections), LogManager(org.apache.logging.log4j.LogManager)
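Seen from the caller's side, any attribute handed to Files.newByteChannel arrives in the attrs varargs checked at the top of the method above. A sketch of what a caller would observe (the Path is assumed to resolve inside the MyCoRe file system):

import java.io.IOException;
import java.nio.channels.SeekableByteChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.EnumSet;

static SeekableByteChannel openWithAttrs(Path mcrPath) throws IOException {
    // attrs.length > 0 inside newByteChannel, so the MyCoRe provider rejects
    // this call with UnsupportedOperationException.
    return Files.newByteChannel(mcrPath,
            EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.WRITE),
            PosixFilePermissions.asFileAttribute(EnumSet.of(PosixFilePermission.OWNER_READ)));
}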

Aggregations

FileAttribute (java.nio.file.attribute.FileAttribute): 20
File (java.io.File): 15
Path (java.nio.file.Path): 7
HashSet (java.util.HashSet): 7
BeforeClass (org.junit.BeforeClass): 7
ArrayList (java.util.ArrayList): 6
Test (org.junit.Test): 5
IOException (java.io.IOException): 4
Set (java.util.Set): 4
Configuration (org.apache.hadoop.conf.Configuration): 4
Path (org.apache.hadoop.fs.Path): 4
LongWritable (org.apache.hadoop.io.LongWritable): 4
InputSplit (org.apache.hadoop.mapreduce.InputSplit): 4
Job (org.apache.hadoop.mapreduce.Job): 4
JobContext (org.apache.hadoop.mapreduce.JobContext): 4
TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext): 4
TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID): 4
JobContextImpl (org.apache.hadoop.mapreduce.task.JobContextImpl): 4
TaskAttemptContextImpl (org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl): 4
Node (org.apache.jena.graph.Node): 4