Use of java.nio.file.attribute.FileAttribute in project jena by apache.
From the class AbstractBlankNodeTests, method blank_node_identity_02:
/**
 * Test that starts with two blank nodes in two different files and checks
 * that writing them to a single file does not conflate them
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void blank_node_identity_02() throws IOException, InterruptedException {
    Assume.assumeTrue("Requires ParserProfile be respected", this.respectsParserProfile());
    Assume.assumeFalse("Requires that Blank Node identity not be preserved", this.preservesBlankNodeIdentity());
    // Temporary files
    File a = File.createTempFile("bnode_identity", getInitialInputExtension());
    File b = File.createTempFile("bnode_identity", getInitialInputExtension());
    File intermediateOutputDir = Files.createTempDirectory("bnode_identity", new FileAttribute[0]).toFile();
    try {
        // Prepare the input data
        // Same blank node but in different files so must be treated as
        // different blank nodes and not converge
        List<T> tuples = new ArrayList<>();
        Node bnode = NodeFactory.createBlankNode();
        Node pred = NodeFactory.createURI("http://example.org/predicate");
        tuples.add(createTuple(bnode, pred, NodeFactory.createLiteral("first")));
        writeTuples(a, tuples);
        tuples.clear();
        tuples.add(createTuple(bnode, pred, NodeFactory.createLiteral("second")));
        writeTuples(b, tuples);
        // Set up fake job which will process the two files
        Configuration config = new Configuration(true);
        InputFormat<LongWritable, TValue> inputFormat = createInitialInputFormat();
        Job job = Job.getInstance(config);
        job.setInputFormatClass(inputFormat.getClass());
        NLineInputFormat.setNumLinesPerSplit(job, 100);
        FileInputFormat.setInputPaths(job, new Path(a.getAbsolutePath()), new Path(b.getAbsolutePath()));
        FileOutputFormat.setOutputPath(job, new Path(intermediateOutputDir.getAbsolutePath()));
        JobContext context = new JobContextImpl(job.getConfiguration(), job.getJobID());
        // Get the splits
        List<InputSplit> splits = inputFormat.getSplits(context);
        Assert.assertEquals(2, splits.size());
        // Prepare the output writing - putting all output to a single file
        OutputFormat<LongWritable, TValue> outputFormat = createIntermediateOutputFormat();
        TaskAttemptContext outputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), createAttemptID(1, 2, 1));
        RecordWriter<LongWritable, TValue> writer = outputFormat.getRecordWriter(outputTaskContext);
        for (InputSplit split : splits) {
            // Initialize the input reading
            TaskAttemptContext inputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), createAttemptID(1, 1, 1));
            RecordReader<LongWritable, TValue> reader = inputFormat.createRecordReader(split, inputTaskContext);
            reader.initialize(split, inputTaskContext);
            // Copy the input to the single output file
            while (reader.nextKeyValue()) {
                writer.write(reader.getCurrentKey(), reader.getCurrentValue());
            }
        }
        writer.close(outputTaskContext);
        // Promote outputs from temporary status
        promoteInputs(intermediateOutputDir);
        // Now we need to create a subsequent job that reads the
        // intermediate outputs
        // The blank nodes should have been given separate identities so we
        // should not be conflating them; this is the opposite problem to
        // the one described in JENA-820
        LOGGER.debug("Intermediate output directory is {}", intermediateOutputDir.getAbsolutePath());
        job = Job.getInstance(config);
        inputFormat = createIntermediateInputFormat();
        job.setInputFormatClass(inputFormat.getClass());
        NLineInputFormat.setNumLinesPerSplit(job, 100);
        FileInputFormat.setInputPaths(job, new Path(intermediateOutputDir.getAbsolutePath()));
        context = new JobContextImpl(job.getConfiguration(), job.getJobID());
        // Get the splits
        splits = inputFormat.getSplits(context);
        Assert.assertEquals(1, splits.size());
        // Expect to end up with two distinct blank nodes
        Set<Node> nodes = new HashSet<Node>();
        for (InputSplit split : splits) {
            TaskAttemptContext inputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), new TaskAttemptID());
            RecordReader<LongWritable, TValue> reader = inputFormat.createRecordReader(split, inputTaskContext);
            reader.initialize(split, inputTaskContext);
            while (reader.nextKeyValue()) {
                nodes.add(getSubject(reader.getCurrentValue().get()));
            }
        }
        // Nodes must not have been conflated
        Assert.assertEquals(2, nodes.size());
    } finally {
        a.delete();
        b.delete();
        deleteDirectory(intermediateOutputDir);
    }
}
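The only direct use of FileAttribute in this test is the empty array handed to Files.createTempDirectory. Since the attrs parameter is varargs, the array can simply be omitted. A minimal self-contained sketch of the equivalent calls (the prefix is reused from the test, everything else is standard java.nio):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.FileAttribute;

public class TempDirSketch {
    public static void main(String[] args) throws IOException {
        // Explicit empty attribute array, as in the test above
        Path explicit = Files.createTempDirectory("bnode_identity", new FileAttribute<?>[0]);
        // Equivalent call relying on varargs - no attributes supplied
        Path implicit = Files.createTempDirectory("bnode_identity");
        System.out.println(explicit + " / " + implicit);
    }
}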
Use of java.nio.file.attribute.FileAttribute in project jena by apache.
From the class AbstractBlankNodeTests, method blank_node_divergence_01:
/**
 * Test that starts with two blank nodes with the same identity in a single
 * file, splits them over two files and checks that we can work around
 * JENA-820 successfully by setting the
 * {@link RdfIOConstants#GLOBAL_BNODE_IDENTITY} flag for our subsequent job
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public final void blank_node_divergence_01() throws IOException, InterruptedException {
    Assume.assumeTrue("Requires ParserProfile be respected", this.respectsParserProfile());
    Assume.assumeFalse("Requires that Blank Node identity not be preserved", this.preservesBlankNodeIdentity());
    // Temporary files
    File a = File.createTempFile("bnode_divergence", getInitialInputExtension());
    File intermediateOutputDir = Files.createTempDirectory("bnode_divergence", new FileAttribute[0]).toFile();
    try {
        // Prepare the input data
        // Two mentions of the same blank node in the same file
        List<T> tuples = new ArrayList<>();
        Node bnode = NodeFactory.createBlankNode();
        Node pred = NodeFactory.createURI("http://example.org/predicate");
        tuples.add(createTuple(bnode, pred, NodeFactory.createLiteral("first")));
        tuples.add(createTuple(bnode, pred, NodeFactory.createLiteral("second")));
        writeTuples(a, tuples);
        // Set up fake job which will process the file as a single split
        Configuration config = new Configuration(true);
        InputFormat<LongWritable, TValue> inputFormat = createInitialInputFormat();
        Job job = Job.getInstance(config);
        job.setInputFormatClass(inputFormat.getClass());
        NLineInputFormat.setNumLinesPerSplit(job, 100);
        FileInputFormat.setInputPaths(job, new Path(a.getAbsolutePath()));
        FileOutputFormat.setOutputPath(job, new Path(intermediateOutputDir.getAbsolutePath()));
        JobContext context = new JobContextImpl(job.getConfiguration(), job.getJobID());
        // Get the splits
        List<InputSplit> splits = inputFormat.getSplits(context);
        Assert.assertEquals(1, splits.size());
        for (InputSplit split : splits) {
            // Initialize the input reading
            TaskAttemptContext inputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), createAttemptID(1, 1, 1));
            RecordReader<LongWritable, TValue> reader = inputFormat.createRecordReader(split, inputTaskContext);
            reader.initialize(split, inputTaskContext);
            // Copy the input to the output - each triple goes to a separate
            // output file
            // This is how we force multiple files to be produced
            int taskID = 1;
            while (reader.nextKeyValue()) {
                // Prepare the output writing
                OutputFormat<LongWritable, TValue> outputFormat = createIntermediateOutputFormat();
                TaskAttemptContext outputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), createAttemptID(1, ++taskID, 1));
                RecordWriter<LongWritable, TValue> writer = outputFormat.getRecordWriter(outputTaskContext);
                writer.write(reader.getCurrentKey(), reader.getCurrentValue());
                writer.close(outputTaskContext);
            }
        }
        // Promote outputs from temporary status
        promoteInputs(intermediateOutputDir);
        // Now we need to create a subsequent job that reads the
        // intermediate outputs
        // As described in JENA-820, at this point the blank nodes are
        // consistent; however, when we read them from different files they
        // by default get treated as different nodes, so the blank nodes
        // diverge, which is incorrect and undesirable behaviour in
        // multi-stage pipelines
        LOGGER.debug("Intermediate output directory is {}", intermediateOutputDir.getAbsolutePath());
        job = Job.getInstance(config);
        inputFormat = createIntermediateInputFormat();
        job.setInputFormatClass(inputFormat.getClass());
        FileInputFormat.setInputPaths(job, new Path(intermediateOutputDir.getAbsolutePath()));
        // Enabling this flag works around the JENA-820 issue
        job.getConfiguration().setBoolean(RdfIOConstants.GLOBAL_BNODE_IDENTITY, true);
        context = new JobContextImpl(job.getConfiguration(), job.getJobID());
        // Get the splits
        splits = inputFormat.getSplits(context);
        Assert.assertEquals(2, splits.size());
        // Expect to end up with a single blank node
        Set<Node> nodes = new HashSet<Node>();
        for (InputSplit split : splits) {
            TaskAttemptContext inputTaskContext = new TaskAttemptContextImpl(job.getConfiguration(), new TaskAttemptID());
            RecordReader<LongWritable, TValue> reader = inputFormat.createRecordReader(split, inputTaskContext);
            reader.initialize(split, inputTaskContext);
            while (reader.nextKeyValue()) {
                nodes.add(getSubject(reader.getCurrentValue().get()));
            }
        }
        // Nodes should not have diverged
        Assert.assertEquals(1, nodes.size());
    } finally {
        a.delete();
        deleteDirectory(intermediateOutputDir);
    }
}
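Both tests hinge on how a parser scopes blank node labels: the same label read in two parser runs yields two distinct nodes unless identity is deliberately made global. The behaviour can be reproduced with plain Jena outside Hadoop; a minimal sketch, assuming jena-arq on the classpath (the data string and label are illustrative):

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import org.apache.jena.rdf.model.Model;
import org.apache.jena.rdf.model.ModelFactory;
import org.apache.jena.riot.Lang;
import org.apache.jena.riot.RDFDataMgr;

public class BNodeScopeSketch {
    public static void main(String[] args) {
        String data = "_:b <http://example.org/predicate> \"first\" .";
        Model m1 = ModelFactory.createDefaultModel();
        Model m2 = ModelFactory.createDefaultModel();
        // Two separate parser runs, as if the same label came from two files
        RDFDataMgr.read(m1, new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8)), Lang.NTRIPLES);
        RDFDataMgr.read(m2, new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8)), Lang.NTRIPLES);
        // By default each run mints a fresh blank node, so the union has two subjects
        int subjects = m1.union(m2).listSubjects().toList().size();
        System.out.println("distinct subjects: " + subjects); // expected: 2
    }
}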
Use of java.nio.file.attribute.FileAttribute in project qpid-broker-j by apache.
From the class AESKeyFileEncrypterFactory, method createEmptyKeyFile:
private void createEmptyKeyFile(File file) throws IOException {
    final Path parentFilePath = file.getAbsoluteFile().getParentFile().toPath();
    if (isPosixFileSystem(file)) {
        Set<PosixFilePermission> ownerOnly = EnumSet.of(PosixFilePermission.OWNER_READ,
                PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE);
        Files.createDirectories(parentFilePath, PosixFilePermissions.asFileAttribute(ownerOnly));
        Files.createFile(file.toPath(), PosixFilePermissions
                .asFileAttribute(EnumSet.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE)));
    } else if (isAclFileSystem(file)) {
        Files.createDirectories(parentFilePath);
        final UserPrincipal owner = Files.getOwner(parentFilePath);
        AclFileAttributeView attributeView = Files.getFileAttributeView(parentFilePath, AclFileAttributeView.class);
        List<AclEntry> acls = new ArrayList<>(attributeView.getAcl());
        ListIterator<AclEntry> iter = acls.listIterator();
        boolean found = false;
        while (iter.hasNext()) {
            AclEntry acl = iter.next();
            if (!owner.equals(acl.principal())) {
                iter.remove();
            } else if (acl.type() == AclEntryType.ALLOW) {
                found = true;
                AclEntry.Builder builder = AclEntry.newBuilder(acl);
                Set<AclEntryPermission> permissions = acl.permissions().isEmpty()
                        ? new HashSet<AclEntryPermission>()
                        : EnumSet.copyOf(acl.permissions());
                permissions.addAll(Arrays.asList(AclEntryPermission.ADD_FILE,
                        AclEntryPermission.ADD_SUBDIRECTORY, AclEntryPermission.LIST_DIRECTORY));
                builder.setPermissions(permissions);
                iter.set(builder.build());
            }
        }
        if (!found) {
            AclEntry.Builder builder = AclEntry.newBuilder();
            builder.setPermissions(AclEntryPermission.ADD_FILE, AclEntryPermission.ADD_SUBDIRECTORY,
                    AclEntryPermission.LIST_DIRECTORY);
            builder.setType(AclEntryType.ALLOW);
            builder.setPrincipal(owner);
            acls.add(builder.build());
        }
        attributeView.setAcl(acls);
        Files.createFile(file.toPath(), new FileAttribute<List<AclEntry>>() {
            @Override
            public String name() {
                return "acl:acl";
            }

            @Override
            public List<AclEntry> value() {
                AclEntry.Builder builder = AclEntry.newBuilder();
                builder.setType(AclEntryType.ALLOW);
                builder.setPermissions(EnumSet.allOf(AclEntryPermission.class));
                builder.setPrincipal(owner);
                return Collections.singletonList(builder.build());
            }
        });
    } else {
        throw new IllegalArgumentException("Unable to determine a mechanism to protect access to the key file on this filesystem");
    }
}
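The POSIX branch above creates files and directories with restrictive permissions atomically at creation time, rather than creating them with default permissions and tightening them afterwards. A minimal standalone sketch of the same idiom (the path is illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;

public class OwnerOnlyFileSketch {
    public static void main(String[] args) throws IOException {
        // rw------- : owner read/write, no access for group or others
        Set<PosixFilePermission> ownerRw = PosixFilePermissions.fromString("rw-------");
        FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(ownerRw);
        // Permissions are applied at creation time, closing the window in which
        // a default-permission file would briefly exist
        Path keyFile = Files.createFile(Paths.get("/tmp/example-key.dat"), attr);
        System.out.println("created " + keyFile);
    }
}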
Use of java.nio.file.attribute.FileAttribute in project vcell by virtualcell.
From the class CopasiOptimizationSolver, method solveLocalPython:
public static OptimizationResultSet solveLocalPython(ParameterEstimationTaskSimulatorIDA parestSimulator,
        ParameterEstimationTask parameterEstimationTask, CopasiOptSolverCallbacks optSolverCallbacks,
        MathMappingCallback mathMappingCallback) throws IOException, ExpressionException, OptimizationException {
    File dir = Files.createTempDirectory("parest", new FileAttribute<?>[] {}).toFile();
    try {
        String prefix = "testing_" + Math.abs(new Random().nextInt(10000));
        File optProblemThriftFile = new File(dir, prefix + ".optprob.bin");
        File optRunFile = new File(dir, prefix + ".optrun.bin");
        //
        // Set up the Python COPASI opt problem and write it to disk
        //
        OptProblem optProblem = CopasiServicePython.makeOptProblem(parameterEstimationTask);
        CopasiServicePython.writeOptProblem(optProblemThriftFile, optProblem);
        //
        // Run the Python COPASI opt problem
        //
        CopasiServicePython.runCopasiPython(optProblemThriftFile, optRunFile);
        if (!optRunFile.exists()) {
            throw new RuntimeException("COPASI optimization output file not found:\n" + optRunFile.getAbsolutePath());
        }
        OptRun optRun = CopasiServicePython.readOptRun(optRunFile);
        OptResultSet optResultSet = optRun.getOptResultSet();
        int numFittedParameters = optResultSet.getOptParameterValues().size();
        String[] paramNames = new String[numFittedParameters];
        double[] paramValues = new double[numFittedParameters];
        for (int pIndex = 0; pIndex < numFittedParameters; pIndex++) {
            OptParameterValue optParamValue = optResultSet.getOptParameterValues().get(pIndex);
            paramNames[pIndex] = optParamValue.parameterName;
            paramValues[pIndex] = optParamValue.bestValue;
        }
        OptimizationStatus status = new OptimizationStatus(OptimizationStatus.NORMAL_TERMINATION, optRun.statusMessage);
        OptRunResultSet optRunResultSet = new OptRunResultSet(paramValues, optResultSet.objectiveFunction,
                optResultSet.numFunctionEvaluations, status);
        OptSolverResultSet copasiOptSolverResultSet = new OptSolverResultSet(paramNames, optRunResultSet);
        RowColumnResultSet copasiRcResultSet = parestSimulator.getRowColumnRestultSetByBestEstimations(
                parameterEstimationTask, paramNames, paramValues);
        OptimizationResultSet copasiOptimizationResultSet = new OptimizationResultSet(copasiOptSolverResultSet, copasiRcResultSet);
        System.out.println("-----------SOLUTION FROM PYTHON---------------\n" + optResultSet.toString());
        return copasiOptimizationResultSet;
    } catch (Throwable e) {
        e.printStackTrace(System.out);
        throw new OptimizationException(e.getCause() != null ? e.getCause().getMessage() : e.getMessage());
    } finally {
        if (dir != null && dir.exists()) {
            FileUtils.deleteDirectory(dir);
        }
    }
}
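The method above follows a common file-based handshake pattern for driving an external tool: write the problem into a private temporary directory, invoke the tool, verify that the expected output file appeared, and clean up unconditionally. A skeletal sketch of that lifecycle, where writeProblem, runExternalSolver and readResult are hypothetical stubs standing in for the COPASI-specific calls, and FileUtils comes from Apache commons-io:

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

import org.apache.commons.io.FileUtils;

public class SolverHandshakeSketch {
    public static void main(String[] args) throws IOException {
        File dir = Files.createTempDirectory("parest").toFile();
        try {
            File problem = new File(dir, "problem.bin");
            File result = new File(dir, "result.bin");
            writeProblem(problem);              // hypothetical: serialize the problem
            runExternalSolver(problem, result); // hypothetical: launch the external process
            if (!result.exists()) {
                // Fail loudly if the tool produced nothing, as the method above does
                throw new IllegalStateException("solver produced no output: " + result.getAbsolutePath());
            }
            readResult(result);                 // hypothetical: parse the result file
        } finally {
            FileUtils.deleteDirectory(dir);     // clean up even on failure
        }
    }

    // Hypothetical stubs so the sketch compiles
    static void writeProblem(File f) throws IOException { Files.createFile(f.toPath()); }
    static void runExternalSolver(File in, File out) throws IOException { Files.createFile(out.toPath()); }
    static void readResult(File f) { }
}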
Use of java.nio.file.attribute.FileAttribute in project mycore by MyCoRe-Org.
From the class MCRFileSystemProvider, method newByteChannel:
/* (non-Javadoc)
 * @see java.nio.file.spi.FileSystemProvider#newByteChannel(java.nio.file.Path, java.util.Set, java.nio.file.attribute.FileAttribute[])
 */
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> options, FileAttribute<?>... attrs) throws IOException {
    if (attrs.length > 0) {
        throw new UnsupportedOperationException("Atomically setting of file attributes is not supported.");
    }
    MCRPath ifsPath = MCRFileSystemUtils.checkPathAbsolute(path);
    Set<? extends OpenOption> fileOpenOptions = options.stream()
            .filter(option -> !(option == StandardOpenOption.CREATE || option == StandardOpenOption.CREATE_NEW))
            .collect(Collectors.toSet());
    boolean create = options.contains(StandardOpenOption.CREATE);
    boolean createNew = options.contains(StandardOpenOption.CREATE_NEW);
    if (create || createNew) {
        for (OpenOption option : fileOpenOptions) {
            // check before we create any file instance
            MCRFile.checkOpenOption(option);
        }
    }
    MCRFile mcrFile = MCRFileSystemUtils.getMCRFile(ifsPath, create, createNew);
    if (mcrFile == null) {
        throw new NoSuchFileException(path.toString());
    }
    return mcrFile.getFileChannel(fileOpenOptions);
}
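Callers reach this provider method through the static java.nio.file.Files API, which forwards any supplied FileAttribute to the provider; this implementation rejects them outright. A minimal sketch against an ordinary default-filesystem path (the path is illustrative):

import java.nio.ByteBuffer;
import java.nio.channels.SeekableByteChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.EnumSet;

public class ByteChannelSketch {
    public static void main(String[] args) throws Exception {
        Path p = Paths.get("/tmp/example.txt");
        // No FileAttribute supplied - the attrs varargs array arrives empty at the provider
        try (SeekableByteChannel ch = Files.newByteChannel(p,
                EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.WRITE))) {
            ch.write(ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8)));
        }
        // Against the MCR provider above, supplying any attribute - e.g. a POSIX
        // permission attribute - would trigger the UnsupportedOperationException
    }
}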