Use of org.apache.hadoop.fs.FileContext in the apex-core project (Apache).
From class AsyncFSStorageAgentTest, method testDelete:
@Test
public void testDelete() throws IOException {
  // Seed checkpoints for operators 1 and 2 via testLoad(), delete only
  // operator 1's window-1 checkpoint, then verify operator 2's survives
  // while operator 1's is gone.
  testLoad();
  testMeta.storageAgent.delete(1, 1);
  Path appPath = new Path(testMeta.applicationPath);
  FileContext fileContext = FileContext.getFileContext();
  // Use the Path(parent, child) constructor instead of string concatenation:
  // it normalizes separators and cannot produce a malformed path.
  Assert.assertTrue("operator 2 window 1", fileContext.util().exists(new Path(appPath, "2/1")));
  Assert.assertFalse("operator 1 window 1", fileContext.util().exists(new Path(appPath, "1/1")));
}
Use of org.apache.hadoop.fs.FileContext in the apex-core project (Apache).
From class FSStorageAgentTest, method testDelete:
@Test
public void testDelete() throws IOException {
  // Seed checkpoints for operators 1 and 2 via testLoad(), delete only
  // operator 1's window-1 checkpoint, then verify operator 2's survives
  // while operator 1's is gone.
  testLoad();
  testMeta.storageAgent.delete(1, 1);
  Path appPath = new Path(testMeta.applicationPath);
  FileContext fileContext = FileContext.getFileContext();
  // Use the Path(parent, child) constructor instead of string concatenation:
  // it normalizes separators and cannot produce a malformed path.
  Assert.assertTrue("operator 2 window 1", fileContext.util().exists(new Path(appPath, "2/1")));
  Assert.assertFalse("operator 1 window 1", fileContext.util().exists(new Path(appPath, "1/1")));
}
Use of org.apache.hadoop.fs.FileContext in the Storm project (Apache).
From class HdfsBlobStoreFile, method commit:
@Override
public void commit() throws IOException {
  // Promote this temporary file to the blob's data file.
  checkIsNotTmp();
  // FileContext supports atomic rename, whereas FileSystem doesn't
  FileContext fc = FileContext.getFileContext(hadoopConf);
  Path dest = new Path(path.getParent(), BLOBSTORE_DATA_FILE);
  // A brand-new blob must not clobber an existing data file (Rename.NONE,
  // which is also what the option-less rename overload does); otherwise
  // overwrite whatever is already there.
  Options.Rename mode = mustBeNew ? Options.Rename.NONE : Options.Rename.OVERWRITE;
  fc.rename(path, dest, mode);
  // Note, we could add support for setting the replication factor
}
Use of org.apache.hadoop.fs.FileContext in the Gaffer project (GCHQ).
From class WriteData, method renameFiles:
private void renameFiles(final int partitionId, final long taskAttemptId, final Set<String> groups, final Map<String, Path> groupToWriterPath) throws Exception {
  // Promote each group's per-task-attempt output file to its final,
  // attempt-independent name inside that group's directory.
  // newName is loop-invariant, so compute it once; the log now reports the
  // real destination name (the original message omitted the ".parquet" extension).
  final String newName = "input-" + partitionId + ".parquet";
  LOGGER.info("Renaming output files from {} to {}", "input-" + partitionId + "-" + taskAttemptId + ".parquet", newName);
  final FileContext fileContext = FileContext.getFileContext(new Configuration());
  for (final String group : groups) {
    final Path src = groupToWriterPath.get(group);
    final Path dst = new Path(groupToDirectory.get(group) + "/" + newName);
    try {
      // Rename.NONE: fail (rather than overwrite) if the destination exists,
      // so concurrent task attempts cannot clobber each other's output.
      fileContext.rename(src, dst, Options.Rename.NONE);
      LOGGER.debug("Renamed {} to {}", src, dst);
    } catch (final FileAlreadyExistsException e) {
      // Another task got there first
      LOGGER.debug("Not renaming {} to {} as the destination already exists", src, dst);
    }
  }
}
Use of org.apache.hadoop.fs.FileContext in the apex-malhar project (Apache).
From class IOUtilsTest, method testCopyPartialHelper:
private void testCopyPartialHelper(int dataSize, int offset, long size) throws IOException {
  // Creates a data file of dataSize bytes, copies `size` bytes (optionally
  // starting at `offset`) through IOUtils.copyPartial, and asserts the output
  // file exists with exactly `size` bytes.
  FileUtils.deleteQuietly(new File("target/IOUtilsTest"));
  File file = new File("target/IOUtilsTest/testCopyPartial/input");
  createDataFile(file, dataSize);
  FileContext fileContext = FileContext.getFileContext();
  Path output = new Path("target/IOUtilsTest/testCopyPartial/output");
  // try-with-resources fixes two leaks in the original: inputStream was never
  // closed at all, and outputStream leaked whenever copyPartial threw.
  try (DataInputStream inputStream = fileContext.open(new Path(file.getAbsolutePath()));
      DataOutputStream outputStream = fileContext.create(output, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), Options.CreateOpts.CreateParent.createParent())) {
    if (offset == 0) {
      IOUtils.copyPartial(inputStream, size, outputStream);
    } else {
      IOUtils.copyPartial(inputStream, offset, size, outputStream);
    }
  }
  // Both streams are closed (and the output flushed) before these assertions run.
  Assert.assertTrue("output exists", fileContext.util().exists(output));
  Assert.assertEquals("output size", size, fileContext.getFileStatus(output).getLen());
  // FileUtils.deleteQuietly(new File("target/IOUtilsTest"));
}
Aggregations