Usage of org.apache.hadoop.fs.FileAlreadyExistsException in the project ignite by apache:
the method testRenameDirectoryIfDstPathExists of the class HadoopIgfs20FileSystemAbstractSelfTest.
/**
 * Verifies that renaming a directory onto an already-existing destination directory fails
 * with {@link FileAlreadyExistsException} and leaves both trees untouched.
 *
 * @throws Exception If failed.
 */
public void testRenameDirectoryIfDstPathExists() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path srcDir = new Path(fsHome, "/tmp/");
    Path dstDir = new Path(fsHome, "/tmpNew/");

    // Materialize both directories by creating one file inside each.
    FSDataOutputStream srcFile = fs.create(new Path(srcDir, "file1"),
        EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()));

    srcFile.close();

    FSDataOutputStream dstFile = fs.create(new Path(dstDir, "file2"),
        EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()));

    dstFile.close();

    try {
        fs.rename(srcDir, dstDir);

        fail("FileAlreadyExistsException expected.");
    }
    catch (FileAlreadyExistsException ignore) {
        // No-op.
    }

    // Check all the files stay unchanged:
    assertPathExists(fs, dstDir);
    assertPathExists(fs, new Path(dstDir, "file2"));
    assertPathExists(fs, srcDir);
    assertPathExists(fs, new Path(srcDir, "file1"));
}
Usage of org.apache.hadoop.fs.FileAlreadyExistsException in the project ignite by apache:
the method mkdirs of the class HadoopRawLocalFileSystem.
/**
 * {@inheritDoc}
 */
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
    if (f == null)
        throw new IllegalArgumentException("mkdirs path arg is null");

    Path parent = f.getParent();

    // A directory cannot be created under a parent that exists as a regular file.
    if (parent != null) {
        File parentFile = convert(parent);

        if (parentFile != null && parentFile.exists() && !parentFile.isDirectory())
            throw new FileAlreadyExistsException("Parent path is not a directory: " + parent);
    }

    File dir = convert(f);

    // Ensure the parent chain recursively, then create this directory
    // (treating an already-existing directory as success).
    return (parent == null || mkdirs(parent)) && (dir.mkdir() || dir.isDirectory());
}
Usage of org.apache.hadoop.fs.FileAlreadyExistsException in the project stocator by SparkTC:
the method mkdirs of the class ObjectStoreFileSystem.
/**
 * {@inheritDoc}
 *
 * When path is of the form schema://dataroot.provider/objectname/_temporary/0
 * it is assumed that a new job has started writing its data. In that case an
 * empty object schema://dataroot.provider/objectname is created so that objects
 * written by Spark can later be identified. This supports fault-tolerance
 * coverage: data produced by failed jobs or tasks can be recognized.
 * dataroot/object is created as a 0-size object of type application/directory.
 */
@Override
public boolean mkdirs(Path f) throws IOException, FileAlreadyExistsException {
    LOG.debug("mkdirs: {}", f.toString());

    if (stocatorPath.isTemporaryPathTarget(f.getParent())) {
        Path qualified = storageClient.qualify(f);
        String objNameModified = stocatorPath.getObjectNameRoot(qualified, true,
            storageClient.getDataRoot(), true);
        Path objPath = new Path(objNameModified);

        LOG.trace("mkdirs {} modified name", objNameModified);

        // Refuse to clobber data that already exists at the target directory.
        // getFileStatus throws FileNotFoundException when nothing is there, which
        // is the signal that it is safe to proceed.
        try {
            String directoryToExpect = stocatorPath.getBaseDirectory(f.toString());
            FileStatus status = getFileStatus(new Path(directoryToExpect));

            if (status != null) {
                LOG.debug("mkdirs found {} as exists. Directory : {}", directoryToExpect,
                    status.isDirectory());
                throw new FileAlreadyExistsException("mkdir on existing directory " + directoryToExpect);
            }
        }
        catch (FileNotFoundException e) {
            LOG.debug("mkdirs {} - not exists. Proceed", objPath.getParent().toString());
        }

        // Write the 0-size application/directory marker identifying Spark output.
        String identifierName = objPath.getParent().toString();

        LOG.debug("Going to create identifier {}", identifierName);

        Map<String, String> metadata = new HashMap<String, String>();
        metadata.put("Data-Origin", "stocator");

        FSDataOutputStream marker = storageClient.createObject(identifierName,
            Constants.APPLICATION_DIRECTORY, metadata, statistics);
        marker.close();
    }

    return true;
}
Aggregations