Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hadoop by apache.
The class SwiftNativeFileSystem, method create.
/**
 * @param permission Currently ignored.
 */
@Override
public FSDataOutputStream create(Path file, FsPermission permission,
    boolean overwrite, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {
  LOG.debug("SwiftFileSystem.create");
  FileStatus fileStatus = null;
  Path absolutePath = makeAbsolute(file);
  try {
    fileStatus = getFileStatus(absolutePath);
  } catch (FileNotFoundException e) {
    // the file isn't there
  }
  if (fileStatus != null) {
    // the path exists, so there is no need to bother creating any parent entries
    if (fileStatus.isDirectory()) {
      /* we can't throw an exception here, as there is no easy way to
         distinguish a file from the dir:
         throw new SwiftPathExistsException("Cannot create a file over a directory:"
             + file);
       */
      if (LOG.isDebugEnabled()) {
        LOG.debug("Overwriting either an empty file or a directory");
      }
    }
    if (overwrite) {
      // overwrite set -> delete the object
      store.delete(absolutePath, true);
    } else {
      throw new FileAlreadyExistsException("Path exists: " + file);
    }
  } else {
    // destination does not exist - trigger creation of the parent
    Path parent = file.getParent();
    if (parent != null) {
      if (!mkdirs(parent)) {
        throw new SwiftOperationFailedException("Mkdirs failed to create " + parent);
      }
    }
  }
  SwiftNativeOutputStream out = createSwiftOutputStream(file);
  return new FSDataOutputStream(out, statistics);
}
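Seen from a caller, the method above means that create() with overwrite=false raises FileAlreadyExistsException whenever the path is already present. A minimal sketch of a writer that treats the collision as a lost race rather than a failure; the swift:// path is a hypothetical placeholder, and any Hadoop FileSystem with these semantics behaves the same way:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateNoOverwrite {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // hypothetical path chosen for illustration
    Path file = new Path("swift://container.service/tmp/example");
    FileSystem fs = FileSystem.get(file.toUri(), conf);
    try (FSDataOutputStream out = fs.create(file, /* overwrite */ false)) {
      out.writeUTF("first writer wins");
    } catch (FileAlreadyExistsException e) {
      // another writer created the file first; treat as a lost race, not an error
      System.out.println("Path exists, skipping: " + file);
    }
  }
}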
Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hadoop by apache.
The class SwiftNativeFileSystemStore, method rename.
/**
 * Rename through copy-and-delete. This is a consequence of the
 * Swift filesystem using the path as the hash
 * into the Distributed Hash Table, "the ring" of filenames.
 * <p>
 * Because of the nature of the operation, it is not atomic.
 *
 * @param src source file/dir
 * @param dst destination
 * @throws IOException IO failure
 * @throws SwiftOperationFailedException if the rename failed
 * @throws FileNotFoundException if the source directory is missing, or
 *         the parent directory of the destination
 */
public void rename(Path src, Path dst)
    throws FileNotFoundException, SwiftOperationFailedException, IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("mv " + src + " " + dst);
  }
  boolean renamingOnToSelf = src.equals(dst);
  SwiftObjectPath srcObject = toObjectPath(src);
  SwiftObjectPath destObject = toObjectPath(dst);
  if (SwiftUtils.isRootDir(srcObject)) {
    throw new SwiftOperationFailedException("cannot rename root dir");
  }
  final SwiftFileStatus srcMetadata = getObjectMetadata(src);
  SwiftFileStatus dstMetadata;
  try {
    dstMetadata = getObjectMetadata(dst);
  } catch (FileNotFoundException e) {
    // destination does not exist
    LOG.debug("Destination does not exist");
    dstMetadata = null;
  }
  // check to see if the destination parent directory exists
  Path srcParent = src.getParent();
  Path dstParent = dst.getParent();
  // if the parent is null, the destination is the root directory,
  // which must already exist
  if (dstParent != null && !dstParent.equals(srcParent)) {
    try {
      getObjectMetadata(dstParent);
    } catch (FileNotFoundException e) {
      // destination parent doesn't exist; bail out
      LOG.debug("destination parent directory " + dstParent + " doesn't exist");
      throw e;
    }
  }
  boolean destExists = dstMetadata != null;
  boolean destIsDir = destExists && SwiftUtils.isDirectory(dstMetadata);
  // calculate the destination
  SwiftObjectPath destPath;
  // enumerate the child entries and everything underneath
  List<FileStatus> childStats = listDirectory(srcObject, true, true);
  boolean srcIsFile = !srcMetadata.isDir();
  if (srcIsFile) {
    // source is a file; the outcomes are:
    // #1 dest exists and is a file: fail (unless renaming onto self)
    // #2 dest exists and is a directory: move under dest
    // #3 dest does not exist: use dest as the new name
    if (destExists) {
      if (destIsDir) {
        // outcome #2 - move to a subdir of dest
        destPath = toObjectPath(new Path(dst, src.getName()));
      } else {
        // outcome #1 - dest is a file: fail if it is a different file
        if (!renamingOnToSelf) {
          throw new FileAlreadyExistsException(
              "cannot rename a file over one that already exists");
        } else {
          // this is "mv self self" where self is a file; it becomes a no-op
          LOG.debug("Renaming file onto self: no-op => success");
          return;
        }
      }
    } else {
      // outcome #3 - new entry
      destPath = toObjectPath(dst);
    }
    int childCount = childStats.size();
    // a file with no children is a single object; with children it is
    // a partitioned file
    if (childCount == 0) {
      copyThenDeleteObject(srcObject, destPath);
    } else {
      // do the copy
      SwiftUtils.debug(LOG, "Source file appears to be partitioned."
          + " copying file and deleting children");
      copyObject(srcObject, destPath);
      for (FileStatus stat : childStats) {
        SwiftUtils.debug(LOG, "Deleting partitioned file %s ", stat);
        deleteObject(stat.getPath());
      }
      swiftRestClient.delete(srcObject);
    }
  } else {
    // source is a directory
    if (destExists && !destIsDir) {
      // #1 destination is a file: fail
      throw new FileAlreadyExistsException(
          "the source is a directory, but not the destination");
    }
    Path targetPath;
    if (destExists) {
      // #2 destination is a directory: create a new dir under that one
      targetPath = new Path(dst, src.getName());
    } else {
      // #3 destination doesn't exist: create a new dir with that name
      targetPath = dst;
    }
    SwiftObjectPath targetObjectPath = toObjectPath(targetPath);
    // final check for any recursive operations
    if (srcObject.isEqualToOrParentOf(targetObjectPath)) {
      // you can't rename a directory onto itself
      throw new SwiftOperationFailedException("cannot move a directory under itself");
    }
    LOG.info("mv " + srcObject + " " + targetPath);
    logDirectory("Directory to copy ", srcObject, childStats);
    // Iterative copy of everything under the directory.
    // By listing all children this can be done iteratively
    // rather than recursively - everything in this list is either a file
    // or a 0-byte-len file pretending to be a directory.
    String srcURI = src.toUri().toString();
    int prefixStripCount = srcURI.length() + 1;
    for (FileStatus fileStatus : childStats) {
      Path copySourcePath = fileStatus.getPath();
      String copySourceURI = copySourcePath.toUri().toString();
      String copyDestSubPath = copySourceURI.substring(prefixStripCount);
      Path copyDestPath = new Path(targetPath, copyDestSubPath);
      if (LOG.isTraceEnabled()) {
        // trace to debug some low-level rename path problems; retained
        // in case they ever come back
        LOG.trace("srcURI=" + srcURI + "; copySourceURI=" + copySourceURI
            + "; copyDestSubPath=" + copyDestSubPath
            + "; copyDestPath=" + copyDestPath);
      }
      SwiftObjectPath copyDestination = toObjectPath(copyDestPath);
      try {
        copyThenDeleteObject(toObjectPath(copySourcePath), copyDestination);
      } catch (FileNotFoundException e) {
        LOG.info("Skipping rename of " + copySourcePath);
      }
      // add a throttle delay
      throttle();
    }
    // now rename self; if missing, create the dest directory and warn
    if (!SwiftUtils.isRootDir(srcObject)) {
      try {
        copyThenDeleteObject(srcObject, targetObjectPath);
      } catch (FileNotFoundException e) {
        // create the destination directory
        LOG.warn("Source directory deleted during rename", e);
        innerCreateDirectory(destObject);
      }
    }
  }
}
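The child remapping in the directory branch is plain string arithmetic on URIs: strip the source prefix plus its trailing slash, then re-root the remainder under the target path. A self-contained sketch of that step, using hypothetical swift:// paths:

import org.apache.hadoop.fs.Path;

public class RenameRemapDemo {
  public static void main(String[] args) {
    // hypothetical paths chosen for illustration
    Path src = new Path("swift://data.service/a/b");
    Path target = new Path("swift://data.service/x/y");
    Path child = new Path("swift://data.service/a/b/part-0001");

    String srcURI = src.toUri().toString();
    int prefixStripCount = srcURI.length() + 1;   // the +1 drops the "/" after the prefix
    String subPath = child.toUri().toString().substring(prefixStripCount);

    // prints: swift://data.service/x/y/part-0001
    System.out.println(new Path(target, subPath));
  }
}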
Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hadoop by apache.
The class TestRegistryOperations, method testOverwrite.
@Test
public void testOverwrite() throws Throwable {
  ServiceRecord written = putExampleServiceEntry(ENTRY_PATH, 0);
  ServiceRecord resolved1 = operations.resolve(ENTRY_PATH);
  resolved1.description = "resolved1";
  try {
    operations.bind(ENTRY_PATH, resolved1, 0);
    fail("overwrite succeeded when it should have failed");
  } catch (FileAlreadyExistsException expected) {
    // expected
  }
  // verify nothing changed
  ServiceRecord resolved2 = operations.resolve(ENTRY_PATH);
  assertMatches(written, resolved2);
  operations.bind(ENTRY_PATH, resolved1, BindFlags.OVERWRITE);
  ServiceRecord resolved3 = operations.resolve(ENTRY_PATH);
  assertMatches(resolved1, resolved3);
}
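The test pins down the registry contract: bind() with flags 0 refuses to replace an existing record, while BindFlags.OVERWRITE replaces it. That suggests a create-or-overwrite idiom for application code; a sketch, assuming an already-initialized RegistryOperations instance:

import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.registry.client.api.BindFlags;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.types.ServiceRecord;

public class RegistryBindIdiom {
  /** Create the record, or fall back to an explicit overwrite. */
  static void bindOrOverwrite(RegistryOperations operations,
      String path, ServiceRecord record) throws Exception {
    try {
      operations.bind(path, record, 0);   // fails if the path already exists
    } catch (FileAlreadyExistsException e) {
      operations.bind(path, record, BindFlags.OVERWRITE);
    }
  }
}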
Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hbase by apache.
The class WALProcedureStore, method rollWriter.
private boolean rollWriter(final long logId) throws IOException {
  assert logId > flushLogId : "logId=" + logId + " flushLogId=" + flushLogId;
  assert lock.isHeldByCurrentThread() : "expected to be the lock owner. " + lock.isLocked();

  ProcedureWALHeader header = ProcedureWALHeader.newBuilder()
      .setVersion(ProcedureWALFormat.HEADER_VERSION)
      .setType(ProcedureWALFormat.LOG_TYPE_STREAM)
      .setMinProcId(storeTracker.getActiveMinProcId())
      .setLogId(logId)
      .build();

  FSDataOutputStream newStream = null;
  Path newLogFile = null;
  long startPos = -1;
  newLogFile = getLogFilePath(logId);
  try {
    newStream = fs.create(newLogFile, false);
  } catch (FileAlreadyExistsException e) {
    LOG.error("Log file with id=" + logId + " already exists", e);
    return false;
  } catch (RemoteException re) {
    LOG.warn("failed to create log file with id=" + logId, re);
    return false;
  }
  try {
    ProcedureWALFormat.writeHeader(newStream, header);
    startPos = newStream.getPos();
  } catch (IOException ioe) {
    LOG.warn("Encountered exception writing header", ioe);
    newStream.close();
    return false;
  }
  closeCurrentLogStream();
  storeTracker.resetUpdates();
  stream = newStream;
  flushLogId = logId;
  totalSynced.set(0);
  long rollTs = System.currentTimeMillis();
  lastRollTs.set(rollTs);
  logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos, rollTs));

  // when the first WAL after the active one is added, build the holding cleanup tracker
  if (logs.size() == 2) {
    buildHoldingCleanupTracker();
  } else if (logs.size() > walCountWarnThreshold) {
    LOG.warn("procedure WALs count=" + logs.size()
        + " above the warning threshold " + walCountWarnThreshold
        + ". check running procedures to see if something is stuck.");
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("Roll new state log: " + logId);
  }
  return true;
}
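The key move in rollWriter() is fs.create(newLogFile, false): with overwrite disabled, the create acts as a test-and-set on the filesystem namespace, so a concurrent attempt to roll the same logId loses cleanly with FileAlreadyExistsException instead of clobbering an existing log. A hedged sketch of that pattern in isolation; the helper name and semantics are illustrative, not part of the HBase API:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ExclusiveCreate {
  /**
   * Try to claim a log slot by creating its file exclusively.
   * Returns the open stream on success, or null if another writer got there first.
   */
  static FSDataOutputStream tryClaim(FileSystem fs, Path logFile) throws IOException {
    try {
      // overwrite=false turns create() into a test-and-set on the path
      return fs.create(logFile, false);
    } catch (FileAlreadyExistsException e) {
      return null;   // lost the race; caller should back off or pick a new id
    }
  }
}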
Use of org.apache.hadoop.fs.FileAlreadyExistsException in project lucene-solr by apache.
The class HdfsLockFactory, method obtainLock.
@Override
public Lock obtainLock(Directory dir, String lockName) throws IOException {
  if (!(dir instanceof HdfsDirectory)) {
    throw new UnsupportedOperationException(
        "HdfsLockFactory can only be used with HdfsDirectory subclasses, got: " + dir);
  }
  final HdfsDirectory hdfsDir = (HdfsDirectory) dir;
  final Configuration conf = hdfsDir.getConfiguration();
  final Path lockPath = hdfsDir.getHdfsDirPath();
  final Path lockFile = new Path(lockPath, lockName);
  FSDataOutputStream file = null;
  final FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
  while (true) {
    try {
      if (!fs.exists(lockPath)) {
        boolean success = fs.mkdirs(lockPath);
        if (!success) {
          throw new RuntimeException("Could not create directory: " + lockPath);
        }
      } else {
        // just to check for safe mode
        fs.mkdirs(lockPath);
      }
      file = fs.create(lockFile, false);
      break;
    } catch (FileAlreadyExistsException e) {
      throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
    } catch (RemoteException e) {
      if (e.getClassName().equals(
          "org.apache.hadoop.hdfs.server.namenode.SafeModeException")) {
        log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
        try {
          Thread.sleep(5000);
        } catch (InterruptedException e1) {
          Thread.interrupted();
        }
        continue;
      }
      throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
    } catch (IOException e) {
      throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
    } finally {
      IOUtils.closeQuietly(file);
    }
  }
  return new HdfsLock(conf, lockFile);
}
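In normal use the factory is not called directly; Lucene reaches it through Directory.obtainLock(). A sketch of acquiring and releasing the write lock on an HdfsDirectory; the index path and the two-argument HdfsDirectory constructor (which defaults to HdfsLockFactory) are assumptions for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.solr.store.hdfs.HdfsDirectory;

public class HdfsLockDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // hypothetical index location
    Path indexPath = new Path("hdfs://namenode:8020/solr/core1/index");
    try (HdfsDirectory dir = new HdfsDirectory(indexPath, conf)) {
      try (Lock lock = dir.obtainLock("write.lock")) {
        // the lock file exists exclusively for the lifetime of this block
      } catch (LockObtainFailedException e) {
        // another writer holds the lock; back off and retry later
      }
    }
  }
}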