Use of java.nio.file.FileAlreadyExistsException in project bnd (by bndtools) — class IO, method copy:
/**
 * Copies a regular file or a whole directory tree from {@code src} to {@code tgt}.
 * <p>
 * A regular file overwrites any existing target. A directory is copied
 * recursively (following symlinks); files that already exist in the target are
 * replaced, and each copied file's last-modified time is set to the time the
 * copy started. Pre-existing target directories are tolerated.
 *
 * @param src source file or directory; must exist
 * @param tgt target path; for a directory copy it must be (or become) a directory
 * @return {@code tgt}, unchanged, for call chaining
 * @throws FileNotFoundException if {@code src} is neither a regular file nor a directory
 * @throws IllegalArgumentException if the target is not a directory when one is
 *         required, or lies inside the source tree
 * @throws IOException on any underlying copy failure
 */
public static Path copy(Path src, Path tgt) throws IOException {
	final Path source = src.toAbsolutePath();
	final Path target = tgt.toAbsolutePath();
	if (Files.isRegularFile(source)) {
		Files.copy(source, target, StandardCopyOption.REPLACE_EXISTING);
		return tgt;
	}
	if (Files.isDirectory(source)) {
		// Validate BEFORE creating anything: previously mkdirs(target) ran first,
		// so a target nested inside the source tree was created and then the
		// copy aborted, leaving new debris directories inside the source.
		if (target.startsWith(source))
			throw new IllegalArgumentException("target directory can not be child of source directory.");
		if (Files.notExists(target)) {
			mkdirs(target);
		}
		if (!Files.isDirectory(target))
			throw new IllegalArgumentException("target directory for a directory must be a directory: " + target);
		Files.walkFileTree(source, EnumSet.of(FileVisitOption.FOLLOW_LINKS), Integer.MAX_VALUE, new FileVisitor<Path>() {
			// Single timestamp for the whole copy so all copied files agree.
			final FileTime now = FileTime.fromMillis(System.currentTimeMillis());

			@Override
			public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
				Path targetdir = target.resolve(source.relativize(dir));
				try {
					Files.copy(dir, targetdir);
				} catch (FileAlreadyExistsException e) {
					// An existing directory is fine; anything else in the way is an error.
					if (!Files.isDirectory(targetdir))
						throw e;
				}
				return FileVisitResult.CONTINUE;
			}

			@Override
			public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
				Path targetFile = target.resolve(source.relativize(file));
				Files.copy(file, targetFile, StandardCopyOption.REPLACE_EXISTING);
				Files.setLastModifiedTime(targetFile, now);
				return FileVisitResult.CONTINUE;
			}

			@Override
			public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
				if (exc != null) {
					// directory iteration failed; abort the whole copy
					throw exc;
				}
				return FileVisitResult.CONTINUE;
			}

			@Override
			public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
				if (exc != null) {
					throw exc;
				}
				return FileVisitResult.CONTINUE;
			}
		});
		return tgt;
	}
	throw new FileNotFoundException("During copy: " + source.toString());
}
Use of java.nio.file.FileAlreadyExistsException in project cdap (by caskdata) — class AbstractStorageProviderNamespaceAdmin, method createLocation:
/**
 * Creates the on-disk directory layout (home, data/, tmp/, streams/,
 * streams/.deleted/) for a namespace, applying group ownership and
 * permissions when Kerberos is enabled. On any failure, directories created
 * by this call are deleted best-effort before the error is rethrown.
 *
 * @param namespaceMeta metadata describing the namespace to provision
 * @throws IOException if the home location already exists (as
 *         FileAlreadyExistsException) or any directory operation fails
 */
private void createLocation(NamespaceMeta namespaceMeta) throws IOException {
NamespaceId namespaceId = namespaceMeta.getNamespaceId();
boolean createdHome = false;
Location namespaceHome;
if (hasCustomLocation(namespaceMeta)) {
// Custom location: validated but NOT created by us, so createdHome stays
// false and cleanup below only removes the sub-directories we made.
namespaceHome = validateCustomLocation(namespaceMeta);
} else {
// no namespace custom location was provided, so one must be created by cdap
namespaceHome = namespacedLocationFactory.get(namespaceMeta);
if (namespaceHome.exists()) {
// file/other args are null so the exception message is just the reason string
throw new FileAlreadyExistsException(null, null, String.format("HDFS directory '%s' for '%s' already exists.", namespaceHome, namespaceId));
}
createdHome = createNamespaceDir(namespaceHome, "home", namespaceId);
}
// data/
Location dataLoc = namespaceHome.append(Constants.Dataset.DEFAULT_DATA_DIR);
// tmp/
Location tempLoc = namespaceHome.append(cConf.get(Constants.AppFabric.TEMP_DIR));
// streams/
Location streamsLoc = namespaceHome.append(cConf.get(Constants.Stream.BASE_DIR));
// streams/.deleted/
Location deletedLoc = streamsLoc.append(StreamUtils.DELETED);
String configuredGroupName = namespaceMeta.getConfig().getGroupName();
// Track which directories we created so the catch block can undo exactly those.
boolean createdData = false;
boolean createdTemp = false;
boolean createdStreams = false;
try {
if (createdHome && SecurityUtil.isKerberosEnabled(cConf)) {
// set the group id of the namespace home if configured, or the current user's primary group
String groupToSet = configuredGroupName;
if (groupToSet == null) {
// attempt to determine the current user's primary group. Note that we cannot use ugi.getPrimaryGroup()
// because that is not implemented at least in Hadoop 2.0 and 2.2, possibly other versions. Also note
// that there is no guarantee that getGroupNames() returns anything.
String[] groups = UserGroupInformation.getCurrentUser().getGroupNames();
if (groups != null && groups.length > 0) {
groupToSet = groups[0];
}
}
// if this is still null at this point, then the directory will have whatever HDFS assigned at creation
if (groupToSet != null) {
namespaceHome.setGroup(groupToSet);
}
}
// create all the directories with default permissions
createdData = createNamespaceDir(dataLoc, "data", namespaceId);
createdTemp = createNamespaceDir(tempLoc, "temp", namespaceId);
createdStreams = createNamespaceDir(streamsLoc, "streams", namespaceId);
createNamespaceDir(deletedLoc, "deleted streams", namespaceId);
// if a group name is configured, then that group; otherwise the same group as the namespace home dir
if (SecurityUtil.isKerberosEnabled(cConf)) {
String groupToSet = configuredGroupName != null ? configuredGroupName : namespaceHome.getGroup();
for (Location loc : new Location[] { dataLoc, tempLoc, streamsLoc, deletedLoc }) {
loc.setGroup(groupToSet);
// set the permissions to rwx for group, if a group name was configured for the namespace
if (configuredGroupName != null) {
// permissions string is rwx triplets (user/group/other); indices 3..5 are the group bits
String permissions = loc.getPermissions();
loc.setPermissions(permissions.substring(0, 3) + "rwx" + permissions.substring(6));
}
}
}
} catch (Throwable t) {
// Roll back only what this call created, then rethrow the original failure.
if (createdHome) {
// deleting home removes all sub-directories with it
deleteDirSilently(namespaceHome, t, "home", namespaceMeta.getNamespaceId());
} else {
if (createdData) {
deleteDirSilently(dataLoc, t, "data", namespaceMeta.getNamespaceId());
}
if (createdTemp) {
deleteDirSilently(tempLoc, t, "temp", namespaceMeta.getNamespaceId());
}
if (createdStreams) {
deleteDirSilently(streamsLoc, t, "streams", namespaceMeta.getNamespaceId());
}
}
throw t;
}
}
Use of java.nio.file.FileAlreadyExistsException in project druid (by druid-io) — class LocalDataSegmentPusher, method push:
/**
 * Pushes a segment to the local filesystem: compresses it into a temporary
 * directory, then atomically moves that directory into the final storage
 * location. A concurrent push that wins the move causes this one to discard
 * its temp output and adopt the winner's descriptor instead.
 *
 * @param dataSegmentFile directory containing the segment files to push
 * @param segment metadata for the segment being pushed
 * @return the segment metadata with load spec, size and binary version filled in
 * @throws IOException on compression, descriptor, or filesystem failure
 */
@Override
public DataSegment push(File dataSegmentFile, DataSegment segment) throws IOException {
final String storageDir = DataSegmentPusherUtil.getStorageDir(segment);
final File baseStorageDir = config.getStorageDirectory();
final File outDir = new File(baseStorageDir, storageDir);
log.info("Copying segment[%s] to local filesystem at location[%s]", segment.getIdentifier(), outDir.toString());
if (dataSegmentFile.equals(outDir)) {
// Source already IS the destination: just total up the file sizes and
// write the descriptor; no copy or compression needed.
// NOTE(review): listFiles() returns null on I/O error — presumably outDir
// is always a readable directory here; confirm upstream guarantees.
long size = 0;
for (File file : dataSegmentFile.listFiles()) {
size += file.length();
}
return createDescriptorFile(segment.withLoadSpec(makeLoadSpec(outDir)).withSize(size).withBinaryVersion(SegmentUtils.getVersionFromDir(dataSegmentFile)), outDir);
}
// Stage into a unique intermediate directory so the final publish is a single rename.
final File tmpOutDir = new File(baseStorageDir, intermediateDirFor(storageDir));
log.info("Creating intermediate directory[%s] for segment[%s]", tmpOutDir.toString(), segment.getIdentifier());
final long size = compressSegment(dataSegmentFile, tmpOutDir);
final DataSegment dataSegment = createDescriptorFile(segment.withLoadSpec(makeLoadSpec(new File(outDir, "index.zip"))).withSize(size).withBinaryVersion(SegmentUtils.getVersionFromDir(dataSegmentFile)), tmpOutDir);
// moving the temporary directory to the final destination, once success the potentially concurrent push operations
// will be failed and will read the descriptor.json created by current push operation directly
FileUtils.forceMkdir(outDir.getParentFile());
try {
java.nio.file.Files.move(tmpOutDir.toPath(), outDir.toPath());
} catch (FileAlreadyExistsException e) {
// A concurrent push won the race: drop our staging dir and return the
// descriptor the winner already published.
log.warn("Push destination directory[%s] exists, ignore this message if replication is configured.", outDir);
FileUtils.deleteDirectory(tmpOutDir);
return jsonMapper.readValue(new File(outDir, "descriptor.json"), DataSegment.class);
}
return dataSegment;
}
Use of java.nio.file.FileAlreadyExistsException in project elasticsearch (by elastic) — class StreamOutput, method writeException:
/**
 * Serializes a throwable (or null) onto this stream.
 * <p>
 * Wire format: a boolean presence flag, then a vint type id selecting one of
 * the well-known JDK/Lucene exception types below (id 0 = registered
 * ElasticsearchException / wrapper), followed by type-specific fields,
 * optionally the message, recursively the cause, and the stack trace.
 * The numeric ids are part of the wire protocol — never renumber them.
 *
 * @param throwable the exception to write; may be null
 * @throws IOException if writing to the underlying stream fails
 */
public void writeException(Throwable throwable) throws IOException {
if (throwable == null) {
writeBoolean(false);
} else {
writeBoolean(true);
// Per-type flags: some types carry their message/cause in dedicated
// fields (or cannot have one), so the generic trailer is suppressed.
boolean writeCause = true;
boolean writeMessage = true;
if (throwable instanceof CorruptIndexException) {
writeVInt(1);
writeOptionalString(((CorruptIndexException) throwable).getOriginalMessage());
writeOptionalString(((CorruptIndexException) throwable).getResourceDescription());
writeMessage = false;
} else if (throwable instanceof IndexFormatTooNewException) {
writeVInt(2);
writeOptionalString(((IndexFormatTooNewException) throwable).getResourceDescription());
writeInt(((IndexFormatTooNewException) throwable).getVersion());
writeInt(((IndexFormatTooNewException) throwable).getMinVersion());
writeInt(((IndexFormatTooNewException) throwable).getMaxVersion());
writeMessage = false;
writeCause = false;
} else if (throwable instanceof IndexFormatTooOldException) {
writeVInt(3);
IndexFormatTooOldException t = (IndexFormatTooOldException) throwable;
writeOptionalString(t.getResourceDescription());
if (t.getVersion() == null) {
// boolean discriminates the version-less (reason string) variant
writeBoolean(false);
writeOptionalString(t.getReason());
} else {
writeBoolean(true);
writeInt(t.getVersion());
writeInt(t.getMinVersion());
writeInt(t.getMaxVersion());
}
writeMessage = false;
writeCause = false;
} else if (throwable instanceof NullPointerException) {
writeVInt(4);
writeCause = false;
} else if (throwable instanceof NumberFormatException) {
writeVInt(5);
writeCause = false;
} else if (throwable instanceof IllegalArgumentException) {
writeVInt(6);
} else if (throwable instanceof AlreadyClosedException) {
writeVInt(7);
} else if (throwable instanceof EOFException) {
writeVInt(8);
writeCause = false;
} else if (throwable instanceof SecurityException) {
writeVInt(9);
} else if (throwable instanceof StringIndexOutOfBoundsException) {
writeVInt(10);
writeCause = false;
} else if (throwable instanceof ArrayIndexOutOfBoundsException) {
writeVInt(11);
writeCause = false;
} else if (throwable instanceof FileNotFoundException) {
writeVInt(12);
writeCause = false;
} else if (throwable instanceof FileSystemException) {
// FileSystemException family: outer id 13, then a second vint selecting
// the concrete subclass (7 = plain FileSystemException fallback),
// then the file / otherFile / reason fields common to the family.
writeVInt(13);
if (throwable instanceof NoSuchFileException) {
writeVInt(0);
} else if (throwable instanceof NotDirectoryException) {
writeVInt(1);
} else if (throwable instanceof DirectoryNotEmptyException) {
writeVInt(2);
} else if (throwable instanceof AtomicMoveNotSupportedException) {
writeVInt(3);
} else if (throwable instanceof FileAlreadyExistsException) {
writeVInt(4);
} else if (throwable instanceof AccessDeniedException) {
writeVInt(5);
} else if (throwable instanceof FileSystemLoopException) {
writeVInt(6);
} else {
writeVInt(7);
}
writeOptionalString(((FileSystemException) throwable).getFile());
writeOptionalString(((FileSystemException) throwable).getOtherFile());
writeOptionalString(((FileSystemException) throwable).getReason());
writeCause = false;
} else if (throwable instanceof IllegalStateException) {
writeVInt(14);
} else if (throwable instanceof LockObtainFailedException) {
writeVInt(15);
} else if (throwable instanceof InterruptedException) {
writeVInt(16);
writeCause = false;
} else if (throwable instanceof IOException) {
writeVInt(17);
} else {
// Not a well-known type: id 0 plus the registered ElasticsearchException
// id; unregistered throwables are wrapped so they stay serializable.
ElasticsearchException ex;
if (throwable instanceof ElasticsearchException && ElasticsearchException.isRegistered(throwable.getClass(), version)) {
ex = (ElasticsearchException) throwable;
} else {
ex = new NotSerializableExceptionWrapper(throwable);
}
writeVInt(0);
writeVInt(ElasticsearchException.getId(ex.getClass()));
ex.writeTo(this);
// ex.writeTo handles message/cause/stack itself, so return early here
return;
}
if (writeMessage) {
writeOptionalString(throwable.getMessage());
}
if (writeCause) {
// recursive: the cause chain is written as nested writeException frames
writeException(throwable.getCause());
}
ElasticsearchException.writeStackTraces(throwable, this);
}
}
Use of java.nio.file.FileAlreadyExistsException in project vert.x (by eclipse) — class FileResolver, method unpackFromFileURL:
/**
 * Unpacks the resource behind a {@code file:} URL into the cache directory.
 * <p>
 * A plain file is copied to {@code cacheDir/fileName} (kept if caching is
 * enabled and it already exists, otherwise replaced); a directory is
 * recreated and its entries unpacked recursively via the class loader.
 *
 * @param url file URL of the resource to unpack
 * @param fileName relative cache path for the resource
 * @param cl class loader used to resolve nested directory entries
 * @return the cached file or directory
 * @throws VertxException if decoding, listing, or copying fails
 */
private synchronized File unpackFromFileURL(URL url, String fileName, ClassLoader cl) {
	File resource;
	try {
		resource = new File(URLDecoder.decode(url.getPath(), "UTF-8"));
	} catch (UnsupportedEncodingException e) {
		throw new VertxException(e);
	}
	boolean isDirectory = resource.isDirectory();
	File cacheFile = new File(cacheDir, fileName);
	if (!isDirectory) {
		cacheFile.getParentFile().mkdirs();
		try {
			if (ENABLE_CACHING) {
				// keep an existing cached copy; FileAlreadyExistsException below is expected
				Files.copy(resource.toPath(), cacheFile.toPath());
			} else {
				Files.copy(resource.toPath(), cacheFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
			}
		} catch (FileAlreadyExistsException ignore) {
			// cached copy already present — nothing to do
		} catch (IOException e) {
			throw new VertxException(e);
		}
	} else {
		cacheFile.mkdirs();
		String[] listing = resource.list();
		// FIX: File.list() returns null on I/O error — previously this NPE'd
		// in the for-loop; fail with a descriptive exception instead.
		if (listing == null) {
			throw new VertxException("Cannot read directory " + resource);
		}
		for (String file : listing) {
			String subResource = fileName + "/" + file;
			URL url2 = cl.getResource(subResource);
			// FIX: getResource can return null (entry not visible to this class
			// loader) — previously the recursive call NPE'd on url.getPath().
			if (url2 != null) {
				unpackFromFileURL(url2, subResource, cl);
			}
		}
	}
	return cacheFile;
}
Aggregations