Use of java.nio.file.NoSuchFileException in project gatk by broadinstitute.
The class ParallelCopyGCSDirectoryIntoHDFSSpark, method runTool.
@Override
protected void runTool(final JavaSparkContext ctx) {
    if (!BucketUtils.isCloudStorageUrl(inputGCSPath)) {
        throw new UserException("Input path " + inputGCSPath + " is not a GCS URI");
    }
    if (!BucketUtils.isHadoopUrl(outputHDFSDirectory)) {
        throw new UserException("Output directory " + outputHDFSDirectory + " is not an HDFS URI");
    }
    final String inputGCSPathFinal = inputGCSPath;
    final String outputDirectoryFinal = outputHDFSDirectory;
    org.apache.hadoop.fs.Path outputHdfsDirectoryPath = new org.apache.hadoop.fs.Path(outputHDFSDirectory);
    try (FileSystem fs = outputHdfsDirectoryPath.getFileSystem(new Configuration())) {
        if (fs.exists(outputHdfsDirectoryPath)) {
            throw new UserException("Specified output directory " + outputHdfsDirectoryPath + " already exists. Please specify a new directory name.");
        }
        fs.mkdirs(outputHdfsDirectoryPath);
        final long chunkSize = getChunkSize(fs);
        final List<Path> gcsNIOPaths = getGCSFilePathsToCopy(inputGCSPathFinal);
        List<Tuple2<String, Integer>> chunkList = setupChunks(chunkSize, gcsNIOPaths);
        if (chunkList.size() == 0) {
            logger.info("no files found to copy");
            return;
        }
        final JavaPairRDD<String, Integer> chunkRDD = ctx.parallelizePairs(chunkList, chunkList.size());
        final JavaPairRDD<String, Tuple2<Integer, String>> chunkMappingRDD =
                chunkRDD.mapToPair(p -> new Tuple2<>(p._1(), readChunkToHdfs(p._1(), chunkSize, p._2(), outputDirectoryFinal)));
        final Map<String, Iterable<Tuple2<Integer, String>>> chunksByFilePath =
                chunkMappingRDD.groupByKey().collectAsMap();
        concatenateChunks(outputDirectoryFinal, fs, gcsNIOPaths, chunksByFilePath);
    } catch (NoSuchFileException e) {
        throw new UserException("Could not locate input path " + e.getFile() + ". If you are trying to copy an entire directory, please include a trailing slash on your path.");
    } catch (IOException e) {
        throw new GATKException(e.getMessage(), e);
    }
}
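The catch clauses translate low-level NIO failures into user-facing errors; note that NoSuchFileException.getFile() recovers the offending path string for the message. A minimal sketch of the same rethrow pattern, assuming the project's UserException and GATKException types (the helper name readGcsObject is hypothetical):

// Hypothetical helper distilling the rethrow pattern used above.
static byte[] readGcsObject(final java.nio.file.Path gcsPath) {
    try {
        return java.nio.file.Files.readAllBytes(gcsPath);
    } catch (java.nio.file.NoSuchFileException e) {
        // getFile() returns the path string captured when the exception was thrown
        throw new UserException("Could not locate input path " + e.getFile());
    } catch (java.io.IOException e) {
        throw new GATKException(e.getMessage(), e);
    }
}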
Use of java.nio.file.NoSuchFileException in project google-cloud-java by GoogleCloudPlatform.
The class ITGcsNio, method testWriteOnClose.
@Test
public void testWriteOnClose() throws Exception {
    CloudStorageFileSystem testBucket = getTestBucket();
    Path path = testBucket.getPath(PREFIX + randomSuffix());
    // file shouldn't exist initially (see above)
    assertThat(Files.exists(path)).isFalse();
    try {
        long expectedSize = 0;
        try (SeekableByteChannel chan = Files.newByteChannel(path, StandardOpenOption.WRITE)) {
            // writing lots of contents to defeat channel-internal buffering.
            for (String s : FILE_CONTENTS) {
                byte[] sBytes = s.getBytes(UTF_8);
                expectedSize += sBytes.length * 9999;
                for (int i = 0; i < 9999; i++) {
                    chan.write(ByteBuffer.wrap(sBytes));
                }
            }
            try {
                Files.size(path);
                // we shouldn't make it to this line. Not using thrown.expect because
                // I still want to run a few lines after the exception.
                Assert.fail("Files.size should have thrown an exception");
            } catch (NoSuchFileException nsf) {
                // that's what we wanted, we're good.
            }
        }
        // channel now closed, the file should be there and with the new contents.
        assertThat(Files.exists(path)).isTrue();
        assertThat(Files.size(path)).isEqualTo(expectedSize);
    } finally {
        Files.deleteIfExists(path);
    }
}
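The test documents the write-on-close contract of the GCS NIO provider: bytes written through the channel do not become a visible object until the channel is closed, so Files.size on the half-written path throws NoSuchFileException. A minimal sketch of probing that behavior outside a test harness, assuming the google-cloud-nio provider is on the classpath ("my-bucket" and "/demo.txt" are placeholders):

// Sketch: the object only materializes when the channel closes.
static void demoWriteOnClose() throws IOException {
    Path p = CloudStorageFileSystem.forBucket("my-bucket").getPath("/demo.txt");
    try (SeekableByteChannel chan = Files.newByteChannel(p, StandardOpenOption.WRITE)) {
        chan.write(ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8)));
        Files.size(p); // throws NoSuchFileException: object not visible yet
    } catch (NoSuchFileException expected) {
        // expected while the channel was still open
    }
    long size = Files.size(p); // succeeds: close() committed the object
}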
Use of java.nio.file.NoSuchFileException in project jabref by JabRef.
The class CitationStyle, method createCitationStyleFromFile.
/**
 * Loads the CitationStyle from the given file.
 */
public static CitationStyle createCitationStyleFromFile(final String styleFile) {
    if (!isCitationStyleFile(styleFile)) {
        LOGGER.error("Can only load style files: " + styleFile);
        return null;
    }
    try {
        String text;
        String internalFile = (styleFile.startsWith("/") ? "" : "/") + styleFile;
        URL url = CitationStyle.class.getResource(internalFile);
        if (url != null) {
            text = CSLUtils.readURLToString(url, StandardCharsets.UTF_8.toString());
        } else {
            // if the url is null then the style is located outside the classpath
            text = new String(Files.readAllBytes(Paths.get(styleFile)), StandardCharsets.UTF_8);
        }
        return createCitationStyleFromSource(text, styleFile);
    } catch (NoSuchFileException e) {
        LOGGER.error("Could not find file: " + styleFile, e);
    } catch (IOException e) {
        LOGGER.error("Error reading source file", e);
    }
    return null;
}
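The lookup order matters here: bundled styles resolve from the classpath first, and only then is the name treated as a filesystem path, where a missing file surfaces as NoSuchFileException. A minimal sketch of that fallback on its own, reusing only calls from the method above (readStyleText is a hypothetical helper name):

// Hypothetical helper distilling the classpath-then-filesystem fallback.
static String readStyleText(final String styleFile) throws IOException {
    String internalFile = (styleFile.startsWith("/") ? "" : "/") + styleFile;
    URL url = CitationStyle.class.getResource(internalFile);
    if (url != null) {
        // Bundled style: read it straight from the classpath.
        return CSLUtils.readURLToString(url, StandardCharsets.UTF_8.toString());
    }
    // Otherwise treat the name as a filesystem path; a missing file
    // surfaces as NoSuchFileException from Files.readAllBytes.
    return new String(Files.readAllBytes(Paths.get(styleFile)), StandardCharsets.UTF_8);
}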
Use of java.nio.file.NoSuchFileException in project Terasology by MovingBlocks.
The class AbstractEditorScreen, method loadAutosave.
/**
 * Resets the editor based on the state of the autosave file.
 */
protected void loadAutosave() {
    if (!disableAutosave) {
        try (JsonReader reader = new JsonReader(new InputStreamReader(Files.newInputStream(getAutosaveFile())))) {
            reader.setLenient(true);
            String autosaveString = new JsonParser().parse(reader).toString();
            JsonObject autosaveObject = new JsonParser().parse(autosaveString).getAsJsonObject();
            String selectedAsset = autosaveObject.get("selectedAsset").getAsString();
            setSelectedAsset(selectedAsset);
            try {
                ResourceUrn urn = new ResourceUrn(selectedAsset);
                setSelectedAssetPath(urn);
            } catch (InvalidUrnException ignored) {
            }
            JsonTree editorContents = JsonTreeConverter.serialize(autosaveObject.get("editorContents"));
            resetState(editorContents);
            setUnsavedChangesPresent(true);
        } catch (NoSuchFileException ignored) {
        } catch (IOException e) {
            logger.warn("Could not load autosaved info", e);
        }
    }
}
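A missing autosave file is the expected first-run case, so the NoSuchFileException branch is deliberately empty while every other IOException is logged. A minimal sketch of that distinction, with getAutosaveFile() and loadState(...) standing in for the editor's own calls:

// Sketch: treat "file absent" as normal, every other I/O failure as a warning.
try {
    byte[] bytes = java.nio.file.Files.readAllBytes(getAutosaveFile());
    loadState(bytes); // placeholder for restoring the editor from the bytes
} catch (java.nio.file.NoSuchFileException ignored) {
    // No autosave yet (e.g. first launch): nothing to restore.
} catch (java.io.IOException e) {
    logger.warn("Could not load autosaved info", e);
}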
Use of java.nio.file.NoSuchFileException in project Singularity by HubSpot.
The class SingularityS3UploaderDriver, method checkUploads.
private int checkUploads() {
    if (metadataToUploader.isEmpty() && metadataToImmediateUploader.isEmpty()) {
        return 0;
    }
    int totesUploads = 0;
    // Check results of immediate uploaders
    List<S3UploadMetadata> toRetry = new ArrayList<>();
    List<S3UploadMetadata> toRemove = new ArrayList<>();
    for (Map.Entry<S3UploadMetadata, CompletableFuture<Integer>> entry : immediateUploadersFutures.entrySet()) {
        SingularityUploader uploader = metadataToImmediateUploader.get(entry.getKey());
        if (uploader == null) {
            toRemove.add(entry.getKey());
            continue;
        }
        try {
            int uploadedFiles = entry.getValue().get();
            List<Path> remainingFiles = uploader.filesToUpload(isFinished(uploader));
            if (!remainingFiles.isEmpty() || uploadedFiles == -1) {
                LOG.debug("Immediate uploader had {} remaining files, previously uploaded {}, will retry", remainingFiles.size(), uploadedFiles);
                toRetry.add(entry.getKey());
            } else {
                totesUploads += uploadedFiles;
                toRemove.add(entry.getKey());
            }
        } catch (Throwable t) {
            metrics.error();
            LOG.error("Waiting on future", t);
            exceptionNotifier.notify(String.format("Error waiting on uploader future (%s)", t.getMessage()), t, ImmutableMap.of("metadataPath", uploader.getMetadataPath().toString()));
            toRetry.add(entry.getKey());
        }
    }
    for (S3UploadMetadata uploaderMetadata : toRemove) {
        metrics.getImmediateUploaderCounter().dec();
        SingularityUploader uploader = metadataToImmediateUploader.remove(uploaderMetadata);
        CompletableFuture<Integer> uploaderFuture = immediateUploadersFutures.remove(uploaderMetadata);
        if (uploaderFuture != null) {
            try {
                // All uploaders reaching this point should already be finished; if one isn't done in 30s, it's stuck
                uploaderFuture.get(30, TimeUnit.SECONDS);
            } catch (Throwable t) {
                LOG.error("Exception waiting for immediate uploader to complete for metadata {}", uploaderMetadata, t);
            }
        }
        if (uploader == null) {
            continue;
        }
        expiring.remove(uploader);
        try {
            LOG.debug("Deleting finished immediate uploader {}", uploader.getMetadataPath());
            Files.delete(uploader.getMetadataPath());
        } catch (NoSuchFileException nfe) {
            LOG.warn("File {} was already deleted", nfe.getFile());
        } catch (IOException e) {
            LOG.warn("Couldn't delete {}", uploader.getMetadataPath(), e);
            exceptionNotifier.notify("Could not delete metadata file", e, ImmutableMap.of("metadataPath", uploader.getMetadataPath().toString()));
        }
    }
    for (S3UploadMetadata uploaderMetadata : toRetry) {
        SingularityUploader uploader = metadataToImmediateUploader.get(uploaderMetadata);
        if (uploader != null) {
            LOG.debug("Retrying immediate uploader {}", uploaderMetadata);
            performImmediateUpload(uploader);
        } else {
            LOG.debug("Uploader for metadata {} not found to retry upload", uploaderMetadata);
        }
    }
    // Check regular uploaders
    int initialExpectedSize = Math.max(metadataToUploader.size(), 1);
    final Map<SingularityUploader, CompletableFuture<Integer>> futures = Maps.newHashMapWithExpectedSize(initialExpectedSize);
    final Map<SingularityUploader, Boolean> finishing = Maps.newHashMapWithExpectedSize(initialExpectedSize);
    for (final SingularityUploader uploader : metadataToUploader.values()) {
        final boolean isFinished = isFinished(uploader);
        // do this here so we run at least once with isFinished = true
        finishing.put(uploader, isFinished);
        futures.put(uploader, CompletableFuture.supplyAsync(performUploadSupplier(uploader, isFinished, false), executorService));
    }
    LOG.info("Waiting on {} future(s)", futures.size());
    final long now = System.currentTimeMillis();
    final Set<SingularityUploader> expiredUploaders = Sets.newHashSetWithExpectedSize(initialExpectedSize);
    for (Entry<SingularityUploader, CompletableFuture<Integer>> uploaderToFuture : futures.entrySet()) {
        final SingularityUploader uploader = uploaderToFuture.getKey();
        try {
            final int foundFiles = uploaderToFuture.getValue().get();
            final boolean isFinished = finishing.get(uploader);
            if (foundFiles == 0 && shouldExpire(uploader, isFinished)) {
                LOG.info("Expiring {}", uploader);
                expiredUploaders.add(uploader);
            } else {
                LOG.trace("Updating uploader {} last expire time", uploader);
                uploaderLastHadFilesAt.put(uploader, now);
            }
            totesUploads += foundFiles;
        } catch (Throwable t) {
            metrics.error();
            LOG.error("Waiting on future", t);
            exceptionNotifier.notify(String.format("Error waiting on uploader future (%s)", t.getMessage()), t, ImmutableMap.of("metadataPath", uploader.getMetadataPath().toString()));
        }
    }
    for (SingularityUploader expiredUploader : expiredUploaders) {
        metrics.getUploaderCounter().dec();
        metadataToUploader.remove(expiredUploader.getUploadMetadata());
        uploaderLastHadFilesAt.remove(expiredUploader);
        expiring.remove(expiredUploader);
        try {
            LOG.debug("Deleting expired uploader {}", expiredUploader.getMetadataPath());
            Files.delete(expiredUploader.getMetadataPath());
        } catch (NoSuchFileException nfe) {
            LOG.warn("File {} was already deleted", nfe.getFile());
        } catch (IOException e) {
            LOG.warn("Couldn't delete {}", expiredUploader.getMetadataPath(), e);
            exceptionNotifier.notify("Could not delete metadata file", e, ImmutableMap.of("metadataPath", expiredUploader.getMetadataPath().toString()));
        }
    }
    return totesUploads;
}
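Both delete sites above treat NoSuchFileException as a benign race: another cleanup pass may have removed the metadata file first, and getFile() names the path that had already vanished. A minimal sketch of that tolerant delete (metadataPath stands in for uploader.getMetadataPath()), with the Files.deleteIfExists alternative for when the per-path log line isn't needed:

// Sketch: tolerate a concurrent delete of the metadata file.
try {
    Files.delete(metadataPath);
} catch (NoSuchFileException nfe) {
    // Benign race: getFile() reports which path had already vanished.
    LOG.warn("File {} was already deleted", nfe.getFile());
} catch (IOException e) {
    LOG.warn("Couldn't delete {}", metadataPath, e);
}
// Equivalent without the log message:
// boolean deleted = Files.deleteIfExists(metadataPath);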