Use of org.sonatype.nexus.blobstore.api.BlobStoreException in the project nexus-blobstore-google-cloud by sonatype-nexus-community, in the class MultipartUploader, method parallelUpload.
/**
 * Uploads the given stream to Google Cloud Storage as a series of chunks uploaded in
 * parallel, then coalesces them with a single compose request.
 *
 * @param storage an initialized {@link Storage} instance
 * @param bucket the name of the bucket
 * @param destination the destination (relative to the bucket)
 * @param contents the stream of data to store
 * @return the successfully stored {@link Blob}
 * @throws BlobStoreException if any part of the upload failed
 */
Blob parallelUpload(final Storage storage, final String bucket, final String destination, final InputStream contents) {
log.debug("Starting parallel multipart upload for destination {} in bucket {}", destination, bucket);
// this must represent the bucket-relative paths to the chunks, in order of composition
List<String> chunkNames = new ArrayList<>();
Optional<Blob> singleChunk = Optional.empty();
try (InputStream current = contents) {
List<ListenableFuture<Blob>> chunkFutures = new ArrayList<>();
// MUST respect hard limit of 32 chunks per compose request
for (int partNumber = 1; partNumber <= COMPOSE_REQUEST_LIMIT; partNumber++) {
final byte[] chunk;
if (partNumber < COMPOSE_REQUEST_LIMIT) {
chunk = readChunk(current);
} else {
// we've hit compose request limit: ship the remainder of the stream as one final chunk
composeLimitHitCounter.inc();
chunk = EMPTY;
log.debug("Upload for {} has hit Google Cloud Storage multipart-compose limit ({} total times limit hit)", destination, getNumberOfTimesComposeLimitHit());
final String finalChunkName = toChunkName(destination, COMPOSE_REQUEST_LIMIT);
chunkNames.add(finalChunkName);
chunkFutures.add(executorService.submit(() -> {
log.debug("Uploading final chunk {} for {} of unknown remaining bytes", COMPOSE_REQUEST_LIMIT, destination);
BlobInfo blobInfo = BlobInfo.newBuilder(bucket, finalChunkName).build();
// downside here is that since we don't know the stream size, we can't chunk it.
return storage.create(blobInfo, current, BlobWriteOption.disableGzipContent());
}));
}
// EMPTY is a sentinel value; the identity comparison here is intentional.
// Stop once the stream is exhausted (or the final-chunk branch above ran).
if (chunk == EMPTY && partNumber > 1) {
break;
}
final String chunkName = toChunkName(destination, partNumber);
chunkNames.add(chunkName);
if (partNumber == 1) {
// upload the first part on the current thread
BlobInfo blobInfo = BlobInfo.newBuilder(bucket, chunkName).build();
Blob blob = storage.create(blobInfo, chunk, BlobTargetOption.disableGzipContent());
singleChunk = Optional.of(blob);
} else {
singleChunk = Optional.empty();
// 2nd through N chunks will happen off current thread in parallel
final int chunkIndex = partNumber;
chunkFutures.add(executorService.submit(() -> {
log.debug("Uploading chunk {} for {} of {} bytes", chunkIndex, destination, chunk.length);
BlobInfo blobInfo = BlobInfo.newBuilder(bucket, chunkName).build();
return storage.create(blobInfo, chunk, BlobTargetOption.disableGzipContent());
}));
}
}
// return the single result if it exists; otherwise finalize the parallel multipart workers
return singleChunk.orElseGet(() -> {
CountDownLatch block = new CountDownLatch(1);
Futures.whenAllComplete(chunkFutures).run(() -> block.countDown(), MoreExecutors.directExecutor());
// wait for all the futures to complete
log.debug("waiting for {} remaining chunks to complete", chunkFutures.size());
try {
block.await();
} catch (InterruptedException e) {
// restore the interrupt status so callers further up the stack can observe it
Thread.currentThread().interrupt();
log.error("caught InterruptedException waiting for multipart upload to complete on {}", destination);
throw new RuntimeException(e);
}
log.debug("chunk uploads completed, sending compose request");
// finalize with compose request to coalesce the chunks
Blob finalBlob = storage.compose(ComposeRequest.of(bucket, chunkNames, destination));
log.debug("Parallel multipart upload of {} complete", destination);
return finalBlob;
});
} catch (Exception e) {
throw new BlobStoreException("Error uploading blob", e, null);
} finally {
numberOfChunks.update(chunkNames.size());
// remove any .chunkN files off-thread
// make sure not to delete the first chunk (which has the desired destination name with no suffix)
deferredCleanup(storage, bucket, chunkNames);
}
}
Use of org.sonatype.nexus.blobstore.api.BlobStoreException in the project nexus-public by sonatype, in the class DatastoreDeadBlobFinderTest, method anAssetBlobCanBeDeletedWhileTheSystemIsInspected.
@Test
public void anAssetBlobCanBeDeletedWhileTheSystemIsInspected() {
AssetBlob missingAssetBlob = mockAssetBlob(mock(AssetBlob.class));
// first pass we have a missing blobRef
when(asset.blob()).thenReturn(Optional.of(missingAssetBlob));
FluentAsset reloadedAsset = createAsset(assetBlob);
// second pass the blobRef is there but file does not exist
Blob reloadedBlob = mock(Blob.class);
when(reloadedBlob.getMetrics()).thenReturn(blobMetrics);
BlobId missingBlobId = reloadedAsset.blob().get().blobRef().getBlobId();
when(blobStore.get(missingBlobId)).thenReturn(reloadedBlob);
mockAssetBrowse();
mockAssetReload(reloadedAsset);
// the underlying blob was deleted between the reload and the stream read,
// so reading the reloaded blob's content raises a BlobStoreException
when(reloadedBlob.getInputStream()).thenThrow(new BlobStoreException("Blob has been deleted", new BlobId("foo")));
List<DeadBlobResult<Asset>> result = deadBlobFinder.find(repository, true);
// the deleted blob should be reported as exactly one DELETED dead-blob result
assertThat(result, hasSize(1));
assertThat(result.get(0).getResultState(), is(DELETED));
}
Use of org.sonatype.nexus.blobstore.api.BlobStoreException in the project nexus-public by sonatype, in the class BlobStoreConfigurationData, method copy.
@Override
public BlobStoreConfiguration copy(final String name) {
// Build a fresh configuration carrying everything but the entity id.
BlobStoreConfigurationData copy = new BlobStoreConfigurationData();
copy.setName(name);
copy.setType(getType());
if (attributes != null && !attributes.isEmpty()) {
// Deep-copy the nested attribute maps via a JSON round trip so the
// clone shares no mutable state with this instance.
final String serializedAttributes;
try {
serializedAttributes = MAPPER.writer().writeValueAsString(getAttributes());
} catch (JsonProcessingException e) {
throw new BlobStoreException("failed to marshal blob store configuration attributes to JSON", e, null);
}
final Map<String, Map<String, Object>> attributesCopy;
try {
attributesCopy = MAPPER.readValue(serializedAttributes, new TypeReference<Map<String, Map<String, Object>>>() {
});
} catch (IOException e) {
throw new BlobStoreException("failed to parse blob store configuration attributes from JSON", e, null);
}
copy.setAttributes(attributesCopy);
}
return copy;
}
Use of org.sonatype.nexus.blobstore.api.BlobStoreException in the project nexus-public by sonatype, in the class OrientBlobStoreConfiguration, method copy.
@Override
public BlobStoreConfiguration copy(String name) {
OrientBlobStoreConfiguration result = new OrientBlobStoreConfiguration();
result.setName(name);
result.setType(getType());
// Only bother cloning attributes when there is something to clone.
if (attributes != null && !attributes.isEmpty()) {
// Serialize then re-parse the attributes so nested maps are deep-copied
// rather than shared between the original and the clone.
String json;
try {
json = MAPPER.writer().writeValueAsString(getAttributes());
} catch (JsonProcessingException e) {
throw new BlobStoreException("failed to marshal blob store configuration attributes to JSON", e, null);
}
Map<String, Map<String, Object>> deepCopy;
try {
deepCopy = MAPPER.readValue(json, new TypeReference<Map<String, Map<String, Object>>>() {
});
} catch (IOException e) {
throw new BlobStoreException("failed to parse blob store configuration attributes from JSON", e, null);
}
result.setAttributes(deepCopy);
}
return result;
}
Use of org.sonatype.nexus.blobstore.api.BlobStoreException in the project nexus-public by sonatype, in the class BlobStoreGroup, method copy.
@Override
@Guarded(by = STARTED)
public Blob copy(final BlobId blobId, final Map<String, String> headers) {
// Resolve which member blob store currently holds this blob; fail fast if none does.
BlobStore member = locate(blobId)
.orElseThrow(() -> new BlobStoreException("Unable to find blob", blobId));
Blob copied = member.copy(blobId, headers);
// Remember where the copy landed so later lookups route straight to that member.
locatedBlobs.put(copied.getId(), member.getBlobStoreConfiguration().getName());
return copied;
}
Aggregations