Use of org.opensearch.common.blobstore.BlobPath in project OpenSearch by opensearch-project.
From class HdfsBlobStoreContainerTests, method testReadOnly:
public void testReadOnly() throws Exception {
    FileContext fileContext = createTestContext();
    // Constructor will not create dir if read only
    HdfsBlobStore hdfsBlobStore = new HdfsBlobStore(fileContext, "dir", 1024, true);
    FileContext.Util util = fileContext.util();
    Path root = fileContext.makeQualified(new Path("dir"));
    assertFalse(util.exists(root));
    BlobPath blobPath = BlobPath.cleanPath().add("path");
    // blobContainer() will not create path if read only
    hdfsBlobStore.blobContainer(blobPath);
    Path hdfsPath = root;
    for (String p : blobPath) {
        hdfsPath = new Path(hdfsPath, p);
    }
    assertFalse(util.exists(hdfsPath));
    // if not read only, directory will be created
    hdfsBlobStore = new HdfsBlobStore(fileContext, "dir", 1024, false);
    assertTrue(util.exists(root));
    BlobContainer container = hdfsBlobStore.blobContainer(blobPath);
    assertTrue(util.exists(hdfsPath));
    byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
    writeBlob(container, "foo", new BytesArray(data), randomBoolean());
    assertArrayEquals(readBlobFully(container, "foo", data.length), data);
    assertTrue(container.blobExists("foo"));
}
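The resolution loop above mirrors how BlobPath is consumed throughout this section: it is an Iterable over its path components, and buildAsString() joins them with a trailing separator. A minimal, self-contained sketch of that behavior, grounded in how these tests use the class:

import org.opensearch.common.blobstore.BlobPath;

public class BlobPathSketch {
    public static void main(String[] args) {
        // cleanPath() yields an empty path; add() returns a new, extended copy
        BlobPath path = BlobPath.cleanPath().add("foo").add("bar");
        // Components iterate in insertion order, as the loops in these tests rely on
        for (String component : path) {
            System.out.println(component); // prints "foo", then "bar"
        }
        // buildAsString() joins components with '/' and appends a trailing separator
        System.out.println(path.buildAsString()); // prints "foo/bar/"
    }
}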
Use of org.opensearch.common.blobstore.BlobPath in project OpenSearch by opensearch-project.
From class FsBlobStoreRepositoryIT, method testReadOnly:
public void testReadOnly() throws Exception {
    Path tempDir = createTempDir();
    Path path = tempDir.resolve("bar");
    try (FsBlobStore store = new FsBlobStore(randomIntBetween(1, 8) * 1024, path, true)) {
        assertFalse(Files.exists(path));
        BlobPath blobPath = BlobPath.cleanPath().add("foo");
        store.blobContainer(blobPath);
        Path storePath = store.path();
        for (String d : blobPath) {
            storePath = storePath.resolve(d);
        }
        assertFalse(Files.exists(storePath));
    }
    try (FsBlobStore store = new FsBlobStore(randomIntBetween(1, 8) * 1024, path, false)) {
        assertTrue(Files.exists(path));
        BlobPath blobPath = BlobPath.cleanPath().add("foo");
        BlobContainer container = store.blobContainer(blobPath);
        Path storePath = store.path();
        for (String d : blobPath) {
            storePath = storePath.resolve(d);
        }
        assertTrue(Files.exists(storePath));
        assertTrue(Files.isDirectory(storePath));
        byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
        writeBlob(container, "test", new BytesArray(data));
        assertArrayEquals(readBlobFully(container, "test", data.length), data);
        assertTrue(container.blobExists("test"));
    }
}
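Both try-with-resources blocks resolve the container's filesystem location with the same loop. Factored out as a standalone helper (hypothetical, for illustration; it is not part of FsBlobStore), the resolution reads:

import java.nio.file.Path;
import org.opensearch.common.blobstore.BlobPath;

public class BlobPathResolver {
    // Hypothetical helper mirroring the loop in both tests above: resolve each
    // BlobPath component against the store's root directory, in order.
    static Path resolveBlobPath(Path root, BlobPath blobPath) {
        Path resolved = root;
        for (String component : blobPath) {
            resolved = resolved.resolve(component);
        }
        return resolved;
    }
}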
Use of org.opensearch.common.blobstore.BlobPath in project OpenSearch by opensearch-project.
From class S3BlobStoreContainerTests, method testExecuteSingleUpload:
public void testExecuteSingleUpload() throws IOException {
    final String bucketName = randomAlphaOfLengthBetween(1, 10);
    final String blobName = randomAlphaOfLengthBetween(1, 10);
    BlobPath blobPath = new BlobPath();
    if (randomBoolean()) {
        // BlobPath is immutable, so each add() returns an extended copy that must be reassigned
        final int depth = randomIntBetween(1, 5);
        for (int value = 1; value <= depth; value++) {
            blobPath = blobPath.add("path_" + value);
        }
    }
    final int bufferSize = randomIntBetween(1024, 2048);
    final int blobSize = randomIntBetween(0, bufferSize);
    final S3BlobStore blobStore = mock(S3BlobStore.class);
    when(blobStore.bucket()).thenReturn(bucketName);
    when(blobStore.bufferSizeInBytes()).thenReturn((long) bufferSize);
    final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
    final boolean serverSideEncryption = randomBoolean();
    when(blobStore.serverSideEncryption()).thenReturn(serverSideEncryption);
    final StorageClass storageClass = randomFrom(StorageClass.values());
    when(blobStore.getStorageClass()).thenReturn(storageClass);
    final CannedAccessControlList cannedAccessControlList = randomBoolean() ? randomFrom(CannedAccessControlList.values()) : null;
    if (cannedAccessControlList != null) {
        when(blobStore.getCannedACL()).thenReturn(cannedAccessControlList);
    }
    final AmazonS3 client = mock(AmazonS3.class);
    final AmazonS3Reference clientReference = new AmazonS3Reference(client);
    when(blobStore.clientReference()).thenReturn(clientReference);
    final ArgumentCaptor<PutObjectRequest> argumentCaptor = ArgumentCaptor.forClass(PutObjectRequest.class);
    when(client.putObject(argumentCaptor.capture())).thenReturn(new PutObjectResult());
    final ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[blobSize]);
    blobContainer.executeSingleUpload(blobStore, blobName, inputStream, blobSize);
    final PutObjectRequest request = argumentCaptor.getValue();
    assertEquals(bucketName, request.getBucketName());
    assertEquals(blobPath.buildAsString() + blobName, request.getKey());
    assertEquals(inputStream, request.getInputStream());
    assertEquals(blobSize, request.getMetadata().getContentLength());
    assertEquals(storageClass.toString(), request.getStorageClass());
    assertEquals(cannedAccessControlList, request.getCannedAcl());
    if (serverSideEncryption) {
        assertEquals(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION, request.getMetadata().getSSEAlgorithm());
    }
}
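The test draws blobSize between 0 and bufferSize because the container dispatches on the buffer size: uploads that fit in the buffer go out as a single PutObjectRequest via executeSingleUpload, and anything larger takes the multipart path exercised by the next test. A paraphrase of that dispatch (an assumption based on how these tests bound their sizes, not a verbatim copy of S3BlobContainer.writeBlob):

// Assumed dispatch inside writeBlob: single upload at or below the buffer
// size, multipart above it.
if (blobSize <= blobStore.bufferSizeInBytes()) {
    executeSingleUpload(blobStore, blobName, inputStream, blobSize);
} else {
    executeMultipartUpload(blobStore, blobName, inputStream, blobSize);
}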
Use of org.opensearch.common.blobstore.BlobPath in project OpenSearch by opensearch-project.
From class S3BlobStoreContainerTests, method testExecuteMultipartUpload:
public void testExecuteMultipartUpload() throws IOException {
    final String bucketName = randomAlphaOfLengthBetween(1, 10);
    final String blobName = randomAlphaOfLengthBetween(1, 10);
    BlobPath blobPath = new BlobPath();
    if (randomBoolean()) {
        // BlobPath is immutable, so each add() returns an extended copy that must be reassigned
        final int depth = randomIntBetween(1, 5);
        for (int value = 1; value <= depth; value++) {
            blobPath = blobPath.add("path_" + value);
        }
    }
    final long blobSize = ByteSizeUnit.GB.toBytes(randomIntBetween(1, 128));
    final long bufferSize = ByteSizeUnit.MB.toBytes(randomIntBetween(5, 1024));
    final S3BlobStore blobStore = mock(S3BlobStore.class);
    when(blobStore.bucket()).thenReturn(bucketName);
    when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize);
    final boolean serverSideEncryption = randomBoolean();
    when(blobStore.serverSideEncryption()).thenReturn(serverSideEncryption);
    final StorageClass storageClass = randomFrom(StorageClass.values());
    when(blobStore.getStorageClass()).thenReturn(storageClass);
    final CannedAccessControlList cannedAccessControlList = randomBoolean() ? randomFrom(CannedAccessControlList.values()) : null;
    if (cannedAccessControlList != null) {
        when(blobStore.getCannedACL()).thenReturn(cannedAccessControlList);
    }
    final AmazonS3 client = mock(AmazonS3.class);
    final AmazonS3Reference clientReference = new AmazonS3Reference(client);
    when(blobStore.clientReference()).thenReturn(clientReference);
    final ArgumentCaptor<InitiateMultipartUploadRequest> initArgCaptor = ArgumentCaptor.forClass(InitiateMultipartUploadRequest.class);
    final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
    initResult.setUploadId(randomAlphaOfLength(10));
    when(client.initiateMultipartUpload(initArgCaptor.capture())).thenReturn(initResult);
    final ArgumentCaptor<UploadPartRequest> uploadArgCaptor = ArgumentCaptor.forClass(UploadPartRequest.class);
    final List<String> expectedEtags = new ArrayList<>();
    final long partSize = Math.min(bufferSize, blobSize);
    long totalBytes = 0;
    do {
        expectedEtags.add(randomAlphaOfLength(50));
        totalBytes += partSize;
    } while (totalBytes < blobSize);
    when(client.uploadPart(uploadArgCaptor.capture())).thenAnswer(invocationOnMock -> {
        final UploadPartRequest request = (UploadPartRequest) invocationOnMock.getArguments()[0];
        final UploadPartResult response = new UploadPartResult();
        response.setPartNumber(request.getPartNumber());
        response.setETag(expectedEtags.get(request.getPartNumber() - 1));
        return response;
    });
    final ArgumentCaptor<CompleteMultipartUploadRequest> compArgCaptor = ArgumentCaptor.forClass(CompleteMultipartUploadRequest.class);
    when(client.completeMultipartUpload(compArgCaptor.capture())).thenReturn(new CompleteMultipartUploadResult());
    final ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[0]);
    final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
    blobContainer.executeMultipartUpload(blobStore, blobName, inputStream, blobSize);
    final InitiateMultipartUploadRequest initRequest = initArgCaptor.getValue();
    assertEquals(bucketName, initRequest.getBucketName());
    assertEquals(blobPath.buildAsString() + blobName, initRequest.getKey());
    assertEquals(storageClass, initRequest.getStorageClass());
    assertEquals(cannedAccessControlList, initRequest.getCannedACL());
    if (serverSideEncryption) {
        assertEquals(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION, initRequest.getObjectMetadata().getSSEAlgorithm());
    }
    final Tuple<Long, Long> numberOfParts = S3BlobContainer.numberOfMultiparts(blobSize, bufferSize);
    final List<UploadPartRequest> uploadRequests = uploadArgCaptor.getAllValues();
    assertEquals(numberOfParts.v1().intValue(), uploadRequests.size());
    for (int i = 0; i < uploadRequests.size(); i++) {
        final UploadPartRequest uploadRequest = uploadRequests.get(i);
        assertEquals(bucketName, uploadRequest.getBucketName());
        assertEquals(blobPath.buildAsString() + blobName, uploadRequest.getKey());
        assertEquals(initResult.getUploadId(), uploadRequest.getUploadId());
        assertEquals(i + 1, uploadRequest.getPartNumber());
        assertEquals(inputStream, uploadRequest.getInputStream());
        if (i == (uploadRequests.size() - 1)) {
            assertTrue(uploadRequest.isLastPart());
            assertEquals(numberOfParts.v2().longValue(), uploadRequest.getPartSize());
        } else {
            assertFalse(uploadRequest.isLastPart());
            assertEquals(bufferSize, uploadRequest.getPartSize());
        }
    }
    final CompleteMultipartUploadRequest compRequest = compArgCaptor.getValue();
    assertEquals(bucketName, compRequest.getBucketName());
    assertEquals(blobPath.buildAsString() + blobName, compRequest.getKey());
    assertEquals(initResult.getUploadId(), compRequest.getUploadId());
    final List<String> actualETags = compRequest.getPartETags().stream().map(PartETag::getETag).collect(Collectors.toList());
    assertEquals(expectedEtags, actualETags);
}
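The assertions on uploadRequests.size() and on the last part's size pin down what numberOfMultiparts must compute. Reconstructed from those assertions (treat this as an assumed implementation, not the actual source): v1 is the total part count and v2 the size of the final part.

import org.opensearch.common.collect.Tuple;

public class MultipartMath {
    // Reconstruction of the part arithmetic the test asserts against.
    static Tuple<Long, Long> numberOfMultiparts(long totalSize, long partSize) {
        final long parts = totalSize / partSize;
        final long remaining = totalSize % partSize;
        if (remaining == 0) {
            return Tuple.tuple(parts, partSize); // every part is full-size
        }
        return Tuple.tuple(parts + 1, remaining); // the last part carries the remainder
    }
}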
Use of org.opensearch.common.blobstore.BlobPath in project OpenSearch by opensearch-project.
From class S3BlobStoreContainerTests, method testExecuteMultipartUploadAborted:
public void testExecuteMultipartUploadAborted() {
    final String bucketName = randomAlphaOfLengthBetween(1, 10);
    final String blobName = randomAlphaOfLengthBetween(1, 10);
    final BlobPath blobPath = new BlobPath();
    final long blobSize = ByteSizeUnit.MB.toBytes(765);
    final long bufferSize = ByteSizeUnit.MB.toBytes(150);
    final S3BlobStore blobStore = mock(S3BlobStore.class);
    when(blobStore.bucket()).thenReturn(bucketName);
    when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize);
    when(blobStore.getStorageClass()).thenReturn(randomFrom(StorageClass.values()));
    final AmazonS3 client = mock(AmazonS3.class);
    final AmazonS3Reference clientReference = new AmazonS3Reference(client);
    doAnswer(invocation -> {
        clientReference.incRef();
        return clientReference;
    }).when(blobStore).clientReference();
    final String uploadId = randomAlphaOfLength(25);
    final int stage = randomInt(2);
    final List<AmazonClientException> exceptions = Arrays.asList(
        new AmazonClientException("Expected initialization request to fail"),
        new AmazonClientException("Expected upload part request to fail"),
        new AmazonClientException("Expected completion request to fail")
    );
    if (stage == 0) {
        // Fail the initialization request
        when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenThrow(exceptions.get(stage));
    } else if (stage == 1) {
        final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
        initResult.setUploadId(uploadId);
        when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult);
        // Fail the upload part request
        when(client.uploadPart(any(UploadPartRequest.class))).thenThrow(exceptions.get(stage));
    } else {
        final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
        initResult.setUploadId(uploadId);
        when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult);
        when(client.uploadPart(any(UploadPartRequest.class))).thenAnswer(invocationOnMock -> {
            final UploadPartRequest request = (UploadPartRequest) invocationOnMock.getArguments()[0];
            final UploadPartResult response = new UploadPartResult();
            response.setPartNumber(request.getPartNumber());
            response.setETag(randomAlphaOfLength(20));
            return response;
        });
        // Fail the completion request
        when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenThrow(exceptions.get(stage));
    }
    final ArgumentCaptor<AbortMultipartUploadRequest> argumentCaptor = ArgumentCaptor.forClass(AbortMultipartUploadRequest.class);
    doNothing().when(client).abortMultipartUpload(argumentCaptor.capture());
    final IOException e = expectThrows(IOException.class, () -> {
        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
        blobContainer.executeMultipartUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), blobSize);
    });
    assertEquals("Unable to upload object [" + blobName + "] using multipart upload", e.getMessage());
    assertThat(e.getCause(), instanceOf(AmazonClientException.class));
    assertEquals(exceptions.get(stage).getMessage(), e.getCause().getMessage());
    if (stage == 0) {
        verify(client, times(1)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
        verify(client, times(0)).uploadPart(any(UploadPartRequest.class));
        verify(client, times(0)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
        verify(client, times(0)).abortMultipartUpload(any(AbortMultipartUploadRequest.class));
    } else {
        verify(client, times(1)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
        if (stage == 1) {
            verify(client, times(1)).uploadPart(any(UploadPartRequest.class));
            verify(client, times(0)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
        } else {
            verify(client, times(6)).uploadPart(any(UploadPartRequest.class));
            verify(client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
        }
        verify(client, times(1)).abortMultipartUpload(any(AbortMultipartUploadRequest.class));
        final AbortMultipartUploadRequest abortRequest = argumentCaptor.getValue();
        assertEquals(bucketName, abortRequest.getBucketName());
        assertEquals(blobName, abortRequest.getKey());
        assertEquals(uploadId, abortRequest.getUploadId());
    }
}
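The verify(client, times(6)).uploadPart(...) expectation in the completion-failure branch follows directly from the fixed sizes at the top of the test: 765 MB split into 150 MB parts gives 5 full parts plus a 15 MB remainder, so all 6 parts upload successfully before completeMultipartUpload throws and the upload is aborted.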