Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestObjectPut, method testCopyObject.
@Test
public void testCopyObject() throws IOException, OS3Exception {
  // Put an object into the source bucket
  HttpHeaders headers = Mockito.mock(HttpHeaders.class);
  ByteArrayInputStream body =
      new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
  objectEndpoint.setHeaders(headers);
  keyName = "sourceKey";

  Response response = objectEndpoint.put(bucketName, keyName,
      CONTENT.length(), 1, null, body);

  OzoneInputStream ozoneInputStream = clientStub.getObjectStore()
      .getS3Bucket(bucketName).readKey(keyName);
  String keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
  Assert.assertEquals(200, response.getStatus());
  Assert.assertEquals(CONTENT, keyContent);

  // Add the copy-source header, then call put to copy the key
  when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
      bucketName + "/" + urlEncode(keyName));
  response = objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1,
      null, body);

  // Check the destination key and the response
  ozoneInputStream = clientStub.getObjectStore()
      .getS3Bucket(destBucket).readKey(destkey);
  keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
  Assert.assertEquals(200, response.getStatus());
  Assert.assertEquals(CONTENT, keyContent);

  // Source and destination are the same
  try {
    objectEndpoint.put(bucketName, keyName, CONTENT.length(), 1, null,
        body);
    fail("test copy object failed");
  } catch (OS3Exception ex) {
    Assert.assertTrue(
        ex.getErrorMessage().contains("This copy request is illegal"));
  }

  // Source bucket not found
  try {
    when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
        nonexist + "/" + urlEncode(keyName));
    objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, null,
        body);
    fail("test copy object failed");
  } catch (OS3Exception ex) {
    Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
  }

  // Destination bucket not found
  try {
    when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
        bucketName + "/" + urlEncode(keyName));
    objectEndpoint.put(nonexist, destkey, CONTENT.length(), 1, null, body);
    fail("test copy object failed");
  } catch (OS3Exception ex) {
    Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
  }

  // Both the source and the destination bucket not found
  try {
    when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
        nonexist + "/" + urlEncode(keyName));
    objectEndpoint.put(nonexist, destkey, CONTENT.length(), 1, null, body);
    fail("test copy object failed");
  } catch (OS3Exception ex) {
    Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
  }

  // Source key not found (note that the destination bucket named here
  // is also nonexistent)
  try {
    when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
        bucketName + "/" + urlEncode(nonexist));
    objectEndpoint.put("nonexistent", keyName, CONTENT.length(), 1, null,
        body);
    fail("test copy object failed");
  } catch (OS3Exception ex) {
    Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
  }
}
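A side note on the read-back pattern above: the test opens an OzoneInputStream via readKey() and never closes it, which is harmless against the in-memory client stub but would leak resources against a real cluster. A minimal sketch of the same read wrapped in try-with-resources (the helper and class names are illustrative, not part of the test):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;

public final class ReadKeyExample {
  private ReadKeyExample() {
  }

  /** Reads a key fully into a String and closes the stream afterwards. */
  static String readKeyAsString(OzoneBucket bucket, String keyName)
      throws IOException {
    // OzoneInputStream is an InputStream, so try-with-resources
    // releases the underlying block streams even on failure.
    try (OzoneInputStream in = bucket.readKey(keyName)) {
      return IOUtils.toString(in, StandardCharsets.UTF_8);
    }
  }
}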
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestObjectPut, method testPutObjectWithSignedChunks.
@Test
public void testPutObjectWithSignedChunks() throws IOException,
    OS3Exception {
  // GIVEN
  HttpHeaders headers = Mockito.mock(HttpHeaders.class);
  objectEndpoint.setHeaders(headers);
  String chunkedContent = "0a;chunk-signature=signature\r\n"
      + "1234567890\r\n"
      + "05;chunk-signature=signature\r\n"
      + "abcde\r\n";
  when(headers.getHeaderString("x-amz-content-sha256"))
      .thenReturn("STREAMING-AWS4-HMAC-SHA256-PAYLOAD");

  // WHEN
  Response response = objectEndpoint.put(bucketName, keyName,
      chunkedContent.length(), 1, null,
      new ByteArrayInputStream(chunkedContent.getBytes(UTF_8)));

  // THEN
  OzoneInputStream ozoneInputStream = clientStub.getObjectStore()
      .getS3Bucket(bucketName).readKey(keyName);
  String keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
  Assert.assertEquals(200, response.getStatus());
  Assert.assertEquals("1234567890abcde", keyContent);
}
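For context, the chunkedContent above is in the aws-chunked format: each chunk is a hex length plus a chunk-signature field, a CRLF, the payload, and a trailing CRLF. The S3 gateway strips this framing before storing the key, which is why the read-back yields the bare payload. Below is a simplified, hypothetical decoder for the same framing, for text payloads only; it is not the gateway's actual implementation:

public final class ChunkedDecoderSketch {
  private ChunkedDecoderSketch() {
  }

  // Strips aws-chunked framing: <hex-len>;chunk-signature=<sig>\r\n<data>\r\n
  static String decode(String chunked) {
    StringBuilder payload = new StringBuilder();
    int pos = 0;
    while (pos < chunked.length()) {
      int headerEnd = chunked.indexOf("\r\n", pos);
      // the chunk size is the hex prefix before the ';'
      int size = Integer.parseInt(
          chunked.substring(pos, headerEnd).split(";", 2)[0], 16);
      int dataStart = headerEnd + 2;
      payload.append(chunked, dataStart, dataStart + size);
      pos = dataStart + size + 2; // skip the payload and its trailing CRLF
    }
    return payload.toString();
  }

  public static void main(String[] args) {
    String chunked = "0a;chunk-signature=signature\r\n1234567890\r\n"
        + "05;chunk-signature=signature\r\nabcde\r\n";
    System.out.println(decode(chunked)); // prints 1234567890abcde
  }
}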
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestObjectPut, method testPutObject.
@Test
public void testPutObject() throws IOException, OS3Exception {
  // GIVEN
  HttpHeaders headers = Mockito.mock(HttpHeaders.class);
  ByteArrayInputStream body =
      new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
  objectEndpoint.setHeaders(headers);

  // WHEN
  Response response = objectEndpoint.put(bucketName, keyName,
      CONTENT.length(), 1, null, body);

  // THEN
  OzoneInputStream ozoneInputStream = clientStub.getObjectStore()
      .getS3Bucket(bucketName).readKey(keyName);
  String keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
  Assert.assertEquals(200, response.getStatus());
  Assert.assertEquals(CONTENT, keyContent);
}
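All three endpoint tests above repeat the same put-then-read-back verification. If the fixture were refactored, the shared steps could live in one hypothetical helper inside the same test class; objectEndpoint, clientStub, and the static imports come from the surrounding class, and this method is not part of TestObjectPut as published:

/**
 * Hypothetical helper capturing the put-and-verify pattern shared by
 * the tests above.
 */
private void putAndVerify(String bucket, String key, String content)
    throws IOException, OS3Exception {
  ByteArrayInputStream body =
      new ByteArrayInputStream(content.getBytes(UTF_8));
  Response response =
      objectEndpoint.put(bucket, key, content.length(), 1, null, body);
  Assert.assertEquals(200, response.getStatus());
  try (OzoneInputStream in =
      clientStub.getObjectStore().getS3Bucket(bucket).readKey(key)) {
    Assert.assertEquals(content, IOUtils.toString(in, UTF_8));
  }
}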
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestDataScrubber, method testOpenContainerIntegrity.
@Test
public void testOpenContainerIntegrity() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  Instant testStartTime = Instant.now();
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);

  // write ten keys and verify each can be read back intact
  for (int i = 0; i < 10; i++) {
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName,
        value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    is.read(fileContent);
    Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
        keyName, RATIS, ONE));
    Assert.assertEquals(value, new String(fileContent, UTF_8));
    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
  }

  // wait for the container report to propagate to SCM
  Thread.sleep(5000);

  Assert.assertEquals(1, cluster.getHddsDatanodes().size());
  HddsDatanodeService dn = cluster.getHddsDatanodes().get(0);
  OzoneContainer oc = dn.getDatanodeStateMachine().getContainer();
  ContainerSet cs = oc.getContainerSet();
  Container c = cs.getContainerIterator().next();
  Assert.assertTrue(cs.containerCount() > 0);

  // delete the chunks directory
  File chunksDir = new File(c.getContainerData().getContainerPath(),
      "chunks");
  deleteDirectory(chunksDir);
  Assert.assertFalse(chunksDir.exists());

  // scrub the container so the missing chunks are detected
  ContainerScrubberConfiguration conf =
      ozoneConfig.getObject(ContainerScrubberConfiguration.class);
  ContainerMetadataScanner sb =
      new ContainerMetadataScanner(conf, oc.getController());
  sb.scrub(c);

  // wait for the incremental container report to propagate to SCM
  Thread.sleep(5000);

  // the single replica should now be reported as UNHEALTHY
  ContainerManager cm =
      cluster.getStorageContainerManager().getContainerManager();
  Set<ContainerReplica> replicas = cm.getContainerReplicas(
      ContainerID.valueOf(c.getContainerData().getContainerID()));
  Assert.assertEquals(1, replicas.size());
  ContainerReplica r = replicas.iterator().next();
  Assert.assertEquals(StorageContainerDatanodeProtocolProtos
      .ContainerReplicaProto.State.UNHEALTHY, r.getState());
}
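One caveat in the loop above: is.read(fileContent) is a single read() call, and InputStream only promises to return at least one byte, not to fill the buffer. For a 12-byte value this works in practice, but a robust version loops until the buffer is full. A generic sketch, not code from the test:

import java.io.IOException;
import java.io.InputStream;

public final class ReadFully {
  private ReadFully() {
  }

  /** Reads exactly buf.length bytes or throws if the stream ends early. */
  static void readFully(InputStream in, byte[] buf) throws IOException {
    int off = 0;
    while (off < buf.length) {
      int n = in.read(buf, off, buf.length - off);
      if (n < 0) {
        throw new IOException("Stream ended after " + off + " bytes");
      }
      off += n;
    }
  }
}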
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestDatanodeHddsVolumeFailureDetection, method testHddsVolumeFailureOnChunkFileCorrupt.
@Test
public void testHddsVolumeFailureOnChunkFileCorrupt() throws Exception {
  // write a file
  String keyName = UUID.randomUUID().toString();
  String value = "sample value";
  OzoneOutputStream out = bucket.createKey(keyName,
      value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
  out.write(value.getBytes(UTF_8));
  out.close();
  OzoneKey key = bucket.getKey(keyName);
  Assert.assertEquals(keyName, key.getName());

  // corrupt the chunk files by renaming each file into a directory
  HddsDatanodeService dn = datanodes.get(0);
  OzoneContainer oc = dn.getDatanodeStateMachine().getContainer();
  MutableVolumeSet volSet = oc.getVolumeSet();
  StorageVolume vol0 = volSet.getVolumesList().get(0);
  Assert.assertTrue(vol0 instanceof HddsVolume);
  File clusterDir =
      DatanodeTestUtils.getHddsVolumeClusterDir((HddsVolume) vol0);
  File currentDir = new File(clusterDir, Storage.STORAGE_DIR_CURRENT);
  File containerTopDir = new File(currentDir, Storage.CONTAINER_DIR + "0");
  File containerDir = new File(containerTopDir, "1");
  File chunksDir = new File(containerDir, OzoneConsts.STORAGE_DIR_CHUNKS);
  File[] chunkFiles = chunksDir.listFiles();
  Assert.assertNotNull(chunkFiles);
  for (File chunkFile : chunkFiles) {
    DatanodeTestUtils.injectDataFileFailure(chunkFile);
  }

  // simulate a bad volume by removing write permission on the root dir;
  // refer to HddsVolume.check()
  DatanodeTestUtils.simulateBadVolume(vol0);

  // read the written file to trigger checkVolumeAsync
  OzoneInputStream is = bucket.readKey(keyName);
  byte[] fileContent = new byte[value.getBytes(UTF_8).length];
  try {
    is.read(fileContent);
    Assert.fail();
  } catch (Exception e) {
    Assert.assertTrue(e instanceof IOException);
  } finally {
    is.close();
  }

  // the read should trigger checkVolumeAsync,
  // and the failed volume should be detected
  DatanodeTestUtils.waitForCheckVolume(volSet, 1L);
  DatanodeTestUtils.waitForHandleFailedVolume(volSet, 1);

  // restore for cleanup
  DatanodeTestUtils.restoreBadVolume(vol0);
  for (File chunkFile : chunkFiles) {
    DatanodeTestUtils.restoreDataFileFromFailure(chunkFile);
  }
}
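A small modernization note: the try/catch/fail block above, which asserts e instanceof IOException, can be written more directly with Assert.assertThrows, available since JUnit 4.13 (assuming the project is on that version or later). A drop-in replacement using the test's own bucket, keyName, and value:

// Equivalent expected-failure read using JUnit 4.13's assertThrows.
OzoneInputStream is = bucket.readKey(keyName);
try {
  byte[] fileContent = new byte[value.getBytes(UTF_8).length];
  Assert.assertThrows(IOException.class, () -> is.read(fileContent));
} finally {
  is.close();
}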