Example 21 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in the Apache Ozone project.

From the class TestObjectPut, method testCopyObject.

@Test
public void testCopyObject() throws IOException, OS3Exception {
    // Put object into the source bucket
    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
    ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
    objectEndpoint.setHeaders(headers);
    keyName = "sourceKey";
    Response response = objectEndpoint.put(bucketName, keyName, CONTENT.length(), 1, null, body);
    OzoneInputStream ozoneInputStream = clientStub.getObjectStore().getS3Bucket(bucketName).readKey(keyName);
    String keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals(CONTENT, keyContent);
    // Add copy header, and then call put
    when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(bucketName + "/" + urlEncode(keyName));
    response = objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, null, body);
    // Check destination key and response
    ozoneInputStream = clientStub.getObjectStore().getS3Bucket(destBucket).readKey(destkey);
    keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals(CONTENT, keyContent);
    // source and dest same
    try {
        objectEndpoint.put(bucketName, keyName, CONTENT.length(), 1, null, body);
        fail("test copy object failed");
    } catch (OS3Exception ex) {
        Assert.assertTrue(ex.getErrorMessage().contains("This copy request is " + "illegal"));
    }
    // source bucket not found
    try {
        when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(nonexist + "/" + urlEncode(keyName));
        objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, null, body);
        fail("test copy object failed");
    } catch (OS3Exception ex) {
        Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
    }
    // dest bucket not found
    try {
        when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(bucketName + "/" + urlEncode(keyName));
        objectEndpoint.put(nonexist, destkey, CONTENT.length(), 1, null, body);
        fail("test copy object failed");
    } catch (OS3Exception ex) {
        Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
    }
    // Both source and dest bucket not found
    try {
        when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(nonexist + "/" + urlEncode(keyName));
        objectEndpoint.put(nonexist, destkey, CONTENT.length(), 1, null, body);
        fail("test copy object failed");
    } catch (OS3Exception ex) {
        Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
    }
    // source key not found
    try {
        when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(bucketName + "/" + urlEncode(nonexist));
        objectEndpoint.put("nonexistent", keyName, CONTENT.length(), 1, null, body);
        fail("test copy object failed");
    } catch (OS3Exception ex) {
        Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
    }
}
Also used: Response (javax.ws.rs.core.Response), HttpHeaders (javax.ws.rs.core.HttpHeaders), OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream), ByteArrayInputStream (java.io.ByteArrayInputStream), OS3Exception (org.apache.hadoop.ozone.s3.exception.OS3Exception), Test (org.junit.Test)
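
Note that the test reads the key back without closing the stream, which is harmless against the in-memory client stub. Outside of tests, OzoneInputStream should be closed after use; a minimal sketch of the same read-back with try-with-resources, reusing the fixture names bucketName, keyName, clientStub and CONTENT from the test above, would be:

// Sketch only: same read-back as in the test, but the OzoneInputStream is
// closed deterministically by try-with-resources.
try (OzoneInputStream in = clientStub.getObjectStore()
        .getS3Bucket(bucketName).readKey(keyName)) {
    Assert.assertEquals(CONTENT, IOUtils.toString(in, UTF_8));
}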

Example 22 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in the Apache Ozone project.

From the class TestObjectPut, method testPutObjectWithSignedChunks.

@Test
public void testPutObjectWithSignedChunks() throws IOException, OS3Exception {
    // GIVEN
    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
    objectEndpoint.setHeaders(headers);
    String chunkedContent = "0a;chunk-signature=signature\r\n" + "1234567890\r\n" + "05;chunk-signature=signature\r\n" + "abcde\r\n";
    when(headers.getHeaderString("x-amz-content-sha256")).thenReturn("STREAMING-AWS4-HMAC-SHA256-PAYLOAD");
    // WHEN
    Response response = objectEndpoint.put(bucketName, keyName, chunkedContent.length(), 1, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8)));
    // THEN
    OzoneInputStream ozoneInputStream = clientStub.getObjectStore().getS3Bucket(bucketName).readKey(keyName);
    String keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals("1234567890abcde", keyContent);
}
Also used: Response (javax.ws.rs.core.Response), HttpHeaders (javax.ws.rs.core.HttpHeaders), OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream), ByteArrayInputStream (java.io.ByteArrayInputStream), Test (org.junit.Test)
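
Each signed chunk in the streaming payload above follows the pattern <hex-length>;chunk-signature=<signature>\r\n<data>\r\n, and the endpoint strips this framing so that only the data bytes ("1234567890" and "abcde") are stored. A hypothetical helper, not part of the original test, that builds such a body could look like this:

// Hypothetical helper (assumption, not in the original test): frames one data
// block as a signed chunk using the same fixed placeholder signature.
private static String signedChunk(String data) {
    return String.format("%02x", data.getBytes(UTF_8).length)
        + ";chunk-signature=signature\r\n"
        + data + "\r\n";
}

// Usage: produces the same chunkedContent as the literal string in the test.
// String chunkedContent = signedChunk("1234567890") + signedChunk("abcde");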

Example 23 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in the Apache Ozone project.

From the class TestObjectPut, method testPutObject.

@Test
public void testPutObject() throws IOException, OS3Exception {
    // GIVEN
    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
    ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
    objectEndpoint.setHeaders(headers);
    // WHEN
    Response response = objectEndpoint.put(bucketName, keyName, CONTENT.length(), 1, null, body);
    // THEN
    OzoneInputStream ozoneInputStream = clientStub.getObjectStore().getS3Bucket(bucketName).readKey(keyName);
    String keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals(CONTENT, keyContent);
}
Also used: Response (javax.ws.rs.core.Response), HttpHeaders (javax.ws.rs.core.HttpHeaders), OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream), ByteArrayInputStream (java.io.ByteArrayInputStream), Test (org.junit.Test)
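
For comparison, the same round trip can be done directly against the native Ozone client API instead of going through the S3 gateway endpoint. A minimal sketch, assuming an existing OzoneBucket named bucket and the same keyName and CONTENT as above:

// Sketch only: write a key with OzoneOutputStream and read it back with
// OzoneInputStream through the native client API.
byte[] data = CONTENT.getBytes(UTF_8);
try (OzoneOutputStream out = bucket.createKey(keyName, data.length)) {
    out.write(data);
}
try (OzoneInputStream in = bucket.readKey(keyName)) {
    Assert.assertEquals(CONTENT, IOUtils.toString(in, UTF_8));
}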

Example 24 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in the Apache Ozone project.

From the class TestDataScrubber, method testOpenContainerIntegrity.

@Test
public void testOpenContainerIntegrity() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    Instant testStartTime = Instant.now();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    for (int i = 0; i < 10; i++) {
        String keyName = UUID.randomUUID().toString();
        OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
        out.write(value.getBytes(UTF_8));
        out.close();
        OzoneKey key = bucket.getKey(keyName);
        Assert.assertEquals(keyName, key.getName());
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] fileContent = new byte[value.getBytes(UTF_8).length];
        is.read(fileContent);
        Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, RATIS, ONE));
        Assert.assertEquals(value, new String(fileContent, UTF_8));
        Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
        Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    }
    // wait for the container report to propagate to SCM
    Thread.sleep(5000);
    Assert.assertEquals(1, cluster.getHddsDatanodes().size());
    HddsDatanodeService dn = cluster.getHddsDatanodes().get(0);
    OzoneContainer oc = dn.getDatanodeStateMachine().getContainer();
    ContainerSet cs = oc.getContainerSet();
    Container c = cs.getContainerIterator().next();
    Assert.assertTrue(cs.containerCount() > 0);
    // delete the chunks directory.
    File chunksDir = new File(c.getContainerData().getContainerPath(), "chunks");
    deleteDirectory(chunksDir);
    Assert.assertFalse(chunksDir.exists());
    ContainerScrubberConfiguration conf = ozoneConfig.getObject(ContainerScrubberConfiguration.class);
    ContainerMetadataScanner sb = new ContainerMetadataScanner(conf, oc.getController());
    sb.scrub(c);
    // wait for the incremental container report to propagate to SCM
    Thread.sleep(5000);
    ContainerManager cm = cluster.getStorageContainerManager().getContainerManager();
    Set<ContainerReplica> replicas = cm.getContainerReplicas(ContainerID.valueOf(c.getContainerData().getContainerID()));
    Assert.assertEquals(1, replicas.size());
    ContainerReplica r = replicas.iterator().next();
    Assert.assertEquals(StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.UNHEALTHY, r.getState());
}
Also used: OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream), ContainerManager (org.apache.hadoop.hdds.scm.container.ContainerManager), Instant (java.time.Instant), ContainerScrubberConfiguration (org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService), OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket), OzoneContainer (org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer), Container (org.apache.hadoop.ozone.container.common.interfaces.Container), ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet), ContainerReplica (org.apache.hadoop.hdds.scm.container.ContainerReplica), OzoneKey (org.apache.hadoop.ozone.client.OzoneKey), ContainerMetadataScanner (org.apache.hadoop.ozone.container.ozoneimpl.ContainerMetadataScanner), File (java.io.File), Test (org.junit.Test)
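
One detail worth noting in the read-back loop above: is.read(fileContent) is a single read that is not guaranteed to fill the buffer, and the stream is never closed. A more defensive sketch of the same check, assuming commons-io IOUtils is available as it is in the other examples on this page, would be:

// Sketch only: read the whole value and close the stream; readFully throws
// an EOFException if the key is shorter than the buffer.
try (OzoneInputStream is = bucket.readKey(keyName)) {
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    IOUtils.readFully(is, fileContent);
    Assert.assertEquals(value, new String(fileContent, UTF_8));
}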

Example 25 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in the Apache Ozone project.

From the class TestDatanodeHddsVolumeFailureDetection, method testHddsVolumeFailureOnChunkFileCorrupt.

@Test
public void testHddsVolumeFailureOnChunkFileCorrupt() throws Exception {
    // write a file
    String keyName = UUID.randomUUID().toString();
    String value = "sample value";
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    // corrupt chunk file by rename file->dir
    HddsDatanodeService dn = datanodes.get(0);
    OzoneContainer oc = dn.getDatanodeStateMachine().getContainer();
    MutableVolumeSet volSet = oc.getVolumeSet();
    StorageVolume vol0 = volSet.getVolumesList().get(0);
    Assert.assertTrue(vol0 instanceof HddsVolume);
    File clusterDir = DatanodeTestUtils.getHddsVolumeClusterDir((HddsVolume) vol0);
    File currentDir = new File(clusterDir, Storage.STORAGE_DIR_CURRENT);
    File containerTopDir = new File(currentDir, Storage.CONTAINER_DIR + "0");
    File containerDir = new File(containerTopDir, "1");
    File chunksDir = new File(containerDir, OzoneConsts.STORAGE_DIR_CHUNKS);
    File[] chunkFiles = chunksDir.listFiles();
    Assert.assertNotNull(chunkFiles);
    for (File chunkFile : chunkFiles) {
        DatanodeTestUtils.injectDataFileFailure(chunkFile);
    }
    // simulate bad volume by removing write permission on root dir
    // refer to HddsVolume.check()
    DatanodeTestUtils.simulateBadVolume(vol0);
    // read written file to trigger checkVolumeAsync
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    try {
        is.read(fileContent);
        Assert.fail();
    } catch (Exception e) {
        Assert.assertTrue(e instanceof IOException);
    } finally {
        is.close();
    }
    // should trigger checkVolumeAsync and
    // a failed volume should be detected
    DatanodeTestUtils.waitForCheckVolume(volSet, 1L);
    DatanodeTestUtils.waitForHandleFailedVolume(volSet, 1);
    // restore for cleanup
    DatanodeTestUtils.restoreBadVolume(vol0);
    for (File chunkFile : chunkFiles) {
        DatanodeTestUtils.restoreDataFileFromFailure(chunkFile);
    }
}
Also used: OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService), IOException (java.io.IOException), HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume), StorageVolume (org.apache.hadoop.ozone.container.common.volume.StorageVolume), OzoneKey (org.apache.hadoop.ozone.client.OzoneKey), MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet), OzoneContainer (org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer), File (java.io.File), Test (org.junit.Test)
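
With JUnit 4.13 or later (an assumption; the project may pin an older version), the expected read failure can be expressed more compactly with Assert.assertThrows, which also lets try-with-resources handle the close done in the finally block above. A sketch under that assumption, reusing bucket, keyName and value from the test:

// Sketch only: the read against the corrupted chunk file is expected to fail
// with an IOException; the stream is closed by try-with-resources.
try (OzoneInputStream is = bucket.readKey(keyName)) {
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    Assert.assertThrows(IOException.class, () -> is.read(fileContent));
}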

Aggregations

OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream): 47 usages
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 33 usages
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 26 usages
Test (org.junit.Test): 26 usages
OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume): 22 usages
OzoneKey (org.apache.hadoop.ozone.client.OzoneKey): 17 usages
IOException (java.io.IOException): 15 usages
OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails): 13 usages
Instant (java.time.Instant): 12 usages
HashMap (java.util.HashMap): 11 usages
LinkedHashMap (java.util.LinkedHashMap): 10 usages
HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService): 8 usages
ArrayList (java.util.ArrayList): 7 usages
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 7 usages
OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs): 7 usages
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 7 usages
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 6 usages
RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo): 6 usages
File (java.io.File): 5 usages
HttpHeaders (javax.ws.rs.core.HttpHeaders): 5 usages