
Example 21 with OzoneKey

use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.

the class TestOzoneRpcClientAbstract method testZReadKeyWithUnhealthyContainerReplica.

// Run this test last, as it has side effects on other UTs
@Test
public void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName1 = UUID.randomUUID().toString();
    // Write first key
    OzoneOutputStream out = bucket.createKey(keyName1, value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // Write second key
    String keyName2 = UUID.randomUUID().toString();
    value = "unhealthy container replica";
    out = bucket.createKey(keyName2, value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // Find container ID
    OzoneKey key = bucket.getKey(keyName2);
    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
    // Set container replica to UNHEALTHY
    Container container;
    int index = 1;
    List<HddsDatanodeService> involvedDNs = new ArrayList<>();
    for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
        container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
        if (container == null) {
            continue;
        }
        container.markContainerUnhealthy();
        // Change first and second replica commit sequenceId
        if (index < 3) {
            long newBCSID = container.getBlockCommitSequenceId() - 1;
            try (ReferenceCountedDB db = BlockUtils.getDB((KeyValueContainerData) container.getContainerData(), cluster.getConf())) {
                db.getStore().getMetadataTable().put(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, newBCSID);
            }
            container.updateBlockCommitSequenceId(newBCSID);
            index++;
        }
        involvedDNs.add(hddsDatanode);
    }
    // Restart DNs
    int dnCount = involvedDNs.size();
    for (index = 0; index < dnCount; index++) {
        if (index == dnCount - 1) {
            cluster.restartHddsDatanode(involvedDNs.get(index).getDatanodeDetails(), true);
        } else {
            cluster.restartHddsDatanode(involvedDNs.get(index).getDatanodeDetails(), false);
        }
    }
    StorageContainerManager scm = cluster.getStorageContainerManager();
    GenericTestUtils.waitFor(() -> {
        try {
            ContainerInfo containerInfo = scm.getContainerInfo(containerID);
            System.out.println("state " + containerInfo.getState());
            return containerInfo.getState() == HddsProtos.LifeCycleState.CLOSING;
        } catch (IOException e) {
            fail("Failed to get container info: " + e.getMessage());
            return false;
        }
    }, 1000, 10000);
    // Try reading keyName2
    try {
        GenericTestUtils.setLogLevel(XceiverClientGrpc.getLogger(), DEBUG);
        OzoneInputStream is = bucket.readKey(keyName2);
        byte[] content = new byte[100];
        is.read(content);
        String retValue = new String(content, UTF_8);
        Assert.assertTrue(value.equals(retValue.trim()));
    } catch (IOException e) {
        fail("Reading unhealthy replica should succeed.");
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) StorageContainerManager(org.apache.hadoop.hdds.scm.server.StorageContainerManager) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) IOException(java.io.IOException) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) Test(org.junit.Test)
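
The unhealthy-replica test above polls SCM until it reports the container as CLOSING before attempting the read. A minimal hedged sketch of just that polling step, pulled out of the code above (GenericTestUtils.waitFor takes the check interval and the overall timeout in milliseconds; here an IOException simply keeps the poll going instead of failing the test, a small simplification):

// Re-check the container state in SCM every second, for up to ten seconds.
GenericTestUtils.waitFor(() -> {
    try {
        return scm.getContainerInfo(containerID).getState() == HddsProtos.LifeCycleState.CLOSING;
    } catch (IOException e) {
        return false;
    }
}, 1000, 10000);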

Example 22 with OzoneKey

use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.

the class TestOzoneRpcClientAbstract method testPutKeyRatisOneNode.

@Test
public void testPutKeyRatisOneNode() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    Instant testStartTime = Instant.now();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    for (int i = 0; i < 10; i++) {
        String keyName = UUID.randomUUID().toString();
        OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE, new HashMap<>());
        out.write(value.getBytes(UTF_8));
        out.close();
        OzoneKey key = bucket.getKey(keyName);
        Assert.assertEquals(keyName, key.getName());
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] fileContent = new byte[value.getBytes(UTF_8).length];
        is.read(fileContent);
        is.close();
        Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, ReplicationType.RATIS, ONE));
        Assert.assertEquals(value, new String(fileContent, UTF_8));
        Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
        Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    }
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) Instant(java.time.Instant) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) Test(org.junit.Test)

Example 23 with OzoneKey

use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.

the class TestOzoneRpcClientAbstract method testPutKey.

@Test
public void testPutKey() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    Instant testStartTime = Instant.now();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    for (int i = 0; i < 10; i++) {
        String keyName = UUID.randomUUID().toString();
        OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
        out.write(value.getBytes(UTF_8));
        out.close();
        OzoneKey key = bucket.getKey(keyName);
        Assert.assertEquals(keyName, key.getName());
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] fileContent = new byte[value.getBytes(UTF_8).length];
        is.read(fileContent);
        Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, RATIS, ONE));
        Assert.assertEquals(value, new String(fileContent, UTF_8));
        Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
        Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    }
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) Instant(java.time.Instant) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) Test(org.junit.Test)

Example 24 with OzoneKey

use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.

the class TestOzoneRpcClientAbstract method readKey.

private void readKey(OzoneBucket bucket, String keyName, String data) throws IOException {
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[data.getBytes(UTF_8).length];
    is.read(fileContent);
    is.close();
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey)
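
The readKey helper above checks the key name and reads the bytes back, but never compares them with the data that was written. A hedged variant that also verifies the contents, mirroring the assertions used elsewhere on this page (the name readAndVerifyKey is illustrative, not part of the project):

private void readAndVerifyKey(OzoneBucket bucket, String keyName, String data) throws IOException {
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    // Read the value back and compare it with what was written.
    byte[] fileContent = new byte[data.getBytes(UTF_8).length];
    try (OzoneInputStream is = bucket.readKey(keyName)) {
        is.read(fileContent);
    }
    Assert.assertEquals(data, new String(fileContent, UTF_8));
}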

Example 25 with OzoneKey

use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.

the class TestOzoneRpcClientAbstract method testReadKeyWithCorruptedData.

/**
 * Tests that reading a corrupted chunk file throws a checksum exception.
 * @throws IOException
 */
@Test
public void testReadKeyWithCorruptedData() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    // Write data into a key
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // We need to find the location of the chunk file corresponding to the
    // data we just wrote.
    OzoneKey key = bucket.getKey(keyName);
    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
    // Get the container by traversing the datanodes. At least one of the
    // datanodes must have this container.
    Container container = null;
    for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
        container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
        if (container != null) {
            break;
        }
    }
    Assert.assertNotNull("Container not found", container);
    corruptData(container, key);
    // Try reading the key. Since the chunk file is corrupted, reading it
    // should throw a checksum mismatch exception.
    try {
        OzoneInputStream is = bucket.readKey(keyName);
        is.read(new byte[100]);
        fail("Reading corrupted data should fail.");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
    }
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) IOException(java.io.IOException) Test(org.junit.Test)

Aggregations

OzoneKey (org.apache.hadoop.ozone.client.OzoneKey)29 OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)20 OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket)19 OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume)18 Test (org.junit.Test)18 OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream)15 Instant (java.time.Instant)7 HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService)7 IOException (java.io.IOException)6 Container (org.apache.hadoop.ozone.container.common.interfaces.Container)6 OMException (org.apache.hadoop.ozone.om.exceptions.OMException)6 OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails)5 HashMap (java.util.HashMap)4 File (java.io.File)3 ObjectStore (org.apache.hadoop.ozone.client.ObjectStore)3 OzoneContainer (org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer)3 ArrayList (java.util.ArrayList)2 LinkedHashMap (java.util.LinkedHashMap)2 LinkedList (java.util.LinkedList)2 StorageContainerManager (org.apache.hadoop.hdds.scm.server.StorageContainerManager)2
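
Taken together, the aggregations and the examples above reduce to a single write-then-read pattern around OzoneKey. A minimal hedged sketch of that pattern, assuming an already initialized ObjectStore and the same static imports (UTF_8, ONE) the tests rely on:

// A condensed version of the put/get flow shared by the tests above.
private void writeAndReadKey(ObjectStore store) throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String keyName = UUID.randomUUID().toString();
    String value = "sample value";
    // Create the volume and bucket that will hold the key.
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    // Write the key with RATIS replication, factor ONE.
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // Fetch the key metadata and read the value back.
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    try (OzoneInputStream is = bucket.readKey(keyName)) {
        is.read(fileContent);
    }
    Assert.assertEquals(value, new String(fileContent, UTF_8));
}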