Example 26 with OmKeyLocationInfo

use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo in project ozone by apache.

the class TestFailureHandlingByClient method testBlockCountOnFailures.

/**
 * Test whether blockData and Container metadata (block count and used
 * bytes) are updated correctly when there is a write failure.
 * We can combine this test with {@link #testBlockWritesWithDnFailures()}
 * as that test also simulates a write failure and the client writes the
 * failed chunks to a new block.
 */
private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception {
    // testBlockWritesWithDnFailures writes 2.5 * chunkSize of data into
    // KeyOutputStream. But before the outputStream is closed, 2 of the DNs in
    // the pipeline being written to are shut down. So the key will be written
    // to 2 blocks, as at least the last 0.5 chunk would not be committed to
    // the first block before the stream is closed.
    /**
     * There are 3 possible scenarios:
     * 1. Block1 has 2 chunks and OMKeyInfo also has 2 chunks against this block
     *    => Block2 should have 1 chunk
     *    (2 chunks were written to Block1, committed and acknowledged by
     *    CommitWatcher)
     * 2. Block1 has 1 chunk and OMKeyInfo has 1 chunk against this block
     *    => Block2 should have 2 chunks
     *    (Possibly 2 chunks were written but only 1 was committed to the
     *    block)
     * 3. Block1 has 2 chunks but OMKeyInfo has only 1 chunk against this block
     *    => Block2 should have 2 chunks
     *    (This happens when the 2nd chunk has been committed to Block1 but
     *    not acknowledged by CommitWatcher before pipeline shutdown)
     */
    // Get information about the first and second block (in different pipelines)
    List<OmKeyLocationInfo> locationList = omKeyInfo.getLatestVersionLocations().getLocationList();
    long containerId1 = locationList.get(0).getContainerID();
    List<DatanodeDetails> block1DNs = locationList.get(0).getPipeline().getNodes();
    long containerId2 = locationList.get(1).getContainerID();
    List<DatanodeDetails> block2DNs = locationList.get(1).getPipeline().getNodes();
    int block2ExpectedChunkCount;
    if (locationList.get(0).getLength() == 2 * chunkSize) {
        // Scenario 1
        block2ExpectedChunkCount = 1;
    } else {
        // Scenario 2 or 3
        block2ExpectedChunkCount = 2;
    }
    // For the first block, the first 2 DNs in the pipeline are shut down (to
    // simulate a failure). It should have 1 or 2 chunks (depending on
    // whether the DN CommitWatcher successfully acknowledged the 2nd chunk
    // write or not). The 3rd chunk would not exist on the first pipeline as
    // the pipeline would be closed before the last 0.5 chunk was committed
    // to the block.
    KeyValueContainerData containerData1 =
        ((KeyValueContainer) cluster.getHddsDatanode(block1DNs.get(2))
            .getDatanodeStateMachine().getContainer().getContainerSet()
            .getContainer(containerId1)).getContainerData();
    try (ReferenceCountedDB containerDb1 = BlockUtils.getDB(containerData1, conf)) {
        BlockData blockData1 = containerDb1.getStore().getBlockDataTable()
            .get(Long.toString(locationList.get(0).getBlockID().getLocalID()));
        // The first Block could have 1 or 2 chunks' worth of data
        int block1NumChunks = blockData1.getChunks().size();
        Assert.assertTrue(block1NumChunks >= 1);
        Assert.assertEquals(chunkSize * block1NumChunks, blockData1.getSize());
        Assert.assertEquals(1, containerData1.getBlockCount());
        Assert.assertEquals(chunkSize * block1NumChunks, containerData1.getBytesUsed());
    }
    // Verify that the second block has the remaining data (0.5 * chunkSize or
    // 1.5 * chunkSize, depending on the scenario above)
    KeyValueContainerData containerData2 =
        ((KeyValueContainer) cluster.getHddsDatanode(block2DNs.get(0))
            .getDatanodeStateMachine().getContainer().getContainerSet()
            .getContainer(containerId2)).getContainerData();
    try (ReferenceCountedDB containerDb2 = BlockUtils.getDB(containerData2, conf)) {
        BlockData blockData2 = containerDb2.getStore().getBlockDataTable()
            .get(Long.toString(locationList.get(1).getBlockID().getLocalID()));
        // The second Block should have 0.5 or 1.5 chunkSize of data,
        // matching block2ExpectedChunkCount
        Assert.assertEquals(block2ExpectedChunkCount, blockData2.getChunks().size());
        Assert.assertEquals(1, containerData2.getBlockCount());
        int expectedBlockSize;
        if (block2ExpectedChunkCount == 1) {
            expectedBlockSize = chunkSize / 2;
        } else {
            expectedBlockSize = chunkSize + chunkSize / 2;
        }
        Assert.assertEquals(expectedBlockSize, blockData2.getSize());
        Assert.assertEquals(expectedBlockSize, containerData2.getBytesUsed());
    }
}
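The scenario split above is pure arithmetic over the 2.5 chunks the test writes in total; a minimal standalone sketch of that bookkeeping (illustrative only, not part of the test):

// Illustrative sketch: whatever was not committed to block 1 spills to block 2.
static long expectedBlock2Size(long block1CommittedLength, long chunkSize) {
    long totalWritten = 5 * chunkSize / 2; // the test writes 2.5 * chunkSize
    return totalWritten - block1CommittedLength;
}
// expectedBlock2Size(2 * chunkSize, chunkSize) == chunkSize / 2     (Scenario 1)
// expectedBlock2Size(chunkSize, chunkSize)     == 3 * chunkSize / 2 (Scenarios 2 and 3)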
Also used : DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)

Example 27 with OmKeyLocationInfo

use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo in project ozone by apache.

the class TestCloseContainerHandlingByClient method testMultiBlockWrites.

@Test
public void testMultiBlockWrites() throws Exception {
    String keyName = getKeyName();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, (3 * blockSize));
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    // With the initial size provided, it should have preallocated 3 blocks
    Assert.assertEquals(3, keyOutputStream.getStreamEntries().size());
    // write 3 blocks worth of data
    byte[] data = ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize)).getBytes(UTF_8);
    Assert.assertEquals(data.length, 3 * blockSize);
    key.write(data);
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    // get the name of a valid container
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setReplicationConfig(RatisReplicationConfig.getInstance(ONE))
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .build();
    waitForContainerClose(key);
    // write 1 more block worth of data. It will fail and new block will be
    // allocated
    key.write(ContainerTestHelper.getFixedLengthString(keyString, blockSize).getBytes(UTF_8));
    key.close();
    // read the key from OM again and match the length. It should equal the
    // total data written (4 * blockSize).
    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
    List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
    // Though only 3 blocks were preallocated initially, the write after the
    // containers close hits closeContainerException, and the remaining data
    // in the chunkOutputStream buffer is copied into a newly allocated block
    // and committed.
    Assert.assertEquals(4, keyLocationInfos.size());
    Assert.assertEquals(4 * blockSize, keyInfo.getDataSize());
    for (OmKeyLocationInfo locationInfo : keyLocationInfos) {
        Assert.assertEquals(blockSize, locationInfo.getLength());
    }
}
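When a test like this fails, it helps to see where each block landed; a small hedged debugging sketch that uses keyLocationInfos from the test and only the OmKeyLocationInfo getters already shown on this page:

// Debugging aid (sketch): dump each block's container, local ID and length.
for (OmKeyLocationInfo locationInfo : keyLocationInfos) {
    System.out.printf("container=%d localId=%d length=%d%n",
        locationInfo.getContainerID(),
        locationInfo.getBlockID().getLocalID(),
        locationInfo.getLength());
}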
Also used : OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Test(org.junit.Test)

Example 28 with OmKeyLocationInfo

use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo in project ozone by apache.

the class OzoneTestUtils method performOperationOnKeyContainers.

/**
 * Performs the provided consumer on containers which contain the blocks
 * listed in omKeyLocationInfoGroups.
 *
 * @param consumer Consumer which accepts BlockID as argument.
 * @param omKeyLocationInfoGroups locationInfos for a key.
 * @throws Exception if the consumer throws while processing a block.
 */
public static void performOperationOnKeyContainers(CheckedConsumer<BlockID, Exception> consumer, List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups) throws Exception {
    for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : omKeyLocationInfoGroups) {
        List<OmKeyLocationInfo> omKeyLocationInfos = omKeyLocationInfoGroup.getLocationList();
        for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) {
            BlockID blockID = omKeyLocationInfo.getBlockID();
            consumer.accept(blockID);
        }
    }
}
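A typical call site, sketched under the assumption that an OzoneManager handle om and an OmKeyArgs keyArgs are available as in the other tests on this page:

// Hedged usage sketch: visit every block of a key and log its container.
List<OmKeyLocationInfoGroup> groups =
    om.lookupKey(keyArgs).getKeyLocationVersions();
OzoneTestUtils.performOperationOnKeyContainers(
    blockID -> System.out.println("block " + blockID.getLocalID()
        + " lives in container " + blockID.getContainerID()),
    groups);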
Also used : OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) BlockID(org.apache.hadoop.hdds.client.BlockID) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)

Example 29 with OmKeyLocationInfo

use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo in project ozone by apache.

the class ContainerKeyMapperTask method writeOMKeyToContainerDB.

/**
 * Write an OM key to the container DB and update the containerID -> key
 * count mapping.
 *
 * @param key key String
 * @param omKeyInfo omKeyInfo value
 * @throws IOException if unable to write to recon DB.
 */
private void writeOMKeyToContainerDB(String key, OmKeyInfo omKeyInfo) throws IOException {
    long containerCountToIncrement = 0;
    for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : omKeyInfo.getKeyLocationVersions()) {
        long keyVersion = omKeyLocationInfoGroup.getVersion();
        for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfoGroup.getLocationList()) {
            long containerId = omKeyLocationInfo.getContainerID();
            ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix(containerId, key, keyVersion);
            if (reconContainerMetadataManager.getCountForContainerKeyPrefix(containerKeyPrefix) == 0) {
                // Save on writes. No need to save same container-key prefix
                // mapping again.
                reconContainerMetadataManager.storeContainerKeyMapping(containerKeyPrefix, 1);
                // increment the container count if this container has not
                // been seen before
                if (!reconContainerMetadataManager.doesContainerExists(containerId)) {
                    containerCountToIncrement++;
                }
                // update the count of keys for the given containerID
                long keyCount = reconContainerMetadataManager.getKeyCountForContainer(containerId);
                // increment the count and update containerKeyCount.
                // keyCount will be 0 if containerID is not found. So, there is no
                // need to initialize keyCount for the first time.
                reconContainerMetadataManager.storeContainerKeyCount(containerId, ++keyCount);
            }
        }
    }
    if (containerCountToIncrement > 0) {
        reconContainerMetadataManager.incrementContainerCountBy(containerCountToIncrement);
    }
}
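For context, a writer like this is normally driven by a full scan of the OM key table during Recon's reprocess step; a hedged sketch of that loop (the getKeyTable() accessor and the iterator generics are assumptions about the hdds Table utilities and OM metadata layout, not the exact Recon wiring):

// Sketch: feed every key in the OM key table through writeOMKeyToContainerDB.
try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator =
        omMetadataManager.getKeyTable().iterator()) {
    while (iterator.hasNext()) {
        Table.KeyValue<String, OmKeyInfo> keyValue = iterator.next();
        writeOMKeyToContainerDB(keyValue.getKey(), keyValue.getValue());
    }
}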
Also used : OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) ContainerKeyPrefix(org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)

Example 30 with OmKeyLocationInfo

use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo in project ozone by apache.

the class TestBlockDeletion method testContainerStatisticsAfterDelete.

@Test
public void testContainerStatisticsAfterDelete() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = RandomStringUtils.random(1024 * 1024);
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setDataSize(0)
        .setReplicationConfig(RatisReplicationConfig
            .getInstance(HddsProtos.ReplicationFactor.THREE))
        .setRefreshPipeline(true)
        .build();
    List<OmKeyLocationInfoGroup> omKeyLocationInfoGroupList = om.lookupKey(keyArgs).getKeyLocationVersions();
    Thread.sleep(5000);
    List<ContainerInfo> containerInfos = scm.getContainerManager().getContainers();
    final int valueSize = value.getBytes(UTF_8).length;
    final int keyCount = 1;
    containerInfos.stream().forEach(container -> {
        Assert.assertEquals(valueSize, container.getUsedBytes());
        Assert.assertEquals(keyCount, container.getNumberOfKeys());
    });
    OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm);
    // Wait for container to close
    Thread.sleep(2000);
    // make sure the containers are closed on the dn
    omKeyLocationInfoGroupList.forEach((group) -> {
        List<OmKeyLocationInfo> locationInfo = group.getLocationList();
        locationInfo.forEach(info -> cluster.getHddsDatanodes().get(0)
            .getDatanodeStateMachine().getContainer().getContainerSet()
            .getContainer(info.getContainerID()).getContainerData()
            .setState(ContainerProtos.ContainerDataProto.State.CLOSED));
    });
    writeClient.deleteKey(keyArgs);
    // Wait for blocks to be deleted and container reports to be processed
    Thread.sleep(5000);
    containerInfos = scm.getContainerManager().getContainers();
    containerInfos.stream().forEach(container -> {
        Assert.assertEquals(0, container.getUsedBytes());
        Assert.assertEquals(0, container.getNumberOfKeys());
    });
    // Verify that the number of pending block deletions on each DN drops to
    // zero once the (possibly resent) delete commands have been processed
    cluster.getHddsDatanodes().forEach(dn -> {
        Map<Long, Container<?>> containerMap = dn.getDatanodeStateMachine().getContainer().getContainerSet().getContainerMap();
        containerMap.values().forEach(container -> {
            KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData();
            Assert.assertEquals(0, containerData.getNumPendingDeletionBlocks());
        });
    });
    cluster.shutdownHddsDatanode(0);
    scm.getReplicationManager().processAll();
    ((EventQueue) scm.getEventQueue()).processAll(1000);
    containerInfos = scm.getContainerManager().getContainers();
    containerInfos.stream().forEach(container -> Assert.assertEquals(HddsProtos.LifeCycleState.DELETING, container.getState()));
    LogCapturer logCapturer = LogCapturer.captureLogs(ReplicationManager.LOG);
    logCapturer.clearOutput();
    scm.getReplicationManager().processAll();
    ((EventQueue) scm.getEventQueue()).processAll(1000);
    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("Resend delete Container"), 500, 3000);
    cluster.restartHddsDatanode(0, true);
    Thread.sleep(1000);
    scm.getReplicationManager().processAll();
    ((EventQueue) scm.getEventQueue()).processAll(1000);
    GenericTestUtils.waitFor(() -> {
        List<ContainerInfo> infos = scm.getContainerManager().getContainers();
        try {
            infos.stream().forEach(container -> {
                Assert.assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState());
                try {
                    Assert.assertTrue(scm.getScmMetadataStore().getContainerTable()
                        .get(container.containerID()).getState()
                        == HddsProtos.LifeCycleState.DELETED);
                } catch (IOException e) {
                    Assert.fail("Container from SCM DB should be marked as DELETED");
                }
            });
        } catch (Throwable e) {
            return false;
        }
        return true;
    }, 500, 5000);
    LOG.info(metrics.toString());
}
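The fixed Thread.sleep(5000) before re-reading the container stats is a common source of flakiness; a hedged alternative using the same GenericTestUtils.waitFor helper the test already relies on:

// Sketch: poll SCM until every container reflects the delete, instead of sleeping.
GenericTestUtils.waitFor(() ->
    scm.getContainerManager().getContainers().stream()
        .allMatch(c -> c.getUsedBytes() == 0 && c.getNumberOfKeys() == 0),
    500, 30000);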
Also used : OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) IOException(java.io.IOException) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) LogCapturer(org.apache.ozone.test.GenericTestUtils.LogCapturer) Test(org.junit.Test)

Aggregations

OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 104 usages
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 55 usages
Test (org.junit.Test): 50 usages
ArrayList (java.util.ArrayList): 39 usages
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 36 usages
OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs): 35 usages
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 33 usages
OmKeyLocationInfoGroup (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup): 26 usages
IOException (java.io.IOException): 25 usages
BlockID (org.apache.hadoop.hdds.client.BlockID): 25 usages
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 23 usages
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 22 usages
KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream): 21 usages
OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse): 16 usages
KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs): 16 usages
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 14 usages
HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService): 12 usages
File (java.io.File): 11 usages
HashMap (java.util.HashMap): 11 usages
OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager): 11 usages