Example 21 with BlockID

Use of org.apache.hadoop.hdds.client.BlockID in the Apache Ozone project.

From the class KeyValueHandler, method handleGetBlock:

/**
 * Handle Get Block operation. Calls BlockManager to process the request.
 */
ContainerCommandResponseProto handleGetBlock(ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
    if (!request.hasGetBlock()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Malformed Get Key request. trace ID: {}", request.getTraceID());
        }
        return malformedRequest(request);
    }
    ContainerProtos.BlockData responseData;
    try {
        BlockID blockID = BlockID.getFromProtobuf(request.getGetBlock().getBlockID());
        checkContainerIsHealthy(kvContainer, blockID, Type.GetBlock);
        responseData = blockManager.getBlock(kvContainer, blockID).getProtoBufMessage();
        final long numBytes = responseData.getSerializedSize();
        metrics.incContainerBytesStats(Type.GetBlock, numBytes);
    } catch (StorageContainerException ex) {
        return ContainerUtils.logAndReturnError(LOG, ex, request);
    } catch (IOException ex) {
        return ContainerUtils.logAndReturnError(LOG, new StorageContainerException("Get Key failed", ex, IO_EXCEPTION), request);
    }
    return getBlockDataResponse(request, responseData);
}
Also used: ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), BlockID (org.apache.hadoop.hdds.client.BlockID), StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException), IOException (java.io.IOException)
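
As an aside on the conversion in this handler: BlockID is a small value object over a (containerID, localID) pair, and the protobuf mapping is a symmetric round-trip. The sketch below is not from the Ozone sources; the IDs are placeholders, and getProtobuf() is assumed to be the counterpart of the getFromProtobuf() call used above.

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public final class BlockIdRoundTripSketch {
    public static void main(String[] args) {
        // a BlockID identifies a block as (containerID, localID);
        // 1L and 42L are placeholder values for illustration
        BlockID original = new BlockID(1L, 42L);
        // serialize to the wire form and decode it again
        HddsProtos.BlockID proto = original.getProtobuf();
        BlockID decoded = BlockID.getFromProtobuf(proto);
        System.out.println(decoded.getContainerID());  // 1
        System.out.println(decoded.getLocalID());      // 42
    }
}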

Example 22 with BlockID

Use of org.apache.hadoop.hdds.client.BlockID in the Apache Ozone project.

From the class TestCommitWatcher, method testReleaseBuffers:

@Test
public void testReleaseBuffers() throws Exception {
    int capacity = 2;
    BufferPool bufferPool = new BufferPool(chunkSize, capacity);
    XceiverClientManager clientManager = new XceiverClientManager(conf);
    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
    Pipeline pipeline = container.getPipeline();
    long containerId = container.getContainerInfo().getContainerID();
    XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
    Assert.assertEquals(1, xceiverClient.getRefcount());
    Assert.assertTrue(xceiverClient instanceof XceiverClientRatis);
    XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
    CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient);
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
    List<XceiverClientReply> replies = new ArrayList<>();
    long length = 0;
    List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>> futures = new ArrayList<>();
    for (int i = 0; i < capacity; i++) {
        ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, chunkSize, null);
        // add the data to the buffer pool
        final ChunkBuffer byteBuffer = bufferPool.allocateBuffer(0);
        byteBuffer.put(writeChunkRequest.getWriteChunk().getData());
        ratisClient.sendCommandAsync(writeChunkRequest);
        ContainerProtos.ContainerCommandRequestProto putBlockRequest = ContainerTestHelper.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
        XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
        final List<ChunkBuffer> bufferList = singletonList(byteBuffer);
        length += byteBuffer.position();
        CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future = reply.getResponse().thenApply(v -> {
            watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList);
            return v;
        });
        futures.add(future);
        watcher.getFutureMap().put(length, future);
        replies.add(reply);
    }
    Assert.assertTrue(replies.size() == 2);
    // wait on the 1st putBlock to complete
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future1 = futures.get(0);
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future2 = futures.get(1);
    future1.get();
    Assert.assertNotNull(watcher.getFutureMap().get((long) chunkSize));
    Assert.assertTrue(watcher.getFutureMap().get((long) chunkSize).equals(future1));
    // wait on 2nd putBlock to complete
    future2.get();
    Assert.assertNotNull(watcher.getFutureMap().get((long) 2 * chunkSize));
    Assert.assertTrue(watcher.getFutureMap().get((long) 2 * chunkSize).equals(future2));
    Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().size() == 2);
    watcher.watchOnFirstIndex();
    Assert.assertFalse(watcher.getCommitIndex2flushedDataMap().containsKey(replies.get(0).getLogIndex()));
    Assert.assertFalse(watcher.getFutureMap().containsKey(chunkSize));
    Assert.assertTrue(watcher.getTotalAckDataLength() >= chunkSize);
    watcher.watchOnLastIndex();
    Assert.assertFalse(watcher.getCommitIndex2flushedDataMap().containsKey(replies.get(1).getLogIndex()));
    Assert.assertFalse(watcher.getFutureMap().containsKey(2 * chunkSize));
    Assert.assertTrue(watcher.getTotalAckDataLength() == 2 * chunkSize);
    Assert.assertTrue(watcher.getFutureMap().isEmpty());
    Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().isEmpty());
}
Also used: CommitWatcher (org.apache.hadoop.hdds.scm.storage.CommitWatcher), ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), ArrayList (java.util.ArrayList), XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis), XceiverClientManager (org.apache.hadoop.hdds.scm.XceiverClientManager), XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi), ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply), CompletableFuture (java.util.concurrent.CompletableFuture), BufferPool (org.apache.hadoop.hdds.scm.storage.BufferPool), BlockID (org.apache.hadoop.hdds.client.BlockID), ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer), Test (org.junit.Test)
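
The heart of the test is CommitWatcher's bookkeeping: each flushed buffer list is keyed by the Ratis log index of its putBlock, and watching an index releases every buffer committed at or below it. The following standalone sketch uses only the JDK to illustrate that pattern; it is a simplification for illustration, not CommitWatcher's actual implementation.

import java.nio.ByteBuffer;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;

final class CommitIndexSketch {

    // log index of the putBlock -> buffers flushed under that index
    private final TreeMap<Long, List<ByteBuffer>> commitIndexToBuffers = new TreeMap<>();

    void trackFlushed(long logIndex, List<ByteBuffer> buffers) {
        commitIndexToBuffers.put(logIndex, buffers);
    }

    // Called once the pipeline has committed up to ackedIndex:
    // every entry at or below it can be released back to the pool.
    long releaseUpTo(long ackedIndex) {
        long ackedBytes = 0;
        SortedMap<Long, List<ByteBuffer>> done = commitIndexToBuffers.headMap(ackedIndex, true);
        for (List<ByteBuffer> buffers : done.values()) {
            for (ByteBuffer buffer : buffers) {
                ackedBytes += buffer.position();
            }
        }
        // clearing the head-map view removes the entries from the map
        done.clear();
        return ackedBytes;
    }
}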

Example 23 with BlockID

Use of org.apache.hadoop.hdds.client.BlockID in the Apache Ozone project.

From the class TestFailureHandlingByClient, method testWriteSmallFile:

@Test
public void testWriteSmallFile() throws Exception {
    startCluster();
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
    String data = ContainerTestHelper.getFixedLengthString(keyString, chunkSize / 2);
    key.write(data.getBytes(UTF_8));
    // get the name of a valid container
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    List<OmKeyLocationInfo> locationInfoList = keyOutputStream.getLocationInfoList();
    long containerId = locationInfoList.get(0).getContainerID();
    BlockID blockId = locationInfoList.get(0).getBlockID();
    ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerId));
    Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
    List<DatanodeDetails> datanodes = pipeline.getNodes();
    cluster.shutdownHddsDatanode(datanodes.get(0));
    cluster.shutdownHddsDatanode(datanodes.get(1));
    key.close();
    // this will throw AlreadyClosedException; the current stream
    // will be discarded and a new block will be written
    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setReplicationConfig(RatisReplicationConfig.getInstance(THREE)).setKeyName(keyName).setRefreshPipeline(true).build();
    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
    // Make sure a new block is written
    Assert.assertNotEquals(keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0).getBlockID(), blockId);
    Assert.assertEquals(data.getBytes(UTF_8).length, keyInfo.getDataSize());
    validateData(keyName, data.getBytes(UTF_8));
}
Also used: OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), BlockID (org.apache.hadoop.hdds.client.BlockID), KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream), Test (org.junit.Test)
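
The final assertNotEquals holds because BlockID has value-based equality (at minimum over its container and local IDs), so the block rewritten after the two datanode shutdowns carries a distinguishable identity even though the key name is unchanged. A minimal sketch with placeholder IDs:

import org.apache.hadoop.hdds.client.BlockID;

public final class BlockIdEqualitySketch {
    public static void main(String[] args) {
        BlockID first = new BlockID(7L, 100L);
        // a retried write lands on a new container and a new local id
        BlockID retried = new BlockID(8L, 101L);
        System.out.println(first.equals(retried));               // false
        System.out.println(first.equals(new BlockID(7L, 100L))); // true
    }
}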

Example 24 with BlockID

Use of org.apache.hadoop.hdds.client.BlockID in the Apache Ozone project.

From the class TestFailureHandlingByClientFlushDelay, method testPipelineExclusionWithPipelineFailure:

@Test
public void testPipelineExclusionWithPipelineFailure() throws Exception {
    startCluster();
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, blockSize);
    String data = ContainerTestHelper.getFixedLengthString(keyString, chunkSize);
    // get the name of a valid container
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    List<BlockOutputStreamEntry> streamEntryList = keyOutputStream.getStreamEntries();
    // Assert that 1 block will be preallocated
    Assert.assertEquals(1, streamEntryList.size());
    key.write(data.getBytes(UTF_8));
    key.flush();
    long containerId = streamEntryList.get(0).getBlockID().getContainerID();
    BlockID blockId = streamEntryList.get(0).getBlockID();
    ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerId));
    Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
    List<DatanodeDetails> datanodes = pipeline.getNodes();
    // Two nodes are down, so the next write will hit AlreadyClosedException
    // and the pipeline will be added to the exclude list
    cluster.shutdownHddsDatanode(datanodes.get(0));
    cluster.shutdownHddsDatanode(datanodes.get(1));
    key.write(data.getBytes(UTF_8));
    key.flush();
    Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds().isEmpty());
    Assert.assertTrue(keyOutputStream.getExcludeList().getDatanodes().isEmpty());
    key.write(data.getBytes(UTF_8));
    // The close will just write to the buffer
    key.close();
    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)).setKeyName(keyName).setRefreshPipeline(true).build();
    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
    // Make sure a new block is written
    Assert.assertNotEquals(keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0).getBlockID(), blockId);
    Assert.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
    validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8));
}
Also used: OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), BlockOutputStreamEntry (org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), BlockID (org.apache.hadoop.hdds.client.BlockID), KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream), Test (org.junit.Test)
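
The empty-set assertions are the point of the test: a pipeline-level failure excludes only the failed pipeline, leaving the container and datanode exclude sets untouched so the surviving nodes stay usable. The conceptual sketch below illustrates that bookkeeping; it is an illustration only, not the ExcludeList class from the Ozone client.

import java.util.HashSet;
import java.util.Set;

final class ExcludeListSketch {
    private final Set<String> pipelineIds = new HashSet<>();
    private final Set<Long> containerIds = new HashSet<>();
    private final Set<String> datanodes = new HashSet<>();

    // a pipeline failure excludes the pipeline itself, not its nodes,
    // so the same datanodes can still serve other pipelines
    void onPipelineFailure(String pipelineId) {
        pipelineIds.add(pipelineId);
    }

    // a container-level error excludes just that container
    void onContainerFailure(long containerId) {
        containerIds.add(containerId);
    }

    boolean isExcluded(String pipelineId, long containerId) {
        return pipelineIds.contains(pipelineId) || containerIds.contains(containerId);
    }
}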

Example 25 with BlockID

Use of org.apache.hadoop.hdds.client.BlockID in the Apache Ozone project.

From the class OzoneTestUtils, method performOperationOnKeyContainers:

/**
 * Performs the provided consumer on containers which contain the blocks
 * listed in omKeyLocationInfoGroups.
 *
 * @param consumer Consumer which accepts BlockID as argument.
 * @param omKeyLocationInfoGroups locationInfos for a key.
 * @throws Exception if the consumer fails for any block.
 */
public static void performOperationOnKeyContainers(CheckedConsumer<BlockID, Exception> consumer, List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups) throws Exception {
    for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : omKeyLocationInfoGroups) {
        List<OmKeyLocationInfo> omKeyLocationInfos = omKeyLocationInfoGroup.getLocationList();
        for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) {
            BlockID blockID = omKeyLocationInfo.getBlockID();
            consumer.accept(blockID);
        }
    }
}
Also used: OmKeyLocationInfoGroup (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup), BlockID (org.apache.hadoop.hdds.client.BlockID), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)
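
A hypothetical caller, sketched to show how the helper is driven from a test: the location groups would come from an OmKeyInfo lookup (getKeyLocationVersions() is assumed here as the accessor), and the consumer receives every BlockID the key references.

// keyInfo obtained elsewhere, e.g. from an OzoneManager key lookup
List<OmKeyLocationInfoGroup> keyLocationGroups = keyInfo.getKeyLocationVersions();
OzoneTestUtils.performOperationOnKeyContainers(
    blockID -> System.out.println("container " + blockID.getContainerID()
        + ", local id " + blockID.getLocalID()),
    keyLocationGroups);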

Aggregations

BlockID (org.apache.hadoop.hdds.client.BlockID): 112
Test (org.junit.Test): 64
ArrayList (java.util.ArrayList): 41
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 37
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 31
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 25
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 25
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 21
IOException (java.io.IOException): 20
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 20
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 19
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 18
ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline): 18
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 18
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 15
ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 14
OmKeyLocationInfoGroup (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup): 14
ContainerBlockID (org.apache.hadoop.hdds.client.ContainerBlockID): 12
ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto): 11
MockPipeline (org.apache.hadoop.hdds.scm.pipeline.MockPipeline): 11