Search in sources:

Example 6 with ContainerWithPipeline

Use of org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline in the Apache Ozone project.

From the class TestContainerSmallFile, the method testReadWriteWithBCSId:

@Test
public void testReadWriteWithBCSId() throws Exception {
    // Allocate a single-replica RATIS container and acquire a client for its pipeline.
    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    XceiverClientSpi client = xceiverClientManager.acquireClient(container.getPipeline());
    try {
        ContainerProtocolCalls.createContainer(client, container.getContainerInfo().getContainerID(), null);
        BlockID blockID1 = ContainerTestHelper.getTestBlockID(container.getContainerInfo().getContainerID());
        ContainerProtos.PutSmallFileResponseProto responseProto = ContainerProtocolCalls.writeSmallFile(client, blockID1, "data123".getBytes(UTF_8), null);
        long bcsId = responseProto.getCommittedBlockLength().getBlockID().getBlockCommitSequenceId();
        try {
            blockID1.setBlockCommitSequenceId(bcsId + 1);
            // read a file with higher bcsId than the container bcsId
            ContainerProtocolCalls.readSmallFile(client, blockID1, null);
            Assert.fail("Expected exception not thrown");
        } catch (StorageContainerException sce) {
            // assertEquals reports expected vs. actual on failure, unlike assertTrue(==).
            Assert.assertEquals(ContainerProtos.Result.UNKNOWN_BCSID, sce.getResult());
        }
        // write a new block again to bump up the container bcsId
        BlockID blockID2 = ContainerTestHelper.getTestBlockID(container.getContainerInfo().getContainerID());
        ContainerProtocolCalls.writeSmallFile(client, blockID2, "data123".getBytes(UTF_8), null);
        try {
            blockID1.setBlockCommitSequenceId(bcsId + 1);
            // read a file with higher bcsId than the committed bcsId for the block
            ContainerProtocolCalls.readSmallFile(client, blockID1, null);
            Assert.fail("Expected exception not thrown");
        } catch (StorageContainerException sce) {
            Assert.assertEquals(ContainerProtos.Result.BCSID_MISMATCH, sce.getResult());
        }
        // With the correct bcsId the read must succeed and return the original payload.
        blockID1.setBlockCommitSequenceId(bcsId);
        ContainerProtos.GetSmallFileResponseProto response = ContainerProtocolCalls.readSmallFile(client, blockID1, null);
        String readData = response.getData().getDataBuffers().getBuffersList().get(0).toStringUtf8();
        Assert.assertEquals("data123", readData);
    } finally {
        // Release the client even if a protocol call or assertion above fails,
        // so a test failure does not leak the acquired client.
        xceiverClientManager.releaseClient(client, false);
    }
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) BlockID(org.apache.hadoop.hdds.client.BlockID) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Test(org.junit.Test)

Example 7 with ContainerWithPipeline

use of org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline in project ozone by apache.

the class TestContainerSmallFile method testInvalidBlockRead.

@Test
public void testInvalidBlockRead() throws Exception {
    // Allocate and create an empty container.
    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(SCMTestUtils.getReplicationType(ozoneConfig), HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    XceiverClientSpi client = xceiverClientManager.acquireClient(container.getPipeline());
    try {
        ContainerProtocolCalls.createContainer(client, container.getContainerInfo().getContainerID(), null);
        thrown.expect(StorageContainerException.class);
        thrown.expectMessage("Unable to find the block");
        BlockID blockID = ContainerTestHelper.getTestBlockID(container.getContainerInfo().getContainerID());
        // Try to read a Key Container Name
        // No block was ever written, so this call is expected to throw; the
        // return value is intentionally discarded (it was an unused local before).
        ContainerProtocolCalls.readSmallFile(client, blockID, null);
    } finally {
        // The ExpectedException rule lets the exception propagate out of
        // readSmallFile, which previously made releaseClient unreachable and
        // leaked the client. finally guarantees the release on every path.
        xceiverClientManager.releaseClient(client, false);
    }
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) BlockID(org.apache.hadoop.hdds.client.BlockID) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Test(org.junit.Test)

Example 8 with ContainerWithPipeline

use of org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline in project ozone by apache.

the class TestGetCommittedBlockLengthAndPutKey method testGetCommittedBlockLengthForInvalidBlock.

@Test
public void testGetCommittedBlockLengthForInvalidBlock() throws Exception {
    // Set up: allocate a container, create it, and immediately close it
    // without ever committing a block inside it.
    ContainerWithPipeline allocated = storageContainerLocationClient.allocateContainer(SCMTestUtils.getReplicationType(ozoneConfig), HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    long containerID = allocated.getContainerInfo().getContainerID();
    XceiverClientSpi client = xceiverClientManager.acquireClient(allocated.getPipeline());
    ContainerProtocolCalls.createContainer(client, containerID, null);
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
    // move the container to closed state
    ContainerProtocolCalls.closeContainer(client, containerID, null);
    boolean threw = false;
    try {
        // There is no block written inside the container. The request should
        // fail.
        ContainerProtocolCalls.getCommittedBlockLength(client, blockID, null);
    } catch (StorageContainerException ex) {
        threw = true;
        Assert.assertTrue(ex.getMessage().contains("Unable to find the block"));
    }
    Assert.assertTrue("Expected exception not thrown", threw);
    xceiverClientManager.releaseClient(client, false);
}
Also used : BlockID(org.apache.hadoop.hdds.client.BlockID) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Test(org.junit.Test)

Example 9 with ContainerWithPipeline

use of org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline in project ozone by apache.

the class TestGetCommittedBlockLengthAndPutKey method tesGetCommittedBlockLength.

@Test
public void tesGetCommittedBlockLength() throws Exception {
    // NOTE(review): method name is missing a 't' ("tesGet..."); kept as-is so
    // any tooling referencing the test by name keeps working.
    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(SCMTestUtils.getReplicationType(ozoneConfig), HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    long containerID = container.getContainerInfo().getContainerID();
    Pipeline pipeline = container.getPipeline();
    XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
    // create the container
    ContainerProtocolCalls.createContainer(client, containerID, null);
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
    // Write a chunk of random data (0..1023 bytes) into the block.
    byte[] data = RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8);
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, data.length, null);
    client.sendCommand(writeChunkRequest);
    // Now, explicitly make a putKey request for the block.
    ContainerProtos.ContainerCommandRequestProto putKeyRequest = ContainerTestHelper.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
    client.sendCommand(putKeyRequest);
    // Declare the response at its point of use instead of a dangling
    // up-front declaration.
    ContainerProtos.GetCommittedBlockLengthResponseProto response = ContainerProtocolCalls.getCommittedBlockLength(client, blockID, null);
    // make sure the block ids in the request and response are same.
    // assertEquals prints expected vs. actual on failure, unlike
    // assertTrue(x.equals(y)) / assertTrue(a == b).
    Assert.assertEquals(blockID, BlockID.getFromProtobuf(response.getBlockID()));
    Assert.assertEquals(data.length, response.getBlockLength());
    xceiverClientManager.releaseClient(client, false);
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) BlockID(org.apache.hadoop.hdds.client.BlockID) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Test(org.junit.Test)

Example 10 with ContainerWithPipeline

use of org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline in project ozone by apache.

the class ContainerOperationClient method deleteContainer.

/**
 * Deletes the container identified by {@code containerID}, releasing any
 * resource it uses.
 *
 * @param containerID id of the container to delete.
 * @param force       true to forcibly delete the container.
 * @throws IOException if the lookup or the delete fails.
 */
@Override
public void deleteContainer(long containerID, boolean force) throws IOException {
    // Resolve the container's pipeline first, then delegate to the
    // pipeline-aware overload.
    ContainerWithPipeline containerWithPipeline = getContainerWithPipeline(containerID);
    deleteContainer(containerID, containerWithPipeline.getPipeline(), force);
}
Also used : ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline)

Aggregations

ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline)76 Test (org.junit.Test)45 ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo)27 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)25 XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi)21 IOException (java.io.IOException)17 BlockID (org.apache.hadoop.hdds.client.BlockID)16 ArrayList (java.util.ArrayList)15 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)13 XceiverClientManager (org.apache.hadoop.hdds.scm.XceiverClientManager)13 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)12 ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID)12 ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos)10 XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply)6 Map (java.util.Map)5 HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos)5 XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis)5 OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo)5 OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)5 StorageContainerServiceProvider (org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider)5