Use of org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry in project ozone by Apache.
The class TestDiscardPreallocatedBlocks, method testDiscardPreallocatedBlocks.
@Test
public void testDiscardPreallocatedBlocks() throws Exception {
  String keyName = getKeyName();
  OzoneOutputStream key =
      createKey(keyName, ReplicationType.RATIS, 2 * blockSize);
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  // With the initial size provided, it should have pre-allocated 2 blocks.
  Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
  long containerID1 =
      keyOutputStream.getStreamEntries().get(0).getBlockID().getContainerID();
  long containerID2 =
      keyOutputStream.getStreamEntries().get(1).getBlockID().getContainerID();
  Assert.assertEquals(containerID1, containerID2);
  String dataString =
      ContainerTestHelper.getFixedLengthString(keyString, blockSize);
  byte[] data = dataString.getBytes(UTF_8);
  key.write(data);
  List<OmKeyLocationInfo> locationInfos =
      new ArrayList<>(keyOutputStream.getLocationInfoList());
  List<BlockOutputStreamEntry> locationStreamInfos =
      new ArrayList<>(keyOutputStream.getStreamEntries());
  long containerID = locationInfos.get(0).getContainerID();
  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueOf(containerID));
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  List<DatanodeDetails> datanodes = pipeline.getNodes();
  Assert.assertEquals(3, datanodes.size());
  waitForContainerClose(key);
  dataString = ContainerTestHelper.getFixedLengthString(keyString, blockSize);
  data = dataString.getBytes(UTF_8);
  key.write(data);
  Assert.assertEquals(3, keyOutputStream.getStreamEntries().size());
  // The 1st block was written and all containers are now closed, so the 2nd
  // pre-allocated block is dropped from the list and a new block should
  // have been allocated.
  Assert.assertEquals(locationInfos.get(0).getBlockID(),
      keyOutputStream.getLocationInfoList().get(0).getBlockID());
  Assert.assertNotEquals(locationStreamInfos.get(1).getBlockID(),
      keyOutputStream.getLocationInfoList().get(1).getBlockID());
  key.close();
}
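The stream-entry walk above recurs throughout these tests. As a minimal sketch, it can be factored into a helper; every accessor below appears in the test (getStreamEntries(), getBlockID(), getContainerID()), but the helper name itself is hypothetical.
  // Hypothetical helper: collect the distinct container IDs backing the
  // pre-allocated blocks of a KeyOutputStream. Needs java.util.Set and
  // java.util.LinkedHashSet in addition to the test's existing imports.
  private static Set<Long> distinctContainerIds(KeyOutputStream out) {
    Set<Long> ids = new LinkedHashSet<>();
    for (BlockOutputStreamEntry entry : out.getStreamEntries()) {
      ids.add(entry.getBlockID().getContainerID());
    }
    return ids;
  }
With such a helper, the containerID1/containerID2 check above reduces to asserting that distinctContainerIds(keyOutputStream).size() == 1.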
Use of org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry in project ozone by Apache.
The class TestFailureHandlingByClientFlushDelay, method testPipelineExclusionWithPipelineFailure.
@Test
public void testPipelineExclusionWithPipelineFailure() throws Exception {
  startCluster();
  String keyName = UUID.randomUUID().toString();
  OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, blockSize);
  String data = ContainerTestHelper.getFixedLengthString(keyString, chunkSize);
  // Get the name of a valid container.
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  List<BlockOutputStreamEntry> streamEntryList =
      keyOutputStream.getStreamEntries();
  // Assert that 1 block will be pre-allocated.
  Assert.assertEquals(1, streamEntryList.size());
  key.write(data.getBytes(UTF_8));
  key.flush();
  long containerId = streamEntryList.get(0).getBlockID().getContainerID();
  BlockID blockId = streamEntryList.get(0).getBlockID();
  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueOf(containerId));
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  List<DatanodeDetails> datanodes = pipeline.getNodes();
  // Shut down two nodes; the next write will hit AlreadyClosedException and
  // the pipeline will be added to the exclude list.
  cluster.shutdownHddsDatanode(datanodes.get(0));
  cluster.shutdownHddsDatanode(datanodes.get(1));
  key.write(data.getBytes(UTF_8));
  key.flush();
  Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds().isEmpty());
  Assert.assertTrue(keyOutputStream.getExcludeList().getDatanodes().isEmpty());
  key.write(data.getBytes(UTF_8));
  // The close will just write to the buffer.
  key.close();
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setReplicationConfig(RatisReplicationConfig.getInstance(
          HddsProtos.ReplicationFactor.THREE))
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .build();
  OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
  // Make sure a new block was written.
  Assert.assertNotEquals(keyInfo.getLatestVersionLocations()
      .getBlocksLatestVersionOnly().get(0).getBlockID(), blockId);
  Assert.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
  validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8));
}
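The two exclude-list assertions above verify that after the pipeline failure neither individual containers nor datanodes were excluded; per the comment in the test, it is the failed pipeline as a whole that lands on the exclude list. A minimal inspection sketch, assuming getExcludeList() returns the client's ExcludeList helper as its accessors in the test suggest (the printout is illustrative only):
  // Sketch: inspect what the client has excluded after a failure. The two
  // accessors are exactly the ones asserted on in the test above.
  ExcludeList excludeList = keyOutputStream.getExcludeList();
  System.out.println("excluded containers: " + excludeList.getContainerIds());
  System.out.println("excluded datanodes: " + excludeList.getDatanodes());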
Use of org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry in project ozone by Apache.
The class TestMultiBlockWritesWithDnFailures, method testMultiBlockWritesWithIntermittentDnFailures.
@Test
public void testMultiBlockWritesWithIntermittentDnFailures() throws Exception {
  startCluster(10);
  String keyName = UUID.randomUUID().toString();
  OzoneOutputStream key =
      createKey(keyName, ReplicationType.RATIS, 6 * blockSize);
  String data = ContainerTestHelper
      .getFixedLengthString(keyString, blockSize + chunkSize);
  key.write(data.getBytes(UTF_8));
  // Get the name of a valid container.
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  List<BlockOutputStreamEntry> streamEntryList =
      keyOutputStream.getStreamEntries();
  // Assert that 6 blocks will be pre-allocated.
  Assert.assertEquals(6, streamEntryList.size());
  key.write(data.getBytes(UTF_8));
  key.flush();
  long containerId = streamEntryList.get(0).getBlockID().getContainerID();
  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueOf(containerId));
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  List<DatanodeDetails> datanodes = pipeline.getNodes();
  cluster.shutdownHddsDatanode(datanodes.get(0));
  // The write will fail, but the exception is handled and the length is
  // updated correctly in the OzoneManager once the stream is closed.
  key.write(data.getBytes(UTF_8));
  // Shut down the second datanode.
  cluster.shutdownHddsDatanode(datanodes.get(1));
  key.write(data.getBytes(UTF_8));
  key.close();
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setReplicationConfig(RatisReplicationConfig.getInstance(
          HddsProtos.ReplicationFactor.THREE))
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .build();
  OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
  Assert.assertEquals(4 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
  validateData(keyName,
      data.concat(data).concat(data).concat(data).getBytes(UTF_8));
}
Use of org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry in project ozone by Apache.
The class TestOzoneClientRetriesOnExceptions, method testMaxRetriesByOzoneClient.
@Test
public void testMaxRetriesByOzoneClient() throws Exception {
  String keyName = getKeyName();
  OzoneOutputStream key =
      createKey(keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize);
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
  Assert.assertEquals(MAX_RETRIES + 1,
      keyOutputStream.getStreamEntries().size());
  int dataLength = maxFlushSize + 50;
  // Write more data than 1 chunk.
  byte[] data1 = ContainerTestHelper
      .getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
  long containerID;
  List<Long> containerList = new ArrayList<>();
  for (BlockOutputStreamEntry entry : entries) {
    containerID = entry.getBlockID().getContainerID();
    ContainerInfo container = cluster.getStorageContainerManager()
        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
    Pipeline pipeline = cluster.getStorageContainerManager()
        .getPipelineManager().getPipeline(container.getPipelineID());
    XceiverClientSpi xceiverClient =
        xceiverClientManager.acquireClient(pipeline);
    Assume.assumeFalse(containerList.contains(containerID));
    containerList.add(containerID);
    xceiverClient.sendCommand(
        ContainerTestHelper.getCreateContainerRequest(containerID, pipeline));
    xceiverClientManager.releaseClient(xceiverClient, false);
  }
  key.write(data1);
  OutputStream stream = entries.get(0).getOutputStream();
  Assert.assertTrue(stream instanceof BlockOutputStream);
  BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
  TestHelper.waitForContainerClose(key, cluster);
  // Ensure that blocks for the key have been allocated in at least N+1
  // containers, so that the write request is retried on N+1 different blocks
  // of N+1 different containers and finally fails once it hits the max
  // retry count of N.
  Assume.assumeTrue(containerList.size() + " <= " + MAX_RETRIES,
      containerList.size() > MAX_RETRIES);
  try {
    key.write(data1);
    // Ensure that the write is flushed to the datanode.
    key.flush();
    Assert.fail("Expected exception not thrown");
  } catch (IOException ioe) {
    Assert.assertTrue(HddsClientUtils.checkForException(
        blockOutputStream.getIoException())
        instanceof ContainerNotOpenException);
    Assert.assertTrue(ioe.getMessage().contains(
        "Retry request failed. retries get failed due to exceeded maximum "
            + "allowed retries number: " + MAX_RETRIES));
  }
  try {
    key.flush();
    Assert.fail("Expected exception not thrown");
  } catch (IOException ioe) {
    Assert.assertTrue(ioe.getMessage().contains("Stream is closed"));
  }
  try {
    key.close();
  } catch (IOException ioe) {
    Assert.fail("Exception should not be thrown");
  }
}
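For application code, the takeaway from this test is that once the retry budget is exhausted the stream becomes unusable, but close() is still safe to call. A hedged sketch of handling this in a caller, matching the message fragment asserted above; the recovery policy itself is illustrative, not prescribed by the client API:
  // Sketch: surface retry exhaustion to the caller. The message fragment is
  // the one asserted in the test; what to do on failure is application policy.
  try {
    key.write(data1);
    key.flush();
  } catch (IOException ioe) {
    if (ioe.getMessage().contains("exceeded maximum allowed retries number")) {
      // Further writes and flushes will fail with "Stream is closed";
      // close() itself still succeeds, as the test verifies.
      key.close();
    } else {
      throw ioe;
    }
  }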
Use of org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry in project ozone by Apache.
The class TestHelper, method waitForPipelineClose.
public static void waitForPipelineClose(OzoneOutputStream outputStream,
    MiniOzoneCluster cluster, boolean waitForContainerCreation)
    throws Exception {
  KeyOutputStream keyOutputStream =
      (KeyOutputStream) outputStream.getOutputStream();
  List<BlockOutputStreamEntry> streamEntryList =
      keyOutputStream.getStreamEntries();
  List<Long> containerIdList = new ArrayList<>();
  for (BlockOutputStreamEntry entry : streamEntryList) {
    long id = entry.getBlockID().getContainerID();
    if (!containerIdList.contains(id)) {
      containerIdList.add(id);
    }
  }
  Assert.assertFalse(containerIdList.isEmpty());
  waitForPipelineClose(cluster, waitForContainerCreation,
      containerIdList.toArray(new Long[0]));
}
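A typical call site, following the pattern of the tests above; the argument values are illustrative, with key being an open OzoneOutputStream as in the other examples:
  // Wait for the pipelines behind the open key to close, without first
  // waiting for container creation.
  TestHelper.waitForPipelineClose(key, cluster, false);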