Use of org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException in project Ozone by Apache.
The class FilePerChunkStrategy, method writeChunk.
/**
 * Writes a given chunk.
 *
 * @param container - Container for the chunk
 * @param blockID - ID of the block
 * @param info - ChunkInfo
 * @param data - data of the chunk
 * @param dispatcherContext - dispatcher context info
 * @throws StorageContainerException
 */
@Override
public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
    ChunkBuffer data, DispatcherContext dispatcherContext)
    throws StorageContainerException {
  checkLayoutVersion(container);

  Preconditions.checkNotNull(dispatcherContext);
  DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();

  try {
    KeyValueContainer kvContainer = (KeyValueContainer) container;
    KeyValueContainerData containerData = kvContainer.getContainerData();
    HddsVolume volume = containerData.getVolume();

    File chunkFile = getChunkFile(kvContainer, blockID, info);
    boolean isOverwrite = ChunkUtils.validateChunkForOverwrite(chunkFile, info);
    File tmpChunkFile = getTmpChunkFile(chunkFile, dispatcherContext);
    if (LOG.isDebugEnabled()) {
      LOG.debug("writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file:{}",
          info.getChunkName(), stage, chunkFile, tmpChunkFile);
    }

    long len = info.getLen();
    // ignore offset in chunk info
    long offset = 0;

    switch (stage) {
    case WRITE_DATA:
      if (isOverwrite) {
        // If the actual chunk file already exists while the temp chunk file
        // is being written, the same Ozone client request has generated two
        // Raft log entries. This can happen when the retry cache expires in
        // Ratis, or on a log index mismatch/corruption in Ratis. Two
        // approaches are possible:
        // 1. Read the complete data in the actual chunk file, verify its
        //    integrity, and rewrite the chunk only if it does not match.
        // 2. Delete the chunk file and write the chunk again.
        // For now, rewrite the chunk file.
        // TODO: once checksum support for write chunks is plugged in, verify
        // the checksum of the existing chunk file against the data to be
        // written here; if they match, we can safely return without
        // rewriting, which should be more efficient.
        LOG.warn("ChunkFile already exists {}. Deleting it.", chunkFile);
        FileUtil.fullyDelete(chunkFile);
      }
      if (tmpChunkFile.exists()) {
        // If the tmp chunk file already exists, the Raft log entry was
        // appended but later truncated in Ratis, leaving garbage behind.
        // TODO: once checksum support for data chunks is plugged in, compare
        // the checksums instead of rewriting the chunk here.
        LOG.warn("tmpChunkFile already exists {}. Overwriting it.", tmpChunkFile);
      }
      // Initially writes to the temporary chunk file.
      ChunkUtils.writeData(tmpChunkFile, data, offset, len, volume, doSyncWrite);
      // No need to increment container stats here, as the data is not yet
      // committed here.
      break;
    case COMMIT_DATA:
      // Commit the data: move the chunk data from the temporary chunk file
      // to the actual chunk file.
      if (isOverwrite) {
        // If the actual chunk file already exists, it implies the write
        // chunk transaction in the ContainerStateMachine is being reapplied.
        // This can happen when a node restarts.
        // TODO: verify the checksums of the existing chunk file against the
        // ChunkInfo being committed here.
        LOG.warn("ChunkFile already exists {}", chunkFile);
        return;
      }
      // While committing a chunk, just rename the tmp chunk file, which has
      // the term and log index of the current transaction appended to its
      // name.
      commitChunk(tmpChunkFile, chunkFile);
      // Increment container stats here, as we commit the data.
      containerData.updateWriteStats(len, isOverwrite);
      break;
    case COMBINED:
      // Write directly to the chunk file.
      ChunkUtils.writeData(chunkFile, data, offset, len, volume, doSyncWrite);
      containerData.updateWriteStats(len, isOverwrite);
      break;
    default:
      throw new IOException("Can not identify write operation.");
    }
  } catch (StorageContainerException ex) {
    throw ex;
  } catch (IOException ex) {
    throw new StorageContainerException("Internal error: ", ex, IO_EXCEPTION);
  }
}
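The WRITE_DATA/COMMIT_DATA split above boils down to writing the payload into a temporary sibling file and atomically renaming it into place once the transaction commits. The following is a minimal, self-contained sketch of that pattern using plain java.nio; the class and method names (TmpFileCommitSketch, writeToTmp, commit) are illustrative and are not the Ozone ChunkUtils/FilePerChunkStrategy API.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

/** Illustrative write-then-commit helper; not the Ozone implementation. */
final class TmpFileCommitSketch {

  /** WRITE_DATA stage: write the payload to a temporary sibling file. */
  static Path writeToTmp(Path chunkFile, ByteBuffer data) throws IOException {
    Path tmp = chunkFile.resolveSibling(chunkFile.getFileName() + ".tmp");
    try (FileChannel ch = FileChannel.open(tmp,
        StandardOpenOption.CREATE, StandardOpenOption.WRITE,
        StandardOpenOption.TRUNCATE_EXISTING)) {
      while (data.hasRemaining()) {
        ch.write(data);
      }
      ch.force(true); // comparable in spirit to doSyncWrite
    }
    return tmp;
  }

  /** COMMIT_DATA stage: atomically move the tmp file into place.
   *  Assumes the destination does not already exist, mirroring the early
   *  return on isOverwrite in the COMMIT_DATA case above. */
  static void commit(Path tmp, Path chunkFile) throws IOException {
    Files.move(tmp, chunkFile, StandardCopyOption.ATOMIC_MOVE);
  }
}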
Use of org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException in project Ozone by Apache.
The class TestContainerPersistence, method testUpdateContainer.
/**
 * Tries to update an existing and a non-existing container. Verifies that
 * both the container map and the persistent data are updated.
 *
 * @throws IOException
 */
@Test
public void testUpdateContainer() throws IOException {
  long testContainerID = ContainerTestHelper.getTestContainerID();
  KeyValueContainer container =
      (KeyValueContainer) addContainer(containerSet, testContainerID);

  File orgContainerFile = container.getContainerFile();
  Assert.assertTrue(orgContainerFile.exists());

  Map<String, String> newMetadata = Maps.newHashMap();
  newMetadata.put("VOLUME", "shire_new");
  newMetadata.put("owner", "bilbo_new");

  container.update(newMetadata, false);

  Assert.assertEquals(1, containerSet.getContainerMapCopy().size());
  Assert.assertTrue(containerSet.getContainerMapCopy().containsKey(testContainerID));

  // Verify in-memory map
  KeyValueContainerData actualNewData = (KeyValueContainerData)
      containerSet.getContainer(testContainerID).getContainerData();
  Assert.assertEquals("shire_new", actualNewData.getMetadata().get("VOLUME"));
  Assert.assertEquals("bilbo_new", actualNewData.getMetadata().get("owner"));

  // Verify container data on disk
  File containerBaseDir = new File(actualNewData.getMetadataPath()).getParentFile();
  File newContainerFile = ContainerUtils.getContainerFile(containerBaseDir);
  Assert.assertTrue("Container file should exist.", newContainerFile.exists());
  Assert.assertEquals("Container file should be in same location.",
      orgContainerFile.getAbsolutePath(), newContainerFile.getAbsolutePath());

  ContainerData actualContainerData = ContainerDataYaml.readContainerFile(newContainerFile);
  Assert.assertEquals("shire_new", actualContainerData.getMetadata().get("VOLUME"));
  Assert.assertEquals("bilbo_new", actualContainerData.getMetadata().get("owner"));

  // Test the force update flag.
  // Close the container and then try to update without the force update flag.
  container.close();
  try {
    container.update(newMetadata, false);
  } catch (StorageContainerException ex) {
    Assert.assertEquals("Updating a closed container without "
        + "force option is not allowed. ContainerID: " + testContainerID,
        ex.getMessage());
  }

  // Update with the force flag; it should succeed.
  newMetadata.put("VOLUME", "shire_new_1");
  newMetadata.put("owner", "bilbo_new_1");
  container.update(newMetadata, true);

  // Verify in-memory map
  actualNewData = (KeyValueContainerData)
      containerSet.getContainer(testContainerID).getContainerData();
  Assert.assertEquals("shire_new_1", actualNewData.getMetadata().get("VOLUME"));
  Assert.assertEquals("bilbo_new_1", actualNewData.getMetadata().get("owner"));
}
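The closed-container check above passes silently if no exception is thrown at all. If the project's JUnit version provides it, the same check can be written with assertThrows, which also fails the test in that case. A hedged sketch, assuming JUnit 4.13+ (Assert.assertThrows) and the same fields as in testUpdateContainer:

// Sketch only: assumes Assert.assertThrows is available (JUnit 4.13+).
container.close();
StorageContainerException ex = Assert.assertThrows(StorageContainerException.class,
    () -> container.update(newMetadata, false));
Assert.assertEquals("Updating a closed container without force option is not allowed. "
    + "ContainerID: " + testContainerID, ex.getMessage());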
Use of org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException in project Ozone by Apache.
The class TestContainerPersistence, method testPutBlockWithInvalidBCSId.
/**
 * Tests a put block and read block with an invalid bcsId.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testPutBlockWithInvalidBCSId()
    throws IOException, NoSuchAlgorithmException {
  long testContainerID = getTestContainerID();
  Container container = addContainer(containerSet, testContainerID);
  BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID);
  ChunkInfo info = writeChunkHelper(blockID1);
  BlockData blockData = new BlockData(blockID1);
  List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
  chunkList.add(info.getProtoBufMessage());
  blockData.setChunks(chunkList);
  blockData.setBlockCommitSequenceId(3);
  blockManager.putBlock(container, blockData);
  chunkList.clear();

  // Write a 2nd block
  BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID);
  info = writeChunkHelper(blockID2);
  blockData = new BlockData(blockID2);
  chunkList.add(info.getProtoBufMessage());
  blockData.setChunks(chunkList);
  blockData.setBlockCommitSequenceId(4);
  blockManager.putBlock(container, blockData);
  BlockData readBlockData;

  try {
    blockID1.setBlockCommitSequenceId(5);
    // Read with a bcsId higher than the container bcsId
    blockManager.getBlock(container, blockID1);
    Assert.fail("Expected exception not thrown");
  } catch (StorageContainerException sce) {
    Assert.assertTrue(sce.getResult() == UNKNOWN_BCSID);
  }

  try {
    blockID1.setBlockCommitSequenceId(4);
    // Read with a bcsId lower than the container bcsId but greater than the
    // committed bcsId of the block
    blockManager.getBlock(container, blockID1);
    Assert.fail("Expected exception not thrown");
  } catch (StorageContainerException sce) {
    Assert.assertTrue(sce.getResult() == BCSID_MISMATCH);
  }

  readBlockData = blockManager.getBlock(container, blockData.getBlockID());
  ChunkInfo readChunk = ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
  Assert.assertEquals(info.getChecksumData(), readChunk.getChecksumData());
}
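The two failures exercised above follow from how a block read validates the requested block-commit sequence id (bcsId): a request above the container's highest bcsId is unknown, while a request above the block's own committed bcsId is a mismatch. A hedged sketch of that check is below; the method and parameter names are illustrative, not the Ozone block manager API, and UNKNOWN_BCSID/BCSID_MISMATCH are the ContainerProtos result codes statically imported by the test.

// Illustrative validation only; not the Ozone block manager code.
static void validateBcsId(long requestedBcsId, long containerBcsId,
    long blockCommittedBcsId) throws StorageContainerException {
  if (requestedBcsId > containerBcsId) {
    // e.g. requested 5 while the container has only committed up to 4
    throw new StorageContainerException(
        "Unknown bcsId " + requestedBcsId, UNKNOWN_BCSID);
  }
  if (requestedBcsId > blockCommittedBcsId) {
    // e.g. requested 4 for a block that was committed at bcsId 3
    throw new StorageContainerException(
        "bcsId mismatch: requested " + requestedBcsId
            + ", committed " + blockCommittedBcsId, BCSID_MISMATCH);
  }
}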
Use of org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException in project Ozone by Apache.
The class TestChunkUtils, method readMissingFile.
@Test
public void readMissingFile() throws Exception {
  // given
  int len = 123;
  int offset = 0;
  File nonExistentFile = new File("nosuchfile");
  ByteBuffer[] bufs = BufferUtils.assignByteBuffers(len, len);

  // when
  StorageContainerException e = LambdaTestUtils.intercept(
      StorageContainerException.class,
      () -> ChunkUtils.readData(nonExistentFile, bufs, offset, len, null));

  // then
  Assert.assertEquals(UNABLE_TO_FIND_CHUNK, e.getResult());
}
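The result code asserted here comes from the read path translating a missing chunk file into a StorageContainerException instead of letting a raw NoSuchFileException escape. A minimal sketch of that translation follows; the class and method names are hypothetical (this is not ChunkUtils.readData), and it assumes the same StorageContainerException constructors and statically imported result codes (UNABLE_TO_FIND_CHUNK, IO_EXCEPTION) used elsewhere in these examples.

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.NoSuchFileException;
import java.nio.file.StandardOpenOption;

// Hypothetical helper class; not part of Ozone's ChunkUtils.
final class ChunkReadSketch {

  static void readChunkFully(File chunkFile, ByteBuffer buf, long offset)
      throws StorageContainerException {
    try (FileChannel ch = FileChannel.open(chunkFile.toPath(),
        StandardOpenOption.READ)) {
      long pos = offset;
      while (buf.hasRemaining()) {
        int n = ch.read(buf, pos);
        if (n < 0) {
          break; // reached EOF before the buffer was full
        }
        pos += n;
      }
    } catch (NoSuchFileException e) {
      // A missing chunk file becomes a well-defined result code for the client.
      throw new StorageContainerException("Chunk file not found: " + chunkFile,
          e, UNABLE_TO_FIND_CHUNK);
    } catch (IOException e) {
      throw new StorageContainerException("Failed to read chunk: " + chunkFile,
          e, IO_EXCEPTION);
    }
  }
}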
Use of org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException in project Ozone by Apache.
The class TestBlockInputStream, method testRefreshOnReadFailure.
@Test
public void testRefreshOnReadFailure() throws Exception {
  // GIVEN
  BlockID blockID = new BlockID(new ContainerBlockID(1, 1));
  Pipeline pipeline = MockPipeline.createSingleNodePipeline();
  Pipeline newPipeline = MockPipeline.createSingleNodePipeline();

  final int len = 200;
  final ChunkInputStream stream = mock(ChunkInputStream.class);
  when(stream.read(any(), anyInt(), anyInt()))
      .thenThrow(new StorageContainerException("test", CONTAINER_NOT_FOUND))
      .thenReturn(len);
  when(stream.getRemaining()).thenReturn((long) len);

  when(refreshPipeline.apply(blockID)).thenReturn(newPipeline);

  BlockInputStream subject = new DummyBlockInputStream(blockID, blockSize,
      pipeline, null, false, null, refreshPipeline, chunks, null) {
    @Override
    protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) {
      return stream;
    }
  };

  try {
    subject.initialize();

    // WHEN
    byte[] b = new byte[len];
    int bytesRead = subject.read(b, 0, len);

    // THEN
    Assert.assertEquals(len, bytesRead);
    verify(refreshPipeline).apply(blockID);
  } finally {
    subject.close();
  }
}
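The behaviour this test mocks out is a read that fails once with CONTAINER_NOT_FOUND, refreshes the pipeline through the supplied function, and then succeeds on the retry. A compact sketch of that retry logic is below; all names (RefreshRetrySketch, ReadOp, readWithRefresh) are illustrative and this is not the real BlockInputStream internals, which handle more failure cases. BlockID, Pipeline, StorageContainerException and CONTAINER_NOT_FOUND are the same types and result code used in the test.

import java.io.IOException;
import java.util.function.Function;

// Illustrative retry-on-stale-pipeline logic; not the real BlockInputStream code.
final class RefreshRetrySketch {

  interface ReadOp {
    int read(Pipeline pipeline) throws IOException;
  }

  static int readWithRefresh(BlockID blockID, Pipeline pipeline,
      Function<BlockID, Pipeline> refreshPipeline, ReadOp readOnce)
      throws IOException {
    try {
      return readOnce.read(pipeline);
    } catch (StorageContainerException e) {
      // Only a stale pipeline is refreshed and retried once;
      // any other failure is propagated to the caller.
      if (e.getResult() != CONTAINER_NOT_FOUND) {
        throw e;
      }
      Pipeline refreshed = refreshPipeline.apply(blockID);
      return readOnce.read(refreshed);
    }
  }
}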