use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class TestOzoneContainer method addBlocks.
private long addBlocks(KeyValueContainer container, int blocks, int chunksPerBlock) throws Exception {
  String strBlock = "block";
  String strChunk = "-chunkFile";
  int datalen = 65536;
  long usedBytes = 0;
  long freeBytes = container.getContainerData().getMaxSize();
  long containerId = container.getContainerData().getContainerID();
  ReferenceCountedDB db = BlockUtils.getDB(container.getContainerData(), conf);
  Table<String, Long> metadataTable = db.getStore().getMetadataTable();
  Table<String, BlockData> blockDataTable = db.getStore().getBlockDataTable();
  for (int bi = 0; bi < blocks; bi++) {
    // Create the block and its chunk list.
    BlockID blockID = new BlockID(containerId, bi);
    BlockData blockData = new BlockData(blockID);
    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
    for (int ci = 0; ci < chunksPerBlock; ci++) {
      String chunkName = strBlock + bi + strChunk + ci;
      long offset = ci * (long) datalen;
      ChunkInfo info = new ChunkInfo(chunkName, offset, datalen);
      usedBytes += datalen;
      chunkList.add(info.getProtoBufMessage());
    }
    blockData.setChunks(chunkList);
    blockDataTable.put(Long.toString(blockID.getLocalID()), blockData);
  }
  // Set block count and used bytes.
  metadataTable.put(OzoneConsts.BLOCK_COUNT, (long) blocks);
  metadataTable.put(OzoneConsts.CONTAINER_BYTES_USED, usedBytes);
  db.close();
  // Return the remaining available capacity of the container.
  return (freeBytes - usedBytes);
}
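The pattern above hinges on the round trip between ChunkInfo and its protobuf form: the helper builds a ChunkInfo, converts it with getProtoBufMessage(), and stores the protobuf list on the BlockData. A minimal sketch of that round trip, using only constructors and accessors that appear in these snippets (the chunk name, offset, and length are illustrative values):

  // Build a ChunkInfo, serialize it to its protobuf message, and restore it.
  ChunkInfo original = new ChunkInfo("block0-chunkFile0", 0, 65536);
  ContainerProtos.ChunkInfo proto = original.getProtoBufMessage();
  ChunkInfo restored = ChunkInfo.getFromProtoBuf(proto);
  // The round trip preserves the chunk name and length.
  Assert.assertEquals(original.getChunkName(), restored.getChunkName());
  Assert.assertEquals(original.getLen(), restored.getLen());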
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class TestKeyValueContainerIntegrityChecks method createContainerWithBlocks.
/**
* Creates a container with normal and deleted blocks.
* First it will insert normal blocks, and then it will insert
* deleted blocks.
*/
protected KeyValueContainer createContainerWithBlocks(long containerId,
    int normalBlocks, int deletedBlocks) throws Exception {
  String strBlock = "block";
  String strChunk = "-chunkFile";
  long totalBlocks = normalBlocks + deletedBlocks;
  int bytesPerChecksum = 2 * UNIT_LEN;
  Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256, bytesPerChecksum);
  byte[] chunkData = RandomStringUtils.randomAscii(CHUNK_LEN).getBytes(UTF_8);
  ChecksumData checksumData = checksum.computeChecksum(chunkData);
  DispatcherContext writeStage = new DispatcherContext.Builder()
      .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
  DispatcherContext commitStage = new DispatcherContext.Builder()
      .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
  KeyValueContainerData containerData = new KeyValueContainerData(containerId,
      containerLayoutTestInfo.getLayout(),
      (long) CHUNKS_PER_BLOCK * CHUNK_LEN * totalBlocks,
      UUID.randomUUID().toString(), UUID.randomUUID().toString());
  KeyValueContainer container = new KeyValueContainer(containerData, conf);
  container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString());
  try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, conf)) {
    assertNotNull(containerData.getChunksPath());
    File chunksPath = new File(containerData.getChunksPath());
    containerLayoutTestInfo.validateFileCount(chunksPath, 0, 0);
    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
    for (int i = 0; i < totalBlocks; i++) {
      BlockID blockID = new BlockID(containerId, i);
      BlockData blockData = new BlockData(blockID);
      chunkList.clear();
      for (long chunkCount = 0; chunkCount < CHUNKS_PER_BLOCK; chunkCount++) {
        String chunkName = strBlock + i + strChunk + chunkCount;
        long offset = chunkCount * CHUNK_LEN;
        ChunkInfo info = new ChunkInfo(chunkName, offset, CHUNK_LEN);
        info.setChecksumData(checksumData);
        chunkList.add(info.getProtoBufMessage());
        chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), writeStage);
        chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), commitStage);
      }
      blockData.setChunks(chunkList);
      // normal key
      String key = Long.toString(blockID.getLocalID());
      if (i >= normalBlocks) {
        // deleted key
        key = OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID();
      }
      metadataStore.getStore().getBlockDataTable().put(key, blockData);
    }
    containerLayoutTestInfo.validateFileCount(chunksPath, totalBlocks, totalBlocks * CHUNKS_PER_BLOCK);
  }
  return container;
}
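A hedged usage sketch: the helper seeds a container for integrity checks with a mix of readable and to-be-deleted blocks. The container id and block counts below are arbitrary example values:

  // Hypothetical caller: 3 normal blocks plus 2 blocks marked for deletion.
  KeyValueContainer container = createContainerWithBlocks(101L, 3, 2);
  // All 5 blocks have chunk files on disk, but only the 3 normal ones are
  // keyed by their plain localID; the other 2 carry DELETING_KEY_PREFIX.
  assertNotNull(container.getContainerData().getChunksPath());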
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class TestContainerPersistence method testPutBlockWithInvalidBCSId.
/**
* Tests a put block and read block with invalid bcsId.
*
* @throws IOException
* @throws NoSuchAlgorithmException
*/
@Test
public void testPutBlockWithInvalidBCSId() throws IOException, NoSuchAlgorithmException {
  long testContainerID = getTestContainerID();
  Container container = addContainer(containerSet, testContainerID);
  // write the 1st block with bcsId 3
  BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID);
  ChunkInfo info = writeChunkHelper(blockID1);
  BlockData blockData = new BlockData(blockID1);
  List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
  chunkList.add(info.getProtoBufMessage());
  blockData.setChunks(chunkList);
  blockData.setBlockCommitSequenceId(3);
  blockManager.putBlock(container, blockData);
  chunkList.clear();
  // write a 2nd block with bcsId 4
  BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID);
  info = writeChunkHelper(blockID2);
  blockData = new BlockData(blockID2);
  chunkList.add(info.getProtoBufMessage());
  blockData.setChunks(chunkList);
  blockData.setBlockCommitSequenceId(4);
  blockManager.putBlock(container, blockData);
  BlockData readBlockData;
  try {
    blockID1.setBlockCommitSequenceId(5);
    // read with bcsId higher than the container's bcsId
    blockManager.getBlock(container, blockID1);
    Assert.fail("Expected exception not thrown");
  } catch (StorageContainerException sce) {
    Assert.assertEquals(UNKNOWN_BCSID, sce.getResult());
  }
  try {
    blockID1.setBlockCommitSequenceId(4);
    // read with bcsId lower than the container's bcsId but higher than the
    // block's committed bcsId
    blockManager.getBlock(container, blockID1);
    Assert.fail("Expected exception not thrown");
  } catch (StorageContainerException sce) {
    Assert.assertEquals(BCSID_MISMATCH, sce.getResult());
  }
  readBlockData = blockManager.getBlock(container, blockData.getBlockID());
  ChunkInfo readChunk = ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
  Assert.assertEquals(info.getChecksumData(), readChunk.getChecksumData());
}
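To summarize the two failure modes exercised above: a read whose bcsId exceeds the container's highest committed sequence id fails with UNKNOWN_BCSID, while a bcsId the container has seen but that does not match the block's own committed id fails with BCSID_MISMATCH. A hedged sketch of the corresponding happy path, continuing the variables from the test above:

  // Reading with the bcsId that was actually committed for the 2nd block
  // succeeds, mirroring the final getBlock call in the test.
  blockID2.setBlockCommitSequenceId(4);
  BlockData okBlockData = blockManager.getBlock(container, blockID2);
  Assert.assertNotNull(okBlockData);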
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class TestFilePerChunkStrategy method deletesChunkFileWithLengthIncludingOffset.
/**
* Tests that "new datanode" can delete chunks written to "old
* datanode" by "new client" (ie. where chunk file accidentally created with
* {@code size = chunk offset + chunk length}, instead of only chunk length).
*/
@Test
public void deletesChunkFileWithLengthIncludingOffset() throws Exception {
  // GIVEN
  ChunkManager chunkManager = createTestSubject();
  KeyValueContainer container = getKeyValueContainer();
  BlockID blockID = getBlockID();
  ChunkInfo chunkInfo = getChunkInfo();
  long offset = 1024;
  ChunkInfo oldDatanodeChunkInfo = new ChunkInfo(chunkInfo.getChunkName(), offset, chunkInfo.getLen());
  File file = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile(container.getContainerData(), blockID, chunkInfo);
  ChunkUtils.writeData(file, ChunkBuffer.wrap(getData()), offset, chunkInfo.getLen(), null, true);
  checkChunkFileCount(1);
  assertTrue(file.exists());
  assertEquals(offset + chunkInfo.getLen(), file.length());

  // WHEN
  chunkManager.deleteChunk(container, blockID, oldDatanodeChunkInfo);

  // THEN
  checkChunkFileCount(0);
  assertFalse(file.exists());
}
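The size check is the crux: under the FILE_PER_CHUNK layout the "old datanode" file ends up sized at offset plus length rather than length alone, and the delete path must still match it. For contrast, a hedged sketch of the normal case, where the file length equals just the chunk length (same fixtures and helper names as the test above):

  // Normal case: file created with size == chunk length (no offset slack).
  ChunkInfo normalChunk = getChunkInfo();
  File normalFile = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile(
      container.getContainerData(), blockID, normalChunk);
  ChunkUtils.writeData(normalFile, ChunkBuffer.wrap(getData()), 0,
      normalChunk.getLen(), null, true);
  assertEquals(normalChunk.getLen(), normalFile.length());
  chunkManager.deleteChunk(container, blockID, normalChunk);
  assertFalse(normalFile.exists());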
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class AbstractTestChunkManager method setUp.
@Before
public final void setUp() throws Exception {
  OzoneConfiguration config = new OzoneConfiguration();
  getStrategy().updateConfig(config);
  UUID datanodeId = UUID.randomUUID();
  hddsVolume = new HddsVolume.Builder(folder.getRoot().getAbsolutePath())
      .conf(config).datanodeUuid(datanodeId.toString()).build();
  VolumeSet volumeSet = mock(MutableVolumeSet.class);
  RoundRobinVolumeChoosingPolicy volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
  Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())).thenReturn(hddsVolume);
  keyValueContainerData = new KeyValueContainerData(1L,
      ContainerLayoutVersion.getConfiguredVersion(config),
      (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString());
  keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
  keyValueContainer.create(volumeSet, volumeChoosingPolicy, UUID.randomUUID().toString());
  header = "my header".getBytes(UTF_8);
  byte[] bytes = "testing write chunks".getBytes(UTF_8);
  data = ByteBuffer.allocate(header.length + bytes.length).put(header).put(bytes);
  rewindBufferToDataStart();
  // Create the block id and chunk info used by the tests.
  blockID = new BlockID(1L, 1L);
  chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID.getLocalID(), 0), 0, bytes.length);
}
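The buffer layout matters for the chunk tests: data holds a header followed by the payload, and rewindBufferToDataStart() (defined elsewhere in this class) positions the buffer at the payload so that chunkInfo.getLen() covers only the payload bytes. A minimal sketch of that layout, assuming the rewind is a simple position reset:

  // Sketch of the buffer built in setUp(): header bytes, then payload bytes.
  byte[] header = "my header".getBytes(UTF_8);
  byte[] payload = "testing write chunks".getBytes(UTF_8);
  ByteBuffer data = ByteBuffer.allocate(header.length + payload.length)
      .put(header).put(payload);
  // Assumption: rewindBufferToDataStart() amounts to skipping the header.
  data.position(header.length);
  // From here, data.remaining() == payload.length == chunkInfo.getLen().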