Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.
From class TestOzoneContainer, method addBlocks.
/**
 * Adds {@code blocks} blocks with {@code chunksPerBlock} chunks each to the
 * container and returns the container's remaining free bytes.
 */
private long addBlocks(KeyValueContainer container, int blocks,
    int chunksPerBlock) throws Exception {
  String strBlock = "block";
  String strChunk = "-chunkFile";
  int datalen = 65536;
  long usedBytes = 0;

  long freeBytes = container.getContainerData().getMaxSize();
  long containerId = container.getContainerData().getContainerID();
  try (ReferenceCountedDB db =
      BlockUtils.getDB(container.getContainerData(), conf)) {
    Table<String, Long> metadataTable = db.getStore().getMetadataTable();
    Table<String, BlockData> blockDataTable = db.getStore().getBlockDataTable();

    for (int bi = 0; bi < blocks; bi++) {
      // Creating BlockData
      BlockID blockID = new BlockID(containerId, bi);
      BlockData blockData = new BlockData(blockID);
      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
      for (int ci = 0; ci < chunksPerBlock; ci++) {
        String chunkName = strBlock + bi + strChunk + ci;
        long offset = ci * (long) datalen;
        ChunkInfo info = new ChunkInfo(chunkName, offset, datalen);
        usedBytes += datalen;
        chunkList.add(info.getProtoBufMessage());
      }
      blockData.setChunks(chunkList);
      blockDataTable.put(Long.toString(blockID.getLocalID()), blockData);
    }

    // Set block count and used bytes.
    metadataTable.put(OzoneConsts.BLOCK_COUNT, (long) blocks);
    metadataTable.put(OzoneConsts.CONTAINER_BYTES_USED, usedBytes);
  }
  // Remaining available capacity of the container.
  return freeBytes - usedBytes;
}
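A minimal usage sketch (not part of TestOzoneContainer): newContainer() stands in for whatever helper the test uses to create and open a KeyValueContainer, and is hypothetical.

// Hypothetical usage: 2 blocks x 4 chunks x 65536 bytes each.
@Test
public void testAddBlocksAccounting() throws Exception {
  KeyValueContainer container = newContainer();  // hypothetical helper
  long maxSize = container.getContainerData().getMaxSize();
  long remaining = addBlocks(container, 2, 4);
  // addBlocks returns the max size minus the bytes written.
  Assert.assertEquals(maxSize - 2L * 4 * 65536, remaining);
}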
Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.
From class TestKeyValueContainer, method populate.
/**
* Add some keys to the container.
*/
private void populate(long numberOfKeysToWrite) throws IOException {
  try (ReferenceCountedDB metadataStore =
      BlockUtils.getDB(keyValueContainer.getContainerData(), CONF)) {
    Table<String, BlockData> blockDataTable =
        metadataStore.getStore().getBlockDataTable();
    for (long i = 0; i < numberOfKeysToWrite; i++) {
      blockDataTable.put("test" + i, new BlockData(new BlockID(i, i)));
    }
    // In production code the block count is incremented in the DB whenever a
    // block is put; this test writes table entries directly, so the count has
    // to be stored manually.
    metadataStore.getStore().getMetadataTable()
        .put(OzoneConsts.BLOCK_COUNT, numberOfKeysToWrite);
  }

  Map<String, String> metadata = new HashMap<>();
  metadata.put("key1", "value1");
  keyValueContainer.update(metadata, true);
}
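A short usage sketch, assuming the test class's keyValueContainer and CONF fields as used above: populate the container, then read back the persisted block count.

@Test
public void testPopulateStoresBlockCount() throws Exception {
  populate(10);
  try (ReferenceCountedDB db =
      BlockUtils.getDB(keyValueContainer.getContainerData(), CONF)) {
    // The metadata table should reflect the number of keys written.
    Assert.assertEquals(Long.valueOf(10),
        db.getStore().getMetadataTable().get(OzoneConsts.BLOCK_COUNT));
  }
}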
Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.
From class TestKeyValueContainerIntegrityChecks, method createContainerWithBlocks.
/**
 * Creates a container with normal and deleted blocks. Normal blocks are
 * inserted first, followed by deleted blocks.
 */
protected KeyValueContainer createContainerWithBlocks(long containerId,
    int normalBlocks, int deletedBlocks) throws Exception {
  String strBlock = "block";
  String strChunk = "-chunkFile";
  long totalBlocks = normalBlocks + deletedBlocks;
  int bytesPerChecksum = 2 * UNIT_LEN;
  Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256,
      bytesPerChecksum);
  byte[] chunkData = RandomStringUtils.randomAscii(CHUNK_LEN).getBytes(UTF_8);
  ChecksumData checksumData = checksum.computeChecksum(chunkData);
  DispatcherContext writeStage = new DispatcherContext.Builder()
      .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
  DispatcherContext commitStage = new DispatcherContext.Builder()
      .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();

  KeyValueContainerData containerData = new KeyValueContainerData(containerId,
      containerLayoutTestInfo.getLayout(),
      (long) CHUNKS_PER_BLOCK * CHUNK_LEN * totalBlocks,
      UUID.randomUUID().toString(), UUID.randomUUID().toString());
  KeyValueContainer container = new KeyValueContainer(containerData, conf);
  container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
      UUID.randomUUID().toString());

  try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, conf)) {
    assertNotNull(containerData.getChunksPath());
    File chunksPath = new File(containerData.getChunksPath());
    containerLayoutTestInfo.validateFileCount(chunksPath, 0, 0);

    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
    for (int i = 0; i < totalBlocks; i++) {
      BlockID blockID = new BlockID(containerId, i);
      BlockData blockData = new BlockData(blockID);
      chunkList.clear();
      for (long chunkCount = 0; chunkCount < CHUNKS_PER_BLOCK; chunkCount++) {
        String chunkName = strBlock + i + strChunk + chunkCount;
        long offset = chunkCount * CHUNK_LEN;
        ChunkInfo info = new ChunkInfo(chunkName, offset, CHUNK_LEN);
        info.setChecksumData(checksumData);
        chunkList.add(info.getProtoBufMessage());
        chunkManager.writeChunk(container, blockID, info,
            ByteBuffer.wrap(chunkData), writeStage);
        chunkManager.writeChunk(container, blockID, info,
            ByteBuffer.wrap(chunkData), commitStage);
      }
      blockData.setChunks(chunkList);

      // Blocks with index >= normalBlocks are stored under the deleting
      // prefix; the rest keep their plain local-ID key.
      String key = Long.toString(blockID.getLocalID());
      if (i >= normalBlocks) {
        key = OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID();
      }
      metadataStore.getStore().getBlockDataTable().put(key, blockData);
    }
    containerLayoutTestInfo.validateFileCount(chunksPath, totalBlocks,
        totalBlocks * CHUNKS_PER_BLOCK);
  }
  return container;
}
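A hedged usage sketch (the counts are illustrative, not from the test suite): build a container with five normal and two deleted blocks, then confirm that a deleted block is keyed under the deleting prefix.

KeyValueContainer container = createContainerWithBlocks(1L, 5, 2);
try (ReferenceCountedDB db =
    BlockUtils.getDB(container.getContainerData(), conf)) {
  // Indices >= normalBlocks (here 5 and 6) are stored as deleted keys.
  BlockData deleted = db.getStore().getBlockDataTable()
      .get(OzoneConsts.DELETING_KEY_PREFIX + 5);
  assertNotNull(deleted);
}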
Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.
From class TestContainerPersistence, method testPutBlockWithInvalidBCSId.
/**
 * Tests put-block and read-block with an invalid bcsId.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testPutBlockWithInvalidBCSId()
    throws IOException, NoSuchAlgorithmException {
  long testContainerID = getTestContainerID();
  Container container = addContainer(containerSet, testContainerID);

  // Write the first block with bcsId 3.
  BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID);
  ChunkInfo info = writeChunkHelper(blockID1);
  BlockData blockData = new BlockData(blockID1);
  List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
  chunkList.add(info.getProtoBufMessage());
  blockData.setChunks(chunkList);
  blockData.setBlockCommitSequenceId(3);
  blockManager.putBlock(container, blockData);
  chunkList.clear();

  // Write a second block with bcsId 4, which becomes the container's bcsId.
  BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID);
  info = writeChunkHelper(blockID2);
  blockData = new BlockData(blockID2);
  chunkList.add(info.getProtoBufMessage());
  blockData.setChunks(chunkList);
  blockData.setBlockCommitSequenceId(4);
  blockManager.putBlock(container, blockData);

  BlockData readBlockData;
  try {
    blockID1.setBlockCommitSequenceId(5);
    // Read with a bcsId higher than the container's bcsId.
    blockManager.getBlock(container, blockID1);
    Assert.fail("Expected exception not thrown");
  } catch (StorageContainerException sce) {
    Assert.assertEquals(UNKNOWN_BCSID, sce.getResult());
  }

  try {
    blockID1.setBlockCommitSequenceId(4);
    // Read with a bcsId that passes the container check but does not match
    // the block's committed bcsId (3).
    blockManager.getBlock(container, blockID1);
    Assert.fail("Expected exception not thrown");
  } catch (StorageContainerException sce) {
    Assert.assertEquals(BCSID_MISMATCH, sce.getResult());
  }

  readBlockData = blockManager.getBlock(container, blockData.getBlockID());
  ChunkInfo readChunk =
      ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
  Assert.assertEquals(info.getChecksumData(), readChunk.getChecksumData());
}
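The two try/catch blocks follow the same pattern; a small sketch (not in the Ozone source) of a helper that could factor out the expected-failure assertion, assuming the same static imports as the test above:

// Hypothetical helper, not part of TestContainerPersistence: asserts that
// reading a block fails with the expected ContainerProtos.Result.
private void assertGetBlockFails(Container container, BlockID blockID,
    ContainerProtos.Result expectedResult) throws IOException {
  try {
    blockManager.getBlock(container, blockID);
    Assert.fail("Expected StorageContainerException with " + expectedResult);
  } catch (StorageContainerException sce) {
    Assert.assertEquals(expectedResult, sce.getResult());
  }
}

With such a helper, each negative check reduces to a setBlockCommitSequenceId call followed by assertGetBlockFails(container, blockID1, UNKNOWN_BCSID) or assertGetBlockFails(container, blockID1, BCSID_MISMATCH).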
Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.
From class TestContainerReader, method markBlocksForDelete.
private void markBlocksForDelete(KeyValueContainer keyValueContainer,
    boolean setMetaData, List<Long> blockNames, int count) throws Exception {
  try (ReferenceCountedDB metadataStore =
      BlockUtils.getDB(keyValueContainer.getContainerData(), conf)) {
    Table<String, BlockData> blockDataTable =
        metadataStore.getStore().getBlockDataTable();
    for (int i = 0; i < count; i++) {
      // Re-key the block under the deleting prefix.
      String blk = Long.toString(blockNames.get(i));
      BlockData blkInfo = blockDataTable.get(blk);
      blockDataTable.delete(blk);
      blockDataTable.put(OzoneConsts.DELETING_KEY_PREFIX + blk, blkInfo);
    }

    if (setMetaData) {
      // Pending delete blocks are still counted towards the block count
      // and bytes used metadata values, so those do not change.
      Table<String, Long> metadataTable =
          metadataStore.getStore().getMetadataTable();
      metadataTable.put(OzoneConsts.PENDING_DELETE_BLOCK_COUNT, (long) count);
    }
  }
}
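A usage sketch under stated assumptions: blockIds is a hypothetical List<Long> of local IDs already present in keyValueContainer's block-data table.

markBlocksForDelete(keyValueContainer, true, blockIds, 2);
try (ReferenceCountedDB db =
    BlockUtils.getDB(keyValueContainer.getContainerData(), conf)) {
  Table<String, BlockData> blocks = db.getStore().getBlockDataTable();
  // The plain key is gone; the prefixed key now holds the block data.
  Assert.assertNull(blocks.get(Long.toString(blockIds.get(0))));
  Assert.assertNotNull(
      blocks.get(OzoneConsts.DELETING_KEY_PREFIX + blockIds.get(0)));
}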