Usage of org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy in the Apache Ozone project.
From the class TestSchemaTwoBackwardsCompatibility, method setup.
@Before
public void setup() throws Exception {
  // Fresh working directory and random identities for each test run.
  testRoot = tempFolder.newFolder();
  clusterID = UUID.randomUUID().toString();
  datanodeUuid = UUID.randomUUID().toString();

  conf = new OzoneConfiguration();
  // Schema V3 must be off so the containers under test use the V2 layout.
  ContainerTestUtils.disableSchemaV3(conf);
  // Point both the datanode data dir and the metadata dir at the temp root.
  conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath());

  // Real (non-mock) storage plumbing: volumes, block manager, chunk strategy.
  volumeSet = new MutableVolumeSet(datanodeUuid, clusterID, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
  blockManager = new BlockManagerImpl(conf);
  chunkManager = new FilePerBlockStrategy(true, blockManager, volumeSet);
  containerSet = new ContainerSet();
  // No-op ICR sender: the lambda swallows incident container reports.
  keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, ContainerMetrics.create(conf), c -> {
  });

  // Mocked OzoneContainer that routes every dispatch to the real handler.
  ContainerDispatcher mockDispatcher = mock(ContainerDispatcher.class);
  when(mockDispatcher.getHandler(any())).thenReturn(keyValueHandler);
  ozoneContainer = mock(OzoneContainer.class);
  when(ozoneContainer.getContainerSet()).thenReturn(containerSet);
  when(ozoneContainer.getWriteChannel()).thenReturn(null);
  when(ozoneContainer.getDispatcher()).thenReturn(mockDispatcher);
}
Usage of org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy in the Apache Ozone project.
From the class TestBlockDeletingService, method createToDeleteBlocks.
/**
 * Creates containers populated with blocks that are marked for deletion,
 * writing directly to container.db and producing fake chunk files on disk.
 *
 * @param containerSet            the set that receives the new containers
 * @param numOfContainers         how many containers to create
 * @param numOfBlocksPerContainer pending-delete blocks placed in each container
 * @param numOfChunksPerBlock     fake chunks written per block
 * @throws IOException if container creation or chunk writing fails
 */
private void createToDeleteBlocks(ContainerSet containerSet, int numOfContainers, int numOfBlocksPerContainer, int numOfChunksPerBlock) throws IOException {
  // Pick the chunk-persistence strategy matching the layout under test.
  final ChunkManager chunkManager = (layout == FILE_PER_BLOCK)
      ? new FilePerBlockStrategy(true, null, null)
      : new FilePerChunkStrategy(true, null, null);

  // 1 MiB of random alphanumeric payload shared by every fake chunk.
  byte[] payload = randomAlphanumeric(1048576).getBytes(UTF_8);
  ChunkBuffer buffer = ChunkBuffer.wrap(ByteBuffer.wrap(payload));

  // NOTE(review): txnID is never incremented here — every container reuses
  // transaction id 0; confirm that is intentional for these tests.
  int txnID = 0;
  for (int i = 0; i < numOfContainers; i++) {
    long containerID = ContainerTestHelper.getTestContainerID();
    KeyValueContainerData data = new KeyValueContainerData(containerID, layout, ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(), datanodeUuid);
    data.closeContainer();
    data.setSchemaVersion(schemaVersion);
    KeyValueContainer container = new KeyValueContainer(data, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId);
    containerSet.addContainer(container);

    // Re-fetch the data object the set actually holds and stamp the schema
    // version on it as well — presumably the same instance, but kept to
    // mirror the original flow.
    data = (KeyValueContainerData) containerSet.getContainer(containerID).getContainerData();
    data.setSchemaVersion(schemaVersion);

    // Populate pending-delete blocks via the path matching the schema version.
    if (schemaVersion.equals(SCHEMA_V1)) {
      createPendingDeleteBlocksSchema1(numOfBlocksPerContainer, data, containerID, numOfChunksPerBlock, buffer, chunkManager, container);
    } else if (schemaVersion.equals(SCHEMA_V2) || schemaVersion.equals(SCHEMA_V3)) {
      createPendingDeleteBlocksViaTxn(numOfBlocksPerContainer, txnID, containerID, numOfChunksPerBlock, buffer, chunkManager, container, data);
    } else {
      throw new UnsupportedOperationException("Only schema version 1,2,3 are supported.");
    }
  }
}
Aggregations