Example usage of org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy in the Apache Ozone project:
the call() method of the ChunkManagerDiskWrite class.
/**
 * Entry point of the chunk-write benchmark: sets up the volume set, a
 * round-robin volume choosing policy and one dedicated container per worker
 * thread, then drives {@code writeChunk} via {@code runTests}. The chunk
 * manager is always shut down in the finally block, even on failure.
 *
 * @return always {@code null} (required by {@link java.util.concurrent.Callable})
 * @throws Exception if initialization or container creation fails
 */
@Override
public Void call() throws Exception {
  try {
    init();
    OzoneConfiguration config = createOzoneConfiguration();
    VolumeSet volumeSet = new MutableVolumeSet("dnid", "clusterid", config,
        null, StorageVolume.VolumeType.DATA_VOLUME, null);
    VolumeChoosingPolicy policy = new RoundRobinVolumeChoosingPolicy();
    Random rng = new Random();
    int threads = getThreadNo();
    // Pre-create one fresh container per worker thread so the write phase
    // never contends on container creation.
    for (int thread = 1; thread <= threads; thread++) {
      // Mask off the top bits so the randomly drawn container id is
      // guaranteed non-negative.
      long id = rng.nextLong() & 0x0F_FF_FF_FF_FF_FF_FF_FFL;
      KeyValueContainerData containerData = new KeyValueContainerData(id,
          containerLayout, 1_000_000L, getPrefix(), "nodeid");
      KeyValueContainer container =
          new KeyValueContainer(containerData, config);
      container.create(volumeSet, policy, "scmid");
      containersPerThread.put(thread, container);
    }
    blockSize = chunkSize * chunksPerBlock;
    data = randomAscii(chunkSize).getBytes(UTF_8);
    chunkManager = ChunkManagerFactory.createChunkManager(config, null, null);
    timer = getMetrics().timer("chunk-write");
    LOG.info(
        "Running chunk write test: threads={} chunkSize={} chunksPerBlock={} layout={}",
        threads, chunkSize, chunksPerBlock, containerLayout);
    runTests(this::writeChunk);
  } finally {
    // Release chunk manager resources even if setup or the test run failed.
    if (chunkManager != null) {
      chunkManager.shutdown();
    }
  }
  return null;
}
Aggregations