Use of org.apache.hadoop.ozone.container.common.volume.VolumeSet in project ozone by apache.
The class ChunkManagerDiskWrite, method call().
@Override
public Void call() throws Exception {
  try {
    init();
    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
    VolumeSet volumeSet = new MutableVolumeSet("dnid", "clusterid",
        ozoneConfiguration, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    Random random = new Random();
    VolumeChoosingPolicy volumeChoicePolicy = new RoundRobinVolumeChoosingPolicy();
    final int threadCount = getThreadNo();
    // create a dedicated (NEW) container for each thread
    for (int i = 1; i <= threadCount; i++) {
      // use a non-negative container id
      long containerId = random.nextLong() & 0x0F_FF_FF_FF_FF_FF_FF_FFL;
      KeyValueContainerData keyValueContainerData = new KeyValueContainerData(
          containerId, containerLayout, 1_000_000L, getPrefix(), "nodeid");
      KeyValueContainer keyValueContainer =
          new KeyValueContainer(keyValueContainerData, ozoneConfiguration);
      keyValueContainer.create(volumeSet, volumeChoicePolicy, "scmid");
      containersPerThread.put(i, keyValueContainer);
    }
    blockSize = chunkSize * chunksPerBlock;
    data = randomAscii(chunkSize).getBytes(UTF_8);
    chunkManager = ChunkManagerFactory.createChunkManager(ozoneConfiguration, null, null);
    timer = getMetrics().timer("chunk-write");
    LOG.info("Running chunk write test: threads={} chunkSize={} "
            + "chunksPerBlock={} layout={}",
        threadCount, chunkSize, chunksPerBlock, containerLayout);
    runTests(this::writeChunk);
  } finally {
    if (chunkManager != null) {
      chunkManager.shutdown();
    }
  }
  return null;
}
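The benchmark builds its VolumeSet through the MutableVolumeSet constructor that takes explicit datanode and cluster ids. A minimal hedged sketch of the same construction plus round-robin volume selection on its own (the directory, the ids, and the 1 MB size are placeholders; chooseVolume throws IOException, so this body belongs in a method that declares it):
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, "/tmp/hdds-bench"); // placeholder dir
MutableVolumeSet volumes = new MutableVolumeSet("dn-1", "cluster-1", conf,
    null, StorageVolume.VolumeType.DATA_VOLUME, null);
try {
  VolumeChoosingPolicy policy = new RoundRobinVolumeChoosingPolicy();
  // Ask the policy for a data volume with room for a 1 MB container,
  // mirroring the 1_000_000L max container size used above.
  HddsVolume target = policy.chooseVolume(
      StorageVolumeUtil.getHddsVolumesList(volumes.getVolumesList()), 1_000_000L);
} finally {
  volumes.shutdown();
}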
Use of org.apache.hadoop.ozone.container.common.volume.VolumeSet in project ozone by apache.
The class TestHddsDispatcher, method createDispatcher().
/**
 * Creates an HddsDispatcher instance with the given parameters.
 * @param dd datanode details.
 * @param scmId UUID of the SCM.
 * @param conf configuration to be used.
 * @return the HddsDispatcher instance.
 * @throws IOException if the volume set cannot be created.
 */
private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
    OzoneConfiguration conf) throws IOException {
  ContainerSet containerSet = new ContainerSet();
  VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null,
      StorageVolume.VolumeType.DATA_VOLUME, null);
  DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
  StateContext context = Mockito.mock(StateContext.class);
  Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
  Mockito.when(context.getParent()).thenReturn(stateMachine);
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  Map<ContainerType, Handler> handlers = Maps.newHashMap();
  for (ContainerType containerType : ContainerType.values()) {
    handlers.put(containerType,
        Handler.getHandlerForContainerType(containerType, conf,
            context.getParent().getDatanodeDetails().getUuidString(),
            containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
  }
  HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet,
      volumeSet, handlers, context, metrics, null);
  hddsDispatcher.setClusterId(scmId.toString());
  return hddsDispatcher;
}
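Once built, the dispatcher routes raw ContainerCommandRequestProto messages to the per-type handlers. A hedged usage sketch, reusing the MockPipeline and ContainerTestHelper helpers that also appear in TestContainerMetrics below (the null second argument is the optional DispatcherContext, which these tests omit when calling dispatch outside of Ratis):
Pipeline pipeline = MockPipeline.createSingleNodePipeline();
HddsDispatcher dispatcher = createDispatcher(dd, scmId, conf);
ContainerCommandRequestProto request = ContainerTestHelper
    .getCreateContainerRequest(ContainerTestHelper.getTestContainerID(), pipeline);
// Dispatch directly, with no Ratis DispatcherContext.
ContainerCommandResponseProto response = dispatcher.dispatch(request, null);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());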
Use of org.apache.hadoop.ozone.container.common.volume.VolumeSet in project ozone by apache.
The class AbstractTestChunkManager, method setUp().
@Before
public final void setUp() throws Exception {
  OzoneConfiguration config = new OzoneConfiguration();
  getStrategy().updateConfig(config);
  UUID datanodeId = UUID.randomUUID();
  hddsVolume = new HddsVolume.Builder(folder.getRoot().getAbsolutePath())
      .conf(config)
      .datanodeUuid(datanodeId.toString())
      .build();
  VolumeSet volumeSet = mock(MutableVolumeSet.class);
  RoundRobinVolumeChoosingPolicy volumeChoosingPolicy =
      mock(RoundRobinVolumeChoosingPolicy.class);
  Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
      .thenReturn(hddsVolume);
  keyValueContainerData = new KeyValueContainerData(1L,
      ContainerLayoutVersion.getConfiguredVersion(config),
      (long) StorageUnit.GB.toBytes(5),
      UUID.randomUUID().toString(), datanodeId.toString());
  keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
  keyValueContainer.create(volumeSet, volumeChoosingPolicy,
      UUID.randomUUID().toString());
  header = "my header".getBytes(UTF_8);
  byte[] bytes = "testing write chunks".getBytes(UTF_8);
  data = ByteBuffer.allocate(header.length + bytes.length)
      .put(header).put(bytes);
  rewindBufferToDataStart();
  // Create the block id and chunk info used by the tests
  blockID = new BlockID(1L, 1L);
  chunkInfo = new ChunkInfo(
      String.format("%d.data.%d", blockID.getLocalID(), 0), 0, bytes.length);
}
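rewindBufferToDataStart() is a helper defined elsewhere in this abstract class; a plausible one-line implementation, assuming it only repositions the shared buffer past the header so subsequent reads start at the chunk payload, would be:
// Assumed helper: position the buffer at the first payload byte.
protected void rewindBufferToDataStart() {
  data.position(header.length);
}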
Use of org.apache.hadoop.ozone.container.common.volume.VolumeSet in project ozone by apache.
The class TestKeyValueHandler, method testDeleteContainer().
@Test
public void testDeleteContainer() throws IOException {
  final String testDir = GenericTestUtils.getTempPath(
      TestKeyValueHandler.class.getSimpleName() + "-" + UUID.randomUUID().toString());
  try {
    final long containerID = 1L;
    final ConfigurationSource conf = new OzoneConfiguration();
    final ContainerSet containerSet = new ContainerSet();
    final VolumeSet volumeSet = Mockito.mock(VolumeSet.class);
    Mockito.when(volumeSet.getVolumesList()).thenReturn(
        Collections.singletonList(
            new HddsVolume.Builder(testDir).conf(conf).build()));
    final int[] interval = { 2 };
    final ContainerMetrics metrics = new ContainerMetrics(interval);
    final AtomicInteger icrReceived = new AtomicInteger(0);
    final KeyValueHandler kvHandler = new KeyValueHandler(conf,
        UUID.randomUUID().toString(), containerSet, volumeSet, metrics,
        c -> icrReceived.incrementAndGet());
    kvHandler.setClusterID(UUID.randomUUID().toString());
    final ContainerCommandRequestProto createContainer =
        ContainerCommandRequestProto.newBuilder()
            .setCmdType(ContainerProtos.Type.CreateContainer)
            .setDatanodeUuid(UUID.randomUUID().toString())
            .setCreateContainer(
                ContainerProtos.CreateContainerRequestProto.newBuilder()
                    .setContainerType(ContainerType.KeyValueContainer)
                    .build())
            .setContainerID(containerID)
            .setPipelineID(UUID.randomUUID().toString())
            .build();
    kvHandler.handleCreateContainer(createContainer, null);
    Assert.assertEquals(1, icrReceived.get());
    Assert.assertNotNull(containerSet.getContainer(containerID));
    kvHandler.deleteContainer(containerSet.getContainer(containerID), true);
    Assert.assertEquals(2, icrReceived.get());
    Assert.assertNull(containerSet.getContainer(containerID));
  } finally {
    FileUtils.deleteDirectory(new File(testDir));
  }
}
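The final constructor argument to KeyValueHandler is the incremental container report (ICR) sender; the test swaps in a counting stub so it can assert that the container create and delete each pushed exactly one report. A hedged sketch of the same stub written out as a named variable (assuming the IncrementalReportSender<Container> functional interface is what this constructor overload accepts in this version of the code):
final AtomicInteger icrReceived = new AtomicInteger(0);
// Counting stub: every "sent" report just bumps the counter.
final IncrementalReportSender<Container> icrSender =
    container -> icrReceived.incrementAndGet();
final KeyValueHandler kvHandler = new KeyValueHandler(conf,
    UUID.randomUUID().toString(), containerSet, volumeSet, metrics, icrSender);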
Use of org.apache.hadoop.ozone.container.common.volume.VolumeSet in project ozone by apache.
The class TestContainerMetrics, method testContainerMetrics().
@Test
public void testContainerMetrics() throws Exception {
  XceiverServerGrpc server = null;
  XceiverClientGrpc client = null;
  long containerID = ContainerTestHelper.getTestContainerID();
  String path = GenericTestUtils.getRandomizedTempPath();
  try {
    final int interval = 1;
    Pipeline pipeline = MockPipeline.createSingleNodePipeline();
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
        pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
    conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, interval);
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);
    VolumeSet volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(),
        conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    ContainerSet containerSet = new ContainerSet();
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerProtos.ContainerType containerType
        : ContainerProtos.ContainerType.values()) {
      handlers.put(containerType,
          Handler.getHandlerForContainerType(containerType, conf,
              context.getParent().getDatanodeDetails().getUuidString(),
              containerSet, volumeSet, metrics, c -> { }));
    }
    HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet,
        volumeSet, handlers, context, metrics, null);
    dispatcher.setClusterId(UUID.randomUUID().toString());
    server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null);
    client = new XceiverClientGrpc(pipeline, conf);
    server.start();
    client.connect();
    // Create container
    ContainerCommandRequestProto request =
        ContainerTestHelper.getCreateContainerRequest(containerID, pipeline);
    ContainerCommandResponseProto response = client.sendCommand(request);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    // Write chunk
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
        ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024, null);
    response = client.sendCommand(writeChunkRequest);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    // Read chunk
    ContainerProtos.ContainerCommandRequestProto readChunkRequest =
        ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest.getWriteChunk());
    response = client.sendCommand(readChunkRequest);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    MetricsRecordBuilder containerMetrics = getMetrics("StorageContainerMetrics");
    assertCounter("NumOps", 3L, containerMetrics);
    assertCounter("numCreateContainer", 1L, containerMetrics);
    assertCounter("numWriteChunk", 1L, containerMetrics);
    assertCounter("numReadChunk", 1L, containerMetrics);
    assertCounter("bytesWriteChunk", 1024L, containerMetrics);
    assertCounter("bytesReadChunk", 1024L, containerMetrics);
    String sec = interval + "s";
    Thread.sleep((interval + 1) * 1000);
    assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics);
    // Check VolumeIOStats metrics
    List<HddsVolume> volumes =
        StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
    HddsVolume hddsVolume = volumes.get(0);
    MetricsRecordBuilder volumeIOMetrics =
        getMetrics(hddsVolume.getVolumeIOStats().getMetricsSourceName());
    assertCounter("ReadBytes", 1024L, volumeIOMetrics);
    assertCounter("ReadOpCount", 1L, volumeIOMetrics);
    assertCounter("WriteBytes", 1024L, volumeIOMetrics);
    assertCounter("WriteOpCount", 1L, volumeIOMetrics);
  } finally {
    if (client != null) {
      client.close();
    }
    if (server != null) {
      server.stop();
    }
    // clean up volume dir
    File file = new File(path);
    if (file.exists()) {
      FileUtil.fullyDelete(file);
    }
  }
}
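The assertCounter and assertQuantileGauges helpers come from Hadoop's MetricsAsserts test utility. For readers who want the raw values instead of assertions, a minimal hedged sketch using the same utility (getLongCounter is part of MetricsAsserts; the metric names mirror the ones asserted above):
// Read counters directly from the registered metrics source.
MetricsRecordBuilder rb = getMetrics("StorageContainerMetrics");
long numOps = getLongCounter("NumOps", rb); // create + write chunk + read chunk = 3
long bytesWritten = getLongCounter("bytesWriteChunk", rb); // 1024 for the single chunk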