Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in project ozone by apache.
From the class TestKeyValueContainerData, method testKeyValueData.
@Test
public void testKeyValueData() {
  long containerId = 1L;
  ContainerProtos.ContainerType containerType =
      ContainerProtos.ContainerType.KeyValueContainer;
  String path = "/tmp";
  String containerDBType = "RocksDB";
  ContainerProtos.ContainerDataProto.State state =
      ContainerProtos.ContainerDataProto.State.CLOSED;
  AtomicLong val = new AtomicLong(0);
  UUID pipelineId = UUID.randomUUID();
  UUID datanodeId = UUID.randomUUID();

  // 'layout' and 'MAXSIZE' are fields of the enclosing test class.
  KeyValueContainerData kvData = new KeyValueContainerData(containerId,
      layout, MAXSIZE, pipelineId.toString(), datanodeId.toString());

  // A freshly constructed container is OPEN with all counters at zero.
  assertEquals(containerType, kvData.getContainerType());
  assertEquals(containerId, kvData.getContainerID());
  assertEquals(ContainerProtos.ContainerDataProto.State.OPEN,
      kvData.getState());
  assertEquals(0, kvData.getMetadata().size());
  assertEquals(0, kvData.getNumPendingDeletionBlocks());
  assertEquals(val.get(), kvData.getReadBytes());
  assertEquals(val.get(), kvData.getWriteBytes());
  assertEquals(val.get(), kvData.getReadCount());
  assertEquals(val.get(), kvData.getWriteCount());
  assertEquals(val.get(), kvData.getBlockCount());
  assertEquals(val.get(), kvData.getNumPendingDeletionBlocks());
  assertEquals(MAXSIZE, kvData.getMaxSize());

  // Mutate every settable field, then verify the getters reflect it.
  kvData.setState(state);
  kvData.setContainerDBType(containerDBType);
  kvData.setChunksPath(path);
  kvData.setMetadataPath(path);
  kvData.incrReadBytes(10);
  kvData.incrWriteBytes(10);
  kvData.incrReadCount();
  kvData.incrWriteCount();
  kvData.incrBlockCount();
  kvData.incrPendingDeletionBlocks(1);
  kvData.setSchemaVersion(
      VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion());

  assertEquals(state, kvData.getState());
  assertEquals(containerDBType, kvData.getContainerDBType());
  assertEquals(path, kvData.getChunksPath());
  assertEquals(path, kvData.getMetadataPath());
  assertEquals(10, kvData.getReadBytes());
  assertEquals(10, kvData.getWriteBytes());
  assertEquals(1, kvData.getReadCount());
  assertEquals(1, kvData.getWriteCount());
  assertEquals(1, kvData.getBlockCount());
  assertEquals(1, kvData.getNumPendingDeletionBlocks());
  assertEquals(pipelineId.toString(), kvData.getOriginPipelineId());
  assertEquals(datanodeId.toString(), kvData.getOriginNodeId());
  assertEquals(VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion(),
      kvData.getSchemaVersion());
}
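For orientation, here is a minimal standalone construction of the same object. This is a sketch, not code from the test class: the layout version and size are illustrative stand-ins for the test class's layout and MAXSIZE fields, and ContainerLayoutVersion is the enum used by the constructor in recent Ozone versions (named ChunkLayOutVersion in older releases).

import java.util.UUID;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

public class KeyValueContainerDataSketch {
  public static void main(String[] args) {
    KeyValueContainerData data = new KeyValueContainerData(
        1L,                                    // container ID
        ContainerLayoutVersion.FILE_PER_BLOCK, // stand-in for 'layout'
        (long) StorageUnit.GB.toBytes(5),      // stand-in for 'MAXSIZE'
        UUID.randomUUID().toString(),          // origin pipeline ID
        UUID.randomUUID().toString());         // origin datanode ID
    // New instances start OPEN; the state only changes via setState(...).
    System.out.println(data.getState());
  }
}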
Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in project ozone by apache.
From the class TestOzoneContainer, method testContainerCreateDiskFull.
@Test
public void testContainerCreateDiskFull() throws Exception {
  long containerSize = (long) StorageUnit.MB.toBytes(100);
  List<HddsVolume> volumes =
      StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
  // Format the volumes.
  for (HddsVolume volume : volumes) {
    volume.format(UUID.randomUUID().toString());
    // Eat up all available space except the size of one container.
    volume.incCommittedBytes(volume.getAvailable() - containerSize);
    // Eat up 10 more bytes; available space is now less than one container.
    volume.incCommittedBytes(10);
  }
  keyValueContainerData = new KeyValueContainerData(99, layout,
      containerSize, UUID.randomUUID().toString(),
      datanodeDetails.getUuidString());
  keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);

  // We expect an out-of-space exception.
  StorageContainerException e = LambdaTestUtils.intercept(
      StorageContainerException.class,
      () -> keyValueContainer.create(volumeSet, volumeChoosingPolicy,
          clusterId));
  if (!DISK_OUT_OF_SPACE.equals(e.getResult())) {
    LOG.info("Unexpected error during container creation", e);
  }
  assertEquals(DISK_OUT_OF_SPACE, e.getResult());
}
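The arithmetic the test sets up can be checked in isolation. This is a plain-Java sketch of the same reasoning, not code from the test class; the 10 GB figure is an assumed free-space value:

public class DiskFullArithmeticSketch {
  public static void main(String[] args) {
    long available = 10L * 1024 * 1024 * 1024;     // assume 10 GB free
    long containerSize = 100L * 1024 * 1024;       // 100 MB, as in the test
    // Commit everything except one container's worth, then 10 bytes more.
    long committed = (available - containerSize) + 10;
    long remaining = available - committed;        // containerSize - 10
    // Remaining space is now too small to hold one container,
    // which is why create() must fail with DISK_OUT_OF_SPACE.
    System.out.println(remaining < containerSize); // true
  }
}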
Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in project ozone by apache.
From the class TestOzoneContainer, method testBuildContainerMap.
@Test
public void testBuildContainerMap() throws Exception {
  // Format the volumes.
  List<HddsVolume> volumes =
      StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
  for (HddsVolume volume : volumes) {
    volume.format(clusterId);
    commitSpaceMap.put(getVolumeKey(volume), Long.valueOf(0));
  }
  // Add containers to disk.
  for (int i = 0; i < numTestContainers; i++) {
    long freeBytes = 0;
    long volCommitBytes;
    long maxCap = (long) StorageUnit.GB.toBytes(1);
    HddsVolume myVolume;
    keyValueContainerData = new KeyValueContainerData(i, layout, maxCap,
        UUID.randomUUID().toString(), datanodeDetails.getUuidString());
    keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
    keyValueContainer.create(volumeSet, volumeChoosingPolicy, clusterId);
    myVolume = keyValueContainer.getContainerData().getVolume();
    freeBytes = addBlocks(keyValueContainer, 2, 3);
    // Update our expectation of volume committed space in the map.
    volCommitBytes = commitSpaceMap.get(getVolumeKey(myVolume)).longValue();
    Preconditions.checkState(freeBytes >= 0);
    commitSpaceMap.put(getVolumeKey(myVolume),
        Long.valueOf(volCommitBytes + freeBytes));
    BlockUtils.removeDB(keyValueContainerData, conf);
  }
  DatanodeStateMachine stateMachine =
      Mockito.mock(DatanodeStateMachine.class);
  StateContext context = Mockito.mock(StateContext.class);
  Mockito.when(stateMachine.getDatanodeDetails())
      .thenReturn(datanodeDetails);
  Mockito.when(context.getParent()).thenReturn(stateMachine);
  // When OzoneContainer is started, the containers from disk should be
  // loaded into the containerSet. Committed space for each volume is
  // also expected to be initialized.
  OzoneContainer ozoneContainer =
      new OzoneContainer(datanodeDetails, conf, context, null);
  ContainerSet containerset = ozoneContainer.getContainerSet();
  assertEquals(numTestContainers, containerset.containerCount());
  verifyCommittedSpace(ozoneContainer);
}
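The helper verifyCommittedSpace is not shown in this excerpt. A sketch of the bookkeeping it presumably performs: each volume's committed-byte counter should match the per-volume totals accumulated in commitSpaceMap. HddsVolume#getCommittedBytes() is assumed here to be the accessor for the counter incremented by incCommittedBytes(...).

// Illustrative check only; not the project's verifyCommittedSpace.
for (HddsVolume v : StorageVolumeUtil.getHddsVolumesList(
    ozoneContainer.getVolumeSet().getVolumesList())) {
  long expected = commitSpaceMap.get(getVolumeKey(v)).longValue();
  assertEquals(expected, v.getCommittedBytes());
}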
Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in project ozone by apache.
From the class TestStorageVolumeChecker, method testVolumeDeletion.
/**
 * Test {@link StorageVolumeChecker#checkAllVolumes} propagates
 * checks for all volumes to the delegate checker.
 *
 * @throws Exception
 */
@Test
public void testVolumeDeletion() throws Exception {
  LOG.info("Executing {}", testName.getMethodName());
  DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
  dnConf.setDiskCheckMinGap(Duration.ofMillis(0));
  conf.setFromObject(dnConf);
  DatanodeDetails datanodeDetails =
      ContainerTestUtils.createDatanodeDetails();
  OzoneContainer ozoneContainer =
      ContainerTestUtils.getOzoneContainer(datanodeDetails, conf);
  MutableVolumeSet volumeSet = ozoneContainer.getVolumeSet();
  ContainerSet containerSet = ozoneContainer.getContainerSet();
  StorageVolumeChecker volumeChecker = volumeSet.getVolumeChecker();
  volumeChecker.setDelegateChecker(new DummyChecker());
  File volParentDir =
      new File(folder.getRoot(), UUID.randomUUID().toString());
  volumeSet.addVolume(volParentDir.getPath());
  File volRootDir = new File(volParentDir, "hdds");
  int i = 0;
  for (ContainerDataProto.State state : ContainerDataProto.State.values()) {
    if (!state.equals(ContainerDataProto.State.INVALID)) {
      // Add containers to the created volume.
      Container container =
          ContainerTestUtils.getContainer(++i, layout, state);
      container.getContainerData().setVolume(
          (HddsVolume) volumeSet.getVolumeMap().get(volRootDir.getPath()));
      ((KeyValueContainerData) container.getContainerData())
          .setMetadataPath(volParentDir.getPath());
      containerSet.addContainer(container);
    }
  }
  // Delete the volume directory.
  FileUtils.deleteDirectory(volParentDir);
  Assert.assertEquals(2, volumeSet.getVolumesList().size());
  volumeSet.checkAllVolumes();
  // The failed volume should be removed from the volume set's volume list.
  Assert.assertEquals(1, volumeSet.getVolumesList().size());
  Assert.assertEquals(1, volumeSet.getFailedVolumesList().size());
  i = 0;
  for (ContainerDataProto.State state : ContainerDataProto.State.values()) {
    if (!state.equals(ContainerDataProto.State.INVALID)) {
      // Containers on the failed volume should be marked UNHEALTHY.
      Assert.assertEquals(ContainerDataProto.State.UNHEALTHY,
          containerSet.getContainer(++i).getContainerState());
    }
  }
  ozoneContainer.stop();
}
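In short: the test registers one container per non-INVALID state on a freshly added volume, deletes the volume's directory out from under it, and expects checkAllVolumes() to move the volume to the failed list and downgrade every container on it to UNHEALTHY, regardless of the container's previous state.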
Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in project ozone by apache.
From the class TestContainerDataYaml, method testDisabledChecksum.
/**
 * Test that an incorrect checksum does not fail verification
 * when checksum verification is disabled.
 */
@Test
public void testDisabledChecksum() throws IOException {
  KeyValueContainerData kvData = getKeyValueContainerData();
  conf.setBoolean(
      HddsConfigKeys.HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED, false);
  // With verification disabled, the stale checksum must not throw.
  ContainerUtils.verifyChecksum(kvData, conf);
}
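As a hedged counterpart, not part of the original test: with verification left at its default of enabled, the same stale checksum would be expected to fail, roughly along these lines. The exception type is an assumption here, as is the availability of assertThrows (JUnit 4.13+).

conf.setBoolean(
    HddsConfigKeys.HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED, true);
// Assumed: verifyChecksum signals a mismatch with StorageContainerException.
assertThrows(StorageContainerException.class,
    () -> ContainerUtils.verifyChecksum(kvData, conf));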