Example use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State in the Apache Ozone project:
the method testFailedVolumeDuringFinalization from the class TestDatanodeUpgradeToScmHA.
/**
 * Verifies that datanode finalization to SCM HA completes even when a data
 * volume has failed, that the failed volume keeps its pre-finalized on-disk
 * layout, and that restarting the datanode upgrades the restored volume so
 * both old and new containers remain usable.
 */
@Test
public void testFailedVolumeDuringFinalization() throws Exception {
  /// SETUP ///
  String scmIdAtStartup = startScmServer();
  File dataVolume = addVolume();
  startPreFinalizedDatanode();
  final Pipeline writePipeline = getPipeline();

  /// PRE-FINALIZED: Write and Read from formatted volume ///
  Assert.assertEquals(1,
      dsm.getContainer().getVolumeSet().getVolumesList().size());
  Assert.assertEquals(0,
      dsm.getContainer().getVolumeSet().getFailedVolumesList().size());

  // Create a container with data and confirm it can be written and read.
  final long preUpgradeContainer = addContainer(writePipeline);
  ContainerProtos.WriteChunkRequestProto chunkWritten =
      putBlock(preUpgradeContainer, writePipeline);
  readChunk(chunkWritten, writePipeline);
  checkPreFinalizedVolumePathID(dataVolume, scmIdAtStartup, CLUSTER_ID);
  checkContainerPathID(preUpgradeContainer, scmIdAtStartup, CLUSTER_ID);

  /// FINALIZE: With failed volume ///
  failVolume(dataVolume);
  // With its volume failed, closing the container should mark it unhealthy.
  // Finalization must still be able to proceed afterwards.
  closeContainer(preUpgradeContainer, writePipeline,
      ContainerProtos.Result.CONTAINER_FILES_CREATE_ERROR);
  State stateAfterClose = dsm.getContainer().getContainerSet()
      .getContainer(preUpgradeContainer).getContainerState();
  Assert.assertEquals(State.UNHEALTHY, stateAfterClose);

  dsm.finalizeUpgrade();
  LambdaTestUtils.await(2000, 500,
      () -> dsm.getLayoutVersionManager().isAllowed(HDDSLayoutFeature.SCM_HA));

  /// FINALIZED: Volume marked failed but gets restored on disk ///
  // Finalization should have moved the volume to the failed list.
  Assert.assertEquals(0,
      dsm.getContainer().getVolumeSet().getVolumesList().size());
  Assert.assertEquals(1,
      dsm.getContainer().getVolumeSet().getFailedVolumesList().size());

  // The volume was out during the upgrade, so it retains its original
  // pre-finalized format on disk.
  checkPreFinalizedVolumePathID(dataVolume, scmIdAtStartup, CLUSTER_ID);
  checkContainerPathID(preUpgradeContainer, scmIdAtStartup, CLUSTER_ID);

  // Finalization is done; bring the volume back.
  restoreVolume(dataVolume);
  // Containers on the restored volume are readable again, but the volume is
  // still marked failed, so nothing new can be created or imported onto it.
  // Reading from the unhealthy container should log a warning but succeed.
  readChunk(chunkWritten, writePipeline);

  /// FINALIZED: Restart datanode to upgrade the failed volume ///
  restartDatanode(HDDSLayoutFeature.SCM_HA.layoutVersion());
  Assert.assertEquals(1,
      dsm.getContainer().getVolumeSet().getVolumesList().size());
  Assert.assertEquals(0,
      dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
  checkFinalizedVolumePathID(dataVolume, scmIdAtStartup, CLUSTER_ID);
  checkContainerPathID(preUpgradeContainer, scmIdAtStartup, CLUSTER_ID);

  // The pre-upgrade container (closed for the upgrade) is still readable.
  readChunk(chunkWritten, writePipeline);

  // Exercise a brand-new container created after the upgrade.
  long postUpgradeContainer = addContainer(writePipeline);
  ContainerProtos.WriteChunkRequestProto postUpgradeChunk =
      putBlock(postUpgradeContainer, writePipeline);
  readChunk(postUpgradeChunk, writePipeline);

  // New containers use the cluster ID in their paths; which volume hosts
  // them is left to the implementation.
  checkContainerPathID(postUpgradeContainer, CLUSTER_ID);
}
Aggregations