
Example 6 with State

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State in the Apache Ozone project.

The example below is the testFailedVolumeDuringFinalization method of the class TestDatanodeUpgradeToScmHA.

@Test
public void testFailedVolumeDuringFinalization() throws Exception {
    /// SETUP ///
    String originalScmID = startScmServer();
    File volume = addVolume();
    startPreFinalizedDatanode();
    final Pipeline pipeline = getPipeline();
    /// PRE-FINALIZED: Write and Read from formatted volume ///
    Assert.assertEquals(1, dsm.getContainer().getVolumeSet().getVolumesList().size());
    Assert.assertEquals(0, dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
    // Add container with data, make sure it can be read and written.
    final long containerID = addContainer(pipeline);
    ContainerProtos.WriteChunkRequestProto writeChunk = putBlock(containerID, pipeline);
    readChunk(writeChunk, pipeline);
    checkPreFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID);
    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
    /// FINALIZE: With failed volume ///
    failVolume(volume);
    // Since volume is failed, container should be marked unhealthy.
    // Finalization should proceed anyway.
    closeContainer(containerID, pipeline, ContainerProtos.Result.CONTAINER_FILES_CREATE_ERROR);
    State containerState = dsm.getContainer().getContainerSet().getContainer(containerID).getContainerState();
    Assert.assertEquals(State.UNHEALTHY, containerState);
    dsm.finalizeUpgrade();
    LambdaTestUtils.await(2000, 500, () -> dsm.getLayoutVersionManager().isAllowed(HDDSLayoutFeature.SCM_HA));
    /// FINALIZED: Volume marked failed but gets restored on disk ///
    // Check that volume is marked failed during finalization.
    Assert.assertEquals(0, dsm.getContainer().getVolumeSet().getVolumesList().size());
    Assert.assertEquals(1, dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
    // Since the volume was out during the upgrade, it should maintain its
    // original format.
    checkPreFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID);
    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
    // Now that we are done finalizing, restore the volume.
    restoreVolume(volume);
    // After restoring the failed volume, its containers are readable again.
    // However, since it is marked as failed no containers can be created or
    // imported to it.
    // This should log a warning about reading from an unhealthy container
    // but otherwise proceed successfully.
    readChunk(writeChunk, pipeline);
    /// FINALIZED: Restart datanode to upgrade the failed volume ///
    restartDatanode(HDDSLayoutFeature.SCM_HA.layoutVersion());
    Assert.assertEquals(1, dsm.getContainer().getVolumeSet().getVolumesList().size());
    Assert.assertEquals(0, dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
    checkFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID);
    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
    // Read container from before upgrade. The upgrade required it to be closed.
    readChunk(writeChunk, pipeline);
    // Write and read container after upgrade.
    long newContainerID = addContainer(pipeline);
    ContainerProtos.WriteChunkRequestProto newWriteChunk = putBlock(newContainerID, pipeline);
    readChunk(newWriteChunk, pipeline);
    // The new container should use cluster ID in its path.
    // The volume it is placed on is up to the implementation.
    checkContainerPathID(newContainerID, CLUSTER_ID);
}
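
The checkContainerPathID and checkPreFinalizedVolumePathID assertions hinge on the SCM HA layout change: before finalization a container's on-disk path embeds the SCM ID, and after finalization new containers use the cluster ID instead. Below is a minimal sketch of that kind of path assertion, assuming a layout of <volume>/hdds/<scmIdOrClusterId>/...; the class and helper names here are illustrative, not the test's actual helpers.

import java.io.File;

import org.junit.Assert;

// Illustrative sketch: assert that a container directory sits under one of
// the acceptable ID segments of its volume, mirroring the test's
// "SCM ID or cluster ID is acceptable" checks.
final class PathIdCheckSketch {

  private PathIdCheckSketch() { }

  static void assertPathUsesOneOf(File containerDir, String... candidateIds) {
    String path = containerDir.getAbsolutePath();
    for (String id : candidateIds) {
      if (path.contains(File.separator + id + File.separator)) {
        return;
      }
    }
    Assert.fail("Container path " + path
        + " does not embed any of the expected IDs");
  }
}

Called as assertPathUsesOneOf(dir, originalScmID, CLUSTER_ID) before finalization and as assertPathUsesOneOf(dir, CLUSTER_ID) afterwards, this matches the two-argument and one-argument checkContainerPathID calls in the test.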
Also used: ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), State (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State), File (java.io.File), MockPipeline (org.apache.hadoop.hdds.scm.pipeline.MockPipeline), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), Test (org.junit.Test)
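
The failVolume and restoreVolume helpers belong to the test class and are not shown in this excerpt. A common way to simulate a disk failure in this kind of test is to move the volume directory aside so the datanode's volume checks stop seeing it, then move it back to restore it. The sketch below is a guess at that pattern under those assumptions; the class name and helper signatures are hypothetical, not Ozone's actual implementation.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

// Hypothetical sketch: simulate a failed volume by renaming its directory
// aside, then restore it later. The real test helpers may work differently.
final class VolumeFailureSketch {

  private VolumeFailureSketch() { }

  // "Fail" the volume: once the directory is gone, volume health checks
  // report it as failed and its containers become unreadable.
  static File failVolume(File volume) throws IOException {
    File movedAside = new File(volume.getParentFile(),
        volume.getName() + ".failed");
    Files.move(volume.toPath(), movedAside.toPath());
    return movedAside;
  }

  // Restore the volume so its containers are readable again. Note that the
  // test above still expects it to stay in the failed set until restart.
  static void restoreVolume(File movedAside, File originalLocation)
      throws IOException {
    Files.move(movedAside.toPath(), originalLocation.toPath());
  }
}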

Aggregations

State (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State): 6
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 5
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 4
IOException (java.io.IOException): 2
ContainerType (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType): 2
Type (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type): 2
ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException): 2
InvalidContainerStateException (org.apache.hadoop.hdds.scm.container.common.helpers.InvalidContainerStateException): 2
AuditAction (org.apache.hadoop.ozone.audit.AuditAction): 2
AuditLoggerType (org.apache.hadoop.ozone.audit.AuditLoggerType): 2
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 2
Handler (org.apache.hadoop.ozone.container.common.interfaces.Handler): 2
ServiceException (com.google.protobuf.ServiceException): 1
File (java.io.File): 1
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto): 1
Result (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result): 1
MockPipeline (org.apache.hadoop.hdds.scm.pipeline.MockPipeline): 1
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 1
ByteString (org.apache.ratis.thirdparty.com.google.protobuf.ByteString): 1
Test (org.junit.Test): 1