
Example 1 with Flaky

Use of org.apache.ozone.test.tag.Flaky in the Apache Ozone project.

Class TestOzoneFileSystem, method testTrash.

/**
 * 1. Move a key to Trash.
 * 2. Verify that the key gets deleted by the trash emptier.
 */
@Test
@Flaky("HDDS-6645")
public void testTrash() throws Exception {
    String testKeyName = "testKey2";
    Path path = new Path(OZONE_URI_DELIMITER, testKeyName);
    ContractTestUtils.touch(fs, path);
    Assert.assertTrue(trash.getConf().getClass("fs.trash.classname", TrashPolicy.class).isAssignableFrom(TrashPolicyOzone.class));
    assertEquals(TRASH_INTERVAL, trash.getConf().getFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, 0), 0);
    // Call moveToTrash. We can't call protected fs.rename() directly
    trash.moveToTrash(path);
    // Construct paths
    String username = UserGroupInformation.getCurrentUser().getShortUserName();
    Path trashRoot = new Path(OZONE_URI_DELIMITER, TRASH_PREFIX);
    Path userTrash = new Path(trashRoot, username);
    Path userTrashCurrent = new Path(userTrash, "Current");
    Path trashPath = new Path(userTrashCurrent, testKeyName);
    // Wait until the TrashEmptier purges the key
    GenericTestUtils.waitFor(() -> {
        try {
            return !o3fs.exists(trashPath);
        } catch (IOException e) {
            LOG.error("Delete from Trash Failed");
            Assert.fail("Delete from Trash Failed");
            return false;
        }
    }, 1000, 120000);
    // userTrash path will contain the checkpoint folder
    Assert.assertEquals(1, fs.listStatus(userTrash).length);
    // wait for deletion of checkpoint dir
    GenericTestUtils.waitFor(() -> {
        try {
            return o3fs.listStatus(userTrash).length == 0;
        } catch (IOException e) {
            LOG.error("Delete from Trash Failed", e);
            Assert.fail("Delete from Trash Failed");
            return false;
        }
    }, 1000, 120000);
}
Also used: Path (org.apache.hadoop.fs.Path), TrashPolicyOzone (org.apache.hadoop.ozone.om.TrashPolicyOzone), IOException (java.io.IOException), Test (org.junit.Test), Flaky (org.apache.ozone.test.tag.Flaky)
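
A note on the pattern: both waits in testTrash poll with GenericTestUtils.waitFor until the trash emptier has done its work, checking every second for up to two minutes. A minimal, self-contained sketch of the same polling pattern in plain Java (the helper name waitUntil is illustrative, not the Ozone/Hadoop test utility itself):

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public final class PollUtil {

    private PollUtil() {
    }

    /**
     * Polls the check every checkIntervalMs until it returns true,
     * or throws a TimeoutException after timeoutMs.
     */
    public static void waitUntil(BooleanSupplier check,
            long checkIntervalMs, long timeoutMs)
            throws TimeoutException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!check.getAsBoolean()) {
            if (System.currentTimeMillis() > deadline) {
                throw new TimeoutException(
                    "Condition not met within " + timeoutMs + " ms");
            }
            Thread.sleep(checkIntervalMs);
        }
    }
}

In the test above, the first condition is that o3fs.exists(trashPath) becomes false, and the second is that the checkpoint directory under userTrash disappears.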

Example 2 with Flaky

Use of org.apache.ozone.test.tag.Flaky in the Apache Ozone project.

Class TestOzoneFileSystem, method testRenameToTrashEnabled.

/**
 * Check that files are moved to trash, since fs.rename(src, dst, options)
 * is enabled.
 */
@Test
@Flaky("HDDS-6646")
public void testRenameToTrashEnabled() throws Exception {
    // Create a file
    String testKeyName = "testKey1";
    Path path = new Path(OZONE_URI_DELIMITER, testKeyName);
    try (FSDataOutputStream stream = fs.create(path)) {
        stream.write(1);
    }
    // Call moveToTrash. We can't call protected fs.rename() directly
    trash.moveToTrash(path);
    // Construct paths
    String username = UserGroupInformation.getCurrentUser().getShortUserName();
    Path trashRoot = new Path(OZONE_URI_DELIMITER, TRASH_PREFIX);
    Path userTrash = new Path(trashRoot, username);
    Path userTrashCurrent = new Path(userTrash, "Current");
    Path trashPath = new Path(userTrashCurrent, testKeyName);
    // Trash Current directory should still have been created.
    Assert.assertTrue(o3fs.exists(userTrashCurrent));
    // Check under trash, the key should be present
    Assert.assertTrue(o3fs.exists(trashPath));
}
Also used: Path (org.apache.hadoop.fs.Path), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test), Flaky (org.apache.ozone.test.tag.Flaky)
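
Both trash tests construct the expected trash location by hand from OZONE_URI_DELIMITER and TRASH_PREFIX. A minimal sketch of that layout, assuming the delimiter is "/" and the trash prefix is ".Trash" (both values are assumptions here; the tests use the project constants):

import org.apache.hadoop.fs.Path;

public class TrashPathExample {
    public static void main(String[] args) {
        String delimiter = "/";          // stands in for OZONE_URI_DELIMITER
        String trashPrefix = ".Trash";   // assumed value of TRASH_PREFIX
        String username = "hadoop";      // illustrative short user name
        String keyName = "testKey1";

        Path trashRoot = new Path(delimiter, trashPrefix);       // /.Trash
        Path userTrash = new Path(trashRoot, username);          // /.Trash/hadoop
        Path userTrashCurrent = new Path(userTrash, "Current");  // /.Trash/hadoop/Current
        Path trashPath = new Path(userTrashCurrent, keyName);    // /.Trash/hadoop/Current/testKey1

        System.out.println(trashPath);
    }
}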

Example 3 with Flaky

Use of org.apache.ozone.test.tag.Flaky in the Apache Ozone project.

Class TestMiniOzoneCluster, method testDNstartAfterSCM.

/**
 * Test that a DN can register with SCM even if it was started before the SCM.
 * @throws Exception
 */
@Test
@Timeout(100)
@Flaky("HDDS-6111")
public void testDNstartAfterSCM() throws Exception {
    // Start a cluster with 3 DN
    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
    cluster.waitForClusterToBeReady();
    // Stop the SCM
    StorageContainerManager scm = cluster.getStorageContainerManager();
    scm.stop();
    // Restart DN
    cluster.restartHddsDatanode(0, false);
    // The DN should stay in GETVERSION state until the SCM is restarted.
    // Check the DN endpoint state for 20 seconds.
    DatanodeStateMachine dnStateMachine = cluster.getHddsDatanodes().get(0).getDatanodeStateMachine();
    for (int i = 0; i < 20; i++) {
        for (EndpointStateMachine endpoint : dnStateMachine.getConnectionManager().getValues()) {
            Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION, endpoint.getState());
        }
        Thread.sleep(1000);
    }
    // DN should successfully register with the SCM after SCM is restarted.
    // Restart the SCM
    cluster.restartStorageContainerManager(true);
    // Wait for DN to register
    cluster.waitForClusterToBeReady();
    // DN should be in HEARTBEAT state after registering with the SCM
    for (EndpointStateMachine endpoint : dnStateMachine.getConnectionManager().getValues()) {
        Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT, endpoint.getState());
    }
}
Also used: EndpointStateMachine (org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine), StorageContainerManager (org.apache.hadoop.hdds.scm.server.StorageContainerManager), DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine), Test (org.junit.jupiter.api.Test), Flaky (org.apache.ozone.test.tag.Flaky), Timeout (org.junit.jupiter.api.Timeout)
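
The 20-second loop in testDNstartAfterSCM asserts that a state stays unchanged for a fixed window, rather than waiting for it to change. A self-contained sketch of that pattern (the helper name assertStateHoldsFor is illustrative):

import java.util.function.Supplier;

public final class StateAssertions {

    private StateAssertions() {
    }

    /**
     * Samples the state every intervalMs for durationMs and throws an
     * AssertionError the first time it differs from the expected value.
     */
    public static <T> void assertStateHoldsFor(Supplier<T> state, T expected,
            long durationMs, long intervalMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + durationMs;
        while (System.currentTimeMillis() < deadline) {
            T current = state.get();
            if (!expected.equals(current)) {
                throw new AssertionError(
                    "Expected state " + expected + " but was " + current);
            }
            Thread.sleep(intervalMs);
        }
    }
}

In the test this corresponds to sampling every endpoint's state once a second for 20 seconds and expecting GETVERSION each time, since the SCM is down and the DN cannot progress past version negotiation.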

Example 4 with Flaky

Use of org.apache.ozone.test.tag.Flaky in the Apache Ozone project.

Class TestSCMInstallSnapshot, method testInstallCheckPoint.

@Test
@Flaky("HDDS-6116")
public void testInstallCheckPoint() throws Exception {
    DBCheckpoint checkpoint = downloadSnapshot();
    StorageContainerManager scm = cluster.getStorageContainerManager();
    DBStore db = HAUtils.loadDB(conf, checkpoint.getCheckpointLocation().getParent().toFile(), checkpoint.getCheckpointLocation().getFileName().toString(), new SCMDBDefinition());
    // Hack the transaction info in the checkpoint to ensure the
    // checkpointed transaction index is higher than that of the SCM
    // it was downloaded from.
    Assert.assertNotNull(db);
    HAUtils.getTransactionInfoTable(db, new SCMDBDefinition()).put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.builder().setCurrentTerm(10).setTransactionIndex(100).build());
    db.close();
    ContainerID cid = scm.getContainerManager().getContainers().get(0).containerID();
    PipelineID pipelineID = scm.getPipelineManager().getPipelines().get(0).getId();
    scm.getScmMetadataStore().getPipelineTable().delete(pipelineID);
    scm.getContainerManager().deleteContainer(cid);
    Assert.assertNull(scm.getScmMetadataStore().getPipelineTable().get(pipelineID));
    Assert.assertFalse(scm.getContainerManager().containerExist(cid));
    SCMStateMachine sm = scm.getScmHAManager().getRatisServer().getSCMStateMachine();
    sm.pause();
    sm.setInstallingDBCheckpoint(checkpoint);
    sm.reinitialize();
    Assert.assertNotNull(scm.getScmMetadataStore().getPipelineTable().get(pipelineID));
    Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable().get(cid));
    Assert.assertTrue(scm.getPipelineManager().containsPipeline(pipelineID));
    Assert.assertTrue(scm.getContainerManager().containerExist(cid));
    Assert.assertEquals(100, scm.getScmMetadataStore().getTransactionInfoTable().get(OzoneConsts.TRANSACTION_INFO_KEY).getTransactionIndex());
    Assert.assertEquals(100, scm.getScmHAManager().asSCMHADBTransactionBuffer().getLatestTrxInfo().getTermIndex().getIndex());
}
Also used: StorageContainerManager (org.apache.hadoop.hdds.scm.server.StorageContainerManager), ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID), DBCheckpoint (org.apache.hadoop.hdds.utils.db.DBCheckpoint), SCMDBDefinition (org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition), PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID), SCMStateMachine (org.apache.hadoop.hdds.scm.ha.SCMStateMachine), DBStore (org.apache.hadoop.hdds.utils.db.DBStore), Test (org.junit.jupiter.api.Test), Flaky (org.apache.ozone.test.tag.Flaky)
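
The point of overwriting the transaction info with term 10 and index 100 is to make the checkpoint look newer than the SCM's current state, so that reinitialize() installs it and the final assertions see index 100. A minimal sketch of the kind of (term, index) comparison involved (the class below is illustrative; Ratis ships its own TermIndex type):

/** Illustrative (term, index) pair, not the Ratis TermIndex class. */
public final class TermIndexExample {

    private final long term;
    private final long index;

    public TermIndexExample(long term, long index) {
        this.term = term;
        this.index = index;
    }

    /** Higher term wins; for equal terms, the higher index wins. */
    public boolean isNewerThan(TermIndexExample other) {
        if (term != other.term) {
            return term > other.term;
        }
        return index > other.index;
    }

    public static void main(String[] args) {
        TermIndexExample local = new TermIndexExample(1, 42);        // assumed local state
        TermIndexExample checkpoint = new TermIndexExample(10, 100); // the hacked checkpoint
        System.out.println(checkpoint.isNewerThan(local));           // true
    }
}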

Example 5 with Flaky

Use of org.apache.ozone.test.tag.Flaky in the Apache Ozone project.

Class TestPipelineClose, method testPipelineCloseWithLogFailure.

@Test
@Flaky("HDDS-5604")
public void testPipelineCloseWithLogFailure() throws IOException {
    EventQueue eventQ = (EventQueue) scm.getEventQueue();
    PipelineActionHandler pipelineActionTest = Mockito.mock(PipelineActionHandler.class);
    eventQ.addHandler(SCMEvents.PIPELINE_ACTIONS, pipelineActionTest);
    ArgumentCaptor<PipelineActionsFromDatanode> actionCaptor = ArgumentCaptor.forClass(PipelineActionsFromDatanode.class);
    ContainerInfo containerInfo = containerManager.allocateContainer(RatisReplicationConfig.getInstance(ReplicationFactor.THREE), "testOwner");
    ContainerWithPipeline containerWithPipeline = new ContainerWithPipeline(containerInfo, pipelineManager.getPipeline(containerInfo.getPipelineID()));
    Pipeline openPipeline = containerWithPipeline.getPipeline();
    RaftGroupId groupId = RaftGroupId.valueOf(openPipeline.getId().getId());
    try {
        pipelineManager.getPipeline(openPipeline.getId());
    } catch (PipelineNotFoundException e) {
        Assert.fail("pipeline should exist");
    }
    DatanodeDetails datanodeDetails = openPipeline.getNodes().get(0);
    int index = cluster.getHddsDatanodeIndex(datanodeDetails);
    XceiverServerRatis xceiverRatis = (XceiverServerRatis) cluster.getHddsDatanodes().get(index).getDatanodeStateMachine().getContainer().getWriteChannel();
    /*
     * Notify the datanode Ratis server endpoint of a Ratis log failure.
     * This is expected to trigger an immediate pipeline actions report
     * to the SCM.
     */
    xceiverRatis.handleNodeLogFailure(groupId, null);
    // verify SCM receives a pipeline action report "immediately"
    Mockito.verify(pipelineActionTest, Mockito.timeout(100)).onMessage(actionCaptor.capture(), Mockito.any(EventPublisher.class));
    PipelineActionsFromDatanode actionsFromDatanode = actionCaptor.getValue();
    // match the pipeline id
    verifyCloseForPipeline(openPipeline, actionsFromDatanode);
}
Also used: XceiverServerRatis (org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis), EventPublisher (org.apache.hadoop.hdds.server.events.EventPublisher), RaftGroupId (org.apache.ratis.protocol.RaftGroupId), ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline), EventQueue (org.apache.hadoop.hdds.server.events.EventQueue), PipelineActionsFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo), Test (org.junit.jupiter.api.Test), Flaky (org.apache.ozone.test.tag.Flaky)
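
The test combines Mockito.timeout with an ArgumentCaptor to assert that an asynchronous event arrives within a bound and then inspect its payload. A stripped-down sketch of the same pattern, using a hypothetical Listener/Event pair in place of PipelineActionHandler and PipelineActionsFromDatanode:

import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;

public class AsyncVerifyExample {

    /** Hypothetical event and listener types, for illustration only. */
    public interface Event { String name(); }
    public interface Listener { void onMessage(Event event); }

    public static void main(String[] args) {
        Listener listener = Mockito.mock(Listener.class);

        // Simulate an asynchronous producer firing the event shortly afterwards.
        new Thread(() -> listener.onMessage(() -> "pipeline-action")).start();

        // Wait up to 100 ms for the call, then capture and inspect its argument.
        ArgumentCaptor<Event> captor = ArgumentCaptor.forClass(Event.class);
        Mockito.verify(listener, Mockito.timeout(100)).onMessage(captor.capture());
        System.out.println(captor.getValue().name()); // pipeline-action
    }
}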

Aggregations

Flaky (org.apache.ozone.test.tag.Flaky): 16
Test (org.junit.jupiter.api.Test): 13
IOException (java.io.IOException): 4
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 4
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 4
Path (org.apache.hadoop.fs.Path): 3
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 3
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 3
StorageContainerManager (org.apache.hadoop.hdds.scm.server.StorageContainerManager): 3
ObjectStore (org.apache.hadoop.ozone.client.ObjectStore): 3
OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume): 3
KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream): 3
Test (org.junit.Test): 3
ArrayList (java.util.ArrayList): 2
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2
ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline): 2
HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService): 2
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 2
VolumeArgs (org.apache.hadoop.ozone.client.VolumeArgs): 2
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 2
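
All of the examples above attach @Flaky with the id of the JIRA issue tracking the flakiness. A minimal sketch of how such an annotation could be defined as a JUnit 5 tag, so that flaky tests can be filtered out of a build; this is an assumption about the mechanism, not the exact Ozone implementation:

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.Tag;

/**
 * Marks a test as flaky and records the tracking issue (e.g. "HDDS-6645").
 * Because the annotation is meta-annotated with a JUnit 5 tag, engines that
 * support tag filtering can exclude these tests, for example with Maven
 * Surefire's excludedGroups=flaky or Gradle's excludeTags "flaky".
 */
@Target({ElementType.TYPE, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
@Tag("flaky")
public @interface Flaky {
    /** One or more issue ids explaining why the test is considered flaky. */
    String[] value();
}

The issue id carried in value() is informational; the filtering itself comes from the tag.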