
Example 31 with PipelineID

Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache, from the class TestSCMRatisRequest, method testEncodeWithNonProto.

@Test(expected = InvalidProtocolBufferException.class)
public void testEncodeWithNonProto() throws Exception {
    PipelineID pipelineID = PipelineID.randomId();
    // The argument is the raw domain object, not its protobuf form.
    Object[] args = new Object[] { pipelineID };
    SCMRatisRequest request = SCMRatisRequest.of(PIPELINE, "test", new Class[] { pipelineID.getClass() }, args);
    // encode() should fail with InvalidProtocolBufferException.
    request.encode();
}
Also used: PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID), Test (org.junit.Test)
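
For contrast, a sketch of the passing case: SCMRatisRequest.encode() serializes arguments as protobuf messages, so passing the protobuf form of the PipelineID (PipelineID exposes getProtobuf() in Ozone) should encode cleanly. This is a hedged sketch reusing the shapes from the test above, with a hypothetical test name, not code from the project:

@Test
public void testEncodeWithProto() throws Exception {
    PipelineID pipelineID = PipelineID.randomId();
    // Protobuf form of the ID; getProtobuf() is exposed by PipelineID.
    HddsProtos.PipelineID proto = pipelineID.getProtobuf();
    Object[] args = new Object[] { proto };
    SCMRatisRequest request = SCMRatisRequest.of(PIPELINE, "test", new Class[] { proto.getClass() }, args);
    // No InvalidProtocolBufferException expected for protobuf arguments.
    request.encode();
}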

Example 32 with PipelineID

Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache, from the class TestPipelineIDCodec, method checkPersisting.

private void checkPersisting(long mostSigBits, long leastSigBits, byte[] expected) throws Exception {
    UUID uuid = new UUID(mostSigBits, leastSigBits);
    PipelineID pid = PipelineID.valueOf(uuid);
    byte[] encoded = new PipelineIDCodec().toPersistedFormat(pid);
    assertArrayEquals(expected, encoded);
}
Also used: PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID), UUID (java.util.UUID)
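
A hypothetical caller for this helper, assuming the codec persists the UUID as mostSigBits followed by leastSigBits, each as 8 big-endian bytes — that layout is an assumption here, not confirmed by the snippet. java.nio.ByteBuffer builds such a layout directly (it is big-endian by default):

@Test
public void testPersistingKnownUUID() throws Exception {
    long msb = 0x0123456789ABCDEFL;
    long lsb = 0x0FEDCBA987654321L;
    // Assumed layout: 8 big-endian bytes of msb, then 8 of lsb.
    byte[] expected = ByteBuffer.allocate(16).putLong(msb).putLong(lsb).array();
    checkPersisting(msb, lsb, expected);
}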

Example 33 with PipelineID

Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache, from the class TestHybridPipelineOnDatanode, method testHybridPipelineOnDatanode.

/**
 * Tests that RATIS pipelines with replication factors ONE and THREE can
 * coexist on the same datanode, and that keys written through both
 * pipelines read back the original data.
 * @throws IOException
 */
@Test
public void testHybridPipelineOnDatanode() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = UUID.randomUUID().toString();
    byte[] data = value.getBytes(UTF_8);
    objectStore.createVolume(volumeName);
    OzoneVolume volume = objectStore.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName1 = UUID.randomUUID().toString();
    // Write the data into a key on a RATIS replication-factor-ONE pipeline.
    OzoneOutputStream out = bucket.createKey(keyName1, data.length, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    String keyName2 = UUID.randomUUID().toString();
    // Write the same data into a second key on a RATIS replication-factor-THREE pipeline.
    out = bucket.createKey(keyName2, data.length, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // We need to find the location of the chunk file corresponding to the
    // data we just wrote.
    OzoneKey key1 = bucket.getKey(keyName1);
    long containerID1 = ((OzoneKeyDetails) key1).getOzoneKeyLocations().get(0).getContainerID();
    OzoneKey key2 = bucket.getKey(keyName2);
    long containerID2 = ((OzoneKeyDetails) key2).getOzoneKeyLocations().get(0).getContainerID();
    PipelineID pipelineID1 = cluster.getStorageContainerManager().getContainerInfo(containerID1).getPipelineID();
    PipelineID pipelineID2 = cluster.getStorageContainerManager().getContainerInfo(containerID2).getPipelineID();
    Pipeline pipeline1 = cluster.getStorageContainerManager().getPipelineManager().getPipeline(pipelineID1);
    List<DatanodeDetails> dns = pipeline1.getNodes();
    Assert.assertEquals(1, dns.size());
    Pipeline pipeline2 = cluster.getStorageContainerManager().getPipelineManager().getPipeline(pipelineID2);
    Assert.assertNotEquals(pipeline1, pipeline2);
    Assert.assertEquals(HddsProtos.ReplicationType.RATIS, pipeline1.getType());
    Assert.assertEquals(pipeline1.getType(), pipeline2.getType());
    // Assert that pipeline1 and pipeline2 land on the same datanode even
    // though they use different replication factors.
    Assert.assertTrue(pipeline2.getNodes().contains(dns.get(0)));
    byte[] b1 = new byte[data.length];
    byte[] b2 = new byte[data.length];
    // Read back the first key.
    OzoneInputStream is = bucket.readKey(keyName1);
    is.read(b1);
    is.close();
    // Read back the second key.
    is = bucket.readKey(keyName2);
    is.read(b2);
    is.close();
    Assert.assertArrayEquals(data, b1);
    Assert.assertArrayEquals(b1, b2);
}
Also used: OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream), HashMap (java.util.HashMap), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket), OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), OzoneKey (org.apache.hadoop.ozone.client.OzoneKey), PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID), Test (org.junit.Test)
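
One caveat in the read path above: InputStream.read(byte[]) may return fewer bytes than requested, so the single read(b1) and read(b2) calls are only safe for small values. A strict version would loop until the buffer fills; a minimal sketch (readFully is a hypothetical helper using java.io types, not part of the test class):

// Hypothetical helper: read exactly buf.length bytes, looping because a
// single InputStream.read(byte[]) call may fill only part of the buffer.
private static void readFully(InputStream in, byte[] buf) throws IOException {
    int off = 0;
    while (off < buf.length) {
        int n = in.read(buf, off, buf.length - off);
        if (n < 0) {
            throw new EOFException("stream ended after " + off + " of " + buf.length + " bytes");
        }
        off += n;
    }
}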

Example 34 with PipelineID

Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache, from the class TestHDDSUpgrade, method testFinalizationFromInitialVersionToLatestVersion.

/*
 * Happy path test case: finalization from the initial layout version to
 * the latest version.
 */
@Test
public void testFinalizationFromInitialVersionToLatestVersion() throws Exception {
    waitForPipelineCreated();
    createTestContainers();
    // Test the Pre-Upgrade conditions on SCM as well as DataNodes.
    testPreUpgradeConditionsSCM();
    testPreUpgradeConditionsDataNodes();
    Set<PipelineID> preUpgradeOpenPipelines = scmPipelineManager.getPipelines(RATIS_THREE, OPEN).stream().map(Pipeline::getId).collect(Collectors.toSet());
    // Trigger Finalization on the SCM
    StatusAndMessages status = scm.finalizeUpgrade("xyz");
    Assert.assertEquals(STARTING_FINALIZATION, status.status());
    // Wait for the Finalization to complete on the SCM.
    while (status.status() != FINALIZATION_DONE) {
        status = scm.queryUpgradeFinalizationProgress("xyz", false, false);
    }
    Set<PipelineID> postUpgradeOpenPipelines = scmPipelineManager.getPipelines(RATIS_THREE, OPEN).stream().map(Pipeline::getId).collect(Collectors.toSet());
    // No pipelines from before the upgrade should still be open after the
    // upgrade.
    long numPreUpgradeOpenPipelines = preUpgradeOpenPipelines.stream().filter(postUpgradeOpenPipelines::contains).count();
    Assert.assertEquals(0, numPreUpgradeOpenPipelines);
    // Verify Post-Upgrade conditions on the SCM.
    testPostUpgradeConditionsSCM();
    // All datanodes on the SCM should have moved to HEALTHY-READONLY state.
    testDataNodesStateOnSCM(HEALTHY_READONLY, HEALTHY);
    // Verify the SCM has driven all the DataNodes through Layout Upgrade.
    // In the happy path case, no containers should have been quasi closed as
    // a result of the upgrade.
    testPostUpgradeConditionsDataNodes(CLOSED);
    // Test that we can use a pipeline after upgrade.
    // Will fail with exception if there are no pipelines.
    ObjectStore store = cluster.getClient().getObjectStore();
    store.createVolume("vol1");
    store.getVolume("vol1").createBucket("buc1");
    store.getVolume("vol1").getBucket("buc1").createKey("key1", 100, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
}
Also used: ObjectStore (org.apache.hadoop.ozone.client.ObjectStore), PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID), StatusAndMessages (org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages), Test (org.junit.Test)
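
The finalization loop above polls queryUpgradeFinalizationProgress in a tight loop. A sketch of the same loop with a delay between polls; the 500 ms interval is an arbitrary choice, and Thread.sleep() compiles here because the test method already declares throws Exception:

// Same progress loop as above, with a pause between queries.
while (status.status() != FINALIZATION_DONE) {
    // Arbitrary poll interval; avoids hammering the SCM with queries.
    Thread.sleep(500);
    status = scm.queryUpgradeFinalizationProgress("xyz", false, false);
}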

Example 35 with PipelineID

Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache, from the class TestHDDSUpgrade, method testPostUpgradePipelineCreation.

/*
   * Helper function to test that we can create new pipelines Post-Upgrade.
   */
private void testPostUpgradePipelineCreation() throws IOException {
    Pipeline ratisPipeline1 = scmPipelineManager.createPipeline(RATIS_THREE);
    scmPipelineManager.openPipeline(ratisPipeline1.getId());
    Assert.assertEquals(0, scmPipelineManager.getNumberOfContainers(ratisPipeline1.getId()));
    PipelineID pid = scmContainerManager.allocateContainer(RATIS_THREE, "Owner1").getPipelineID();
    Assert.assertEquals(1, scmPipelineManager.getNumberOfContainers(pid));
    Assert.assertEquals(pid, ratisPipeline1.getId());
}
Also used: PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)
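
Since several of these examples hinge on PipelineID equality, a round-trip sketch may be useful. It assumes the getProtobuf()/getFromProtobuf() pair on PipelineID, as used elsewhere in the Ozone codebase:

// Sketch: a PipelineID survives a round trip through its protobuf form.
PipelineID original = PipelineID.randomId();
HddsProtos.PipelineID proto = original.getProtobuf();
PipelineID restored = PipelineID.getFromProtobuf(proto);
Assert.assertEquals(original, restored);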

Aggregations

PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID) 35
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline) 15
Test (org.junit.Test) 13
IOException (java.io.IOException) 12
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails) 11
HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos) 6
PipelineNotFoundException (org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException) 6
XceiverServerSpi (org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi) 5
ArrayList (java.util.ArrayList) 4
HashMap (java.util.HashMap) 4
UUID (java.util.UUID) 4
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration) 4
List (java.util.List) 3
ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID) 3
RaftGroup (org.apache.ratis.protocol.RaftGroup) 3
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 2
ConfigurationSource (org.apache.hadoop.hdds.conf.ConfigurationSource) 2
MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails) 2
ClosePipelineInfo (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo) 2
CreatePipelineCommandProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CreatePipelineCommandProto) 2