Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
The class TestSCMRatisRequest, method testEncodeWithNonProto.
@Test(expected = InvalidProtocolBufferException.class)
public void testEncodeWithNonProto() throws Exception {
  PipelineID pipelineID = PipelineID.randomId();
  // Non-proto argument: PipelineID is the SCM wrapper class, not a protobuf message.
  Object[] args = new Object[] { pipelineID };
  SCMRatisRequest request = SCMRatisRequest.of(PIPELINE, "test",
      new Class[] { pipelineID.getClass() }, args);
  // encode() should throw InvalidProtocolBufferException here.
  request.encode();
}
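For contrast, a request built from the protobuf form of the ID should encode cleanly. A minimal sketch, assuming PipelineID.getProtobuf() returns the HddsProtos.PipelineID message (the test name here is our own):

@Test
public void testEncodeWithProto() throws Exception {
  // Convert the wrapper into its protobuf message form first.
  HddsProtos.PipelineID protoID = PipelineID.randomId().getProtobuf();
  Object[] args = new Object[] { protoID };
  SCMRatisRequest request = SCMRatisRequest.of(PIPELINE, "test",
      new Class[] { protoID.getClass() }, args);
  // Protobuf arguments serialize without error.
  request.encode();
}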
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
The class TestPipelineIDCodec, method checkPersisting.
private void checkPersisting(long mostSigBits, long leastSigBits, byte[] expected) throws Exception {
  UUID uuid = new UUID(mostSigBits, leastSigBits);
  PipelineID pid = PipelineID.valueOf(uuid);
  byte[] encoded = new PipelineIDCodec().toPersistedFormat(pid);
  assertArrayEquals(expected, encoded);
}
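A caller might exercise the helper like this. The expected bytes assume the codec persists the UUID as 16 big-endian bytes, most-significant long first; the byte pattern and test name are illustrative:

@Test
public void testPersistingDistinctBytePattern() throws Exception {
  // 0x0011223344556677 followed by 0x8899AABBCCDDEEFF, big-endian.
  byte[] expected = new byte[] {
      (byte) 0x00, (byte) 0x11, (byte) 0x22, (byte) 0x33,
      (byte) 0x44, (byte) 0x55, (byte) 0x66, (byte) 0x77,
      (byte) 0x88, (byte) 0x99, (byte) 0xAA, (byte) 0xBB,
      (byte) 0xCC, (byte) 0xDD, (byte) 0xEE, (byte) 0xFF
  };
  checkPersisting(0x0011223344556677L, 0x8899AABBCCDDEEFFL, expected);
}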
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
The class TestHybridPipelineOnDatanode, method testHybridPipelineOnDatanode.
/**
 * Tests that a RATIS/ONE and a RATIS/THREE pipeline can coexist on the
 * same datanode, and that keys written through both read back correctly.
 * @throws IOException
 */
@Test
public void testHybridPipelineOnDatanode() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String value = UUID.randomUUID().toString();
  byte[] data = value.getBytes(UTF_8);
  objectStore.createVolume(volumeName);
  OzoneVolume volume = objectStore.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName1 = UUID.randomUUID().toString();
  // Write data into a key on a RATIS/ONE pipeline.
  OzoneOutputStream out = bucket.createKey(keyName1, data.length,
      ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
  out.write(data);
  out.close();
  String keyName2 = UUID.randomUUID().toString();
  // Write the same data into a second key on a RATIS/THREE pipeline.
  out = bucket.createKey(keyName2, data.length,
      ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
  out.write(data);
  out.close();
  // We need to find the containers holding the data we just wrote.
  OzoneKey key1 = bucket.getKey(keyName1);
  long containerID1 = ((OzoneKeyDetails) key1).getOzoneKeyLocations().get(0).getContainerID();
  OzoneKey key2 = bucket.getKey(keyName2);
  long containerID2 = ((OzoneKeyDetails) key2).getOzoneKeyLocations().get(0).getContainerID();
  PipelineID pipelineID1 = cluster.getStorageContainerManager().getContainerInfo(containerID1).getPipelineID();
  PipelineID pipelineID2 = cluster.getStorageContainerManager().getContainerInfo(containerID2).getPipelineID();
  Pipeline pipeline1 = cluster.getStorageContainerManager().getPipelineManager().getPipeline(pipelineID1);
  List<DatanodeDetails> dns = pipeline1.getNodes();
  Assert.assertEquals(1, dns.size());
  Pipeline pipeline2 = cluster.getStorageContainerManager().getPipelineManager().getPipeline(pipelineID2);
  Assert.assertNotEquals(pipeline1, pipeline2);
  Assert.assertEquals(HddsProtos.ReplicationType.RATIS, pipeline1.getType());
  Assert.assertEquals(pipeline1.getType(), pipeline2.getType());
  // Assert that the two pipelines share a datanode even though they have
  // different replication factors.
  Assert.assertTrue(pipeline2.getNodes().contains(dns.get(0)));
  byte[] b1 = new byte[data.length];
  byte[] b2 = new byte[data.length];
  // Now read both keys back and verify the contents.
  OzoneInputStream is = bucket.readKey(keyName1);
  is.read(b1);
  is.close();
  is = bucket.readKey(keyName2);
  is.read(b2);
  is.close();
  Assert.assertArrayEquals(data, b1);
  Assert.assertArrayEquals(b1, b2);
}
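One caveat: a single InputStream.read(byte[]) call is not guaranteed to fill the buffer, so the reads above rely on the payload being small. A safer pattern loops until the buffer is full; a minimal sketch in plain Java (the readFully helper name is our own, not Ozone API; imports: java.io.EOFException, java.io.IOException, java.io.InputStream):

private static void readFully(InputStream in, byte[] buf) throws IOException {
  int off = 0;
  while (off < buf.length) {
    // read() may return fewer bytes than requested; keep going until full.
    int n = in.read(buf, off, buf.length - off);
    if (n < 0) {
      throw new EOFException("Stream ended after " + off + " bytes");
    }
    off += n;
  }
}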
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
The class TestHDDSUpgrade, method testFinalizationFromInitialVersionToLatestVersion.
/*
 * Happy-path test case.
 */
@Test
public void testFinalizationFromInitialVersionToLatestVersion() throws Exception {
  waitForPipelineCreated();
  createTestContainers();
  // Test the pre-upgrade conditions on the SCM as well as the DataNodes.
  testPreUpgradeConditionsSCM();
  testPreUpgradeConditionsDataNodes();
  Set<PipelineID> preUpgradeOpenPipelines =
      scmPipelineManager.getPipelines(RATIS_THREE, OPEN)
          .stream()
          .map(Pipeline::getId)
          .collect(Collectors.toSet());
  // Trigger finalization on the SCM.
  StatusAndMessages status = scm.finalizeUpgrade("xyz");
  Assert.assertEquals(STARTING_FINALIZATION, status.status());
  // Wait for finalization to complete on the SCM.
  while (status.status() != FINALIZATION_DONE) {
    status = scm.queryUpgradeFinalizationProgress("xyz", false, false);
  }
  Set<PipelineID> postUpgradeOpenPipelines =
      scmPipelineManager.getPipelines(RATIS_THREE, OPEN)
          .stream()
          .map(Pipeline::getId)
          .collect(Collectors.toSet());
  // No pipelines from before the upgrade should still be open after it.
  long numPreUpgradeOpenPipelines = preUpgradeOpenPipelines.stream()
      .filter(postUpgradeOpenPipelines::contains)
      .count();
  Assert.assertEquals(0, numPreUpgradeOpenPipelines);
  // Verify post-upgrade conditions on the SCM.
  testPostUpgradeConditionsSCM();
  // All datanodes on the SCM should have moved to the HEALTHY-READONLY state.
  testDataNodesStateOnSCM(HEALTHY_READONLY, HEALTHY);
  // Verify the SCM has driven all the DataNodes through the layout upgrade.
  // In the happy-path case, no containers should have been quasi-closed as
  // a result of the upgrade.
  testPostUpgradeConditionsDataNodes(CLOSED);
  // Test that we can use a pipeline after the upgrade; createKey fails with
  // an exception if there are no pipelines.
  ObjectStore store = cluster.getClient().getObjectStore();
  store.createVolume("vol1");
  store.getVolume("vol1").createBucket("buc1");
  store.getVolume("vol1").getBucket("buc1").createKey("key1", 100,
      ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
}
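The wait loop above polls queryUpgradeFinalizationProgress in a tight spin. A test helper might back off between polls; a minimal sketch using the same API as the test, where the helper name and the 500 ms interval are our own choices:

private StatusAndMessages waitForFinalization(String clientID) throws Exception {
  StatusAndMessages status = scm.queryUpgradeFinalizationProgress(clientID, false, false);
  while (status.status() != FINALIZATION_DONE) {
    // Pause briefly between polls instead of hammering the SCM.
    Thread.sleep(500);
    status = scm.queryUpgradeFinalizationProgress(clientID, false, false);
  }
  return status;
}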
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
The class TestHDDSUpgrade, method testPostUpgradePipelineCreation.
/*
 * Helper function to test that we can create new pipelines post-upgrade.
 */
private void testPostUpgradePipelineCreation() throws IOException {
  Pipeline ratisPipeline1 = scmPipelineManager.createPipeline(RATIS_THREE);
  scmPipelineManager.openPipeline(ratisPipeline1.getId());
  Assert.assertEquals(0, scmPipelineManager.getNumberOfContainers(ratisPipeline1.getId()));
  // Allocating a container should place it on the newly opened pipeline.
  PipelineID pid = scmContainerManager.allocateContainer(RATIS_THREE, "Owner1").getPipelineID();
  Assert.assertEquals(1, scmPipelineManager.getNumberOfContainers(pid));
  Assert.assertEquals(pid, ratisPipeline1.getId());
}
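A follow-up check could confirm the mapping from the pipeline side as well; a sketch assuming the pipeline manager exposes getContainersInPipeline(PipelineID), with the helper name our own:

private void assertContainerOnPipeline(PipelineID pid) throws IOException {
  // Cross-check: exactly one container should be mapped to the pipeline.
  NavigableSet<ContainerID> containers = scmPipelineManager.getContainersInPipeline(pid);
  Assert.assertEquals(1, containers.size());
}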