Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
The class TestHDDSUpgrade, method testFinalizationFromInitialVersionToLatestVersion.
/*
* Happy Path Test Case.
*/
@Test
public void testFinalizationFromInitialVersionToLatestVersion() throws Exception {
  waitForPipelineCreated();
  createTestContainers();

  // Test the Pre-Upgrade conditions on SCM as well as DataNodes.
  testPreUpgradeConditionsSCM();
  testPreUpgradeConditionsDataNodes();

  Set<PipelineID> preUpgradeOpenPipelines =
      scmPipelineManager.getPipelines(RATIS_THREE, OPEN).stream()
          .map(Pipeline::getId)
          .collect(Collectors.toSet());

  // Trigger Finalization on the SCM.
  StatusAndMessages status = scm.finalizeUpgrade("xyz");
  Assert.assertEquals(STARTING_FINALIZATION, status.status());

  // Wait for the Finalization to complete on the SCM.
  while (status.status() != FINALIZATION_DONE) {
    status = scm.queryUpgradeFinalizationProgress("xyz", false, false);
  }

  Set<PipelineID> postUpgradeOpenPipelines =
      scmPipelineManager.getPipelines(RATIS_THREE, OPEN).stream()
          .map(Pipeline::getId)
          .collect(Collectors.toSet());

  // No pipelines from before the upgrade should still be open after the
  // upgrade.
  long numPreUpgradeOpenPipelines = preUpgradeOpenPipelines.stream()
      .filter(postUpgradeOpenPipelines::contains)
      .count();
  Assert.assertEquals(0, numPreUpgradeOpenPipelines);

  // Verify Post-Upgrade conditions on the SCM.
  testPostUpgradeConditionsSCM();

  // All datanodes on the SCM should have moved to HEALTHY-READONLY state.
  testDataNodesStateOnSCM(HEALTHY_READONLY, HEALTHY);

  // Verify the SCM has driven all the DataNodes through Layout Upgrade.
  // In the happy path case, no containers should have been quasi closed as
  // a result of the upgrade.
  testPostUpgradeConditionsDataNodes(CLOSED);

  // Test that we can use a pipeline after upgrade.
  // Will fail with exception if there are no pipelines.
  ObjectStore store = cluster.getClient().getObjectStore();
  store.createVolume("vol1");
  store.getVolume("vol1").createBucket("buc1");
  store.getVolume("vol1").getBucket("buc1").createKey("key1", 100,
      ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
}
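One caveat with the loop above: it polls queryUpgradeFinalizationProgress without a timeout, so the test hangs indefinitely if finalization stalls. A bounded variant is sketched below; it assumes Hadoop's org.apache.hadoop.test.GenericTestUtils is on the test classpath (as it usually is in Ozone integration tests) and is not part of the original test.

// Sketch: poll every 500 ms and give up after 5 minutes instead of
// spinning forever. GenericTestUtils.waitFor throws TimeoutException
// when the deadline expires.
GenericTestUtils.waitFor(() -> {
  try {
    return scm.queryUpgradeFinalizationProgress("xyz", false, false)
        .status() == FINALIZATION_DONE;
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}, 500, 300_000);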
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
The class TestHybridPipelineOnDatanode, method testHybridPipelineOnDatanode.
/**
 * Tests that a single datanode can host both a RATIS ONE and a RATIS THREE
 * pipeline, and that keys written through each pipeline can be read back.
 * @throws IOException
 */
@Test
public void testHybridPipelineOnDatanode() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String value = UUID.randomUUID().toString();
  byte[] data = value.getBytes(UTF_8);

  objectStore.createVolume(volumeName);
  OzoneVolume volume = objectStore.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);

  // Write a key through a RATIS ONE pipeline.
  String keyName1 = UUID.randomUUID().toString();
  OzoneOutputStream out = bucket.createKey(keyName1, data.length,
      ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
  out.write(data);
  out.close();

  // Write a second key through a RATIS THREE pipeline.
  String keyName2 = UUID.randomUUID().toString();
  out = bucket.createKey(keyName2, data.length, ReplicationType.RATIS,
      ReplicationFactor.THREE, new HashMap<>());
  out.write(data);
  out.close();

  // Find the containers, and through them the pipelines, that hold the
  // data we just wrote.
  OzoneKey key1 = bucket.getKey(keyName1);
  long containerID1 = ((OzoneKeyDetails) key1).getOzoneKeyLocations().get(0)
      .getContainerID();
  OzoneKey key2 = bucket.getKey(keyName2);
  long containerID2 = ((OzoneKeyDetails) key2).getOzoneKeyLocations().get(0)
      .getContainerID();
  PipelineID pipelineID1 = cluster.getStorageContainerManager()
      .getContainerInfo(containerID1).getPipelineID();
  PipelineID pipelineID2 = cluster.getStorageContainerManager()
      .getContainerInfo(containerID2).getPipelineID();

  Pipeline pipeline1 = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(pipelineID1);
  List<DatanodeDetails> dns = pipeline1.getNodes();
  Assert.assertEquals(1, dns.size());

  Pipeline pipeline2 = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(pipelineID2);
  Assert.assertNotEquals(pipeline1, pipeline2);
  Assert.assertEquals(HddsProtos.ReplicationType.RATIS, pipeline1.getType());
  Assert.assertEquals(pipeline1.getType(), pipeline2.getType());
  // The two pipelines run on the same datanode but with different
  // replication factors.
  Assert.assertTrue(pipeline2.getNodes().contains(dns.get(0)));

  // Read both keys back and verify the contents.
  byte[] b1 = new byte[data.length];
  byte[] b2 = new byte[data.length];
  OzoneInputStream is = bucket.readKey(keyName1);
  is.read(b1);
  is.close();
  is = bucket.readKey(keyName2);
  is.read(b2);
  is.close();
  Assert.assertArrayEquals(data, b1);
  Assert.assertArrayEquals(b1, b2);
}
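One thing worth noting: InputStream.read(byte[]) is not guaranteed to fill the buffer in a single call, so the assertions above implicitly rely on the stream returning all data.length bytes at once. A stricter read loop, using only the JDK, might look like this (a sketch, not part of the original test):

// Loop until the buffer is full or the stream is exhausted, since a
// single read() may return fewer bytes than requested.
byte[] b1 = new byte[data.length];
try (OzoneInputStream is = bucket.readKey(keyName1)) {
  int off = 0;
  while (off < b1.length) {
    int n = is.read(b1, off, b1.length - off);
    if (n < 0) {
      break; // premature EOF; the assertion below will catch it
    }
    off += n;
  }
  Assert.assertEquals(data.length, off);
}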
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
The class TestCreatePipelineCommandHandler, method testPipelineCreation.
@Test
public void testPipelineCreation() throws IOException {
  final List<DatanodeDetails> datanodes = getDatanodes();
  final PipelineID pipelineID = PipelineID.randomId();
  final SCMCommand<CreatePipelineCommandProto> command =
      new CreatePipelineCommand(pipelineID, HddsProtos.ReplicationType.RATIS,
          HddsProtos.ReplicationFactor.THREE, datanodes);

  final XceiverServerSpi writeChannel = Mockito.mock(XceiverServerSpi.class);
  final DatanodeStateMachine dnsm = Mockito.mock(DatanodeStateMachine.class);
  Mockito.when(stateContext.getParent()).thenReturn(dnsm);
  Mockito.when(dnsm.getDatanodeDetails()).thenReturn(datanodes.get(0));
  Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(writeChannel);
  Mockito.when(writeChannel.isExist(pipelineID.getProtobuf()))
      .thenReturn(false);

  final CreatePipelineCommandHandler commandHandler =
      new CreatePipelineCommandHandler((leader, tls) -> raftClient);
  commandHandler.handle(command, ozoneContainer, stateContext,
      connectionManager);

  final List<Integer> priorityList =
      new ArrayList<>(Collections.nCopies(datanodes.size(), 0));
  Mockito.verify(writeChannel, Mockito.times(1))
      .addGroup(pipelineID.getProtobuf(), datanodes, priorityList);
  // The group is registered via the Raft client once for each of the two
  // peers other than the local datanode.
  Mockito.verify(raftClientGroupManager, Mockito.times(2))
      .add(Mockito.any(RaftGroup.class));
}
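With the same mocks in place, a complementary case could check the opposite branch: when the write channel reports the pipeline as already existing, the handler should not try to add the Raft group again. A sketch under that assumption, not part of the original test:

// Sketch: if isExist returns true, handle() should skip group creation.
Mockito.when(writeChannel.isExist(pipelineID.getProtobuf())).thenReturn(true);
commandHandler.handle(command, ozoneContainer, stateContext,
    connectionManager);
Mockito.verify(writeChannel, Mockito.never())
    .addGroup(Mockito.any(), Mockito.anyList(), Mockito.anyList());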
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
The class CreatePipelineCommandHandler, method handle.
/**
* Handles a given SCM command.
*
* @param command - SCM Command
* @param ozoneContainer - Ozone Container.
* @param context - Current Context.
* @param connectionManager - The SCMs that we are talking to.
*/
@Override
public void handle(SCMCommand command, OzoneContainer ozoneContainer,
    StateContext context, SCMConnectionManager connectionManager) {
  invocationCount.incrementAndGet();
  final long startTime = Time.monotonicNow();
  final DatanodeDetails dn = context.getParent().getDatanodeDetails();
  final CreatePipelineCommand createCommand = (CreatePipelineCommand) command;
  final PipelineID pipelineID = createCommand.getPipelineID();
  final HddsProtos.PipelineID pipelineIdProto = pipelineID.getProtobuf();
  final List<DatanodeDetails> peers = createCommand.getNodeList();
  final List<Integer> priorityList = createCommand.getPriorityList();
  try {
    final XceiverServerSpi server = ozoneContainer.getWriteChannel();
    if (!server.isExist(pipelineIdProto)) {
      final RaftGroupId groupId = RaftGroupId.valueOf(pipelineID.getId());
      final RaftGroup group =
          RatisHelper.newRaftGroup(groupId, peers, priorityList);
      server.addGroup(pipelineIdProto, peers, priorityList);
      peers.stream()
          .filter(d -> !d.getUuid().equals(dn.getUuid()))
          .forEach(d -> {
            final RaftPeer peer = RatisHelper.toRaftPeer(d);
            try (RaftClient client = newRaftClient.apply(peer,
                ozoneContainer.getTlsClientConfig())) {
              client.getGroupManagementApi(peer.getId()).add(group);
            } catch (AlreadyExistsException ae) {
              // The peer already has the group; nothing to do, no need to log.
            } catch (IOException ioe) {
              LOG.warn("Add group failed for {}", d, ioe);
            }
          });
      LOG.info("Created Pipeline {} {} {}.",
          createCommand.getReplicationType(), createCommand.getFactor(),
          pipelineID);
    }
  } catch (IOException e) {
    // The group may already have been created by a request from another
    // peer, in which case the cause is an AlreadyExistsException.
    if (!(e.getCause() instanceof AlreadyExistsException)) {
      LOG.error("Can't create pipeline {} {} {}",
          createCommand.getReplicationType(), createCommand.getFactor(),
          pipelineID, e);
    }
  } finally {
    final long endTime = Time.monotonicNow();
    totalTime += endTime - startTime;
  }
}
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
The class XceiverServerRatis, method addGroup.
@Override
public void addGroup(HddsProtos.PipelineID pipelineId,
    List<DatanodeDetails> peers, List<Integer> priorityList)
    throws IOException {
  final PipelineID pipelineID = PipelineID.getFromProtobuf(pipelineId);
  final RaftGroupId groupId = RaftGroupId.valueOf(pipelineID.getId());
  final RaftGroup group =
      RatisHelper.newRaftGroup(groupId, peers, priorityList);
  final GroupManagementRequest request = GroupManagementRequest.newAdd(
      clientId, server.getId(), nextCallId(), group);
  LOG.debug("Received addGroup request for pipeline {}", pipelineID);
  RaftClientReply reply;
  try {
    reply = server.groupManagement(request);
  } catch (Exception e) {
    throw new IOException(e.getMessage(), e);
  }
  processReply(reply);
  LOG.info("Created group {}", pipelineID);
}
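All of the snippets above hinge on the same PipelineID conversions: the SCM sends the protobuf form in a command, the datanode restores the PipelineID from it, and the backing Ratis group id is derived from its UUID. A minimal round-trip, using only methods that already appear in this section (and assuming PipelineID's UUID-based equality, as in Ozone):

PipelineID id = PipelineID.randomId();
HddsProtos.PipelineID proto = id.getProtobuf();          // wire form in SCM commands
PipelineID restored = PipelineID.getFromProtobuf(proto); // datanode side
RaftGroupId groupId = RaftGroupId.valueOf(restored.getId()); // backing Raft group
// The round-trip preserves equality, so both sides name the same pipeline.
Assert.assertEquals(id, restored);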