use of org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi in project ozone by apache.
the class ClosePipelineCommandHandler method handle.
/**
 * Handles a given SCM command.
 *
 * @param command - SCM Command
 * @param ozoneContainer - Ozone Container.
 * @param context - Current Context.
 * @param connectionManager - The SCMs that we are talking to.
 */
@Override
public void handle(SCMCommand command, OzoneContainer ozoneContainer,
    StateContext context, SCMConnectionManager connectionManager) {
  invocationCount.incrementAndGet();
  final long startTime = Time.monotonicNow();
  final DatanodeDetails dn = context.getParent().getDatanodeDetails();
  ClosePipelineCommand closePipelineCommand = (ClosePipelineCommand) command;
  final PipelineID pipelineID = closePipelineCommand.getPipelineID();
  final HddsProtos.PipelineID pipelineIdProto = pipelineID.getProtobuf();
  try {
    XceiverServerSpi server = ozoneContainer.getWriteChannel();
    if (server.isExist(pipelineIdProto)) {
      server.removeGroup(pipelineIdProto);
      LOG.info("Close Pipeline {} command on datanode {}.",
          pipelineID, dn.getUuidString());
    } else {
      LOG.debug("Ignoring close pipeline command for pipeline {} "
          + "as it does not exist", pipelineID);
    }
  } catch (IOException e) {
    LOG.error("Can't close pipeline {}", pipelineID, e);
  } finally {
    long endTime = Time.monotonicNow();
    totalTime += endTime - startTime;
  }
}
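By analogy with the CreatePipelineCommandHandler tests further down, a minimal Mockito sketch of the "pipeline already absent" branch of this handler could look as follows. The no-argument ClosePipelineCommandHandler constructor, the ClosePipelineCommand(PipelineID) constructor and the MockDatanodeDetails helper are assumptions here, not quoted from the real Ozone test suite.
@Test
public void testCloseIsSkippedWhenPipelineAbsent() throws IOException {
  // Sketch only: every collaborator is a local Mockito mock.
  final OzoneContainer ozoneContainer = Mockito.mock(OzoneContainer.class);
  final StateContext stateContext = Mockito.mock(StateContext.class);
  final SCMConnectionManager connectionManager = Mockito.mock(SCMConnectionManager.class);
  final DatanodeStateMachine dnsm = Mockito.mock(DatanodeStateMachine.class);
  final XceiverServerSpi writeChannel = Mockito.mock(XceiverServerSpi.class);

  Mockito.when(stateContext.getParent()).thenReturn(dnsm);
  Mockito.when(dnsm.getDatanodeDetails())
      .thenReturn(MockDatanodeDetails.randomDatanodeDetails());
  Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(writeChannel);

  final PipelineID pipelineID = PipelineID.randomId();
  // The pipeline is already gone on this datanode, so the handler must not touch the Raft group.
  Mockito.when(writeChannel.isExist(pipelineID.getProtobuf())).thenReturn(false);

  new ClosePipelineCommandHandler()   // assumed no-arg constructor
      .handle(new ClosePipelineCommand(pipelineID),
          ozoneContainer, stateContext, connectionManager);

  Mockito.verify(writeChannel, Mockito.times(0))
      .removeGroup(pipelineID.getProtobuf());
}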
use of org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi in project ozone by apache.
the class TestFreonWithPipelineDestroy method destroyPipeline.
private void destroyPipeline() throws Exception {
  XceiverServerSpi server = cluster.getHddsDatanodes().get(0)
      .getDatanodeStateMachine().getContainer().getWriteChannel();
  StorageContainerDatanodeProtocolProtos.PipelineReport report =
      server.getPipelineReport().get(0);
  PipelineID id = PipelineID.getFromProtobuf(report.getPipelineID());
  PipelineManager pipelineManager =
      cluster.getStorageContainerManager().getPipelineManager();
  Pipeline pipeline = pipelineManager.getPipeline(id);
  pipelineManager.closePipeline(pipeline, false);
}
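Closing the pipeline on SCM reaches the datanodes asynchronously through heartbeat commands, so a test that needs the datanode side to be torn down cannot assume it immediately after closePipeline returns. A minimal polling sketch is shown below; the helper name and timeout values are assumptions, not part of the real test.
// Hypothetical helper: poll the datanode's write channel until it no longer
// hosts the Raft group of the closed pipeline, or give up after ~10 seconds.
private static void waitForPipelineClosed(XceiverServerSpi server,
    HddsProtos.PipelineID pipelineIdProto) throws Exception {
  final long deadline = System.currentTimeMillis() + 10_000;
  while (server.isExist(pipelineIdProto)) {
    if (System.currentTimeMillis() > deadline) {
      throw new AssertionError("Pipeline still present on datanode: " + pipelineIdProto);
    }
    Thread.sleep(100);
  }
}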
use of org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi in project ozone by apache.
the class TestSecureContainerServer method runTestClientServer.
private static void runTestClientServer(int numDatanodes,
    CheckedBiConsumer<Pipeline, OzoneConfiguration, IOException> initConf,
    CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi, IOException> createClient,
    CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi, IOException> createServer,
    CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer,
    Consumer<Pipeline> stopServer) throws Exception {
  final List<XceiverServerSpi> servers = new ArrayList<>();
  final Pipeline pipeline = MockPipeline.createPipeline(numDatanodes);
  initConf.accept(pipeline, CONF);
  for (DatanodeDetails dn : pipeline.getNodes()) {
    final XceiverServerSpi s = createServer.apply(dn, CONF);
    servers.add(s);
    s.start();
    initServer.accept(dn, pipeline);
  }
  try (XceiverClientSpi client = createClient.apply(pipeline, CONF)) {
    client.connect();
    long containerID = getTestContainerID();
    BlockID blockID = getTestBlockID(containerID);
    assertFailsTokenVerification(client,
        getCreateContainerRequest(containerID, pipeline));
    // create the container
    ContainerProtocolCalls.createContainer(client, containerID,
        getToken(ContainerID.valueOf(containerID)));
    Token<OzoneBlockTokenIdentifier> token = blockTokenSecretManager.generateToken(
        blockID, EnumSet.allOf(AccessModeProto.class), RandomUtils.nextLong());
    String encodedToken = token.encodeToUrlString();
    ContainerCommandRequestProto.Builder writeChunk =
        newWriteChunkRequestBuilder(pipeline, blockID, 1024, 0);
    assertRequiresToken(client, encodedToken, writeChunk);
    ContainerCommandRequestProto.Builder putBlock =
        newPutBlockRequestBuilder(pipeline, writeChunk.getWriteChunk());
    assertRequiresToken(client, encodedToken, putBlock);
    ContainerCommandRequestProto.Builder readChunk =
        newReadChunkRequestBuilder(pipeline, writeChunk.getWriteChunk());
    assertRequiresToken(client, encodedToken, readChunk);
    ContainerCommandRequestProto.Builder getBlock =
        newGetBlockRequestBuilder(pipeline, putBlock.getPutBlock());
    assertRequiresToken(client, encodedToken, getBlock);
    ContainerCommandRequestProto.Builder getCommittedBlockLength =
        newGetCommittedBlockLengthBuilder(pipeline, putBlock.getPutBlock());
    assertRequiresToken(client, encodedToken, getCommittedBlockLength);
  } finally {
    stopServer.accept(pipeline);
    servers.forEach(XceiverServerSpi::stop);
  }
}
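The interesting part of this harness is its set of functional parameters, which let each test decide how servers and clients are built and torn down. Purely to illustrate the shapes involved, the sketch below uses Mockito stand-ins for the factories; the real secure-container tests pass factories that construct actual XceiverServerSpi implementations and clients, which are outside this snippet.
// Illustration of the factory shapes runTestClientServer expects; every value here
// is a placeholder, not the construction used by the real tests.
CheckedBiConsumer<Pipeline, OzoneConfiguration, IOException> initConf =
    (pipeline, conf) -> { /* e.g. point conf at the pipeline's ports */ };
CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi, IOException> createClient =
    (pipeline, conf) -> Mockito.mock(XceiverClientSpi.class);
CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi, IOException> createServer =
    (dn, conf) -> Mockito.mock(XceiverServerSpi.class);
CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer =
    (dn, pipeline) -> { /* per-datanode setup, e.g. token/cert wiring */ };
Consumer<Pipeline> stopServer =
    pipeline -> { /* stop whatever createServer started */ };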
use of org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi in project ozone by apache.
the class TestCreatePipelineCommandHandler method testCommandIdempotency.
@Test
public void testCommandIdempotency() throws IOException {
  final List<DatanodeDetails> datanodes = getDatanodes();
  final PipelineID pipelineID = PipelineID.randomId();
  final SCMCommand<CreatePipelineCommandProto> command =
      new CreatePipelineCommand(pipelineID, HddsProtos.ReplicationType.RATIS,
          HddsProtos.ReplicationFactor.THREE, datanodes);
  final XceiverServerSpi writeChanel = Mockito.mock(XceiverServerSpi.class);
  final DatanodeStateMachine dnsm = Mockito.mock(DatanodeStateMachine.class);
  Mockito.when(stateContext.getParent()).thenReturn(dnsm);
  Mockito.when(dnsm.getDatanodeDetails()).thenReturn(datanodes.get(0));
  Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(writeChanel);
  Mockito.when(writeChanel.isExist(pipelineID.getProtobuf())).thenReturn(true);
  final CreatePipelineCommandHandler commandHandler =
      new CreatePipelineCommandHandler(new OzoneConfiguration());
  commandHandler.handle(command, ozoneContainer, stateContext, connectionManager);
  Mockito.verify(writeChanel, Mockito.times(0))
      .addGroup(pipelineID.getProtobuf(), datanodes);
  Mockito.verify(raftClientGroupManager, Mockito.times(0))
      .add(Mockito.any(RaftGroup.class));
}
use of org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi in project ozone by apache.
the class TestCreatePipelineCommandHandler method testPipelineCreation.
@Test
public void testPipelineCreation() throws IOException {
  final List<DatanodeDetails> datanodes = getDatanodes();
  final PipelineID pipelineID = PipelineID.randomId();
  final SCMCommand<CreatePipelineCommandProto> command =
      new CreatePipelineCommand(pipelineID, HddsProtos.ReplicationType.RATIS,
          HddsProtos.ReplicationFactor.THREE, datanodes);
  final XceiverServerSpi writeChanel = Mockito.mock(XceiverServerSpi.class);
  final DatanodeStateMachine dnsm = Mockito.mock(DatanodeStateMachine.class);
  Mockito.when(stateContext.getParent()).thenReturn(dnsm);
  Mockito.when(dnsm.getDatanodeDetails()).thenReturn(datanodes.get(0));
  Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(writeChanel);
  Mockito.when(writeChanel.isExist(pipelineID.getProtobuf())).thenReturn(false);
  final CreatePipelineCommandHandler commandHandler =
      new CreatePipelineCommandHandler(new OzoneConfiguration());
  commandHandler.handle(command, ozoneContainer, stateContext, connectionManager);
  List<Integer> priorityList =
      new ArrayList<>(Collections.nCopies(datanodes.size(), 0));
  Mockito.verify(writeChanel, Mockito.times(1))
      .addGroup(pipelineID.getProtobuf(), datanodes, priorityList);
  Mockito.verify(raftClientGroupManager, Mockito.times(2))
      .add(Mockito.any(RaftGroup.class));
}
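Both tests start from a getDatanodes() helper that this snippet does not include. A plausible reconstruction, assuming the MockDatanodeDetails test utility and three datanodes to match ReplicationFactor.THREE, might be:
// Hypothetical reconstruction of the helper used above (not quoted from the real test).
private List<DatanodeDetails> getDatanodes() {
  final DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dn2 = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dn3 = MockDatanodeDetails.randomDatanodeDetails();
  return Arrays.asList(dn1, dn2, dn3);
}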