Use of org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand in project ozone by apache.
The class CloseContainerCommandHandler, method handle().
/**
* Handles a given SCM command.
*
* @param command - SCM Command
* @param ozoneContainer - Ozone Container.
* @param context - Current Context.
* @param connectionManager - The SCMs that we are talking to.
*/
@Override
public void handle(SCMCommand command, OzoneContainer ozoneContainer,
    StateContext context, SCMConnectionManager connectionManager) {
  invocationCount.incrementAndGet();
  final long startTime = Time.monotonicNow();
  final DatanodeDetails datanodeDetails = context.getParent().getDatanodeDetails();
  final CloseContainerCommandProto closeCommand =
      ((CloseContainerCommand) command).getProto();
  final ContainerController controller = ozoneContainer.getController();
  final long containerId = closeCommand.getContainerID();
  LOG.debug("Processing Close Container command container #{}", containerId);
  try {
    final Container container = controller.getContainer(containerId);
    if (container == null) {
      LOG.error("Container #{} does not exist in datanode. Container close failed.",
          containerId);
      return;
    }
    // move the container to CLOSING if in OPEN state
    controller.markContainerForClose(containerId);
    switch (container.getContainerState()) {
    case OPEN:
    case CLOSING:
      // If the container is part of open pipeline, close it via write channel
      if (ozoneContainer.getWriteChannel().isExist(closeCommand.getPipelineID())) {
        ContainerCommandRequestProto request =
            getContainerCommandRequestProto(datanodeDetails,
                closeCommand.getContainerID(), command.getEncodedToken());
        ozoneContainer.getWriteChannel()
            .submitRequest(request, closeCommand.getPipelineID());
      } else {
        controller.quasiCloseContainer(containerId);
        LOG.info("Marking Container {} quasi closed", containerId);
      }
      break;
    case QUASI_CLOSED:
      if (closeCommand.getForce()) {
        controller.closeContainer(containerId);
      }
      break;
    case CLOSED:
      break;
    case UNHEALTHY:
    case INVALID:
      LOG.debug("Cannot close the container #{}, the container is in {} state.",
          containerId, container.getContainerState());
      break;
    default:
      break;
    }
  } catch (NotLeaderException e) {
    LOG.debug("Follower cannot close container #{}.", containerId);
  } catch (IOException e) {
    LOG.error("Can't close container #{}", containerId, e);
  } finally {
    long endTime = Time.monotonicNow();
    totalTime += endTime - startTime;
  }
}
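The handler above delegates to a private helper, getContainerCommandRequestProto, that is not included in this snippet. Below is a minimal sketch of how such a helper could assemble the close request, assuming the standard ContainerCommandRequestProto builder from the HDDS container protocol; the exact field set is an assumption, not a copy of the real helper.

// Sketch only: assumed shape of the helper used by handle() above.
private ContainerCommandRequestProto getContainerCommandRequestProto(
    final DatanodeDetails datanodeDetails, final long containerId,
    final String encodedToken) {
  final ContainerCommandRequestProto.Builder command =
      ContainerCommandRequestProto.newBuilder()
          .setCmdType(ContainerProtos.Type.CloseContainer)
          .setContainerID(containerId)
          .setCloseContainer(
              ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
          .setDatanodeUuid(datanodeDetails.getUuidString());
  if (encodedToken != null) {
    // Propagate the container token received with the SCM command, if any.
    command.setEncodedToken(encodedToken);
  }
  return command.build();
}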
Use of org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand in project ozone by apache.
The class ReplicationManager, method sendCloseCommand().
/**
* Sends close container command for the given container to the given
* datanode.
*
* @param container Container to be closed
* @param datanode The datanode on which the container
* has to be closed
* @param force Should be set to true if we want to close a
* QUASI_CLOSED container
*/
private void sendCloseCommand(final ContainerInfo container,
    final DatanodeDetails datanode, final boolean force) {
  ContainerID containerID = container.containerID();
  LOG.info("Sending close container command for container {} to datanode {}.",
      containerID, datanode);
  CloseContainerCommand closeContainerCommand = new CloseContainerCommand(
      container.getContainerID(), container.getPipelineID(), force);
  try {
    closeContainerCommand.setTerm(scmContext.getTermOfLeader());
  } catch (NotLeaderException nle) {
    LOG.warn("Skip sending close container command, since current SCM is not leader.",
        nle);
    return;
  }
  closeContainerCommand.setEncodedToken(getContainerToken(containerID));
  eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND,
      new CommandForDatanode<>(datanode.getUuid(), closeContainerCommand));
}
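For reference, the shape of the command object constructed in these snippets can be inferred from its usages: it carries the container id, the pipeline id, and an optional force flag (used above to close a QUASI_CLOSED container), and serializes them into CloseContainerCommandProto. The following is a sketch inferred from those usages, not a verbatim copy of the class; field names and the proto builder calls are assumptions.

// Sketch inferred from the usages above; details are assumed, not authoritative.
public class CloseContainerCommand extends SCMCommand<CloseContainerCommandProto> {

  private final PipelineID pipelineID;
  private final boolean force;

  public CloseContainerCommand(long containerID, PipelineID pipelineID) {
    this(containerID, pipelineID, false);
  }

  public CloseContainerCommand(long containerID, PipelineID pipelineID, boolean force) {
    super(containerID);  // the command id doubles as the container id
    this.pipelineID = pipelineID;
    this.force = force;
  }

  @Override
  public SCMCommandProto.Type getType() {
    return SCMCommandProto.Type.closeContainerCommand;
  }

  @Override
  public CloseContainerCommandProto getProto() {
    return CloseContainerCommandProto.newBuilder()
        .setContainerID(getId())
        .setPipelineID(pipelineID.getProtobuf())
        .setForce(force)
        .build();
  }

  public long getContainerID() {
    return getId();
  }
}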
Use of org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand in project ozone by apache.
The class TestContainerStateMachineFailures, method testContainerStateMachineCloseOnMissingPipeline().
@Test
public void testContainerStateMachineCloseOnMissingPipeline() throws Exception {
  // This integration test is a bit of a hack: it covers the highly improbable
  // case where the datanode no longer has the pipeline in its Ratis channel
  // but still receives a close container command for a container that is in
  // the OPEN or CLOSING state. Bugs in the code can lead to this sequence of
  // events; to inject this state, the test removes the pipeline by calling
  // the underlying method directly.
  OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName)
      .createKey("testQuasiClosed1", 1024, ReplicationType.RATIS,
          ReplicationFactor.THREE, new HashMap<>());
  key.write("ratis".getBytes(UTF_8));
  key.flush();
  KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
  List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
  Assert.assertEquals(1, locationInfoList.size());
  OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
  Set<HddsDatanodeService> datanodeSet =
      TestHelper.getDatanodeServices(cluster, omKeyLocationInfo.getPipeline());
  long containerID = omKeyLocationInfo.getContainerID();
  for (HddsDatanodeService dn : datanodeSet) {
    XceiverServerRatis wc = (XceiverServerRatis)
        dn.getDatanodeStateMachine().getContainer().getWriteChannel();
    if (wc == null) {
      // Test applicable only for RATIS based channel.
      return;
    }
    wc.notifyGroupRemove(RaftGroupId.valueOf(
        omKeyLocationInfo.getPipeline().getId().getId()));
    SCMCommand<?> command = new CloseContainerCommand(
        containerID, omKeyLocationInfo.getPipeline().getId());
    command.setTerm(
        cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
    cluster.getStorageContainerManager().getScmNodeManager()
        .addDatanodeCommand(dn.getDatanodeDetails().getUuid(), command);
  }
  for (HddsDatanodeService dn : datanodeSet) {
    LambdaTestUtils.await(20000, 1000,
        () -> dn.getDatanodeStateMachine().getContainer().getContainerSet()
            .getContainer(containerID).getContainerState().equals(QUASI_CLOSED));
  }
  key.close();
}
Use of org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand in project ozone by apache.
The class TestCloseContainerHandler, method test().
@Test
public void test() throws Exception {
  cluster.waitForClusterToBeReady();
  // the easiest way to create an open container is creating a key
  OzoneClient client = OzoneClientFactory.getRpcClient(conf);
  ObjectStore objectStore = client.getObjectStore();
  objectStore.createVolume("test");
  objectStore.getVolume("test").createBucket("test");
  OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
      .createKey("test", 1024, ReplicationType.RATIS,
          ReplicationFactor.ONE, new HashMap<>());
  key.write("test".getBytes(UTF_8));
  key.close();
  // get the name of a valid container
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName("test")
      .setBucketName("test")
      .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
      .setDataSize(1024)
      .setKeyName("test")
      .setRefreshPipeline(true)
      .build();
  OmKeyLocationInfo omKeyLocationInfo = cluster.getOzoneManager().lookupKey(keyArgs)
      .getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
  ContainerID containerId = ContainerID.valueOf(omKeyLocationInfo.getContainerID());
  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(containerId);
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  Assert.assertFalse(isContainerClosed(cluster, containerId.getId()));
  DatanodeDetails datanodeDetails =
      cluster.getHddsDatanodes().get(0).getDatanodeDetails();
  // send the order to close the container
  SCMCommand<?> command =
      new CloseContainerCommand(containerId.getId(), pipeline.getId());
  command.setTerm(
      cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
  cluster.getStorageContainerManager().getScmNodeManager()
      .addDatanodeCommand(datanodeDetails.getUuid(), command);
  GenericTestUtils.waitFor(() -> isContainerClosed(cluster, containerId.getId()),
      500, 5 * 1000);
  // double check if it's really closed (waitFor also throws an exception)
  Assert.assertTrue(isContainerClosed(cluster, containerId.getId()));
}
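The test relies on an isContainerClosed helper that is not part of this snippet. A minimal sketch of how such a helper might be written, assuming it inspects the container replica on each datanode of the MiniOzoneCluster and reports closed only when every replica is in the CLOSED state; the helper name matches the usage above, but the body is an assumption.

// Sketch only: a possible implementation of the helper used in the test above.
private static boolean isContainerClosed(MiniOzoneCluster cluster, long containerID) {
  for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
    Container<?> container = dn.getDatanodeStateMachine().getContainer()
        .getContainerSet().getContainer(containerID);
    if (container == null
        || container.getContainerState()
            != ContainerProtos.ContainerDataProto.State.CLOSED) {
      return false;
    }
  }
  return true;
}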
Use of org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand in project ozone by apache.
The class TestCloseContainerByPipeline, method testIfCloseContainerCommandHandlerIsInvoked().
@Test
public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception {
  String keyName = "testIfCloseContainerCommandHandlerIsInvoked";
  OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
      .createKey(keyName, 1024, ReplicationType.RATIS,
          ReplicationFactor.ONE, new HashMap<>());
  key.write(keyName.getBytes(UTF_8));
  key.close();
  // get the name of a valid container
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName("test")
      .setBucketName("test")
      .setReplicationConfig(RatisReplicationConfig.getInstance(ONE))
      .setDataSize(1024)
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .build();
  OmKeyLocationInfo omKeyLocationInfo = cluster.getOzoneManager().lookupKey(keyArgs)
      .getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
  long containerID = omKeyLocationInfo.getContainerID();
  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueOf(containerID));
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  List<DatanodeDetails> datanodes = pipeline.getNodes();
  Assert.assertEquals(1, datanodes.size());
  DatanodeDetails datanodeDetails = datanodes.get(0);
  HddsDatanodeService datanodeService = null;
  Assert.assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
  for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
    if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
      datanodeService = datanodeServiceItr;
      break;
    }
  }
  CommandHandler closeContainerHandler = datanodeService.getDatanodeStateMachine()
      .getCommandDispatcher().getCloseContainerHandler();
  int lastInvocationCount = closeContainerHandler.getInvocationCount();
  // send the order to close the container
  SCMCommand<?> command = new CloseContainerCommand(containerID, pipeline.getId());
  command.setTerm(
      cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
  cluster.getStorageContainerManager().getScmNodeManager()
      .addDatanodeCommand(datanodeDetails.getUuid(), command);
  GenericTestUtils.waitFor(
      () -> isContainerClosed(cluster, containerID, datanodeDetails), 500, 5 * 1000);
  // Make sure the CloseContainerCommandHandler is invoked
  Assert.assertTrue(closeContainerHandler.getInvocationCount() > lastInvocationCount);
}
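This test uses a three-argument isContainerClosed variant that checks the replica on one specific datanode. A sketch under the same assumptions as the two-argument variant above; the body is assumed, only the signature is taken from the usage.

// Sketch only: per-datanode variant of the helper, body assumed.
private static boolean isContainerClosed(MiniOzoneCluster cluster, long containerID,
    DatanodeDetails datanode) {
  for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
    if (datanode.equals(dn.getDatanodeDetails())) {
      Container<?> container = dn.getDatanodeStateMachine().getContainer()
          .getContainerSet().getContainer(containerID);
      return container != null && container.getContainerState()
          == ContainerProtos.ContainerDataProto.State.CLOSED;
    }
  }
  return false;
}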