use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
the class ReconContainerManager method checkContainerStateAndUpdate.
/**
 * Check whether the container is still OPEN and, if so, finalize it.
 * In SCM, the container state changes to CLOSING first, and the close
 * command is then pushed down to the Datanodes. Recon learns of this
 * from the DN replica reports, so a healthy non-OPEN replica state moves
 * the container state to CLOSING here.
 *
 * @param containerID containerID to check
 * @param state replica state reported by the Datanode
 * @throws IOException if the container lookup or state update fails
 */
private void checkContainerStateAndUpdate(ContainerID containerID,
    ContainerReplicaProto.State state) throws Exception {
  ContainerInfo containerInfo = getContainer(containerID);
  if (containerInfo.getState().equals(HddsProtos.LifeCycleState.OPEN)
      && !state.equals(ContainerReplicaProto.State.OPEN)
      && isHealthy(state)) {
    LOG.info("Container {} has state OPEN, but given state is {}.",
        containerID, state);
    final PipelineID pipelineID = containerInfo.getPipelineID();
    // Decrement the open-container count for this pipeline, dropping the
    // map entry once the count reaches zero.
    int curCnt = pipelineToOpenContainer.getOrDefault(pipelineID, 0);
    if (curCnt == 1) {
      pipelineToOpenContainer.remove(pipelineID);
    } else if (curCnt > 0) {
      pipelineToOpenContainer.put(pipelineID, curCnt - 1);
    }
    updateContainerState(containerID, FINALIZE);
  }
}
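The pipelineToOpenContainer bookkeeping above works because PipelineID has value semantics (it wraps a UUID with value-based equals/hashCode), so equal IDs land on the same map entry. A minimal sketch of the same increment/decrement pattern; the map and counts here are illustrative, not Recon's actual fields:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

public class PipelineOpenCountSketch {
  public static void main(String[] args) {
    Map<PipelineID, Integer> openCount = new HashMap<>();
    PipelineID id = PipelineID.randomId();
    // Count an OPEN container against its pipeline...
    openCount.merge(id, 1, Integer::sum);
    // ...and decrement when it leaves OPEN, removing the entry at zero,
    // which is what the branchy subtraction above does by hand.
    openCount.computeIfPresent(id, (k, v) -> v > 1 ? v - 1 : null);
    System.out.println(openCount.containsKey(id)); // false
  }
}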
use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
the class TestBlockManager method testAllocateBlockWithExclusion.
@Test
public void testAllocateBlockWithExclusion() throws Exception {
  try {
    while (true) {
      pipelineManager.createPipeline(replicationConfig);
    }
  } catch (IOException e) {
    // Expected: pipeline creation fails once the available datanodes are
    // exhausted, at which point every possible pipeline exists.
  }
  HddsTestUtils.openAllRatisPipelines(pipelineManager);
  ExcludeList excludeList = new ExcludeList();
  excludeList.addPipeline(
      pipelineManager.getPipelines(replicationConfig).get(0).getId());
  AllocatedBlock block = blockManager.allocateBlock(
      DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, excludeList);
  Assert.assertNotNull(block);
  // While other pipelines remain, the block must avoid the excluded one.
  for (PipelineID id : excludeList.getPipelineIds()) {
    Assert.assertNotEquals(block.getPipeline().getId(), id);
  }
  // Exclude every pipeline: allocation still succeeds, falling back to a
  // pipeline from the exclude list rather than failing outright.
  for (Pipeline pipeline : pipelineManager.getPipelines(replicationConfig)) {
    excludeList.addPipeline(pipeline.getId());
  }
  block = blockManager.allocateBlock(
      DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, excludeList);
  Assert.assertNotNull(block);
  Assert.assertTrue(
      excludeList.getPipelineIds().contains(block.getPipeline().getId()));
}
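The test shows that ExcludeList is a hint, not a hard constraint. Building one on the client side looks like the following; a minimal sketch with made-up IDs, using the method names that appear on this page (including Ozone's historical addConatinerId spelling):

import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

public class ExcludeListSketch {
  public static void main(String[] args) {
    ExcludeList excludeList = new ExcludeList();
    // Exclude a whole pipeline, as the test above does first...
    excludeList.addPipeline(PipelineID.randomId());
    // ...or only a single container, as KeyOutputStream does below for
    // container-scoped failures.
    excludeList.addConatinerId(ContainerID.valueOf(1L));
    System.out.println(excludeList.getPipelineIds().size()); // 1
  }
}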
use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
the class HddsTestUtils method getPipelineActionFromDatanode.
public static PipelineActionsFromDatanode getPipelineActionFromDatanode(
    DatanodeDetails dn, PipelineID... pipelineIDs) {
  PipelineActionsProto.Builder actionsProtoBuilder =
      PipelineActionsProto.newBuilder();
  for (PipelineID pipelineID : pipelineIDs) {
    ClosePipelineInfo closePipelineInfo = ClosePipelineInfo.newBuilder()
        .setPipelineID(pipelineID.getProtobuf())
        .setReason(ClosePipelineInfo.Reason.PIPELINE_FAILED)
        .setDetailedReason("")
        .build();
    actionsProtoBuilder.addPipelineActions(PipelineAction.newBuilder()
        .setClosePipeline(closePipelineInfo)
        .setAction(PipelineAction.Action.CLOSE)
        .build());
  }
  return new PipelineActionsFromDatanode(dn, actionsProtoBuilder.build());
}
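A hypothetical test using this helper; MockDatanodeDetails and the getReport() accessor are assumed from the Ozone test tree, and the assertion is illustrative:

@Test
public void buildsOneCloseActionPerPipeline() {
  DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
  // One CLOSE action with reason PIPELINE_FAILED per supplied PipelineID.
  PipelineActionsFromDatanode actions =
      HddsTestUtils.getPipelineActionFromDatanode(dn, PipelineID.randomId());
  Assert.assertEquals(1, actions.getReport().getPipelineActionsCount());
}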
use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
the class TestCreatePipelineCommandHandler method testPipelineCreation.
@Test
public void testPipelineCreation() throws IOException {
  final List<DatanodeDetails> datanodes = getDatanodes();
  final PipelineID pipelineID = PipelineID.randomId();
  final SCMCommand<CreatePipelineCommandProto> command =
      new CreatePipelineCommand(pipelineID, HddsProtos.ReplicationType.RATIS,
          HddsProtos.ReplicationFactor.THREE, datanodes);
  final XceiverServerSpi writeChannel = Mockito.mock(XceiverServerSpi.class);
  final DatanodeStateMachine dnsm = Mockito.mock(DatanodeStateMachine.class);
  Mockito.when(stateContext.getParent()).thenReturn(dnsm);
  Mockito.when(dnsm.getDatanodeDetails()).thenReturn(datanodes.get(0));
  Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(writeChannel);
  Mockito.when(writeChannel.isExist(pipelineID.getProtobuf()))
      .thenReturn(false);
  final CreatePipelineCommandHandler commandHandler =
      new CreatePipelineCommandHandler(new OzoneConfiguration());
  commandHandler.handle(command, ozoneContainer, stateContext,
      connectionManager);
  List<Integer> priorityList =
      new ArrayList<>(Collections.nCopies(datanodes.size(), 0));
  Mockito.verify(writeChannel, Mockito.times(1))
      .addGroup(pipelineID.getProtobuf(), datanodes, priorityList);
  Mockito.verify(raftClientGroupManager, Mockito.times(2))
      .add(Mockito.any(RaftGroup.class));
}
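The stubs and verifications above match on pipelineID.getProtobuf(), which relies on PipelineID round-tripping cleanly through its protobuf form. A minimal sketch:

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

public class PipelineIdRoundTripSketch {
  public static void main(String[] args) {
    PipelineID id = PipelineID.randomId();
    // Serialize to the wire form carried in pipeline commands...
    HddsProtos.PipelineID proto = id.getProtobuf();
    // ...and back; equality holds because PipelineID wraps a UUID.
    System.out.println(PipelineID.getFromProtobuf(proto).equals(id)); // true
  }
}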
use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.
the class KeyOutputStream method handleException.
/**
 * Performs the following actions:
 * a. Updates the length of data successfully committed at the datanode
 *    for the current stream.
 * b. Reads unacknowledged data from the underlying buffer and writes it
 *    to the next stream.
 *
 * @param streamEntry StreamEntry
 * @param exception actual exception that occurred
 * @throws IOException if the retried write fails
 */
private void handleException(BlockOutputStreamEntry streamEntry,
    IOException exception) throws IOException {
  Throwable t = HddsClientUtils.checkForException(exception);
  Preconditions.checkNotNull(t);
  boolean retryFailure = checkForRetryFailure(t);
  boolean containerExclusionException = false;
  if (!retryFailure) {
    containerExclusionException = checkIfContainerToExclude(t);
  }
  Pipeline pipeline = streamEntry.getPipeline();
  PipelineID pipelineId = pipeline.getId();
  long totalSuccessfulFlushedData = streamEntry.getTotalAckDataLength();
  streamEntry.resetToAckedPosition();
  long bufferedDataLen = blockOutputStreamEntryPool.computeBufferData();
  if (containerExclusionException) {
    LOG.debug("Encountered exception {}. The last committed block length is {}, "
        + "uncommitted data length is {} retry count {}", exception,
        totalSuccessfulFlushedData, bufferedDataLen, retryCount);
  } else {
    LOG.warn("Encountered exception {} on the pipeline {}. "
        + "The last committed block length is {}, "
        + "uncommitted data length is {} retry count {}", exception, pipeline,
        totalSuccessfulFlushedData, bufferedDataLen, retryCount);
  }
  Preconditions.checkArgument(
      bufferedDataLen <= config.getStreamBufferMaxSize());
  Preconditions.checkArgument(
      offset - blockOutputStreamEntryPool.getKeyLength() == bufferedDataLen);
  long containerId = streamEntry.getBlockID().getContainerID();
  Collection<DatanodeDetails> failedServers = streamEntry.getFailedServers();
  Preconditions.checkNotNull(failedServers);
  ExcludeList excludeList = blockOutputStreamEntryPool.getExcludeList();
  if (!failedServers.isEmpty()) {
    excludeList.addDatanodes(failedServers);
  }
  // If the container needs to be excluded, add the container to the
  // exclusion list; otherwise add the pipeline to the exclusion list.
  if (containerExclusionException) {
    excludeList.addConatinerId(ContainerID.valueOf(containerId));
  } else {
    excludeList.addPipeline(pipelineId);
  }
  // just clean up the current stream.
  streamEntry.cleanup(retryFailure);
  // Discard all subsequent blocks on the containers and pipelines in the
  // exclude list, so that the very next retry can never again write data
  // on the closed container/pipeline.
  if (containerExclusionException) {
    // discard subsequent pre allocated blocks from the streamEntries list
    // from the closed container
    blockOutputStreamEntryPool.discardPreallocatedBlocks(
        streamEntry.getBlockID().getContainerID(), null);
  } else {
    // In case there is timeoutException or Watch for commit happening over
    // majority or the client connection failure to the leader in the
    // pipeline, just discard all the pre allocated blocks on this pipeline.
    // Next block allocation will happen with excluding this specific pipeline.
    // This ensures that if a 2-way commit happens, it cannot span over
    // multiple blocks.
    blockOutputStreamEntryPool.discardPreallocatedBlocks(-1, pipelineId);
  }
  if (bufferedDataLen > 0) {
    // If the data is still cached in the underlying stream, we need to
    // allocate a new block and write this data to the datanode.
    handleRetry(exception, bufferedDataLen);
    // reset the retryCount after handling the exception
    retryCount = 0;
  }
}
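Distilling the exclusion branch above into a standalone helper makes the decision easier to see: container-scoped failures exclude only the container, while retry failures (timeouts, watch-for-commit on majority, leader loss) exclude the whole pipeline. The helper and its name are illustrative, not Ozone API; the ExcludeList calls match those above:

import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

final class ExclusionSketch {
  // Mirrors handleException: exclude the narrowest thing that failed.
  static void recordFailure(ExcludeList excludeList,
      boolean containerExclusionException,
      long containerId, PipelineID pipelineId) {
    if (containerExclusionException) {
      excludeList.addConatinerId(ContainerID.valueOf(containerId));
    } else {
      excludeList.addPipeline(pipelineId);
    }
  }
}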