Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in the Apache Ozone project:
class SCMClientProtocolServer, method getContainerWithPipelineCommon.
/**
 * Resolves a container together with a pipeline that can be used to read it.
 * While SCM is in safe mode, an open container is only served once it has
 * enough reported replicas. If the container's own pipeline is gone (closed
 * container, or the pipeline was destroyed), a read-only pipeline is built
 * from the known replicas instead.
 *
 * @param containerID numeric ID of the container to look up
 * @return the container info paired with a usable pipeline
 * @throws IOException if the container is unknown, or SCM is in safe mode
 *         and the open container lacks the required replicas
 */
private ContainerWithPipeline getContainerWithPipelineCommon(long containerID) throws IOException {
  final ContainerID cid = ContainerID.valueOf(containerID);
  final ContainerInfo container = scm.getContainerManager().getContainer(cid);

  // Safe-mode guard: refuse to serve an open container until enough of its
  // replicas have been reported.
  if (scm.getScmContext().isInSafeMode()
      && container.isOpen()
      && !hasRequiredReplicas(container)) {
    throw new SCMException("Open container " + containerID + " doesn't"
        + " have enough replicas to service this operation in "
        + "Safe mode.", ResultCodes.SAFE_MODE_EXCEPTION);
  }

  Pipeline pipeline = null;
  if (container.isOpen()) {
    try {
      pipeline = scm.getPipelineManager().getPipeline(container.getPipelineID());
    } catch (PipelineNotFoundException ex) {
      // The pipeline is destroyed.
    }
  }
  if (pipeline == null) {
    // Closed container or destroyed pipeline: synthesize a read pipeline
    // from the currently known replicas.
    pipeline = scm.getPipelineManager().createPipelineForRead(
        container.getReplicationConfig(),
        scm.getContainerManager().getContainerReplicas(cid));
  }
  return new ContainerWithPipeline(container, pipeline);
}
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in the Apache Ozone project:
class SCMClientProtocolServer, method closePipeline.
/**
 * Closes the pipeline identified by the given protobuf ID.
 * Admin-only operation; the close is recorded in the audit log.
 *
 * @param pipelineID protobuf ID of the pipeline to close
 * @throws IOException if the caller is not an admin or the pipeline
 *         cannot be found/closed
 */
@Override
public void closePipeline(HddsProtos.PipelineID pipelineID) throws IOException {
  getScm().checkAdminAccess(getRemoteUser());
  Map<String, String> auditMap = Maps.newHashMap();
  auditMap.put("pipelineID", pipelineID.getId());
  PipelineManager pipelineManager = scm.getPipelineManager();
  Pipeline pipeline = pipelineManager.getPipeline(PipelineID.getFromProtobuf(pipelineID));
  pipelineManager.closePipeline(pipeline, true);
  // Bug fix: pass the populated auditMap instead of null so the audit
  // entry records which pipeline was closed (auditMap was previously
  // built but never used).
  AUDIT.logWriteSuccess(buildAuditMessageForSuccess(SCMAction.CLOSE_PIPELINE, auditMap));
}
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in the Apache Ozone project:
class SCMClientProtocolServer, method listContainer.
/**
* Lists a range of containers and get their info.
*
* @param startContainerID start containerID.
* @param count count must be {@literal >} 0.
* @param state Container with this state will be returned.
* @param factor Container factor.
* @return a list of pipeline.
* @throws IOException
*/
/**
 * Lists a range of containers and get their info.
 *
 * @param startContainerID start containerID.
 * @param count count must be {@literal >} 0.
 * @param state Container with this state will be returned.
 * @param factor Container factor.
 * @return a list of pipeline.
 * @throws IOException
 */
@Override
@Deprecated
public List<ContainerInfo> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException {
  boolean auditSuccess = true;
  Map<String, String> auditMap = Maps.newHashMap();
  auditMap.put("startContainerID", String.valueOf(startContainerID));
  auditMap.put("count", String.valueOf(count));
  if (state != null) {
    auditMap.put("state", state.name());
  }
  if (factor != null) {
    auditMap.put("factor", factor.name());
  }
  try {
    final ContainerID containerId = ContainerID.valueOf(startContainerID);
    if (state != null) {
      if (factor != null) {
        return filterContainersByFactor(scm.getContainerManager().getContainers(state), startContainerID, count, factor);
      } else {
        return scm.getContainerManager().getContainers(state).stream().filter(info -> info.containerID().getId() >= startContainerID).sorted().limit(count).collect(Collectors.toList());
      }
    } else {
      if (factor != null) {
        return filterContainersByFactor(scm.getContainerManager().getContainers(), startContainerID, count, factor);
      } else {
        // No state/factor filter: the container manager can page natively.
        return scm.getContainerManager().getContainers(containerId, count);
      }
    }
  } catch (Exception ex) {
    auditSuccess = false;
    AUDIT.logReadFailure(buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex));
    throw ex;
  } finally {
    if (auditSuccess) {
      AUDIT.logReadSuccess(buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap));
    }
  }
}

/**
 * Filters containers by replication factor, starting at the given ID.
 * EC containers are excluded because ReplicationFactor does not apply to
 * EC replication configs (deduplicates the two identical stream pipelines
 * the deprecated listContainer previously repeated inline).
 *
 * @param containers candidate containers
 * @param startContainerID smallest container ID to include
 * @param count maximum number of results
 * @param factor required (non-EC) replication factor
 * @return up to {@code count} matching containers in sorted order
 */
private List<ContainerInfo> filterContainersByFactor(List<ContainerInfo> containers, long startContainerID, int count, HddsProtos.ReplicationFactor factor) {
  return containers.stream()
      .filter(info -> info.containerID().getId() >= startContainerID)
      .filter(info -> info.getReplicationType() != HddsProtos.ReplicationType.EC)
      .filter(info -> info.getReplicationFactor() == factor)
      .sorted()
      .limit(count)
      .collect(Collectors.toList());
}
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in the Apache Ozone project:
class StorageContainerLocationProtocolServerSideTranslatorPB, method listPipelines.
/**
 * Translates a list-pipelines RPC: fetches all pipelines from the server
 * implementation and serializes each into the protobuf response.
 *
 * @param request the (unused-payload) list request
 * @param clientVersion client version used to pick the protobuf encoding
 * @return response containing every known pipeline
 * @throws IOException if the underlying listing fails
 */
public ListPipelineResponseProto listPipelines(ListPipelineRequestProto request, int clientVersion) throws IOException {
  final ListPipelineResponseProto.Builder response = ListPipelineResponseProto.newBuilder();
  for (Pipeline p : impl.listPipelines()) {
    response.addPipelines(p.getProtobufMessage(clientVersion));
  }
  return response.build();
}
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in the Apache Ozone project:
class OneReplicaPipelineSafeModeRule, method process.
/**
 * Handles a pipeline report from a datanode while SCM is in safe mode.
 * For each reported pipeline that is an open Ratis/THREE pipeline known
 * from before restart and not yet counted, increments the healthy-pipeline
 * metric and the reported-pipeline counter. Logs progress while still in
 * safe mode.
 *
 * @param report pipeline report received from a datanode (must not be null)
 */
@Override
protected synchronized void process(PipelineReportFromDatanode report) {
  Preconditions.checkNotNull(report);
  for (PipelineReport reported : report.getReport().getPipelineReportList()) {
    final Pipeline pipeline;
    try {
      pipeline = pipelineManager.getPipeline(PipelineID.getFromProtobuf(reported.getPipelineID()));
    } catch (PipelineNotFoundException pnfe) {
      // Pipeline no longer exists in SCM; skip it.
      continue;
    }
    // Only open Ratis factor-THREE pipelines count toward the rule, each
    // pipeline at most once, and only those known before the restart.
    if (!RatisReplicationConfig.hasFactor(pipeline.getReplicationConfig(), ReplicationFactor.THREE)
        || !pipeline.isOpen()
        || reportedPipelineIDSet.contains(pipeline.getId())
        || !oldPipelineIDSet.contains(pipeline.getId())) {
      continue;
    }
    getSafeModeMetrics().incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount();
    currentReportedPipelineCount++;
    reportedPipelineIDSet.add(pipeline.getId());
  }
  if (scmInSafeMode()) {
    SCMSafeModeManager.getLogger().info(
        "SCM in safe mode. Pipelines with at least one datanode reported "
            + "count is {}, required at least one datanode reported per "
            + "pipeline count is {}",
        currentReportedPipelineCount, thresholdCount);
  }
}
Aggregations