use of org.apache.hadoop.hdds.scm.container.ContainerID in project ozone by apache.
the class StorageContainerLocationProtocolClientSideTranslatorPB method getExistContainerWithPipelinesInBatch.
/**
* {@inheritDoc}
*/
@Override
public List<ContainerWithPipeline> getExistContainerWithPipelinesInBatch(
    List<Long> containerIDs) {
  for (Long containerID : containerIDs) {
    Preconditions.checkState(containerID >= 0,
        "Container ID cannot be negative");
  }
  GetExistContainerWithPipelinesInBatchRequestProto request =
      GetExistContainerWithPipelinesInBatchRequestProto.newBuilder()
          .setTraceID(TracingUtil.exportCurrentSpan())
          .addAllContainerIDs(containerIDs)
          .build();
  ScmContainerLocationResponse response = null;
  List<ContainerWithPipeline> cps = new ArrayList<>();
  try {
    response = submitRequest(Type.GetExistContainerWithPipelinesInBatch,
        (builder) -> builder
            .setGetExistContainerWithPipelinesInBatchRequest(request));
  } catch (IOException ex) {
    // On RPC failure, return the empty list rather than propagate the error.
    return cps;
  }
  List<HddsProtos.ContainerWithPipeline> protoCps =
      response.getGetExistContainerWithPipelinesInBatchResponse()
          .getContainerWithPipelinesList();
  for (HddsProtos.ContainerWithPipeline cp : protoCps) {
    try {
      cps.add(ContainerWithPipeline.fromProtobuf(cp));
    } catch (IOException uex) {
      // fromProtobuf may throw an IOException; skip this entry and continue.
    }
  }
  return cps;
}
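A minimal caller sketch for the method above; the scmClient handle and the IDs are assumptions for illustration, not part of the snippet. Note that RPC failures and per-entry parse failures are swallowed, so the result may be shorter than the input list:

// "scmClient" is assumed to be an already-constructed
// StorageContainerLocationProtocolClientSideTranslatorPB instance.
List<Long> ids = Arrays.asList(1L, 2L, 3L);
List<ContainerWithPipeline> found =
    scmClient.getExistContainerWithPipelinesInBatch(ids);
for (ContainerWithPipeline cwp : found) {
  System.out.println(cwp.getContainerInfo().getContainerID()
      + " -> " + cwp.getPipeline().getId());
}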
use of org.apache.hadoop.hdds.scm.container.ContainerID in project ozone by apache.
the class TestReconAsPassiveScm method testReconRestart.
@Test
public void testReconRestart() throws Exception {
  final OzoneStorageContainerManager reconScm =
      cluster.getReconServer().getReconStorageContainerManager();
  StorageContainerManager scm = cluster.getStorageContainerManager();
  ContainerManager scmContainerManager = scm.getContainerManager();
  assertTrue(scmContainerManager.getContainers().isEmpty());
  ContainerManager reconContainerManager = reconScm.getContainerManager();
  assertTrue(reconContainerManager.getContainers().isEmpty());
  LambdaTestUtils.await(60000, 5000,
      () -> (reconScm.getScmNodeManager().getAllNodes().size() == 3));
  // Stop Recon
  cluster.stopRecon();
  // Create container in SCM.
  ContainerInfo containerInfo = scmContainerManager.allocateContainer(
      RatisReplicationConfig.getInstance(ONE), "test");
  long containerID = containerInfo.getContainerID();
  PipelineManager scmPipelineManager = scm.getPipelineManager();
  Pipeline pipeline =
      scmPipelineManager.getPipeline(containerInfo.getPipelineID());
  XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);
  runTestOzoneContainerViaDataNode(containerID, client);
  assertFalse(scmContainerManager.getContainers().isEmpty());
  // Close a pipeline that the new container does not use.
  Optional<Pipeline> pipelineToClose = scmPipelineManager
      .getPipelines(RatisReplicationConfig.getInstance(ONE))
      .stream()
      .filter(p -> !p.getId().equals(containerInfo.getPipelineID()))
      .findFirst();
  assertTrue(pipelineToClose.isPresent());
  scmPipelineManager.closePipeline(pipelineToClose.get(), false);
  // Start Recon
  cluster.startRecon();
  // Verify that Recon has all the nodes on restart (even if heartbeats
  // have not yet been received).
  NodeManager reconNodeManager = reconScm.getScmNodeManager();
  NodeManager scmNodeManager = scm.getScmNodeManager();
  assertEquals(scmNodeManager.getAllNodes().size(),
      reconNodeManager.getAllNodes().size());
  // Verify Recon picks up the new container and the pipeline-close
  // actions SCM performed while Recon was down.
  OzoneStorageContainerManager newReconScm =
      cluster.getReconServer().getReconStorageContainerManager();
  PipelineManager reconPipelineManager = newReconScm.getPipelineManager();
  assertFalse(
      reconPipelineManager.containsPipeline(pipelineToClose.get().getId()));
  LambdaTestUtils.await(90000, 5000,
      () -> (newReconScm.getContainerManager()
          .containerExist(ContainerID.valueOf(containerID))));
}
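A brief aside on the ContainerID.valueOf call above: ContainerID wraps the raw long carried in protobuf messages and metadata tables. A minimal round-trip sketch, assuming a getId() accessor for the raw value:

// ContainerID gives value semantics over the primitive container ID.
ContainerID id = ContainerID.valueOf(42L);
long raw = id.getId();                       // assumed raw-value accessor
assert id.equals(ContainerID.valueOf(raw));  // equal by value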
use of org.apache.hadoop.hdds.scm.container.ContainerID in project ozone by apache.
the class PipelineManagerImpl method closeContainersForPipeline.
/**
 * Fire events to close all containers related to the input pipeline.
 * @param pipelineId - ID of the pipeline.
 * @throws IOException if finalizing a container's state fails.
 */
protected void closeContainersForPipeline(final PipelineID pipelineId)
    throws IOException {
  Set<ContainerID> containerIDs = stateManager.getContainers(pipelineId);
  ContainerManager containerManager =
      scmContext.getScm().getContainerManager();
  for (ContainerID containerID : containerIDs) {
    if (containerManager.getContainer(containerID).getState()
        == HddsProtos.LifeCycleState.OPEN) {
      try {
        containerManager.updateContainerState(containerID,
            HddsProtos.LifeCycleEvent.FINALIZE);
      } catch (InvalidStateTransitionException ex) {
        throw new IOException(ex);
      }
    }
    eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID);
    LOG.info("Container {} closed for pipeline={}", containerID, pipelineId);
  }
}
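For orientation, this method is typically invoked while closing a pipeline, before the pipeline itself is torn down. A hedged sketch of that ordering; removePipeline is an illustrative name, not necessarily the exact upstream API:

// Sketch of the calling order when a pipeline is closed: finalize and
// fire CLOSE_CONTAINER events for its containers first, then tear down
// the pipeline. "removePipeline" is a hypothetical cleanup step.
protected void closePipelineInternal(PipelineID id) throws IOException {
  closeContainersForPipeline(id);  // OPEN -> FINALIZE + CLOSE_CONTAINER
  removePipeline(id);              // hypothetical: remove pipeline state
}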
use of org.apache.hadoop.hdds.scm.container.ContainerID in project ozone by apache.
the class SequenceIdGenerator method upgradeToSequenceId.
/**
 * TODO: relocate this code once the upgrade framework is ready.
 *
 * Upgrades localId, delTxnId and containerId from the legacy solution
 * to SequenceIdGenerator.
 */
public static void upgradeToSequenceId(SCMMetadataStore scmMetadataStore)
    throws IOException {
  Table<String, Long> sequenceIdTable = scmMetadataStore.getSequenceIdTable();
  // Each upgrade below is guarded by a null check, so its operations can
  // take effect exactly once in an SCM HA cluster.
  // upgrade localId
  if (sequenceIdTable.get(LOCAL_ID) == null) {
    long millisSinceEpoch = TimeUnit.DAYS.toMillis(
        LocalDate.of(LocalDate.now().getYear() + 1, 1, 1).toEpochDay());
    long localId = millisSinceEpoch << Short.SIZE;
    Preconditions.checkArgument(localId > UniqueId.next());
    sequenceIdTable.put(LOCAL_ID, localId);
    LOG.info("upgrade {} to {}", LOCAL_ID, sequenceIdTable.get(LOCAL_ID));
  }
  // upgrade delTxnId
  if (sequenceIdTable.get(DEL_TXN_ID) == null) {
    // Fetch delTxnId from DeletedBlocksTXTable;
    // check HDDS-4477 for details.
    DeletedBlocksTransaction txn =
        scmMetadataStore.getDeletedBlocksTXTable().get(0L);
    sequenceIdTable.put(DEL_TXN_ID, txn != null ? txn.getTxID() : 0L);
    LOG.info("upgrade {} to {}", DEL_TXN_ID, sequenceIdTable.get(DEL_TXN_ID));
  }
  // upgrade containerId
  if (sequenceIdTable.get(CONTAINER_ID) == null) {
    long largestContainerId = 0;
    TableIterator<ContainerID, ? extends KeyValue<ContainerID, ContainerInfo>>
        iterator = scmMetadataStore.getContainerTable().iterator();
    while (iterator.hasNext()) {
      ContainerInfo containerInfo = iterator.next().getValue();
      largestContainerId =
          Long.max(containerInfo.getContainerID(), largestContainerId);
    }
    sequenceIdTable.put(CONTAINER_ID, largestContainerId);
    LOG.info("upgrade {} to {}", CONTAINER_ID,
        sequenceIdTable.get(CONTAINER_ID));
  }
}
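A worked sketch of the localId seed computed above: the legacy UniqueId packs a millisecond timestamp shifted left by 16 bits (Short.SIZE) plus a counter, so seeding with next January 1st shifted the same way guarantees the new generator starts above every legacy value. Assumes only java.time and java.util.concurrent imports:

// Reproduce the seed: epoch millis of Jan 1 next year, shifted 16 bits.
long millis = TimeUnit.DAYS.toMillis(
    LocalDate.of(LocalDate.now().getYear() + 1, 1, 1).toEpochDay());
long seed = millis << Short.SIZE;
// Any ID minted today as (currentMillis << 16 | counter) is smaller,
// which is exactly what the Preconditions.checkArgument above asserts.
System.out.println("localId seed = " + seed);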
use of org.apache.hadoop.hdds.scm.container.ContainerID in project ozone by apache.
the class DatanodeAdminMonitorImpl method checkContainersReplicatedOnNode.
private boolean checkContainersReplicatedOnNode(DatanodeDetails dn)
    throws NodeNotFoundException {
  int sufficientlyReplicated = 0;
  int underReplicated = 0;
  int unhealthy = 0;
  List<ContainerID> underReplicatedIDs = new ArrayList<>();
  List<ContainerID> unhealthyIDs = new ArrayList<>();
  Set<ContainerID> containers = nodeManager.getContainers(dn);
  for (ContainerID cid : containers) {
    try {
      ContainerReplicaCount replicaSet =
          replicationManager.getContainerReplicaCount(cid);
      if (replicaSet.isSufficientlyReplicated()) {
        sufficientlyReplicated++;
      } else {
        if (LOG.isDebugEnabled()) {
          underReplicatedIDs.add(cid);
        }
        if (underReplicated < CONTAINER_DETAILS_LOGGING_LIMIT
            || LOG.isDebugEnabled()) {
          LOG.info("Under Replicated Container {} {}; {}",
              cid, replicaSet, replicaDetails(replicaSet.getReplica()));
        }
        underReplicated++;
      }
      if (!replicaSet.isHealthy()) {
        if (LOG.isDebugEnabled()) {
          unhealthyIDs.add(cid);
        }
        if (unhealthy < CONTAINER_DETAILS_LOGGING_LIMIT
            || LOG.isDebugEnabled()) {
          LOG.info("Unhealthy Container {} {}; {}",
              cid, replicaSet, replicaDetails(replicaSet.getReplica()));
        }
        unhealthy++;
      }
    } catch (ContainerNotFoundException e) {
      LOG.warn("ContainerID {} present in node list for {} "
          + "but not found in containerManager", cid, dn);
    }
  }
  LOG.info("{} has {} sufficientlyReplicated, {} underReplicated and "
      + "{} unhealthy containers", dn, sufficientlyReplicated,
      underReplicated, unhealthy);
  if (LOG.isDebugEnabled() && underReplicatedIDs.size() < 10000
      && unhealthyIDs.size() < 10000) {
    LOG.debug("{} has {} underReplicated [{}] and {} unhealthy [{}] "
        + "containers", dn, underReplicated,
        underReplicatedIDs.stream().map(Object::toString)
            .collect(Collectors.joining(", ")),
        unhealthy,
        unhealthyIDs.stream().map(Object::toString)
            .collect(Collectors.joining(", ")));
  }
  return underReplicated == 0 && unhealthy == 0;
}
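As a usage note, the boolean result is what lets the admin monitor decide whether a decommissioning node may proceed; a minimal sketch of that decision, with completeDecommission as an illustrative name rather than upstream API:

// Hypothetical monitor step: a node leaves DECOMMISSIONING only once
// every container it holds is sufficiently replicated and healthy.
if (checkContainersReplicatedOnNode(dn)) {
  completeDecommission(dn);  // illustrative next step
}
// otherwise the monitor re-checks the node on its next run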