Use of org.apache.hadoop.hdds.scm.container.ContainerReplica in project Ozone by Apache.
The class TestDecommissionAndMaintenance, method getOneDNHostingReplica.
/**
 * Select any DN hosting a replica from the replica set.
 * @param replicas The set of ContainerReplica
 * @return Any datanode associated with one of the replicas
 */
private DatanodeDetails getOneDNHostingReplica(Set<ContainerReplica> replicas) {
  // Pick the first replica from the set and return its datanode
  Iterator<ContainerReplica> iter = replicas.iterator();
  ContainerReplica c = iter.next();
  return c.getDatanodeDetails();
}
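For orientation, a minimal usage sketch of this helper (assuming the cm, scmClient, and getDNHostAndPort fixtures seen in the other snippets on this page; the call sequence is illustrative, not the project's exact code):

// Sketch: pick one datanode hosting the tracked container and decommission it.
Set<ContainerReplica> replicas =
    cm.getContainerReplicas(container.containerID());
DatanodeDetails target = getOneDNHostingReplica(replicas);
scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(target)));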
Use of org.apache.hadoop.hdds.scm.container.ContainerReplica in project Ozone by Apache.
The class TestDecommissionAndMaintenance, method testContainerIsReplicatedWhenAllNodesGotoMaintenance.
@Test
// The container is replicated when all the nodes hosting it go to
// maintenance; when the nodes return, the excess replicas should be removed.
public void testContainerIsReplicatedWhenAllNodesGotoMaintenance()
    throws Exception {
  // Generate some data on the empty cluster to create some containers
  generateData(20, "key", ReplicationFactor.THREE, ReplicationType.RATIS);
  // Locate any container and find its open pipeline
  final ContainerInfo container = waitForAndReturnContainer();
  Set<ContainerReplica> replicas = getContainerReplicas(container);
  List<DatanodeDetails> forMaintenance = new ArrayList<>();
  replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails()));
  scmClient.startMaintenanceNodes(forMaintenance.stream()
      .map(d -> getDNHostAndPort(d))
      .collect(Collectors.toList()), 0);
  // Ensure all 3 DNs go to maintenance
  for (DatanodeDetails dn : forMaintenance) {
    waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE);
  }
  // There should now be 5-6 replicas of the container we are tracking
  Set<ContainerReplica> newReplicas =
      cm.getContainerReplicas(container.containerID());
  assertTrue(newReplicas.size() >= 5);
  scmClient.recommissionNodes(forMaintenance.stream()
      .map(d -> getDNHostAndPort(d))
      .collect(Collectors.toList()));
  // Ensure all 3 DNs return to service
  for (DatanodeDetails dn : forMaintenance) {
    waitForDnToReachOpState(dn, IN_SERVICE);
  }
  // The excess replicas should be removed once the nodes are back
  waitForContainerReplicas(container, 3);
}
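The waitForContainerReplicas helper is not shown in these excerpts; a plausible sketch, assuming Ozone's GenericTestUtils.waitFor polling utility and the cm ContainerManager fixture used above (the real helper in TestDecommissionAndMaintenance may differ):

// Hypothetical helper: poll SCM until the container reports the expected
// replica count, failing after a timeout.
private void waitForContainerReplicas(ContainerInfo container, int count)
    throws TimeoutException, InterruptedException {
  GenericTestUtils.waitFor(() -> {
    try {
      return cm.getContainerReplicas(container.containerID()).size() == count;
    } catch (ContainerNotFoundException e) {
      // Container may not be registered yet; keep polling.
      return false;
    }
  }, 200, 30000);
}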
Use of org.apache.hadoop.hdds.scm.container.ContainerReplica in project Ozone by Apache.
The class TestDecommissionAndMaintenance, method testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned.
@Test
// A node with an open pipeline can be decommissioned and then
// be recommissioned.
public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned()
    throws Exception {
  // Generate some data on the empty cluster to create some containers
  generateData(20, "key", ReplicationFactor.THREE, ReplicationType.RATIS);
  // Locate any container and find its open pipeline
  final ContainerInfo container = waitForAndReturnContainer();
  Pipeline pipeline = pm.getPipeline(container.getPipelineID());
  assertEquals(Pipeline.PipelineState.OPEN, pipeline.getPipelineState());
  Set<ContainerReplica> replicas = getContainerReplicas(container);
  final DatanodeDetails toDecommission = getOneDNHostingReplica(replicas);
  scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(toDecommission)));
  waitForDnToReachOpState(toDecommission, DECOMMISSIONED);
  // Ensure one node transitioned to DECOMMISSIONED
  List<DatanodeDetails> decomNodes = nm.getNodes(DECOMMISSIONED, HEALTHY);
  assertEquals(1, decomNodes.size());
  // Should now be 4 replicas online as the DN is still alive but
  // in the DECOMMISSIONED state.
  waitForContainerReplicas(container, 4);
  // Stop the decommissioned DN
  int dnIndex = cluster.getHddsDatanodeIndex(toDecommission);
  cluster.shutdownHddsDatanode(toDecommission);
  waitForDnToReachHealthState(toDecommission, DEAD);
  // Now the decommissioned node is dead, we should have
  // 3 replicas for the tracked container.
  waitForContainerReplicas(container, 3);
  cluster.restartHddsDatanode(dnIndex, true);
  scmClient.recommissionNodes(Arrays.asList(getDNHostAndPort(toDecommission)));
  waitForDnToReachOpState(toDecommission, IN_SERVICE);
  waitForDnToReachPersistedOpState(toDecommission, IN_SERVICE);
}
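getDNHostAndPort is also referenced without being defined in these excerpts; a hedged sketch, assuming the decommission/maintenance API accepts "host:port" strings and that the Ratis port is the one to report (the project's actual helper may choose a different port):

// Hypothetical helper: format a datanode as host:port for the
// decommission, maintenance, and recommission client calls above.
private String getDNHostAndPort(DatanodeDetails dn) {
  return dn.getHostName() + ":"
      + dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue();
}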
Use of org.apache.hadoop.hdds.scm.container.ContainerReplica in project Ozone by Apache.
The class TestContainerReportWithKeys, method testContainerReportKeyWrite.
@Test
public void testContainerReportKeyWrite() throws Exception {
  final String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
  final String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
  final String keyName = "key" + RandomStringUtils.randomNumeric(5);
  final int keySize = 100;
  OzoneClient client = OzoneClientFactory.getRpcClient(conf);
  ObjectStore objectStore = client.getObjectStore();
  objectStore.createVolume(volumeName);
  objectStore.getVolume(volumeName).createBucket(bucketName);
  OzoneOutputStream key = objectStore.getVolume(volumeName)
      .getBucket(bucketName)
      .createKey(keyName, keySize, ReplicationType.RATIS,
          ReplicationFactor.ONE, new HashMap<>());
  String dataString = RandomStringUtils.randomAlphabetic(keySize);
  key.write(dataString.getBytes(UTF_8));
  key.close();
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setReplicationConfig(StandaloneReplicationConfig.getInstance(
          HddsProtos.ReplicationFactor.ONE))
      .setDataSize(keySize)
      .setRefreshPipeline(true)
      .build();
  OmKeyLocationInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs)
      .getKeyLocationVersions().get(0)
      .getBlocksLatestVersionOnly().get(0);
  ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
  Set<ContainerReplica> replicas = scm.getContainerManager()
      .getContainerReplicas(ContainerID.valueOf(keyInfo.getContainerID()));
  Assert.assertTrue(replicas.size() == 1);
  replicas.stream().forEach(rp ->
      Assert.assertTrue(rp.getDatanodeDetails().getParent() != null));
  LOG.info("SCM Container Info keyCount: {} usedBytes: {}",
      cinfo.getNumberOfKeys(), cinfo.getUsedBytes());
}
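As a hedged extension of the same check, ContainerReplica also exposes per-replica accessors such as getContainerID() alongside getDatanodeDetails(); a sketch of asserting on them (illustrative, not part of the original test):

// Sketch: verify replica-level metadata, not just the replica count.
for (ContainerReplica replica : replicas) {
  // Each replica carries the reporting datanode and its container ID.
  Assert.assertNotNull(replica.getDatanodeDetails().getParent());
  Assert.assertEquals(ContainerID.valueOf(keyInfo.getContainerID()),
      replica.getContainerID());
}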
Use of org.apache.hadoop.hdds.scm.container.ContainerReplica in project Ozone by Apache.
The class DeletedBlockLogImpl, method commitTransactions.
/**
 * {@inheritDoc}
 *
 * @param transactionResults - results of the delete block transactions
 *                             acknowledged by the datanode.
 * @param dnID - ID of the datanode which has acknowledged
 *               a delete block command.
 */
@Override
public void commitTransactions(
    List<DeleteBlockTransactionResult> transactionResults, UUID dnID) {
  lock.lock();
  try {
    ArrayList<Long> txIDsToBeDeleted = new ArrayList<>();
    Set<UUID> dnsWithCommittedTxn;
    for (DeleteBlockTransactionResult transactionResult : transactionResults) {
      if (isTransactionFailed(transactionResult)) {
        metrics.incrBlockDeletionTransactionFailure();
        continue;
      }
      try {
        metrics.incrBlockDeletionTransactionSuccess();
        long txID = transactionResult.getTxID();
        // Set of DNs which have successfully committed transaction txId.
        dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID);
        final ContainerID containerId =
            ContainerID.valueOf(transactionResult.getContainerID());
        if (dnsWithCommittedTxn == null) {
          // Most likely it's a retried delete command response.
          if (LOG.isDebugEnabled()) {
            LOG.debug("Transaction txId={} commit by dnId={} for containerID={}"
                + " failed. Corresponding entry not found.",
                txID, dnID, containerId);
          }
          continue;
        }
        dnsWithCommittedTxn.add(dnID);
        final ContainerInfo container =
            containerManager.getContainer(containerId);
        final Set<ContainerReplica> replicas =
            containerManager.getContainerReplicas(containerId);
        // Purge the transaction only once enough datanodes have committed
        // it: at least as many as the replication factor requires, and
        // covering every datanode currently holding a replica.
        if (min(replicas.size(), dnsWithCommittedTxn.size())
            >= container.getReplicationConfig().getRequiredNodes()) {
          List<UUID> containerDns = replicas.stream()
              .map(ContainerReplica::getDatanodeDetails)
              .map(DatanodeDetails::getUuid)
              .collect(Collectors.toList());
          if (dnsWithCommittedTxn.containsAll(containerDns)) {
            transactionToDNsCommitMap.remove(txID);
            transactionToRetryCountMap.remove(txID);
            if (LOG.isDebugEnabled()) {
              LOG.debug("Purging txId={} from block deletion log", txID);
            }
            txIDsToBeDeleted.add(txID);
          }
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Datanode txId={} containerId={} committed by dnId={}",
              txID, containerId, dnID);
        }
      } catch (IOException e) {
        LOG.warn("Could not commit delete block transaction: "
            + transactionResult.getTxID(), e);
      }
    }
    try {
      deletedBlockLogStateManager.removeTransactionsFromDB(txIDsToBeDeleted);
      metrics.incrBlockDeletionTransactionCompleted(txIDsToBeDeleted.size());
    } catch (IOException e) {
      LOG.warn("Could not commit delete block transactions: "
          + txIDsToBeDeleted, e);
    }
  } finally {
    lock.unlock();
  }
}
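For clarity, the purge condition above can be read as a standalone predicate: a transaction is deleted from the log only when the committed acks both meet the required replication and cover every datanode currently holding a replica. A hedged distillation of that logic (the canPurge name is illustrative, not from the Ozone source):

// Illustrative predicate mirroring the purge logic in commitTransactions.
private static boolean canPurge(Set<ContainerReplica> replicas,
    Set<UUID> ackedDns, int requiredNodes) {
  Set<UUID> replicaDns = replicas.stream()
      .map(ContainerReplica::getDatanodeDetails)
      .map(DatanodeDetails::getUuid)
      .collect(Collectors.toSet());
  return Math.min(replicas.size(), ackedDns.size()) >= requiredNodes
      && ackedDns.containsAll(replicaDns);
}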