Use of org.apache.hadoop.hdds.scm.container.ContainerInfo in project Ozone by Apache.
The class TestSCMRestart, method testPipelineWithScmRestart.
@Test
public void testPipelineWithScmRestart() throws IOException {
  // After the restart, make sure the pipelines are still present.
  Pipeline ratisPipeline1AfterRestart =
      pipelineManager.getPipeline(ratisPipeline1.getId());
  Pipeline ratisPipeline2AfterRestart =
      pipelineManager.getPipeline(ratisPipeline2.getId());
  Assert.assertNotSame(ratisPipeline1AfterRestart, ratisPipeline1);
  Assert.assertNotSame(ratisPipeline2AfterRestart, ratisPipeline2);
  Assert.assertEquals(ratisPipeline1AfterRestart, ratisPipeline1);
  Assert.assertEquals(ratisPipeline2AfterRestart, ratisPipeline2);
  // Try creating a new container; it should be allocated from the same
  // pipeline as before the restart.
  ContainerInfo containerInfo = newContainerManager.allocateContainer(
      RatisReplicationConfig.getInstance(ReplicationFactor.THREE), "Owner1");
  Assert.assertEquals(containerInfo.getPipelineID(), ratisPipeline1.getId());
}
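The final assertion relies on ContainerInfo recording the pipeline it was allocated on, so a container can always be resolved back to its hosting pipeline. A minimal sketch of that round trip, assuming the same pipelineManager and container manager the test sets up:

// Allocate a container, then resolve its hosting pipeline from the ID
// stored in the returned ContainerInfo.
ContainerInfo info = newContainerManager.allocateContainer(
    RatisReplicationConfig.getInstance(ReplicationFactor.THREE), "Owner1");
Pipeline hostingPipeline = pipelineManager.getPipeline(info.getPipelineID());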
Use of org.apache.hadoop.hdds.scm.container.ContainerInfo in project Ozone by Apache.
The class TestSCMContainerManagerMetrics, method testContainerOpsMetrics.
@Test
public void testContainerOpsMetrics() throws IOException {
  MetricsRecordBuilder metrics;
  ContainerManager containerManager = scm.getContainerManager();

  metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
  long numSuccessfulCreateContainers =
      getLongCounter("NumSuccessfulCreateContainers", metrics);
  ContainerInfo containerInfo = containerManager.allocateContainer(
      RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
      OzoneConsts.OZONE);
  metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
  Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers", metrics),
      ++numSuccessfulCreateContainers);

  try {
    containerManager.allocateContainer(
        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
        OzoneConsts.OZONE);
    fail("allocateContainer with ReplicationFactor.THREE should have failed");
  } catch (IOException ex) {
    // The allocation fails, so the success counter keeps its old value
    // and the failure counter is incremented.
    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
    Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers", metrics),
        numSuccessfulCreateContainers);
    Assert.assertEquals(getLongCounter("NumFailureCreateContainers", metrics), 1);
  }

  metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
  long numSuccessfulDeleteContainers =
      getLongCounter("NumSuccessfulDeleteContainers", metrics);
  containerManager.deleteContainer(
      ContainerID.valueOf(containerInfo.getContainerID()));
  metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
  Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers", metrics),
      numSuccessfulDeleteContainers + 1);

  try {
    // Try deleting a random, non-existent container.
    containerManager.deleteContainer(
        ContainerID.valueOf(RandomUtils.nextLong(10000, 20000)));
    fail("Deleting a non-existent container should have failed");
  } catch (ContainerNotFoundException ex) {
    // The delete fails, so the success counter keeps its value from the
    // earlier successful delete, and the failure counter is incremented.
    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
    Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers", metrics),
        numSuccessfulDeleteContainers + 1);
    Assert.assertEquals(getLongCounter("NumFailureDeleteContainers", metrics), 1);
  }

  long currentValue = getLongCounter("NumListContainerOps", metrics);
  containerManager.getContainers(
      ContainerID.valueOf(containerInfo.getContainerID()), 1);
  metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
  Assert.assertEquals(currentValue + 1,
      getLongCounter("NumListContainerOps", metrics));
}
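Every check above re-reads the metrics record after the operation and compares counters against a value captured beforehand. That read-act-reread pattern could be factored into a small helper; the sketch below is hypothetical (assertCounterDelta and the ContainerOp interface are not part of the test class), but it uses only the metric helpers already shown:

// Hypothetical helper: run a container operation, then assert how much a
// named counter moved.
interface ContainerOp {
  void run() throws IOException;
}

private void assertCounterDelta(String counter, long expectedDelta, ContainerOp op)
    throws IOException {
  long before = getLongCounter(counter,
      getMetrics(SCMContainerManagerMetrics.class.getSimpleName()));
  op.run();
  long after = getLongCounter(counter,
      getMetrics(SCMContainerManagerMetrics.class.getSimpleName()));
  Assert.assertEquals(expectedDelta, after - before);
}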
Use of org.apache.hadoop.hdds.scm.container.ContainerInfo in project Ozone by Apache.
The class TestKeyManagerUnit, method listStatus.
@Test
public void listStatus() throws Exception {
  String volume = "vol";
  String bucket = "bucket";
  String keyPrefix = "key";
  String client = "client.host";
  OMRequestTestUtils.addVolumeToDB(volume, OzoneConsts.OZONE, metadataManager);
  OMRequestTestUtils.addBucketToDB(volume, bucket, metadataManager);
  final Pipeline pipeline = MockPipeline.createPipeline(3);
  final List<String> nodes = pipeline.getNodes().stream()
      .map(DatanodeDetails::getUuidString).collect(toList());

  List<Long> containerIDs = new ArrayList<>();
  List<ContainerWithPipeline> containersWithPipeline = new ArrayList<>();
  for (long i = 1; i <= 10; i++) {
    final OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder()
        .setBlockID(new BlockID(i, 1L)).setPipeline(pipeline)
        .setOffset(0).setLength(256000).build();
    ContainerInfo containerInfo = new ContainerInfo.Builder()
        .setContainerID(i).build();
    containersWithPipeline.add(new ContainerWithPipeline(containerInfo, pipeline));
    containerIDs.add(i);
    OmKeyInfo keyInfo = new OmKeyInfo.Builder()
        .setVolumeName(volume).setBucketName(bucket)
        .setCreationTime(Time.now())
        .setOmKeyLocationInfos(singletonList(
            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
        .setReplicationConfig(
            RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
        .setKeyName(keyPrefix + i).setObjectID(i).setUpdateID(i).build();
    keyInfo.appendNewBlocks(singletonList(keyLocationInfo), false);
    OMRequestTestUtils.addKeyToOM(metadataManager, keyInfo);
  }
  when(containerClient.getContainerWithPipelineBatch(containerIDs))
      .thenReturn(containersWithPipeline);

  OmKeyArgs.Builder builder = new OmKeyArgs.Builder()
      .setVolumeName(volume).setBucketName(bucket).setKeyName("")
      .setSortDatanodesInPipeline(true);
  List<OzoneFileStatus> fileStatusList = keyManager.listStatus(
      builder.build(), false, null, Long.MAX_VALUE, client);
  Assert.assertEquals(10, fileStatusList.size());
  verify(containerClient).getContainerWithPipelineBatch(containerIDs);
  verify(blockClient).sortDatanodes(nodes, client);
}
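The mocked containerClient is what ties the listed keys to their block locations: the batch lookup maps each container ID to a ContainerWithPipeline pair. A stripped-down sketch of that wiring for a single container (the ID 7 is arbitrary; stubbing via Mockito and the pipeline variable as in the test):

// Map container 7 to a pipeline, so any batch lookup for it resolves.
ContainerInfo info = new ContainerInfo.Builder().setContainerID(7L).build();
when(containerClient.getContainerWithPipelineBatch(singletonList(7L)))
    .thenReturn(singletonList(new ContainerWithPipeline(info, pipeline)));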
Use of org.apache.hadoop.hdds.scm.container.ContainerInfo in project Ozone by Apache.
The class TestKeyManagerUnit, method testLookupFileWithDnFailure.
@Test
public void testLookupFileWithDnFailure() throws IOException {
  final DatanodeDetails dnOne = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dnTwo = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dnThree = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dnFour = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dnFive = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dnSix = MockDatanodeDetails.randomDatanodeDetails();

  final Pipeline pipelineOne = Pipeline.newBuilder()
      .setId(PipelineID.randomId())
      .setReplicationConfig(
          RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
      .setState(Pipeline.PipelineState.OPEN)
      .setLeaderId(dnOne.getUuid())
      .setNodes(Arrays.asList(dnOne, dnTwo, dnThree)).build();
  final Pipeline pipelineTwo = Pipeline.newBuilder()
      .setId(PipelineID.randomId())
      .setReplicationConfig(
          RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
      .setState(Pipeline.PipelineState.OPEN)
      .setLeaderId(dnFour.getUuid())
      .setNodes(Arrays.asList(dnFour, dnFive, dnSix)).build();

  List<Long> containerIDs = new ArrayList<>();
  containerIDs.add(1L);
  List<ContainerWithPipeline> cps = new ArrayList<>();
  ContainerInfo ci = Mockito.mock(ContainerInfo.class);
  when(ci.getContainerID()).thenReturn(1L);
  cps.add(new ContainerWithPipeline(ci, pipelineTwo));
  when(containerClient.getContainerWithPipelineBatch(containerIDs))
      .thenReturn(cps);

  final OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder()
      .setVolume("volumeOne").setAdminName("admin").setOwnerName("admin").build();
  OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs);
  final OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
      .setVolumeName("volumeOne").setBucketName("bucketOne").build();
  OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo);

  final OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder()
      .setBlockID(new BlockID(1L, 1L)).setPipeline(pipelineOne)
      .setOffset(0).setLength(256000).build();
  final OmKeyInfo keyInfo = new OmKeyInfo.Builder()
      .setVolumeName("volumeOne").setBucketName("bucketOne").setKeyName("keyOne")
      .setOmKeyLocationInfos(singletonList(
          new OmKeyLocationInfoGroup(0, singletonList(keyLocationInfo))))
      .setCreationTime(Time.now()).setModificationTime(Time.now())
      .setDataSize(256000)
      .setReplicationConfig(
          RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
      .setAcls(Collections.emptyList()).build();
  OMRequestTestUtils.addKeyToOM(metadataManager, keyInfo);

  final OmKeyArgs.Builder keyArgs = new OmKeyArgs.Builder()
      .setVolumeName("volumeOne").setBucketName("bucketOne").setKeyName("keyOne");
  final OmKeyInfo newKeyInfo = keyManager.lookupFile(keyArgs.build(), "test");
  final OmKeyLocationInfo newBlockLocation = newKeyInfo
      .getLatestVersionLocations().getBlocksLatestVersionOnly().get(0);

  Assert.assertEquals(1L, newBlockLocation.getContainerID());
  Assert.assertEquals(1L, newBlockLocation.getBlockID().getLocalID());
  Assert.assertEquals(pipelineTwo.getId(), newBlockLocation.getPipeline().getId());
  Assert.assertTrue(newBlockLocation.getPipeline().getNodes().contains(dnFour));
  Assert.assertTrue(newBlockLocation.getPipeline().getNodes().contains(dnFive));
  Assert.assertTrue(newBlockLocation.getPipeline().getNodes().contains(dnSix));
}
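The test stores the key with the stale pipelineOne in OM metadata while the mocked SCM client reports pipelineTwo, so lookupFile is expected to return refreshed locations. The last four assertions could be condensed into a membership check along these lines (equivalent in intent, not the test's literal code):

// The refreshed location must carry pipelineTwo's nodes, none of pipelineOne's.
List<DatanodeDetails> refreshedNodes = newBlockLocation.getPipeline().getNodes();
Assert.assertTrue(refreshedNodes.containsAll(Arrays.asList(dnFour, dnFive, dnSix)));
Assert.assertFalse(refreshedNodes.contains(dnOne));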
Use of org.apache.hadoop.hdds.scm.container.ContainerInfo in project Ozone by Apache.
The class DeletedBlockLogImpl, method commitTransactions.
/**
 * {@inheritDoc}
 *
 * @param transactionResults - results of delete block transactions
 *                             reported back by a datanode.
 * @param dnID - ID of the datanode that acknowledged
 *               the delete block command.
 */
@Override
public void commitTransactions(
    List<DeleteBlockTransactionResult> transactionResults, UUID dnID) {
  lock.lock();
  try {
    ArrayList<Long> txIDsToBeDeleted = new ArrayList<>();
    Set<UUID> dnsWithCommittedTxn;
    for (DeleteBlockTransactionResult transactionResult : transactionResults) {
      if (isTransactionFailed(transactionResult)) {
        metrics.incrBlockDeletionTransactionFailure();
        continue;
      }
      try {
        metrics.incrBlockDeletionTransactionSuccess();
        long txID = transactionResult.getTxID();
        // Set of DNs that have successfully committed transaction txID.
        dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID);
        final ContainerID containerId =
            ContainerID.valueOf(transactionResult.getContainerID());
        if (dnsWithCommittedTxn == null) {
          // Most likely it's a retried delete command response.
          if (LOG.isDebugEnabled()) {
            LOG.debug("Transaction txId={} commit by dnId={} for containerID={}"
                + " failed. Corresponding entry not found.",
                txID, dnID, containerId);
          }
          continue;
        }
        dnsWithCommittedTxn.add(dnID);
        final ContainerInfo container =
            containerManager.getContainer(containerId);
        final Set<ContainerReplica> replicas =
            containerManager.getContainerReplicas(containerId);
        // Purge the transaction only once both the replica count and the
        // committed-DN count have reached the replication factor.
        if (min(replicas.size(), dnsWithCommittedTxn.size())
            >= container.getReplicationConfig().getRequiredNodes()) {
          List<UUID> containerDns = replicas.stream()
              .map(ContainerReplica::getDatanodeDetails)
              .map(DatanodeDetails::getUuid)
              .collect(Collectors.toList());
          if (dnsWithCommittedTxn.containsAll(containerDns)) {
            transactionToDNsCommitMap.remove(txID);
            transactionToRetryCountMap.remove(txID);
            if (LOG.isDebugEnabled()) {
              LOG.debug("Purging txId={} from block deletion log", txID);
            }
            txIDsToBeDeleted.add(txID);
          }
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Datanode txId={} containerId={} committed by dnId={}",
              txID, containerId, dnID);
        }
      } catch (IOException e) {
        LOG.warn("Could not commit delete block transaction: "
            + transactionResult.getTxID(), e);
      }
    }
    try {
      deletedBlockLogStateManager.removeTransactionsFromDB(txIDsToBeDeleted);
      metrics.incrBlockDeletionTransactionCompleted(txIDsToBeDeleted.size());
    } catch (IOException e) {
      LOG.warn("Could not commit delete block transactions: "
          + txIDsToBeDeleted, e);
    }
  } finally {
    lock.unlock();
  }
}
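The purge condition buried in the loop above can be read in isolation: a transaction leaves the log once the acknowledging datanodes cover every current replica holder and the smaller of the two sets has reached the replication factor. A self-contained sketch of that predicate (simplified types; not the actual Ozone helper):

import java.util.Set;
import java.util.UUID;

final class PurgeCheck {
  // True when a delete-block transaction can be purged: enough nodes have
  // committed it, and the committed set covers all current replica holders.
  static boolean canPurge(Set<UUID> replicaDns, Set<UUID> committedDns,
      int requiredNodes) {
    return Math.min(replicaDns.size(), committedDns.size()) >= requiredNodes
        && committedDns.containsAll(replicaDns);
  }
}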