Use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup in the Apache Ozone project.
From the class OMKeyRequest, method sumBlockLengths.
/**
 * Computes the total storage consumed by the blocks of {@code omKeyInfo},
 * across every key location version, scaled by the number of replicas
 * required by the key's replication configuration.
 *
 * @param omKeyInfo key whose block lengths are summed.
 * @return replicated byte usage of all blocks referenced by the key.
 */
protected static long sumBlockLengths(OmKeyInfo omKeyInfo) {
  final int replicas = omKeyInfo.getReplicationConfig().getRequiredNodes();
  long totalBytes = 0;
  for (OmKeyLocationInfoGroup version : omKeyInfo.getKeyLocationVersions()) {
    for (OmKeyLocationInfo location : version.getLocationList()) {
      totalBytes += location.getLength() * replicas;
    }
  }
  return totalBytes;
}
Use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup in the Apache Ozone project.
From the class TestContainerKeyMapperTask, method testReprocessOMDB.
// Verifies that a full reprocess of the OM metadata DB rebuilds the
// container -> key-prefix mapping plus the per-container key counts and
// the total container count in Recon.
@Test
public void testReprocessOMDB() throws Exception {
// Precondition: Recon starts with no key prefixes for either container.
Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(1);
assertTrue(keyPrefixesForContainer.isEmpty());
keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(2);
assertTrue(keyPrefixesForContainer.isEmpty());
Pipeline pipeline = getRandomPipeline();
// Build one key whose two blocks live in containers 1 and 2 respectively.
List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
BlockID blockID1 = new BlockID(1, 1);
OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1, pipeline);
BlockID blockID2 = new BlockID(2, 1);
OmKeyLocationInfo omKeyLocationInfo2 = getOmKeyLocationInfo(blockID2, pipeline);
omKeyLocationInfoList.add(omKeyLocationInfo1);
omKeyLocationInfoList.add(omKeyLocationInfo2);
// Version 0 of the key's block locations.
OmKeyLocationInfoGroup omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
writeDataToOm(reconOMMetadataManager, "key_one", "bucketOne", "sampleVol", Collections.singletonList(omKeyLocationInfoGroup));
// Run the full reprocess over the OM DB snapshot.
ContainerKeyMapperTask containerKeyMapperTask = new ContainerKeyMapperTask(reconContainerMetadataManager);
containerKeyMapperTask.reprocess(reconOMMetadataManager);
// Container 1 should now map to exactly one key prefix: key_one at version 0.
keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(1);
assertEquals(1, keyPrefixesForContainer.size());
String omKey = omMetadataManager.getOzoneKey("sampleVol", "bucketOne", "key_one");
ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix(1, omKey, 0);
assertEquals(1, keyPrefixesForContainer.get(containerKeyPrefix).intValue());
// Container 2 holds the same key via its second block.
keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(2);
assertEquals(1, keyPrefixesForContainer.size());
containerKeyPrefix = new ContainerKeyPrefix(2, omKey, 0);
assertEquals(1, keyPrefixesForContainer.get(containerKeyPrefix).intValue());
// Test if container key counts are updated
assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(1L));
assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(2L));
assertEquals(0, reconContainerMetadataManager.getKeyCountForContainer(3L));
// Test if container count is updated
assertEquals(2, reconContainerMetadataManager.getCountForContainers());
}
Use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup in the Apache Ozone project.
From the class TestContainerKeyMapperTask, method testProcessOMEvents.
// Verifies incremental event processing: after an initial reprocess, a PUT
// event for a new key and a DELETE event for an existing key must update the
// container -> key mappings and counters accordingly.
@Test
public void testProcessOMEvents() throws IOException {
// Precondition: Recon starts with no key prefixes for either container.
Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(1);
assertTrue(keyPrefixesForContainer.isEmpty());
keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(2);
assertTrue(keyPrefixesForContainer.isEmpty());
Pipeline pipeline = getRandomPipeline();
// key_one: blocks in containers 1 and 2. Delivered only via a PUT event,
// not written to the OM DB, so reprocess below will not see it.
List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
BlockID blockID1 = new BlockID(1, 1);
OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1, pipeline);
BlockID blockID2 = new BlockID(2, 1);
OmKeyLocationInfo omKeyLocationInfo2 = getOmKeyLocationInfo(blockID2, pipeline);
omKeyLocationInfoList.add(omKeyLocationInfo1);
omKeyLocationInfoList.add(omKeyLocationInfo2);
OmKeyLocationInfoGroup omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
String bucket = "bucketOne";
String volume = "sampleVol";
String key = "key_one";
String omKey = omMetadataManager.getOzoneKey(volume, bucket, key);
OmKeyInfo omKeyInfo = buildOmKeyInfo(volume, bucket, key, omKeyLocationInfoGroup);
// PUT event for key_one.
OMDBUpdateEvent keyEvent1 = new OMDBUpdateEvent.OMUpdateEventBuilder<String, OmKeyInfo>().setKey(omKey).setValue(omKeyInfo).setTable(omMetadataManager.getKeyTable(getBucketLayout()).getName()).setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT).build();
// key_two: blocks in containers 1 and 3. Written to the OM DB so reprocess
// picks it up; then removed again via a DELETE event.
BlockID blockID3 = new BlockID(1, 2);
OmKeyLocationInfo omKeyLocationInfo3 = getOmKeyLocationInfo(blockID3, pipeline);
BlockID blockID4 = new BlockID(3, 1);
OmKeyLocationInfo omKeyLocationInfo4 = getOmKeyLocationInfo(blockID4, pipeline);
omKeyLocationInfoList = new ArrayList<>();
omKeyLocationInfoList.add(omKeyLocationInfo3);
omKeyLocationInfoList.add(omKeyLocationInfo4);
omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
String key2 = "key_two";
writeDataToOm(reconOMMetadataManager, key2, bucket, volume, Collections.singletonList(omKeyLocationInfoGroup));
omKey = omMetadataManager.getOzoneKey(volume, bucket, key2);
// DELETE event for key_two.
OMDBUpdateEvent keyEvent2 = new OMDBUpdateEvent.OMUpdateEventBuilder<String, OmKeyInfo>().setKey(omKey).setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE).setTable(omMetadataManager.getKeyTable(getBucketLayout()).getName()).build();
// Batch carrying the PUT followed by the DELETE.
OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(new ArrayList<OMDBUpdateEvent>() {
{
add(keyEvent1);
add(keyEvent2);
}
});
ContainerKeyMapperTask containerKeyMapperTask = new ContainerKeyMapperTask(reconContainerMetadataManager);
containerKeyMapperTask.reprocess(reconOMMetadataManager);
// After reprocess only key_two exists: containers 1 and 3 each have one
// prefix, container 2 none.
keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(1);
assertEquals(1, keyPrefixesForContainer.size());
keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(2);
assertTrue(keyPrefixesForContainer.isEmpty());
keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(3);
assertEquals(1, keyPrefixesForContainer.size());
assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(1L));
assertEquals(0, reconContainerMetadataManager.getKeyCountForContainer(2L));
assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(3L));
// Process PUT & DELETE event.
containerKeyMapperTask.process(omUpdateEventBatch);
// After the batch: key_one added (containers 1, 2), key_two removed
// (container 3 emptied).
keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(1);
assertEquals(1, keyPrefixesForContainer.size());
keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(2);
assertEquals(1, keyPrefixesForContainer.size());
keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(3);
assertTrue(keyPrefixesForContainer.isEmpty());
assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(1L));
assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(2L));
assertEquals(0, reconContainerMetadataManager.getKeyCountForContainer(3L));
// Test if container count is updated
// NOTE(review): count stays 3 — presumably containers are never removed
// from the total even when emptied; confirm against the manager contract.
assertEquals(3, reconContainerMetadataManager.getCountForContainers());
}
Use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup in the Apache Ozone project.
From the class TestBlockDeletion, method testBlockDeletion.
// End-to-end block deletion test: write a key, delete it, and verify blocks
// are only reclaimed on the datanode once its containers are closed, while
// delete-transaction IDs stay consistent between SCM and the DN (including
// across a DN restart).
@Test
public void testBlockDeletion() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
// 1 MB payload, written 10 times below.
String value = RandomStringUtils.random(1024 * 1024);
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
String keyName = UUID.randomUUID().toString();
OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
for (int i = 0; i < 10; i++) {
out.write(value.getBytes(UTF_8));
}
out.close();
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setDataSize(0).setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)).setRefreshPipeline(true).build();
List<OmKeyLocationInfoGroup> omKeyLocationInfoGroupList = om.lookupKey(keyArgs).getKeyLocationVersions();
// verify key blocks were created in DN.
GenericTestUtils.waitFor(() -> {
try {
verifyBlocksCreated(omKeyLocationInfoGroupList);
return true;
} catch (Throwable t) {
LOG.warn("Verify blocks creation failed", t);
return false;
}
}, 1000, 10000);
// No containers with deleted blocks
Assert.assertTrue(containerIdsWithDeletedBlocks.isEmpty());
// Delete transactionIds for the containers should be 0.
// NOTE: this test assumes that all the container is KetValueContainer. If
// other container types is going to be added, this test should be checked.
matchContainerTransactionIds();
Assert.assertEquals(0L, metrics.getNumBlockDeletionTransactionCreated());
writeClient.deleteKey(keyArgs);
// Give the deletion service time to run; the blocks must still survive
// because the containers are OPEN.
Thread.sleep(5000);
// The blocks should not be deleted in the DN as the container is open
try {
verifyBlocksDeleted(omKeyLocationInfoGroupList);
Assert.fail("Blocks should not have been deleted");
} catch (Throwable e) {
// verifyBlocksDeleted is expected to throw an AssertionError while the
// blocks still exist.
Assert.assertTrue(e.getMessage().contains("expected null, but was"));
Assert.assertEquals(e.getClass(), AssertionError.class);
}
Assert.assertEquals(0L, metrics.getNumBlockDeletionTransactionSent());
// close the containers which hold the blocks for the key
OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm);
Thread.sleep(2000);
// make sure the containers are closed on the dn
omKeyLocationInfoGroupList.forEach((group) -> {
List<OmKeyLocationInfo> locationInfo = group.getLocationList();
locationInfo.forEach((info) -> cluster.getHddsDatanodes().get(0).getDatanodeStateMachine().getContainer().getContainerSet().getContainer(info.getContainerID()).getContainerData().setState(ContainerProtos.ContainerDataProto.State.CLOSED));
});
// The blocks should be deleted in the DN.
GenericTestUtils.waitFor(() -> {
try {
verifyBlocksDeleted(omKeyLocationInfoGroupList);
return true;
} catch (Throwable t) {
LOG.warn("Verify blocks deletion failed", t);
return false;
}
}, 2000, 30000);
// Few containers with deleted blocks
Assert.assertTrue(!containerIdsWithDeletedBlocks.isEmpty());
// Containers in the DN and SCM should have same delete transactionIds
matchContainerTransactionIds();
// Containers in the DN and SCM should have same delete transactionIds
// after DN restart. The assertion is just to verify that the state of
// containerInfos in dn and scm is consistent after dn restart.
cluster.restartHddsDatanode(0, true);
matchContainerTransactionIds();
// Verify transactions committed
GenericTestUtils.waitFor(() -> {
try {
verifyTransactionsCommitted();
return true;
} catch (Throwable t) {
LOG.warn("Container closing failed", t);
return false;
}
}, 500, 10000);
// Sanity checks on the deletion metrics: all created transactions completed,
// and sent counts bound the success/failure tallies.
Assert.assertTrue(metrics.getNumBlockDeletionTransactionCreated() == metrics.getNumBlockDeletionTransactionCompleted());
// NOTE(review): "getBNumBlockDeletionCommandFailure" looks like a typo'd
// accessor name in the metrics API — confirm against the metrics class.
Assert.assertTrue(metrics.getNumBlockDeletionCommandSent() >= metrics.getNumBlockDeletionCommandSuccess() + metrics.getBNumBlockDeletionCommandFailure());
Assert.assertTrue(metrics.getNumBlockDeletionTransactionSent() >= metrics.getNumBlockDeletionTransactionFailure() + metrics.getNumBlockDeletionTransactionSuccess());
LOG.info(metrics.toString());
}
Use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup in the Apache Ozone project.
From the class TestContainerReplication, method lookupKey.
/**
 * Looks up the well-known test key (VOLUME/BUCKET/KEY) on the cluster's
 * OzoneManager and returns the block locations of its latest version.
 *
 * @param cluster mini cluster whose OM is queried.
 * @return location list of the latest key version; never null.
 * @throws IOException if the OM lookup fails.
 */
private static List<OmKeyLocationInfo> lookupKey(MiniOzoneCluster cluster) throws IOException {
  OmKeyArgs args = new OmKeyArgs.Builder()
      .setVolumeName(VOLUME)
      .setBucketName(BUCKET)
      .setKeyName(KEY)
      .setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
      .build();
  OmKeyLocationInfoGroup latestVersion =
      cluster.getOzoneManager().lookupKey(args).getLatestVersionLocations();
  Assert.assertNotNull(latestVersion);
  return latestVersion.getLocationList();
}
Aggregations