Usage of org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix in the Apache Ozone project.
From class TestReconContainerMetadataManagerImpl, method populateKeysInContainers:
/**
 * Seeds the container-key mapping table for the tests: two key prefixes
 * under the first container (counts 1 and 2) and one under the second
 * container (count 3).
 *
 * @param containerId1 container that receives keyPrefix1 and keyPrefix2.
 * @param containerId2 container that receives keyPrefix3.
 * @throws Exception if the underlying store rejects a mapping.
 */
private void populateKeysInContainers(long containerId1, long containerId2) throws Exception {
  // All prefixes are stored at key version 0.
  reconContainerMetadataManager.storeContainerKeyMapping(
      new ContainerKeyPrefix(containerId1, keyPrefix1, 0), 1);
  reconContainerMetadataManager.storeContainerKeyMapping(
      new ContainerKeyPrefix(containerId1, keyPrefix2, 0), 2);
  reconContainerMetadataManager.storeContainerKeyMapping(
      new ContainerKeyPrefix(containerId2, keyPrefix3, 0), 3);
}
Usage of org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix in the Apache Ozone project.
From class TestContainerKeyMapperTask, method testReprocessOMDB:
/**
 * Verifies that a full reprocess of the OM DB builds a container -> key
 * prefix mapping for each block location of a written key, and that the
 * per-container key counts and the total container count are updated.
 */
@Test
public void testReprocessOMDB() throws Exception {
  // Both containers start out with no key mappings.
  assertTrue(reconContainerMetadataManager.getKeyPrefixesForContainer(1).isEmpty());
  assertTrue(reconContainerMetadataManager.getKeyPrefixesForContainer(2).isEmpty());

  // Write one key whose two block locations fall into containers 1 and 2.
  Pipeline pipeline = getRandomPipeline();
  List<OmKeyLocationInfo> locations = new ArrayList<>();
  locations.add(getOmKeyLocationInfo(new BlockID(1, 1), pipeline));
  locations.add(getOmKeyLocationInfo(new BlockID(2, 1), pipeline));
  OmKeyLocationInfoGroup locationGroup = new OmKeyLocationInfoGroup(0, locations);
  writeDataToOm(reconOMMetadataManager, "key_one", "bucketOne", "sampleVol",
      Collections.singletonList(locationGroup));

  new ContainerKeyMapperTask(reconContainerMetadataManager)
      .reprocess(reconOMMetadataManager);

  // Each container should now map the key's prefix (version 0) with count 1.
  Map<ContainerKeyPrefix, Integer> prefixes =
      reconContainerMetadataManager.getKeyPrefixesForContainer(1);
  assertEquals(1, prefixes.size());
  String expectedKey = omMetadataManager.getOzoneKey("sampleVol", "bucketOne", "key_one");
  assertEquals(1, prefixes.get(new ContainerKeyPrefix(1, expectedKey, 0)).intValue());
  prefixes = reconContainerMetadataManager.getKeyPrefixesForContainer(2);
  assertEquals(1, prefixes.size());
  assertEquals(1, prefixes.get(new ContainerKeyPrefix(2, expectedKey, 0)).intValue());

  // Test if container key counts are updated
  assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(1L));
  assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(2L));
  assertEquals(0, reconContainerMetadataManager.getKeyCountForContainer(3L));
  // Test if container count is updated
  assertEquals(2, reconContainerMetadataManager.getCountForContainers());
}
Usage of org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix in the Apache Ozone project.
From class TestContainerKeyMapperTask, method testProcessOMEvents:
/**
 * Verifies incremental processing of OM DB update events: a PUT for a key
 * whose blocks live in containers 1 and 2, and a DELETE for a key whose
 * blocks live in containers 1 and 3. After processing, container 2 gains a
 * mapping, container 3 loses its mapping, and the counts reflect that.
 */
@Test
public void testProcessOMEvents() throws IOException {
  // Both containers start out with no key mappings.
  Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer =
      reconContainerMetadataManager.getKeyPrefixesForContainer(1);
  assertTrue(keyPrefixesForContainer.isEmpty());
  keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(2);
  assertTrue(keyPrefixesForContainer.isEmpty());

  Pipeline pipeline = getRandomPipeline();

  // key_one has blocks in containers 1 and 2. It is only delivered through
  // the PUT event below and never written to the OM snapshot.
  List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
  BlockID blockID1 = new BlockID(1, 1);
  OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1, pipeline);
  BlockID blockID2 = new BlockID(2, 1);
  OmKeyLocationInfo omKeyLocationInfo2 = getOmKeyLocationInfo(blockID2, pipeline);
  omKeyLocationInfoList.add(omKeyLocationInfo1);
  omKeyLocationInfoList.add(omKeyLocationInfo2);
  OmKeyLocationInfoGroup omKeyLocationInfoGroup =
      new OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
  String bucket = "bucketOne";
  String volume = "sampleVol";
  String key = "key_one";
  String omKey = omMetadataManager.getOzoneKey(volume, bucket, key);
  OmKeyInfo omKeyInfo = buildOmKeyInfo(volume, bucket, key, omKeyLocationInfoGroup);
  OMDBUpdateEvent keyEvent1 =
      new OMDBUpdateEvent.OMUpdateEventBuilder<String, OmKeyInfo>()
          .setKey(omKey)
          .setValue(omKeyInfo)
          .setTable(omMetadataManager.getKeyTable(getBucketLayout()).getName())
          .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
          .build();

  // key_two has blocks in containers 1 and 3. It is written to the OM
  // snapshot so reprocess() picks it up, then removed via a DELETE event.
  BlockID blockID3 = new BlockID(1, 2);
  OmKeyLocationInfo omKeyLocationInfo3 = getOmKeyLocationInfo(blockID3, pipeline);
  BlockID blockID4 = new BlockID(3, 1);
  OmKeyLocationInfo omKeyLocationInfo4 = getOmKeyLocationInfo(blockID4, pipeline);
  omKeyLocationInfoList = new ArrayList<>();
  omKeyLocationInfoList.add(omKeyLocationInfo3);
  omKeyLocationInfoList.add(omKeyLocationInfo4);
  omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
  String key2 = "key_two";
  writeDataToOm(reconOMMetadataManager, key2, bucket, volume,
      Collections.singletonList(omKeyLocationInfoGroup));
  omKey = omMetadataManager.getOzoneKey(volume, bucket, key2);
  OMDBUpdateEvent keyEvent2 =
      new OMDBUpdateEvent.OMUpdateEventBuilder<String, OmKeyInfo>()
          .setKey(omKey)
          .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
          .setTable(omMetadataManager.getKeyTable(getBucketLayout()).getName())
          .build();

  // Build the event list with a plain ArrayList. The previous double-brace
  // initialization created an anonymous ArrayList subclass that captured the
  // enclosing test instance — a well-known anti-pattern.
  ArrayList<OMDBUpdateEvent> events = new ArrayList<>();
  events.add(keyEvent1);
  events.add(keyEvent2);
  OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(events);

  ContainerKeyMapperTask containerKeyMapperTask =
      new ContainerKeyMapperTask(reconContainerMetadataManager);
  containerKeyMapperTask.reprocess(reconOMMetadataManager);

  // After reprocess only key_two is known: containers 1 and 3 hold one
  // mapping each, container 2 holds none.
  keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(1);
  assertEquals(1, keyPrefixesForContainer.size());
  keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(2);
  assertTrue(keyPrefixesForContainer.isEmpty());
  keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(3);
  assertEquals(1, keyPrefixesForContainer.size());
  assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(1L));
  assertEquals(0, reconContainerMetadataManager.getKeyCountForContainer(2L));
  assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(3L));

  // Process PUT & DELETE event.
  containerKeyMapperTask.process(omUpdateEventBatch);

  // The PUT of key_one adds mappings for containers 1 and 2; the DELETE of
  // key_two removes the mappings for containers 1 and 3.
  keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(1);
  assertEquals(1, keyPrefixesForContainer.size());
  keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(2);
  assertEquals(1, keyPrefixesForContainer.size());
  keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(3);
  assertTrue(keyPrefixesForContainer.isEmpty());
  assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(1L));
  assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(2L));
  assertEquals(0, reconContainerMetadataManager.getKeyCountForContainer(3L));
  // Test if container count is updated
  assertEquals(3, reconContainerMetadataManager.getCountForContainers());
}
Usage of org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix in the Apache Ozone project.
From class ContainerEndpoint, method getKeysForContainer:
/**
 * Return {@link org.apache.hadoop.ozone.recon.api.types.KeyMetadata} for
 * all keys that belong to the container identified by the id path param,
 * starting from the given "prev-key" query param for the given "limit".
 * The given prevKeyPrefix itself is skipped from the results returned.
 *
 * @param containerID the given containerID.
 * @param limit max no. of keys to get.
 * @param prevKeyPrefix the key prefix after which results are returned.
 * @return {@link Response} wrapping a KeysResponse (total key count plus the
 *         page of key metadata), or a 500 response if the DB read fails.
 */
@GET
@Path("/{id}/keys")
public Response getKeysForContainer(@PathParam("id") Long containerID, @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) String prevKeyPrefix) {
// Keyed by full ozone key; LinkedHashMap preserves the table's scan order.
Map<String, KeyMetadata> keyMetadataMap = new LinkedHashMap<>();
// Total number of keys in the container (not just this page).
long totalCount;
try {
Map<ContainerKeyPrefix, Integer> containerKeyPrefixMap = reconContainerMetadataManager.getKeyPrefixesForContainer(containerID, prevKeyPrefix);
// Get set of Container-Key mappings for given containerId.
for (ContainerKeyPrefix containerKeyPrefix : containerKeyPrefixMap.keySet()) {
// Directly calling get() on the Key table instead of iterating since
// only full keys are supported now. When we change to using a prefix
// of the key, this needs to change to prefix seek.
OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).getSkipCache(containerKeyPrefix.getKeyPrefix());
// Skip mappings whose key no longer exists in the OM key table.
if (null != omKeyInfo) {
// Keep only the location groups matching this mapping's key version.
List<OmKeyLocationInfoGroup> matchedKeys = omKeyInfo.getKeyLocationVersions().stream().filter(k -> (k.getVersion() == containerKeyPrefix.getKeyVersion())).collect(Collectors.toList());
List<ContainerBlockMetadata> blockIds = getBlocks(matchedKeys, containerID);
String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
if (keyMetadataMap.containsKey(ozoneKey)) {
// Same key seen before under a different version: merge into the
// existing entry instead of counting it against the limit again.
keyMetadataMap.get(ozoneKey).getVersions().add(containerKeyPrefix.getKeyVersion());
keyMetadataMap.get(ozoneKey).getBlockIds().put(containerKeyPrefix.getKeyVersion(), blockIds);
} else {
// break the for loop if limit has been reached
if (keyMetadataMap.size() == limit) {
break;
}
KeyMetadata keyMetadata = new KeyMetadata();
keyMetadata.setBucket(omKeyInfo.getBucketName());
keyMetadata.setVolume(omKeyInfo.getVolumeName());
keyMetadata.setKey(omKeyInfo.getKeyName());
keyMetadata.setCreationTime(Instant.ofEpochMilli(omKeyInfo.getCreationTime()));
keyMetadata.setModificationTime(Instant.ofEpochMilli(omKeyInfo.getModificationTime()));
keyMetadata.setDataSize(omKeyInfo.getDataSize());
keyMetadata.getVersions().add(containerKeyPrefix.getKeyVersion());
keyMetadataMap.put(ozoneKey, keyMetadata);
keyMetadata.getBlockIds().put(containerKeyPrefix.getKeyVersion(), blockIds);
}
}
}
// Report the container's full key count alongside the limited page.
totalCount = reconContainerMetadataManager.getKeyCountForContainer(containerID);
} catch (IOException ioEx) {
// Surface DB read failures as an HTTP 500 with the cause attached.
throw new WebApplicationException(ioEx, Response.Status.INTERNAL_SERVER_ERROR);
}
KeysResponse keysResponse = new KeysResponse(totalCount, keyMetadataMap.values());
return Response.ok(keysResponse).build();
}
Usage of org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix in the Apache Ozone project.
From class ReconContainerMetadataManagerImpl, method getContainers:
/**
 * Iterate the container-key table to construct a Map of containerID ->
 * containerMetadata, only for the given limit from the given start key.
 * The start containerID is skipped from the result.
 *
 * Return all the containers if limit < 0.
 *
 * @param limit no. of containers to get; a negative value returns all.
 * @param prevContainer containerID after which the list of containers is
 *                      scanned; pass 0 or less to scan from the beginning.
 * @return Map of containerID -> containerMetadata.
 * @throws IOException on failure to read from the table.
 */
@Override
public Map<Long, ContainerMetadata> getContainers(int limit, long prevContainer) throws IOException {
  Map<Long, ContainerMetadata> containers = new LinkedHashMap<>();
  // The table iterator is a Closeable handle on the underlying store; use
  // try-with-resources so it is released even when we return early or throw.
  // The original code leaked it.
  try (TableIterator<ContainerKeyPrefix,
      ? extends KeyValue<ContainerKeyPrefix, Integer>> containerIterator =
          containerKeyTable.iterator()) {
    if (prevContainer > 0L) {
      ContainerKeyPrefix seekKey = new ContainerKeyPrefix(prevContainer);
      KeyValue<ContainerKeyPrefix, Integer> seekKeyValue =
          containerIterator.seek(seekKey);
      // Check that the seek landed on an entry for the given
      // prevContainer containerId. If not, then return empty result.
      if (seekKeyValue != null &&
          seekKeyValue.getKey().getContainerId() != prevContainer) {
        return containers;
      } else {
        // seek to the prevContainer+1 containerID to start scan
        containerIterator.seek(new ContainerKeyPrefix(prevContainer + 1));
      }
    }
    while (containerIterator.hasNext()) {
      KeyValue<ContainerKeyPrefix, Integer> keyValue = containerIterator.next();
      ContainerKeyPrefix containerKeyPrefix = keyValue.getKey();
      Long containerID = containerKeyPrefix.getContainerId();
      Integer numberOfKeys = keyValue.getValue();
      // Stop once the limit is reached and the next entry would introduce a
      // container not already present in the result map. (A negative limit
      // never matches the map size, so everything is returned.)
      if (containers.size() == limit && !containers.containsKey(containerID)) {
        break;
      }
      // initialize containerMetadata with 0 as number of keys.
      containers.computeIfAbsent(containerID, ContainerMetadata::new);
      // increment number of keys for the containerID; there is one table
      // entry per key prefix, so counts accumulate across entries.
      ContainerMetadata containerMetadata = containers.get(containerID);
      containerMetadata.setNumberOfKeys(containerMetadata.getNumberOfKeys() + numberOfKeys);
      containers.put(containerID, containerMetadata);
    }
  }
  return containers;
}
Aggregations