Use of org.apache.hadoop.ozone.recon.api.types.KeyMetadata.ContainerBlockMetadata in project ozone by apache, as used in the ContainerEndpoint class, method getKeysForContainer.
/**
 * Return {@link org.apache.hadoop.ozone.recon.api.types.KeyMetadata} for
 * all keys that belong to the container identified by the id path param,
 * starting from the given "prev-key" query param, up to the given "limit".
 * The given prevKeyPrefix is skipped from the results returned.
 *
 * @param containerID the given containerID.
 * @param limit max number of keys to return.
 * @param prevKeyPrefix the key prefix after which results are returned.
 * @return {@link Response}
 */
@GET
@Path("/{id}/keys")
public Response getKeysForContainer(
    @PathParam("id") Long containerID,
    @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
        int limit,
    @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY)
        String prevKeyPrefix) {
  Map<String, KeyMetadata> keyMetadataMap = new LinkedHashMap<>();
  long totalCount;
  try {
    Map<ContainerKeyPrefix, Integer> containerKeyPrefixMap =
        reconContainerMetadataManager.getKeyPrefixesForContainer(
            containerID, prevKeyPrefix);

    // Get set of Container-Key mappings for given containerId.
    for (ContainerKeyPrefix containerKeyPrefix :
        containerKeyPrefixMap.keySet()) {
      // Directly calling get() on the Key table instead of iterating since
      // only full keys are supported now. When we change to using a prefix
      // of the key, this needs to change to prefix seek.
      OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout())
          .getSkipCache(containerKeyPrefix.getKeyPrefix());
      if (null != omKeyInfo) {
        // Filter keys by version.
        List<OmKeyLocationInfoGroup> matchedKeys =
            omKeyInfo.getKeyLocationVersions().stream()
                .filter(k -> k.getVersion() == containerKeyPrefix.getKeyVersion())
                .collect(Collectors.toList());
        List<ContainerBlockMetadata> blockIds =
            getBlocks(matchedKeys, containerID);
        String ozoneKey = omMetadataManager.getOzoneKey(
            omKeyInfo.getVolumeName(),
            omKeyInfo.getBucketName(),
            omKeyInfo.getKeyName());
        if (keyMetadataMap.containsKey(ozoneKey)) {
          keyMetadataMap.get(ozoneKey).getVersions()
              .add(containerKeyPrefix.getKeyVersion());
          keyMetadataMap.get(ozoneKey).getBlockIds()
              .put(containerKeyPrefix.getKeyVersion(), blockIds);
        } else {
          // Break the for loop if the limit has been reached.
          if (keyMetadataMap.size() == limit) {
            break;
          }
          KeyMetadata keyMetadata = new KeyMetadata();
          keyMetadata.setBucket(omKeyInfo.getBucketName());
          keyMetadata.setVolume(omKeyInfo.getVolumeName());
          keyMetadata.setKey(omKeyInfo.getKeyName());
          keyMetadata.setCreationTime(
              Instant.ofEpochMilli(omKeyInfo.getCreationTime()));
          keyMetadata.setModificationTime(
              Instant.ofEpochMilli(omKeyInfo.getModificationTime()));
          keyMetadata.setDataSize(omKeyInfo.getDataSize());
          keyMetadata.getVersions().add(containerKeyPrefix.getKeyVersion());
          keyMetadataMap.put(ozoneKey, keyMetadata);
          keyMetadata.getBlockIds()
              .put(containerKeyPrefix.getKeyVersion(), blockIds);
        }
      }
    }
    totalCount =
        reconContainerMetadataManager.getKeyCountForContainer(containerID);
  } catch (IOException ioEx) {
    throw new WebApplicationException(ioEx,
        Response.Status.INTERNAL_SERVER_ERROR);
  }
  KeysResponse keysResponse =
      new KeysResponse(totalCount, keyMetadataMap.values());
  return Response.ok(keysResponse).build();
}
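The listing calls a private getBlocks(matchedKeys, containerID) helper of ContainerEndpoint that is not shown on this page; it is where ContainerBlockMetadata objects are actually built. A minimal sketch of what such a helper could look like, assuming ContainerBlockMetadata exposes a (containerID, localID) constructor and that the block locations are walked via OmKeyLocationInfoGroup#getLocationList() (the real Ozone implementation may differ):

// Sketch only: the actual ContainerEndpoint#getBlocks may differ.
// Assumes KeyMetadata.ContainerBlockMetadata(long containerID, long localID).
private List<ContainerBlockMetadata> getBlocks(
    List<OmKeyLocationInfoGroup> matchedKeys, long containerID) {
  List<ContainerBlockMetadata> blockIds = new ArrayList<>();
  for (OmKeyLocationInfoGroup locationInfoGroup : matchedKeys) {
    for (OmKeyLocationInfo locationInfo : locationInfoGroup.getLocationList()) {
      // Keep only the blocks that live in the requested container.
      if (locationInfo.getContainerID() == containerID) {
        blockIds.add(new ContainerBlockMetadata(
            locationInfo.getContainerID(), locationInfo.getLocalID()));
      }
    }
  }
  return blockIds;
}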
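For context, a client can fetch the keys of a container from Recon over HTTP. The following is a hypothetical call, assuming Recon serves this endpoint under /api/v1/containers on its default web port 9888 and that RECON_QUERY_LIMIT and RECON_QUERY_PREVKEY resolve to the limit and prevKey query parameter names; the host, container id, and key name are illustrative only.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ReconContainerKeysClient {
  public static void main(String[] args) throws Exception {
    // Illustrative URL: container 1, at most 10 keys after "/vol1/bucket1/key5".
    String url = "http://localhost:9888/api/v1/containers/1/keys"
        + "?limit=10&prevKey=%2Fvol1%2Fbucket1%2Fkey5";
    HttpClient client = HttpClient.newHttpClient();
    HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
    HttpResponse<String> response =
        client.send(request, HttpResponse.BodyHandlers.ofString());
    // The body is a JSON KeysResponse: totalCount plus the KeyMetadata entries,
    // including the per-version block ids populated above.
    System.out.println(response.body());
  }
}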