Use of org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix in project ozone by apache.
The class TestReconCodecs, method testContainerKeyPrefixCodec.
@Test
public void testContainerKeyPrefixCodec() throws IOException {
  ContainerKeyPrefix containerKeyPrefix =
      new ContainerKeyPrefix(System.currentTimeMillis(), "TestKeyPrefix", 0);
  Codec<ContainerKeyPrefix> codec = new ContainerKeyPrefixCodec();
  byte[] persistedFormat = codec.toPersistedFormat(containerKeyPrefix);
  Assert.assertNotNull(persistedFormat);
  ContainerKeyPrefix fromPersistedFormat =
      codec.fromPersistedFormat(persistedFormat);
  Assert.assertEquals(containerKeyPrefix, fromPersistedFormat);
}
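The round trip above is the codec's core contract. Below is a minimal sketch (imports omitted, as in the other snippets on this page) of a further property one would expect from ContainerKeyPrefixCodec: serialized keys of the same container share their leading bytes, which is what makes the prefix seek by container ID in the next snippet possible. The assumption that the container ID occupies the first Long.BYTES of the persisted form is ours and should be checked against the codec's source.

// Hedged sketch: assumes ContainerKeyPrefixCodec keeps the container ID at
// the front of the serialized key, so entries of one container sort together.
Codec<ContainerKeyPrefix> codec = new ContainerKeyPrefixCodec();
long containerId = 42L;
byte[] keyA = codec.toPersistedFormat(
    new ContainerKeyPrefix(containerId, "/vol/bucket/keyA", 0));
byte[] keyB = codec.toPersistedFormat(
    new ContainerKeyPrefix(containerId, "/vol/bucket/keyB", 0));
// Both serialized keys should share the same leading containerId bytes.
Assert.assertArrayEquals(
    Arrays.copyOf(keyA, Long.BYTES), Arrays.copyOf(keyB, Long.BYTES));
// The round trip still reproduces the original container ID.
Assert.assertEquals(containerId,
    codec.fromPersistedFormat(keyA).getContainerId());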
Use of org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix in project ozone by apache.
The class ReconContainerMetadataManagerImpl, method getKeyPrefixesForContainer.
/**
* Use the DB's prefix seek iterator to start the scan from the given
* container ID and prev key prefix. The prev key prefix is skipped from
* the result.
*
* @param containerId the given containerId.
* @param prevKeyPrefix the given key prefix to start the scan from.
* @return Map of (Key-Prefix, Count of Keys).
*/
@Override
public Map<ContainerKeyPrefix, Integer> getKeyPrefixesForContainer(
    long containerId, String prevKeyPrefix) throws IOException {
  Map<ContainerKeyPrefix, Integer> prefixes = new LinkedHashMap<>();
  try (TableIterator<ContainerKeyPrefix,
      ? extends KeyValue<ContainerKeyPrefix, Integer>> containerIterator =
          containerKeyTable.iterator()) {
    ContainerKeyPrefix seekKey;
    boolean skipPrevKey = false;
    if (StringUtils.isNotBlank(prevKeyPrefix)) {
      skipPrevKey = true;
      seekKey = new ContainerKeyPrefix(containerId, prevKeyPrefix);
    } else {
      seekKey = new ContainerKeyPrefix(containerId);
    }
    KeyValue<ContainerKeyPrefix, Integer> seekKeyValue =
        containerIterator.seek(seekKey);
    // If the seek found nothing, or a prev key prefix was given but the
    // first key returned does not match it, there is nothing to return.
    if (seekKeyValue == null ||
        (StringUtils.isNotBlank(prevKeyPrefix) &&
            !seekKeyValue.getKey().getKeyPrefix().equals(prevKeyPrefix))) {
      return prefixes;
    }
    while (containerIterator.hasNext()) {
      KeyValue<ContainerKeyPrefix, Integer> keyValue = containerIterator.next();
      ContainerKeyPrefix containerKeyPrefix = keyValue.getKey();
      // Skip the prev key if a prev key prefix was supplied.
      if (skipPrevKey &&
          containerKeyPrefix.getKeyPrefix().equals(prevKeyPrefix)) {
        continue;
      }
      // The prefix seek only positions the iterator; each entry still has to
      // be checked against the requested container ID before it is added.
      if (containerKeyPrefix.getContainerId() == containerId) {
        if (StringUtils.isNotEmpty(containerKeyPrefix.getKeyPrefix())) {
          prefixes.put(new ContainerKeyPrefix(containerId,
                  containerKeyPrefix.getKeyPrefix(),
                  containerKeyPrefix.getKeyVersion()),
              keyValue.getValue());
        } else {
          LOG.warn("Null key prefix returned for containerId = {} ",
              containerId);
        }
      } else {
        // Break when the first mismatch occurs.
        break;
      }
    }
  }
  return prefixes;
}
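A hedged usage sketch of the method above: the caller applies its own limit (as the Recon container endpoint does) and then resumes the scan by passing the last prefix it handed out as prevKeyPrefix. The variable metadataManager is assumed to be an already-initialized ReconContainerMetadataManager; exception handling is omitted.

// Hedged sketch: resume a scan after an externally applied limit.
long containerId = 1L;
int limit = 100;
Map<ContainerKeyPrefix, Integer> all =
    metadataManager.getKeyPrefixesForContainer(containerId, "");
String lastReturnedPrefix = null;
int count = 0;
for (ContainerKeyPrefix prefix : all.keySet()) {
  if (count++ == limit) {
    break;
  }
  lastReturnedPrefix = prefix.getKeyPrefix();
}
if (lastReturnedPrefix != null) {
  // The prev key prefix itself is skipped, so this call continues strictly
  // after the last prefix that was handed out.
  Map<ContainerKeyPrefix, Integer> continuation =
      metadataManager.getKeyPrefixesForContainer(containerId,
          lastReturnedPrefix);
}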
Use of org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix in project ozone by apache.
The class ContainerKeyMapperTask, method writeOMKeyToContainerDB.
/**
* Write an OM key to container DB and update containerID -> no. of keys
* count.
*
* @param key key String
* @param omKeyInfo omKeyInfo value
* @throws IOException if unable to write to recon DB.
*/
private void writeOMKeyToContainerDB(String key, OmKeyInfo omKeyInfo)
    throws IOException {
  long containerCountToIncrement = 0;
  for (OmKeyLocationInfoGroup omKeyLocationInfoGroup :
      omKeyInfo.getKeyLocationVersions()) {
    long keyVersion = omKeyLocationInfoGroup.getVersion();
    for (OmKeyLocationInfo omKeyLocationInfo :
        omKeyLocationInfoGroup.getLocationList()) {
      long containerId = omKeyLocationInfo.getContainerID();
      ContainerKeyPrefix containerKeyPrefix =
          new ContainerKeyPrefix(containerId, key, keyVersion);
      if (reconContainerMetadataManager
          .getCountForContainerKeyPrefix(containerKeyPrefix) == 0) {
        // Save on writes. No need to save same container-key prefix
        // mapping again.
        reconContainerMetadataManager
            .storeContainerKeyMapping(containerKeyPrefix, 1);
        // increment the count of containers if it does not exist
        if (!reconContainerMetadataManager.doesContainerExists(containerId)) {
          containerCountToIncrement++;
        }
        // update the count of keys for the given containerID
        long keyCount =
            reconContainerMetadataManager.getKeyCountForContainer(containerId);
        // increment the count and update containerKeyCount.
        // keyCount will be 0 if containerID is not found. So, there is no
        // need to initialize keyCount for the first time.
        reconContainerMetadataManager
            .storeContainerKeyCount(containerId, ++keyCount);
      }
    }
  }
  if (containerCountToIncrement > 0) {
    reconContainerMetadataManager
        .incrementContainerCountBy(containerCountToIncrement);
  }
}
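The bookkeeping above is easy to misread, so here is a short, self-contained model of it using plain collections in place of the Recon tables. This is an illustration only, not the Recon API: the point is that the container count grows once per previously unseen container, while the per-container key count grows once per new container-key prefix.

// Hedged, self-contained model of the counting logic (names are illustrative).
// Each element stands for a container-key prefix that is not yet in the DB;
// the real method skips prefixes it has already stored.
Map<Long, Long> keyCountPerContainer = new HashMap<>();
Set<Long> knownContainers = new HashSet<>();
long newContainers = 0;
long[] containersTouchedByNewPrefixes = {7L, 7L};  // two keys, same container
for (long containerId : containersTouchedByNewPrefixes) {
  if (!knownContainers.contains(containerId)) {
    newContainers++;                    // counted once per unseen container
    knownContainers.add(containerId);
  }
  keyCountPerContainer.merge(containerId, 1L, Long::sum);
}
// newContainers == 1, keyCountPerContainer.get(7L) == 2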
Use of org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix in project ozone by apache.
The class ContainerEndpoint, method getKeysForContainer.
/**
* Return {@link org.apache.hadoop.ozone.recon.api.types.KeyMetadata} for
* all keys that belong to the container identified by the id param
* starting from the given "prev-key" query param for the given "limit".
* The given prevKeyPrefix is skipped from the results returned.
*
* @param containerID the given containerID.
* @param limit max no. of keys to get.
* @param prevKeyPrefix the key prefix after which results are returned.
* @return {@link Response}
*/
@GET
@Path("/{id}/keys")
public Response getKeysForContainer(
    @PathParam("id") Long containerID,
    @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
        int limit,
    @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY)
        String prevKeyPrefix) {
  Map<String, KeyMetadata> keyMetadataMap = new LinkedHashMap<>();
  long totalCount;
  try {
    Map<ContainerKeyPrefix, Integer> containerKeyPrefixMap =
        reconContainerMetadataManager.getKeyPrefixesForContainer(containerID,
            prevKeyPrefix);
    // Get set of Container-Key mappings for given containerId.
    for (ContainerKeyPrefix containerKeyPrefix :
        containerKeyPrefixMap.keySet()) {
      // Directly calling get() on the Key table instead of iterating since
      // only full keys are supported now. When we change to using a prefix
      // of the key, this needs to change to prefix seek.
      OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout())
          .getSkipCache(containerKeyPrefix.getKeyPrefix());
      if (null != omKeyInfo) {
        // Filter keys by version.
        List<OmKeyLocationInfoGroup> matchedKeys =
            omKeyInfo.getKeyLocationVersions().stream()
                .filter(k -> (k.getVersion() ==
                    containerKeyPrefix.getKeyVersion()))
                .collect(Collectors.toList());
        List<ContainerBlockMetadata> blockIds =
            getBlocks(matchedKeys, containerID);
        String ozoneKey = omMetadataManager.getOzoneKey(
            omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(),
            omKeyInfo.getKeyName());
        if (keyMetadataMap.containsKey(ozoneKey)) {
          keyMetadataMap.get(ozoneKey).getVersions()
              .add(containerKeyPrefix.getKeyVersion());
          keyMetadataMap.get(ozoneKey).getBlockIds()
              .put(containerKeyPrefix.getKeyVersion(), blockIds);
        } else {
          // break the for loop if limit has been reached
          if (keyMetadataMap.size() == limit) {
            break;
          }
          KeyMetadata keyMetadata = new KeyMetadata();
          keyMetadata.setBucket(omKeyInfo.getBucketName());
          keyMetadata.setVolume(omKeyInfo.getVolumeName());
          keyMetadata.setKey(omKeyInfo.getKeyName());
          keyMetadata.setCreationTime(
              Instant.ofEpochMilli(omKeyInfo.getCreationTime()));
          keyMetadata.setModificationTime(
              Instant.ofEpochMilli(omKeyInfo.getModificationTime()));
          keyMetadata.setDataSize(omKeyInfo.getDataSize());
          keyMetadata.getVersions().add(containerKeyPrefix.getKeyVersion());
          keyMetadataMap.put(ozoneKey, keyMetadata);
          keyMetadata.getBlockIds()
              .put(containerKeyPrefix.getKeyVersion(), blockIds);
        }
      }
    }
    totalCount =
        reconContainerMetadataManager.getKeyCountForContainer(containerID);
  } catch (IOException ioEx) {
    throw new WebApplicationException(ioEx,
        Response.Status.INTERNAL_SERVER_ERROR);
  }
  KeysResponse keysResponse =
      new KeysResponse(totalCount, keyMetadataMap.values());
  return Response.ok(keysResponse).build();
}
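For completeness, a hedged sketch of how a client might call this endpoint over HTTP using java.net.http. The host, port (9888 is the usual Recon HTTP default), the /api/v1/containers base path, and the query parameter names limit and prevKey are assumptions about the deployment rather than something this snippet defines; exception handling is omitted.

// Hedged sketch: host, port, base path and query parameter names are assumed.
HttpClient client = HttpClient.newHttpClient();
HttpRequest request = HttpRequest.newBuilder()
    .uri(URI.create(
        "http://recon-host:9888/api/v1/containers/1/keys?limit=10"))
    .GET()
    .build();
HttpResponse<String> response =
    client.send(request, HttpResponse.BodyHandlers.ofString());
// The body is the JSON-serialized KeysResponse (totalCount plus key metadata).
System.out.println(response.body());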
Use of org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix in project ozone by apache.
The class TestContainerKeyMapperTask, method testReprocessOMDB.
@Test
public void testReprocessOMDB() throws Exception {
  Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer =
      reconContainerMetadataManager.getKeyPrefixesForContainer(1);
  assertTrue(keyPrefixesForContainer.isEmpty());
  keyPrefixesForContainer =
      reconContainerMetadataManager.getKeyPrefixesForContainer(2);
  assertTrue(keyPrefixesForContainer.isEmpty());
  Pipeline pipeline = getRandomPipeline();
  List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
  BlockID blockID1 = new BlockID(1, 1);
  OmKeyLocationInfo omKeyLocationInfo1 =
      getOmKeyLocationInfo(blockID1, pipeline);
  BlockID blockID2 = new BlockID(2, 1);
  OmKeyLocationInfo omKeyLocationInfo2 =
      getOmKeyLocationInfo(blockID2, pipeline);
  omKeyLocationInfoList.add(omKeyLocationInfo1);
  omKeyLocationInfoList.add(omKeyLocationInfo2);
  OmKeyLocationInfoGroup omKeyLocationInfoGroup =
      new OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
  writeDataToOm(reconOMMetadataManager, "key_one", "bucketOne", "sampleVol",
      Collections.singletonList(omKeyLocationInfoGroup));
  ContainerKeyMapperTask containerKeyMapperTask =
      new ContainerKeyMapperTask(reconContainerMetadataManager);
  containerKeyMapperTask.reprocess(reconOMMetadataManager);
  keyPrefixesForContainer =
      reconContainerMetadataManager.getKeyPrefixesForContainer(1);
  assertEquals(1, keyPrefixesForContainer.size());
  String omKey = omMetadataManager.getOzoneKey("sampleVol", "bucketOne",
      "key_one");
  ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix(1, omKey, 0);
  assertEquals(1, keyPrefixesForContainer.get(containerKeyPrefix).intValue());
  keyPrefixesForContainer =
      reconContainerMetadataManager.getKeyPrefixesForContainer(2);
  assertEquals(1, keyPrefixesForContainer.size());
  containerKeyPrefix = new ContainerKeyPrefix(2, omKey, 0);
  assertEquals(1, keyPrefixesForContainer.get(containerKeyPrefix).intValue());
  // Test if container key counts are updated
  assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(1L));
  assertEquals(1, reconContainerMetadataManager.getKeyCountForContainer(2L));
  assertEquals(0, reconContainerMetadataManager.getKeyCountForContainer(3L));
  // Test if container count is updated
  assertEquals(2, reconContainerMetadataManager.getCountForContainers());
}
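A hedged follow-up that could be appended to the test above, tying it back to the prev-key behaviour of getKeyPrefixesForContainer: since "key_one" is the only key mapped to container 1, asking for everything after that same prefix should come back empty, because the prev key prefix is skipped from the result.

// Hedged sketch: the prev key prefix is skipped, and container 1 has no
// further keys, so the continuation is empty.
Map<ContainerKeyPrefix, Integer> afterPrevKey =
    reconContainerMetadataManager.getKeyPrefixesForContainer(1, omKey);
assertTrue(afterPrevKey.isEmpty());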