Search in sources:

Example 1 with OzoneFileStatus

Use of org.apache.hadoop.ozone.om.helpers.OzoneFileStatus in the Apache Ozone project.

From class TestKeyManagerImpl, method testCreateDirectory.

@Test
public void testCreateDirectory() throws IOException {
    // Build a 6-component nested key name; creating the leaf directory
    // should implicitly create every missing ancestor directory.
    // StringBuilder over StringBuffer: no cross-thread use here.
    StringBuilder keyNameBuf = new StringBuilder();
    keyNameBuf.append(RandomStringUtils.randomAlphabetic(5));
    for (int i = 0; i < 5; i++) {
        keyNameBuf.append("/").append(RandomStringUtils.randomAlphabetic(5));
    }
    String keyName = keyNameBuf.toString();
    // Fix: build keyArgs from the full nested path. The original built it
    // before the loop above, so only the top-level component was created and
    // the verification loop below re-checked that same component repeatedly.
    OmKeyArgs keyArgs = createBuilder().setKeyName(keyName).build();
    writeClient.createDirectory(keyArgs);
    Path path = Paths.get(keyName);
    while (path != null) {
        // verify each ancestor directory along the path was created
        OmKeyArgs ancestorArgs = createBuilder().setKeyName(path.toString()).build();
        Assert.assertTrue(keyManager.getFileStatus(ancestorArgs).isDirectory());
        path = path.getParent();
    }
    // creating a directory at a path that already exists as a file must fail
    keyName = RandomStringUtils.randomAlphabetic(5);
    keyArgs = createBuilder().setKeyName(keyName).build();
    OpenKeySession keySession = writeClient.openKey(keyArgs);
    keyArgs.setLocationInfoList(keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
    writeClient.commitKey(keyArgs, keySession.getId());
    try {
        writeClient.createDirectory(keyArgs);
        Assert.fail("Creation should fail for directory.");
    } catch (OMException e) {
        // JUnit convention: expected value first, actual second.
        Assert.assertEquals(OMException.ResultCodes.FILE_ALREADY_EXISTS, e.getResult());
    }
    // create directory where parent is root
    keyName = RandomStringUtils.randomAlphabetic(5);
    keyArgs = createBuilder().setKeyName(keyName).build();
    writeClient.createDirectory(keyArgs);
    OzoneFileStatus fileStatus = keyManager.getFileStatus(keyArgs);
    Assert.assertTrue(fileStatus.isDirectory());
    // A pure directory key carries no block locations.
    Assert.assertTrue(fileStatus.getKeyInfo().getKeyLocationVersions().get(0).getLocationList().isEmpty());
}
Also used : Path(java.nio.file.Path) OpenKeySession(org.apache.hadoop.ozone.om.helpers.OpenKeySession) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) Test(org.junit.Test)

Example 2 with OzoneFileStatus

Use of org.apache.hadoop.ozone.om.helpers.OzoneFileStatus in the Apache Ozone project.

From class TestKeyManagerImpl, method testListStatusWithDeletedEntriesInCache.

@Test
public void testListStatusWithDeletedEntriesInCache() throws Exception {
    String prefixKey = "key-";
    TreeSet<String> existKeySet = new TreeSet<>();
    TreeSet<String> deletedKeySet = new TreeSet<>();
    // Seed 100 keys: even-numbered ones are written to the DB table and
    // tracked as live; odd-numbered ones are added to the table cache and
    // immediately tombstoned (absent cache value) so they read as deleted.
    for (int i = 1; i <= 100; i++) {
        if (i % 2 == 0) {
            OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKey + i, 1000L, HddsProtos.ReplicationType.RATIS, ONE, metadataManager);
            existKeySet.add(prefixKey + i);
        } else {
            OMRequestTestUtils.addKeyToTableCache(VOLUME_NAME, BUCKET_NAME, prefixKey + i, HddsProtos.ReplicationType.RATIS, ONE, metadataManager);
            String key = metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME, prefixKey + i);
            // Mark as deleted in cache (absent value == tombstone).
            metadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry(new CacheKey<>(key), new CacheValue<>(Optional.absent(), 2L));
            // Fix: track the plain key name, not the full ozone key, so this
            // set is comparable with existKeySet. Previously the full ozone
            // key (volume/bucket-prefixed) was stored here, which made the
            // intersection sanity check below trivially empty.
            deletedKeySet.add(prefixKey + i);
        }
    }
    OmKeyArgs rootDirArgs = createKeyArgs("");
    List<OzoneFileStatus> fileStatuses = keyManager.listStatus(rootDirArgs, true, "", 1000);
    // Should only get entries that are not marked as deleted.
    Assert.assertEquals(50, fileStatuses.size());
    // Test startKey: "key-" sorts before every "key-N", so the result is the same.
    fileStatuses = keyManager.listStatus(rootDirArgs, true, prefixKey, 1000);
    // Should only get entries that are not marked as deleted.
    Assert.assertEquals(50, fileStatuses.size());
    // Verify the listed key names are exactly the live set.
    TreeSet<String> expectedKeys = new TreeSet<>();
    for (OzoneFileStatus fileStatus : fileStatuses) {
        String keyName = fileStatus.getKeyInfo().getKeyName();
        expectedKeys.add(keyName);
        Assert.assertTrue(keyName.startsWith(prefixKey));
    }
    Assert.assertEquals(expectedKeys, existKeySet);
    // Sanity check, existKeySet should not intersect with deletedKeySet.
    Assert.assertEquals(0, Sets.intersection(existKeySet, deletedKeySet).size());
    // Next, mark every other remaining live entry as deleted.
    boolean doDelete = false;
    for (String key : existKeySet) {
        if (doDelete) {
            String ozoneKey = metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME, key);
            metadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry(new CacheKey<>(ozoneKey), new CacheValue<>(Optional.absent(), 2L));
            deletedKeySet.add(key);
        }
        doDelete = !doDelete;
    }
    // Update existKeySet to reflect the new deletions.
    existKeySet.removeAll(deletedKeySet);
    fileStatuses = keyManager.listStatus(rootDirArgs, true, "", 1000);
    // Should only get entries that are not marked as deleted.
    Assert.assertEquals(50 / 2, fileStatuses.size());
    // Verify result
    expectedKeys.clear();
    for (OzoneFileStatus fileStatus : fileStatuses) {
        String keyName = fileStatus.getKeyInfo().getKeyName();
        expectedKeys.add(keyName);
        Assert.assertTrue(keyName.startsWith(prefixKey));
    }
    Assert.assertEquals(expectedKeys, existKeySet);
    // Test pagination: page through in batches, resuming from the last key seen.
    final int batchSize = 5;
    String startKey = "";
    expectedKeys.clear();
    do {
        fileStatuses = keyManager.listStatus(rootDirArgs, true, startKey, batchSize);
        // Duplicates across page boundaries are fine: results go into a set.
        for (OzoneFileStatus fileStatus : fileStatuses) {
            startKey = fileStatus.getKeyInfo().getKeyName();
            expectedKeys.add(startKey);
            Assert.assertTrue(startKey.startsWith(prefixKey));
        }
    // fileStatuses.size() == batchSize indicates there might be another batch
    // fileStatuses.size() < batchSize indicates it is the last batch
    } while (fileStatuses.size() == batchSize);
    Assert.assertEquals(expectedKeys, existKeySet);
    // Clean up by marking remaining entries as deleted
    for (String key : existKeySet) {
        String ozoneKey = metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME, key);
        metadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry(new CacheKey<>(ozoneKey), new CacheValue<>(Optional.absent(), 2L));
        deletedKeySet.add(key);
    }
    // Update existKeySet
    existKeySet.removeAll(deletedKeySet);
    Assert.assertTrue(existKeySet.isEmpty());
}
Also used : TreeSet(java.util.TreeSet) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) Test(org.junit.Test)

Example 3 with OzoneFileStatus

Use of org.apache.hadoop.ozone.om.helpers.OzoneFileStatus in the Apache Ozone project.

From class TestKeyManagerImpl, method testListStatusWithTableCacheRecursive.

@Test
public void testListStatusWithTableCacheRecursive() throws Exception {
    // Create the layout /dir1, /dir1/subdir1 and /dir2.
    String dir1 = "dir1";
    writeClient.createDirectory(createBuilder().setKeyName(dir1).build());
    String dir1Subdir1 = dir1 + OZONE_URI_DELIMITER + "subdir1";
    writeClient.createDirectory(createBuilder().setKeyName(dir1Subdir1).build());
    String dir2 = "dir2";
    writeClient.createDirectory(createBuilder().setKeyName(dir2).build());
    OmKeyArgs rootArgs = createKeyArgs("");
    // Non-recursive listing of root sees only the two top-level directories.
    List<OzoneFileStatus> statuses = keyManager.listStatus(rootArgs, false, "", 1000);
    Assert.assertEquals(2, statuses.size());
    // Recursive listing also descends into dir1, picking up subdir1.
    statuses = keyManager.listStatus(rootArgs, true, "", 1000);
    Assert.assertEquals(3, statuses.size());
    // Place 10 key entries under /dir1/subdir1: odd-numbered ones go to the
    // TableCache, even-numbered ones go to the DB.
    String prefixKeyInDB = "key-d";
    String prefixKeyInCache = "key-c";
    for (int i = 1; i <= 10; i++) {
        if (i % 2 != 0) {
            // Add to TableCache
            OMRequestTestUtils.addKeyToTableCache(VOLUME_NAME, BUCKET_NAME, dir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInCache + i, HddsProtos.ReplicationType.RATIS, ONE, metadataManager);
        } else {
            // Add to DB
            OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, dir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInDB + i, 1000L, HddsProtos.ReplicationType.RATIS, ONE, metadataManager);
        }
    }
    // Non-recursive still returns just the two top-level directories.
    statuses = keyManager.listStatus(rootArgs, false, "", 1000);
    Assert.assertEquals(2, statuses.size());
    // Recursive now returns the 3 directories plus the 10 keys under subdir1.
    statuses = keyManager.listStatus(rootArgs, true, "", 1000);
    Assert.assertEquals(10 + 3, statuses.size());
    // Clean up: tombstone the TableCache entries. DB entry clean up is
    // handled by cleanupTest().
    for (int i = 1; i <= 10; i += 2) {
        String ozoneKey = metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME, dir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInCache + i);
        metadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry(new CacheKey<>(ozoneKey), new CacheValue<>(Optional.absent(), 2L));
    }
}
Also used : OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) Test(org.junit.Test)

Example 4 with OzoneFileStatus

Use of org.apache.hadoop.ozone.om.helpers.OzoneFileStatus in the Apache Ozone project.

From class TestKeyManagerImpl, method testLatestLocationVersion.

@Test
public void testLatestLocationVersion() throws IOException {
    // Verifies that setLatestVersionLocation(true) trims lookup/list results
    // to a single key-location version, while false returns all versions.
    // All assertEquals calls use the JUnit (expected, actual) order so that
    // failure messages report the values correctly.
    String keyName = RandomStringUtils.randomAlphabetic(5);
    OmKeyArgs keyArgs = createBuilder(VERSIONED_BUCKET_NAME).setKeyName(keyName).setLatestVersionLocation(true).build();
    // lookup for a non-existent key
    try {
        keyManager.lookupKey(keyArgs, null);
        Assert.fail("Lookup key should fail for non existent key");
    } catch (OMException ex) {
        if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) {
            throw ex;
        }
    }
    // create a key
    OpenKeySession keySession = writeClient.createFile(keyArgs, false, false);
    // randomly select 3 datanodes
    List<DatanodeDetails> nodeList = new ArrayList<>();
    nodeList.add((DatanodeDetails) scm.getClusterMap().getNode(0, null, null, null, null, 0));
    nodeList.add((DatanodeDetails) scm.getClusterMap().getNode(1, null, null, null, null, 0));
    nodeList.add((DatanodeDetails) scm.getClusterMap().getNode(2, null, null, null, null, 0));
    Assume.assumeFalse(nodeList.get(0).equals(nodeList.get(1)));
    Assume.assumeFalse(nodeList.get(0).equals(nodeList.get(2)));
    // create a pipeline using 3 datanodes
    Pipeline pipeline = scm.getPipelineManager().createPipeline(RatisReplicationConfig.getInstance(ReplicationFactor.THREE), nodeList);
    List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
    List<OmKeyLocationInfo> locationList = keySession.getKeyInfo().getLatestVersionLocations().getLocationList();
    Assert.assertEquals(1, locationList.size());
    locationInfoList.add(new OmKeyLocationInfo.Builder().setPipeline(pipeline).setBlockID(new BlockID(locationList.get(0).getContainerID(), locationList.get(0).getLocalID())).build());
    keyArgs.setLocationInfoList(locationInfoList);
    writeClient.commitKey(keyArgs, keySession.getId());
    // Mock out the pipelines from the SCM
    ContainerInfo containerInfo = new ContainerInfo.Builder().setContainerID(1L).setPipelineID(pipeline.getId()).build();
    List<ContainerWithPipeline> containerWithPipelines = Arrays.asList(new ContainerWithPipeline(containerInfo, pipeline));
    when(mockScmContainerClient.getContainerWithPipelineBatch(Arrays.asList(1L))).thenReturn(containerWithPipelines);
    OmKeyInfo key = keyManager.lookupKey(keyArgs, null);
    Assert.assertEquals(1, key.getKeyLocationVersions().size());
    // Overwrite the key, creating a second location version.
    keySession = writeClient.createFile(keyArgs, true, true);
    writeClient.commitKey(keyArgs, keySession.getId());
    // Test lookupKey (latestLocationVersion == true)
    key = keyManager.lookupKey(keyArgs, null);
    Assert.assertEquals(1, key.getKeyLocationVersions().size());
    // Test ListStatus (latestLocationVersion == true)
    List<OzoneFileStatus> fileStatuses = keyManager.listStatus(keyArgs, false, "", 1);
    Assert.assertEquals(1, fileStatuses.size());
    Assert.assertEquals(1, fileStatuses.get(0).getKeyInfo().getKeyLocationVersions().size());
    // Test GetFileStatus (latestLocationVersion == true)
    OzoneFileStatus ozoneFileStatus = keyManager.getFileStatus(keyArgs, null);
    Assert.assertEquals(1, ozoneFileStatus.getKeyInfo().getKeyLocationVersions().size());
    // Test LookupFile (latestLocationVersion == true)
    key = keyManager.lookupFile(keyArgs, null);
    Assert.assertEquals(1, key.getKeyLocationVersions().size());
    keyArgs = createBuilder(VERSIONED_BUCKET_NAME).setKeyName(keyName).setLatestVersionLocation(false).build();
    // Test lookupKey (latestLocationVersion == false)
    key = keyManager.lookupKey(keyArgs, null);
    Assert.assertEquals(2, key.getKeyLocationVersions().size());
    // Test ListStatus (latestLocationVersion == false)
    fileStatuses = keyManager.listStatus(keyArgs, false, "", 100);
    Assert.assertEquals(1, fileStatuses.size());
    Assert.assertEquals(2, fileStatuses.get(0).getKeyInfo().getKeyLocationVersions().size());
    // Test GetFileStatus (latestLocationVersion == false)
    ozoneFileStatus = keyManager.getFileStatus(keyArgs, null);
    Assert.assertEquals(2, ozoneFileStatus.getKeyInfo().getKeyLocationVersions().size());
    // Test LookupFile (latestLocationVersion == false)
    key = keyManager.lookupFile(keyArgs, null);
    Assert.assertEquals(2, key.getKeyLocationVersions().size());
    // Test ListKeys (latestLocationVersion is always true for ListKeys)
    List<OmKeyInfo> keyInfos = keyManager.listKeys(keyArgs.getVolumeName(), keyArgs.getBucketName(), "", keyArgs.getKeyName(), 100);
    Assert.assertEquals(1, keyInfos.size());
    Assert.assertEquals(1, keyInfos.get(0).getKeyLocationVersions().size());
}
Also used : ArrayList(java.util.ArrayList) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) BlockID(org.apache.hadoop.hdds.client.BlockID) OpenKeySession(org.apache.hadoop.ozone.om.helpers.OpenKeySession) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) Test(org.junit.Test)

Example 5 with OzoneFileStatus

Use of org.apache.hadoop.ozone.om.helpers.OzoneFileStatus in the Apache Ozone project.

From class TestKeyManagerImpl, method verifyFileStatus.

/**
 * Verifies that {@code fileStatuses} is exactly the listing of
 * {@code directory}, given the full sets of known directories and files.
 *
 * @param directory    the directory whose listing is being checked
 * @param fileStatuses the statuses returned by the listing call under test
 * @param directorySet all directory key names known to exist
 * @param fileSet      all file key names known to exist
 * @param recursive    whether the listing was recursive; if false, every
 *                     entry must be a direct child of {@code directory}
 */
private void verifyFileStatus(String directory, List<OzoneFileStatus> fileStatuses, Set<String> directorySet, Set<String> fileSet, boolean recursive) {
    for (OzoneFileStatus fileStatus : fileStatuses) {
        String normalizedKeyName = fileStatus.getTrimmedName();
        String parent = Paths.get(fileStatus.getKeyInfo().getKeyName()).getParent().toString();
        if (!recursive) {
            // if recursive is false, verify all the statuses have the input
            // directory as parent. JUnit convention: expected value first.
            Assert.assertEquals(directory, parent);
        }
        // verify filestatus is present in directory or file set accordingly
        if (fileStatus.isDirectory()) {
            Assert.assertTrue(directorySet + " doesn't contain " + normalizedKeyName, directorySet.contains(normalizedKeyName));
        } else {
            Assert.assertTrue(fileSet + " doesn't contain " + normalizedKeyName, fileSet.contains(normalizedKeyName));
        }
    }
    // count the number of entries which should be present in the directory:
    // all entries under the directory when recursive, direct children only
    // otherwise
    int numEntries = 0;
    Set<String> entrySet = new TreeSet<>(directorySet);
    entrySet.addAll(fileSet);
    for (String entry : entrySet) {
        if (OzoneFSUtils.getParent(entry).startsWith(OzoneFSUtils.addTrailingSlashIfNeeded(directory))) {
            if (recursive) {
                numEntries++;
            } else if (OzoneFSUtils.getParent(entry).equals(OzoneFSUtils.addTrailingSlashIfNeeded(directory))) {
                numEntries++;
            }
        }
    }
    // verify the number of entries matches the status list size
    // (expected count first, per JUnit convention)
    Assert.assertEquals(numEntries, fileStatuses.size());
}
Also used : TreeSet(java.util.TreeSet) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus)

Aggregations

OzoneFileStatus (org.apache.hadoop.ozone.om.helpers.OzoneFileStatus)42 OMException (org.apache.hadoop.ozone.om.exceptions.OMException)18 OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo)17 OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs)16 RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo)11 Test (org.junit.Test)10 ArrayList (java.util.ArrayList)9 IOException (java.io.IOException)6 TreeSet (java.util.TreeSet)6 OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo)6 FileNotFoundException (java.io.FileNotFoundException)4 Table (org.apache.hadoop.hdds.utils.db.Table)4 OmDirectoryInfo (org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo)4 Path (java.nio.file.Path)3 HashMap (java.util.HashMap)3 Map (java.util.Map)3 TreeMap (java.util.TreeMap)3 FileStatus (org.apache.hadoop.fs.FileStatus)3 ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline)3 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)3