Example 21 with OzoneBucket

use of org.apache.hadoop.ozone.client.OzoneBucket in project ozone by apache.

the class TestObjectStoreWithFSO method testListKeysWithNotNormalizedPath.

@Test
public void testListKeysWithNotNormalizedPath() throws Exception {
    OzoneClient client = cluster.getClient();
    ObjectStore objectStore = client.getObjectStore();
    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
    String key1 = "/dir1///dir2/file1/";
    String key2 = "/dir1///dir2/file2/";
    String key3 = "/dir1///dir2/file3/";
    LinkedList<String> keys = new LinkedList<>();
    keys.add("dir1/");
    keys.add("dir1/dir2/");
    keys.add(OmUtils.normalizeKey(key1, false));
    keys.add(OmUtils.normalizeKey(key2, false));
    keys.add(OmUtils.normalizeKey(key3, false));
    int length = 10;
    byte[] input = new byte[length];
    Arrays.fill(input, (byte) 96);
    createKey(ozoneBucket, key1, 10, input);
    createKey(ozoneBucket, key2, 10, input);
    createKey(ozoneBucket, key3, 10, input);
    // Iterator with key name as prefix.
    Iterator<? extends OzoneKey> ozoneKeyIterator = ozoneBucket.listKeys("/dir1//", null);
    checkKeyList(ozoneKeyIterator, keys);
    // Iterator with normalized key prefix.
    ozoneKeyIterator = ozoneBucket.listKeys("dir1/");
    checkKeyList(ozoneKeyIterator, keys);
    // Iterator with key name as previous key.
    ozoneKeyIterator = ozoneBucket.listKeys(null, "/dir1///dir2/file1/");
    // Remove keys up to and including dir1/dir2/file1, since listing starts after the previous key.
    keys.remove("dir1/");
    keys.remove("dir1/dir2/");
    keys.remove("dir1/dir2/file1");
    checkKeyList(ozoneKeyIterator, keys);
    // Iterator with normalized key as previous key.
    ozoneKeyIterator = ozoneBucket.listKeys(null, OmUtils.normalizeKey(key1, false));
    checkKeyList(ozoneKeyIterator, keys);
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) ObjectStore(org.apache.hadoop.ozone.client.ObjectStore) OzoneClient(org.apache.hadoop.ozone.client.OzoneClient) LinkedList(java.util.LinkedList) Test(org.junit.Test)
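
The test above leans on OmUtils.normalizeKey to collapse the redundant slashes in the raw key names before comparing them with the expected list. Below is a minimal standalone sketch of that normalization, assuming OmUtils lives in org.apache.hadoop.ozone and behaves as the expected key list above implies; it is illustrative, not part of the test class.

import org.apache.hadoop.ozone.OmUtils;

public class NormalizeKeyExample {
    public static void main(String[] args) {
        // Assumes normalizeKey collapses repeated slashes and drops the leading
        // and trailing slash when the second argument is false, matching the
        // expected key list in the test above.
        String rawKey = "/dir1///dir2/file1/";
        String normalized = OmUtils.normalizeKey(rawKey, false);
        System.out.println(normalized); // expected: dir1/dir2/file1
    }
}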

Example 22 with OzoneBucket

use of org.apache.hadoop.ozone.client.OzoneBucket in project ozone by apache.

the class TestObjectStoreWithFSO method init.

/**
 * Create a MiniOzoneCluster for testing.
 * <p>
 *
 * @throws IOException
 */
@BeforeClass
public static void init() throws Exception {
    conf = new OzoneConfiguration();
    clusterId = UUID.randomUUID().toString();
    scmId = UUID.randomUUID().toString();
    omId = UUID.randomUUID().toString();
    conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.FILE_SYSTEM_OPTIMIZED.name());
    cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId).setScmId(scmId).setOmId(omId).build();
    cluster.waitForClusterToBeReady();
    // create a volume and a bucket to be used by OzoneFileSystem
    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster, BucketLayout.FILE_SYSTEM_OPTIMIZED);
    volumeName = bucket.getVolumeName();
    bucketName = bucket.getName();
    String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName());
    // Set the fs.defaultFS and start the filesystem
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
    // Set the number of keys to be processed during batch operate.
    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
    fs = FileSystem.get(conf);
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) BeforeClass(org.junit.BeforeClass)
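
Once fs.defaultFS points at the o3fs root of the FSO bucket, files written through the plain Hadoop FileSystem API land in that bucket. The sketch below is a hypothetical follow-up to init() using only standard FileSystem calls (create, exists); the path name is made up for illustration.

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OzoneFsWriteSketch {
    // "fs" is the FileSystem handle obtained at the end of init().
    static void writeSample(FileSystem fs) throws Exception {
        Path file = new Path("/dir1/sample.txt"); // hypothetical path
        try (FSDataOutputStream out = fs.create(file)) {
            out.writeBytes("hello ozone");
        }
        if (!fs.exists(file)) {
            throw new IllegalStateException("file was not created");
        }
    }
}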

Example 23 with OzoneBucket

use of org.apache.hadoop.ozone.client.OzoneBucket in project ozone by apache.

the class TestOmBlockVersioning method testAllocateCommit.

@Test
public void testAllocateCommit() throws Exception {
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
    String keyName = "key" + RandomStringUtils.randomNumeric(5);
    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
    // Versioning isn't supported currently, but just preserving old behaviour
    bucket.setVersioning(true);
    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setDataSize(1000).setRefreshPipeline(true).setAcls(new ArrayList<>()).setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)).build();
    // 1st update, version 0
    OpenKeySession openKey = writeClient.openKey(keyArgs);
    // explicitly set the keyLocation list before committing the key.
    keyArgs.setLocationInfoList(openKey.getKeyInfo().getLatestVersionLocations().getBlocksLatestVersionOnly());
    writeClient.commitKey(keyArgs, openKey.getId());
    OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
    OmKeyLocationInfoGroup highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
    assertEquals(0, highestVersion.getVersion());
    assertEquals(1, highestVersion.getLocationList().size());
    // 2nd update, version 1
    openKey = writeClient.openKey(keyArgs);
    // OmKeyLocationInfo locationInfo =
    // writeClient.allocateBlock(keyArgs, openKey.getId());
    // explicitly set the keyLocation list before committing the key.
    keyArgs.setLocationInfoList(openKey.getKeyInfo().getLatestVersionLocations().getBlocksLatestVersionOnly());
    writeClient.commitKey(keyArgs, openKey.getId());
    keyInfo = ozoneManager.lookupKey(keyArgs);
    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
    assertEquals(1, highestVersion.getVersion());
    assertEquals(1, highestVersion.getLocationList().size());
    // 3rd update, version 2
    openKey = writeClient.openKey(keyArgs);
    // this block will be appended to the latest version of version 2.
    OmKeyLocationInfo locationInfo = writeClient.allocateBlock(keyArgs, openKey.getId(), new ExcludeList());
    List<OmKeyLocationInfo> locationInfoList = openKey.getKeyInfo().getLatestVersionLocations().getBlocksLatestVersionOnly();
    Assert.assertTrue(locationInfoList.size() == 1);
    locationInfoList.add(locationInfo);
    keyArgs.setLocationInfoList(locationInfoList);
    writeClient.commitKey(keyArgs, openKey.getId());
    keyInfo = ozoneManager.lookupKey(keyArgs);
    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
    assertEquals(2, highestVersion.getVersion());
    assertEquals(2, highestVersion.getLocationList().size());
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) ExcludeList(org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList) OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OpenKeySession(org.apache.hadoop.ozone.om.helpers.OpenKeySession) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Test(org.junit.Test)
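
checkVersions is a helper defined elsewhere in TestOmBlockVersioning; from its use above it returns the OmKeyLocationInfoGroup carrying the highest version number. A hedged sketch of what such a helper might look like is shown below; the real helper may perform additional assertions.

import java.util.List;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;

public class VersionCheckSketch {
    // Pick the location group with the highest version, as implied by the
    // assertions on highestVersion.getVersion() in the test above.
    static OmKeyLocationInfoGroup highestVersion(List<OmKeyLocationInfoGroup> versions) {
        OmKeyLocationInfoGroup highest = null;
        for (OmKeyLocationInfoGroup group : versions) {
            if (highest == null || group.getVersion() > highest.getVersion()) {
                highest = group;
            }
        }
        return highest;
    }
}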

Example 24 with OzoneBucket

use of org.apache.hadoop.ozone.client.OzoneBucket in project ozone by apache.

the class TestOzoneManagerHA method setupBucket.

protected OzoneBucket setupBucket() throws Exception {
    String userName = "user" + RandomStringUtils.randomNumeric(5);
    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder().setOwner(userName).setAdmin(adminName).build();
    objectStore.createVolume(volumeName, createVolumeArgs);
    OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
    Assert.assertTrue(retVolumeinfo.getName().equals(volumeName));
    Assert.assertTrue(retVolumeinfo.getOwner().equals(userName));
    Assert.assertTrue(retVolumeinfo.getAdmin().equals(adminName));
    String bucketName = UUID.randomUUID().toString();
    retVolumeinfo.createBucket(bucketName);
    OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
    Assert.assertTrue(ozoneBucket.getVolumeName().equals(volumeName));
    return ozoneBucket;
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) VolumeArgs(org.apache.hadoop.ozone.client.VolumeArgs)
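
The bucket returned by setupBucket() is what the individual HA tests write into. As a hypothetical caller, the sketch below writes a small key into it, reusing the createKey signature that appears in Example 25; the key name and payload are made up for illustration.

import java.util.HashMap;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

public class SetupBucketUsageSketch {
    static void writeKey(OzoneBucket bucket) throws Exception {
        byte[] data = "test-data".getBytes();
        // Same createKey signature as in Example 25: key name, length,
        // replication type/factor and a metadata map.
        try (OzoneOutputStream out = bucket.createKey("key1", data.length,
                ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
                new HashMap<String, String>())) {
            out.write(data, 0, data.length);
        }
    }
}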

Example 25 with OzoneBucket

use of org.apache.hadoop.ozone.client.OzoneBucket in project ozone by apache.

the class TestContainerMapper method init.

@BeforeClass
public static void init() throws Exception {
    conf = new OzoneConfiguration();
    dbPath = GenericTestUtils.getRandomizedTempPath();
    conf.set(OZONE_OM_DB_DIRS, dbPath);
    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, "100MB");
    conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB);
    // By default, 2 pipelines are created. Setting the value to 6 will ensure
    // each pipeline can have 3 containers open.
    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 6);
    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).setScmId(SCM_ID).build();
    cluster.waitForClusterToBeReady();
    ozClient = OzoneClientFactory.getRpcClient(conf);
    store = ozClient.getObjectStore();
    storageContainerLocationClient = cluster.getStorageContainerLocationClient();
    ozoneManager = cluster.getOzoneManager();
    store.createVolume(volName);
    OzoneVolume volume = store.getVolume(volName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    byte[] data = generateData(10 * 1024 * 1024, (byte) 98);
    for (int i = 0; i < 20; i++) {
        String key = UUID.randomUUID().toString();
        keyList.add(key);
        OzoneOutputStream out = bucket.createKey(key, data.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<String, String>());
        out.write(data, 0, data.length);
        out.close();
    }
    cluster.stop();
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) BeforeClass(org.junit.BeforeClass)
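
For completeness, keys written this way can be read back through OzoneBucket.readKey, which returns an OzoneInputStream (listed in the aggregations below). The sketch is hypothetical and would have to run before cluster.stop(); this test stops the cluster, presumably because it inspects the OM database offline.

import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;

public class ReadKeySketch {
    // Read back a key of known length from the bucket populated in init().
    static byte[] readKey(OzoneBucket bucket, String keyName, int length) throws Exception {
        byte[] data = new byte[length];
        try (OzoneInputStream in = bucket.readKey(keyName)) {
            int read = in.read(data, 0, length);
            if (read != length) {
                throw new IllegalStateException("short read: " + read);
            }
        }
        return data;
    }
}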

Aggregations

OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket) 241
OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume) 166
Test (org.junit.Test) 155
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream) 62
OMException (org.apache.hadoop.ozone.om.exceptions.OMException) 43
IOException (java.io.IOException) 35
ArrayList (java.util.ArrayList) 26
ObjectStore (org.apache.hadoop.ozone.client.ObjectStore) 26
OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream) 26
OzoneAcl (org.apache.hadoop.ozone.OzoneAcl) 24
OzoneClient (org.apache.hadoop.ozone.client.OzoneClient) 24
OFSPath (org.apache.hadoop.ozone.OFSPath) 23
OzoneKey (org.apache.hadoop.ozone.client.OzoneKey) 22
HashMap (java.util.HashMap) 21
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration) 20
BucketArgs (org.apache.hadoop.ozone.client.BucketArgs) 20
Path (org.apache.hadoop.fs.Path) 18
OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails) 18
OmMultipartInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartInfo) 18
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo) 15