Example usage of org.apache.hadoop.ozone.client.OzoneBucket in the Apache Ozone project, from the class TestObjectStoreWithFSO, method testListKeysWithNotNormalizedPath.
@Test
public void testListKeysWithNotNormalizedPath() throws Exception {
  // Verifies that listKeys() normalizes non-normalized prefixes and previous
  // keys (leading and duplicated slashes) in an FSO bucket, and that FSO
  // listing also surfaces the intermediate directory entries.
  OzoneClient client = cluster.getClient();
  ObjectStore objectStore = client.getObjectStore();
  OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
  Assert.assertEquals(volumeName, ozoneVolume.getName());
  OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
  Assert.assertEquals(bucketName, ozoneBucket.getName());
  // Deliberately messy key names: leading slash, tripled slash, trailing slash.
  String key1 = "/dir1///dir2/file1/";
  String key2 = "/dir1///dir2/file2/";
  String key3 = "/dir1///dir2/file3/";
  // Expected listing order: parent directories first, then the normalized keys.
  LinkedList<String> keys = new LinkedList<>();
  keys.add("dir1/");
  keys.add("dir1/dir2/");
  keys.add(OmUtils.normalizeKey(key1, false));
  keys.add(OmUtils.normalizeKey(key2, false));
  keys.add(OmUtils.normalizeKey(key3, false));
  int length = 10;
  byte[] input = new byte[length];
  Arrays.fill(input, (byte) 96);
  createKey(ozoneBucket, key1, length, input);
  createKey(ozoneBucket, key2, length, input);
  createKey(ozoneBucket, key3, length, input);
  // Iterator with key name as prefix.
  Iterator<? extends OzoneKey> ozoneKeyIterator = ozoneBucket.listKeys("/dir1//", null);
  checkKeyList(ozoneKeyIterator, keys);
  // Iterator with normalized key prefix.
  ozoneKeyIterator = ozoneBucket.listKeys("dir1/");
  checkKeyList(ozoneKeyIterator, keys);
  // Iterator with key name as previous key.
  ozoneKeyIterator = ozoneBucket.listKeys(null, "/dir1///dir2/file1/");
  // Remove keys up to and including dir1/dir2/file1; listing after a previous
  // key is exclusive of that key.
  keys.remove("dir1/");
  keys.remove("dir1/dir2/");
  keys.remove("dir1/dir2/file1");
  checkKeyList(ozoneKeyIterator, keys);
  // Iterator with normalized key as previous key.
  ozoneKeyIterator = ozoneBucket.listKeys(null, OmUtils.normalizeKey(key1, false));
  checkKeyList(ozoneKeyIterator, keys);
}
Example usage of org.apache.hadoop.ozone.client.OzoneBucket in the Apache Ozone project, from the class TestObjectStoreWithFSO, method init.
/**
 * Create a MiniOzoneCluster for testing, with the FILE_SYSTEM_OPTIMIZED
 * bucket layout as the default, and initialize a volume, a bucket, and an
 * {@code o3fs} FileSystem rooted at that bucket.
 * <p>
 *
 * @throws IOException
 */
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
clusterId = UUID.randomUUID().toString();
scmId = UUID.randomUUID().toString();
omId = UUID.randomUUID().toString();
// Must be set before the cluster is built so OM creates FSO buckets by default.
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.FILE_SYSTEM_OPTIMIZED.name());
cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId).setScmId(scmId).setOmId(omId).build();
cluster.waitForClusterToBeReady();
// create a volume and a bucket to be used by OzoneFileSystem
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster, BucketLayout.FILE_SYSTEM_OPTIMIZED);
volumeName = bucket.getVolumeName();
bucketName = bucket.getName();
// Ozone filesystem URIs are of the form scheme://bucket.volume/.
String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName());
// Set the fs.defaultFS and start the filesystem
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
// Set the number of keys to be processed during batch operate.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
fs = FileSystem.get(conf);
}
Example usage of org.apache.hadoop.ozone.client.OzoneBucket in the Apache Ozone project, from the class TestOmBlockVersioning, method testAllocateCommit.
@Test
public void testAllocateCommit() throws Exception {
  // Verifies key versioning: each open/commit cycle creates a new version,
  // and a block allocated within an open session is appended to the latest
  // version's location list.
  String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
  String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
  String keyName = "key" + RandomStringUtils.randomNumeric(5);
  OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
  // Versioning isn't supported currently, but just preserving old behaviour
  bucket.setVersioning(true);
  OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setDataSize(1000).setRefreshPipeline(true).setAcls(new ArrayList<>()).setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)).build();
  // 1st update, version 0
  OpenKeySession openKey = writeClient.openKey(keyArgs);
  // explicitly set the keyLocation list before committing the key.
  keyArgs.setLocationInfoList(openKey.getKeyInfo().getLatestVersionLocations().getBlocksLatestVersionOnly());
  writeClient.commitKey(keyArgs, openKey.getId());
  OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
  OmKeyLocationInfoGroup highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
  assertEquals(0, highestVersion.getVersion());
  assertEquals(1, highestVersion.getLocationList().size());
  // 2nd update, version 1
  openKey = writeClient.openKey(keyArgs);
  // explicitly set the keyLocation list before committing the key.
  keyArgs.setLocationInfoList(openKey.getKeyInfo().getLatestVersionLocations().getBlocksLatestVersionOnly());
  writeClient.commitKey(keyArgs, openKey.getId());
  keyInfo = ozoneManager.lookupKey(keyArgs);
  highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
  assertEquals(1, highestVersion.getVersion());
  assertEquals(1, highestVersion.getLocationList().size());
  // 3rd update, version 2
  openKey = writeClient.openKey(keyArgs);
  // this block will be appended to the latest version of version 2.
  OmKeyLocationInfo locationInfo = writeClient.allocateBlock(keyArgs, openKey.getId(), new ExcludeList());
  List<OmKeyLocationInfo> locationInfoList = openKey.getKeyInfo().getLatestVersionLocations().getBlocksLatestVersionOnly();
  Assert.assertEquals(1, locationInfoList.size());
  locationInfoList.add(locationInfo);
  keyArgs.setLocationInfoList(locationInfoList);
  writeClient.commitKey(keyArgs, openKey.getId());
  keyInfo = ozoneManager.lookupKey(keyArgs);
  highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
  assertEquals(2, highestVersion.getVersion());
  // Version 2 carries both the inherited block and the newly allocated one.
  assertEquals(2, highestVersion.getLocationList().size());
}
Example usage of org.apache.hadoop.ozone.client.OzoneBucket in the Apache Ozone project, from the class TestOzoneManagerHA, method setupBucket.
/**
 * Creates a fresh volume with random owner/admin names and a bucket inside
 * it, asserting each creation took effect before returning.
 *
 * @return the newly created bucket
 * @throws Exception if volume or bucket creation fails
 */
protected OzoneBucket setupBucket() throws Exception {
  String userName = "user" + RandomStringUtils.randomNumeric(5);
  String adminName = "admin" + RandomStringUtils.randomNumeric(5);
  String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
  VolumeArgs createVolumeArgs = VolumeArgs.newBuilder().setOwner(userName).setAdmin(adminName).build();
  objectStore.createVolume(volumeName, createVolumeArgs);
  OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
  // assertEquals gives an expected-vs-actual message on failure, unlike
  // assertTrue(a.equals(b)).
  Assert.assertEquals(volumeName, retVolumeinfo.getName());
  Assert.assertEquals(userName, retVolumeinfo.getOwner());
  Assert.assertEquals(adminName, retVolumeinfo.getAdmin());
  String bucketName = UUID.randomUUID().toString();
  retVolumeinfo.createBucket(bucketName);
  OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
  Assert.assertEquals(bucketName, ozoneBucket.getName());
  Assert.assertEquals(volumeName, ozoneBucket.getVolumeName());
  return ozoneBucket;
}
Example usage of org.apache.hadoop.ozone.client.OzoneBucket in the Apache Ozone project, from the class TestContainerMapper, method init.
/**
 * Starts a MiniOzoneCluster, writes 20 keys of ~10 MB each into a fresh
 * volume/bucket to spread data across containers, then stops the cluster so
 * the container mapper can be tested against the on-disk OM DB.
 *
 * @throws Exception if cluster startup or key writes fail
 */
@BeforeClass
public static void init() throws Exception {
  conf = new OzoneConfiguration();
  dbPath = GenericTestUtils.getRandomizedTempPath();
  conf.set(OZONE_OM_DB_DIRS, dbPath);
  conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, "100MB");
  conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB);
  // By default, 2 pipelines are created. Setting the value to 6, will ensure
  // each pipleine can have 3 containers open.
  conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 6);
  cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).setScmId(SCM_ID).build();
  cluster.waitForClusterToBeReady();
  ozClient = OzoneClientFactory.getRpcClient(conf);
  store = ozClient.getObjectStore();
  storageContainerLocationClient = cluster.getStorageContainerLocationClient();
  ozoneManager = cluster.getOzoneManager();
  store.createVolume(volName);
  OzoneVolume volume = store.getVolume(volName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  byte[] data = generateData(10 * 1024 * 1024, (byte) 98);
  for (int i = 0; i < 20; i++) {
    String key = UUID.randomUUID().toString();
    keyList.add(key);
    // try-with-resources guarantees the stream is closed even if write fails.
    try (OzoneOutputStream out = bucket.createKey(key, data.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>())) {
      out.write(data, 0, data.length);
    }
  }
  // The mapper under test reads the OM DB directly; the live cluster is no
  // longer needed once the keys are persisted.
  cluster.stop();
}
Aggregations