Search in sources:

Example 1 with OmBucketInfo

Use of org.apache.hadoop.ozone.om.helpers.OmBucketInfo in project ozone by Apache.

The class RpcClient, method getBucketDetails:

@Override
public OzoneBucket getBucketDetails(String volumeName, String bucketName) throws IOException {
    verifyVolumeName(volumeName);
    verifyBucketName(bucketName);
    OmBucketInfo bucketInfo = ozoneManagerClient.getBucketInfo(volumeName, bucketName);
    return new OzoneBucket(conf, this, bucketInfo.getVolumeName(),
        bucketInfo.getBucketName(), bucketInfo.getStorageType(),
        bucketInfo.getIsVersionEnabled(), bucketInfo.getCreationTime(),
        bucketInfo.getModificationTime(), bucketInfo.getMetadata(),
        bucketInfo.getEncryptionKeyInfo() != null
            ? bucketInfo.getEncryptionKeyInfo().getKeyName() : null,
        bucketInfo.getSourceVolume(), bucketInfo.getSourceBucket(),
        bucketInfo.getUsedBytes(), bucketInfo.getUsedNamespace(),
        bucketInfo.getQuotaInBytes(), bucketInfo.getQuotaInNamespace(),
        bucketInfo.getBucketLayout(), bucketInfo.getOwner());
}
Also used: OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket)
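
A minimal usage sketch from the client side, assuming a reachable Ozone cluster and illustrative volume/bucket names: OzoneVolume.getBucket resolves the bucket through ClientProtocol.getBucketDetails, which RpcClient implements above.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public final class BucketDetailsExample {
    public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
            // getBucket returns an OzoneBucket populated from the
            // OmBucketInfo fields shown in the example above.
            OzoneBucket bucket = client.getObjectStore()
                .getVolume("vol1")
                .getBucket("bucket1");
            System.out.println("Layout: " + bucket.getBucketLayout());
            System.out.println("Used bytes: " + bucket.getUsedBytes());
        }
    }
}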

Example 2 with OmBucketInfo

Use of org.apache.hadoop.ozone.om.helpers.OmBucketInfo in project ozone by Apache.

The class PrefixParser, method parse:

public void parse(String vol, String buck, String db, String file) throws Exception {
    if (!Files.exists(Paths.get(db))) {
        System.out.println("DB path does not exist: " + db);
        return;
    }
    System.out.println("File path is: " + file);
    System.out.println("DB path is: " + db);
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, db);
    OmMetadataManagerImpl metadataManager = new OmMetadataManagerImpl(conf);
    metadataManager.start(conf);
    org.apache.hadoop.fs.Path effectivePath = new org.apache.hadoop.fs.Path("/");
    Path p = Paths.get(file);
    String volumeKey = metadataManager.getVolumeKey(vol);
    if (!metadataManager.getVolumeTable().isExist(volumeKey)) {
        System.out.println("Invalid Volume:" + vol);
        metadataManager.stop();
        return;
    }
    parserStats[Types.VOLUME.ordinal()]++;
    // First get the info about the bucket
    String bucketKey = metadataManager.getBucketKey(vol, buck);
    OmBucketInfo info = metadataManager.getBucketTable().get(bucketKey);
    if (info == null) {
        System.out.println("Invalid Bucket:" + buck);
        metadataManager.stop();
        return;
    }
    BucketLayout bucketLayout = OzoneManagerUtils.resolveLinkBucketLayout(info, metadataManager, new HashSet<>()).getBucketLayout();
    if (!bucketLayout.isFileSystemOptimized()) {
        System.out.println("Prefix tool only works for FileSystem Optimized" + "bucket. Bucket Layout is:" + bucketLayout);
        metadataManager.stop();
        return;
    }
    long lastObjectId = info.getObjectID();
    WithParentObjectId objectBucketId = new WithParentObjectId();
    objectBucketId.setObjectID(lastObjectId);
    dumpInfo(Types.BUCKET, effectivePath, objectBucketId, bucketKey);
    Iterator<Path> pathIterator = p.iterator();
    while (pathIterator.hasNext()) {
        Path elem = pathIterator.next();
        String path = metadataManager.getOzonePathKey(lastObjectId, elem.toString());
        OmDirectoryInfo directoryInfo = metadataManager.getDirectoryTable().get(path);
        org.apache.hadoop.fs.Path tmpPath = getEffectivePath(effectivePath, elem.toString());
        if (directoryInfo == null) {
            System.out.println("Given path contains a non-existent directory at:" + tmpPath);
            System.out.println("Dumping files and dirs at level:" + tmpPath.getParent());
            System.out.println();
            parserStats[Types.NON_EXISTENT_DIRECTORY.ordinal()]++;
            break;
        }
        effectivePath = tmpPath;
        dumpInfo(Types.INTERMEDIATE_DIRECTORY, effectivePath, directoryInfo, path);
        lastObjectId = directoryInfo.getObjectID();
    }
    // At the last level, dump both the file and directory tables.
    dumpTableInfo(Types.DIRECTORY, effectivePath, metadataManager.getDirectoryTable(), lastObjectId);
    dumpTableInfo(Types.FILE, effectivePath, metadataManager.getKeyTable(getBucketLayout()), lastObjectId);
    metadataManager.stop();
}
Also used: Path (java.nio.file.Path), OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo), BucketLayout (org.apache.hadoop.ozone.om.helpers.BucketLayout), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), OmMetadataManagerImpl (org.apache.hadoop.ozone.om.OmMetadataManagerImpl), OmDirectoryInfo (org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo), WithParentObjectId (org.apache.hadoop.ozone.om.helpers.WithParentObjectId), HashSet (java.util.HashSet)
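
The parser resolves the path one component at a time: each lookup key is derived from the parent directory's object ID plus the component name, which is why only FSO buckets (whose directory table is keyed this way) are supported. Below is a standalone sketch of that walk, with a hypothetical buildPathKey helper standing in for OmMetadataManager.getOzonePathKey; the real key layout is internal to the FSO schema.

import java.nio.file.Path;
import java.nio.file.Paths;

public final class FsoPathWalkSketch {
    // Hypothetical stand-in for OmMetadataManager.getOzonePathKey: in the FSO
    // schema a directory entry is keyed by its parent's objectID and its name.
    static String buildPathKey(long parentObjectId, String name) {
        return parentObjectId + "/" + name;  // illustrative format only
    }

    public static void main(String[] args) {
        long parentId = 1001L;  // the bucket's objectID seeds the walk
        for (Path elem : Paths.get("a/b/c")) {
            String key = buildPathKey(parentId, elem.toString());
            System.out.println("directory table lookup: " + key);
            // In PrefixParser, the objectID of the OmDirectoryInfo found here
            // becomes the parentId for the next component.
            parentId++;  // placeholder; real code uses directoryInfo.getObjectID()
        }
    }
}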

Example 3 with OmBucketInfo

Use of org.apache.hadoop.ozone.om.helpers.OmBucketInfo in project ozone by Apache.

The class TestKeyManagerImpl, method createBucket:

private static void createBucket(String volumeName, String bucketName, boolean isVersionEnabled) throws IOException {
    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setIsVersionEnabled(isVersionEnabled)
        .build();
    OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo);
}
Also used: OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo)
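
OmBucketInfo.newBuilder() is a conventional builder, so a fuller bucket can be assembled the same way. A sketch follows; only setVolumeName, setBucketName, and setIsVersionEnabled are confirmed by the example above, while setStorageType and setCreationTime are assumptions about the same builder.

import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;

public final class BucketInfoBuilderSketch {
    public static void main(String[] args) {
        OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
            .setVolumeName("vol1")
            .setBucketName("bucket1")
            .setIsVersionEnabled(false)
            .setStorageType(StorageType.DISK)             // assumed setter
            .setCreationTime(System.currentTimeMillis())  // assumed setter
            .build();
        System.out.println(bucketInfo.getVolumeName() + "/" + bucketInfo.getBucketName());
    }
}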

Example 4 with OmBucketInfo

Use of org.apache.hadoop.ozone.om.helpers.OmBucketInfo in project ozone by Apache.

The class TestOMEpochForNonRatis, method testUniqueTrxnIndexOnOMRestart:

@Test
public void testUniqueTrxnIndexOnOMRestart() throws Exception {
    // When OM is restarted, the transaction index for requests should not
    // start from 0. It should incrementally increase from the last
    // transaction index which was stored in DB before restart.
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
    String keyName = "key" + RandomStringUtils.randomNumeric(5);
    OzoneManager om = cluster.getOzoneManager();
    OzoneClient client = cluster.getClient();
    ObjectStore objectStore = client.getObjectStore();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    OzoneManagerProtocolClientSideTranslatorPB omClient =
        new OzoneManagerProtocolClientSideTranslatorPB(
            OmTransportFactory.create(conf, ugi, null),
            RandomStringUtils.randomAscii(5));
    objectStore.createVolume(volumeName);
    // Verify that the last transactionIndex stored in DB after volume
    // creation equals the transaction index corresponding to volume's
    // objectID. Also, the volume transaction index should be 1 as this is
    // the first transaction in this cluster.
    OmVolumeArgs volumeInfo = omClient.getVolumeInfo(volumeName);
    long volumeTrxnIndex = OmUtils.getTxIdFromObjectId(volumeInfo.getObjectID());
    Assert.assertEquals(1, volumeTrxnIndex);
    Assert.assertEquals(volumeTrxnIndex, om.getLastTrxnIndexForNonRatis());
    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
    ozoneVolume.createBucket(bucketName);
    // Verify last transactionIndex is updated after bucket creation
    OmBucketInfo bucketInfo = omClient.getBucketInfo(volumeName, bucketName);
    long bucketTrxnIndex = OmUtils.getTxIdFromObjectId(bucketInfo.getObjectID());
    Assert.assertEquals(2, bucketTrxnIndex);
    Assert.assertEquals(bucketTrxnIndex, om.getLastTrxnIndexForNonRatis());
    // Restart the OM and create new object
    cluster.restartOzoneManager();
    String data = "random data";
    OzoneOutputStream ozoneOutputStream = ozoneVolume.getBucket(bucketName)
        .createKey(keyName, data.length(), ReplicationType.RATIS,
            ReplicationFactor.ONE, new HashMap<>());
    ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length());
    ozoneOutputStream.close();
    // Verify last transactionIndex is updated after key creation and the
    // transaction index after restart is incremented from the last
    // transaction index before restart.
    OmKeyInfo omKeyInfo = omClient.lookupKey(new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .build());
    long keyTrxnIndex = OmUtils.getTxIdFromObjectId(omKeyInfo.getObjectID());
    Assert.assertEquals(3, keyTrxnIndex);
    // Key commit is a separate transaction, so the last trxn index in DB
    // should be one more than keyTrxnIndex.
    Assert.assertEquals(4, om.getLastTrxnIndexForNonRatis());
}
Also used: OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo), OzoneManagerProtocolClientSideTranslatorPB (org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB), ObjectStore (org.apache.hadoop.ozone.client.ObjectStore), OmVolumeArgs (org.apache.hadoop.ozone.om.helpers.OmVolumeArgs), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), OzoneClient (org.apache.hadoop.ozone.client.OzoneClient), OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
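
The assertions rely on the object ID packing the transaction index into its low bits, with an epoch in the high bits; OmUtils.getTxIdFromObjectId extracts the index back out. A toy round-trip is sketched below under the assumption of a 54-bit split; the exact shift constant is internal to OmUtils.

public final class ObjectIdTxIdSketch {
    // Assumed layout: high bits carry an epoch, low bits carry the
    // transaction index. The shift width is an assumption for illustration.
    private static final int EPOCH_SHIFT = 54;
    private static final long TX_MASK = (1L << EPOCH_SHIFT) - 1;

    static long objectIdFromTxId(long epoch, long txId) {
        return (epoch << EPOCH_SHIFT) | txId;
    }

    static long txIdFromObjectId(long objectId) {
        return objectId & TX_MASK;
    }

    public static void main(String[] args) {
        long objectId = objectIdFromTxId(1, 2);          // epoch 1, second txn
        System.out.println(txIdFromObjectId(objectId));  // prints 2
    }
}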

Example 5 with OmBucketInfo

Use of org.apache.hadoop.ozone.om.helpers.OmBucketInfo in project ozone by Apache.

The class TestS3InitiateMultipartUploadRequestWithFSO, method testValidateAndUpdateCache:

@Test
public void testValidateAndUpdateCache() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String prefix = "a/b/c/";
    List<String> dirs = new ArrayList<String>();
    dirs.add("a");
    dirs.add("b");
    dirs.add("c");
    String fileName = UUID.randomUUID().toString();
    String keyName = prefix + fileName;
    // Add volume and bucket to DB.
    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager);
    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
    OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
    long bucketID = omBucketInfo.getObjectID();
    OMRequest modifiedRequest = doPreExecuteInitiateMPUWithFSO(volumeName, bucketName, keyName);
    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadReqFSO = getS3InitiateMultipartUploadReq(modifiedRequest);
    OMClientResponse omClientResponse = s3InitiateMultipartUploadReqFSO.validateAndUpdateCache(ozoneManager, 100L, ozoneManagerDoubleBufferHelper);
    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus());
    long parentID = verifyDirectoriesInDB(dirs, bucketID);
    String uploadID = modifiedRequest.getInitiateMultiPartUploadRequest()
        .getKeyArgs().getMultipartUploadID();
    String multipartFileKey = omMetadataManager.getMultipartKey(volumeName,
        bucketName, keyName, uploadID);
    String multipartOpenFileKey = omMetadataManager.getMultipartKey(parentID,
        fileName, uploadID);
    OmKeyInfo omKeyInfo = omMetadataManager
        .getOpenKeyTable(s3InitiateMultipartUploadReqFSO.getBucketLayout())
        .get(multipartOpenFileKey);
    Assert.assertNotNull("Failed to find the fileInfo", omKeyInfo);
    Assert.assertEquals("FileName mismatches!", fileName, omKeyInfo.getKeyName());
    Assert.assertEquals("ParentId mismatches!", parentID, omKeyInfo.getParentObjectID());
    OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager
        .getMultipartInfoTable().get(multipartFileKey);
    Assert.assertNotNull("Failed to find the multipartFileInfo", omMultipartKeyInfo);
    Assert.assertEquals("ParentId mismatches!", parentID, omMultipartKeyInfo.getParentID());
    Assert.assertEquals(uploadID, omMultipartKeyInfo.getUploadID());
    long modificationTime = modifiedRequest.getInitiateMultiPartUploadRequest()
        .getKeyArgs().getModificationTime();
    Assert.assertEquals(modificationTime, omKeyInfo.getModificationTime());
    Assert.assertEquals(modificationTime, omKeyInfo.getCreationTime());
}
Also used: OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo), OMRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest), OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse), OmMultipartKeyInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo), ArrayList (java.util.ArrayList), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), Test (org.junit.Test)
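
The test exercises the two key shapes used by FSO multipart uploads: the multipart-info table is keyed by volume, bucket, key, and upload ID, while the open-file table is keyed by the parent directory's object ID, the file name, and the upload ID. The sketch below illustrates that distinction with hypothetical formatting helpers; the real layouts come from the two OMMetadataManager.getMultipartKey overloads.

public final class MultipartKeySketch {
    // Hypothetical stand-ins for the two getMultipartKey overloads used in
    // the test above; the separators are illustrative only.
    static String multipartKey(String vol, String bucket, String key, String uploadId) {
        return "/" + vol + "/" + bucket + "/" + key + "/" + uploadId;
    }

    static String multipartOpenFileKey(long parentId, String fileName, String uploadId) {
        return parentId + "/" + fileName + "/" + uploadId;
    }

    public static void main(String[] args) {
        String uploadId = "upload-0001";
        // Bucket-scoped key: addresses the upload in the multipart-info table.
        System.out.println(multipartKey("vol1", "bucket1", "a/b/c/file", uploadId));
        // Parent-scoped key: addresses the open file under its immediate
        // parent directory, matching the FSO objectID-based layout.
        System.out.println(multipartOpenFileKey(1005L, "file", uploadId));
    }
}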

Aggregations

OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo): 138
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 43
IOException (java.io.IOException): 38
Test (org.junit.Test): 35
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 34
ArrayList (java.util.ArrayList): 33
OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse): 33
OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager): 28
OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse): 24
OMMetrics (org.apache.hadoop.ozone.om.OMMetrics): 16
OmVolumeArgs (org.apache.hadoop.ozone.om.helpers.OmVolumeArgs): 16
RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo): 14
OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos): 14
Path (java.nio.file.Path): 12
OMRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest): 12
AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger): 11
OmDirectoryInfo (org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo): 11
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 11
KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs): 11
OmMultipartKeyInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo): 10