Example 26 with OMMetadataManager

Use of org.apache.hadoop.ozone.om.OMMetadataManager in the Apache Ozone project.

From class TestOzoneFileInterfaces, method testOzFsReadWrite:

@Test
public void testOzFsReadWrite() throws IOException {
    long currentTime = Time.now();
    int stringLen = 20;
    OMMetadataManager metadataManager = cluster.getOzoneManager().getMetadataManager();
    String lev1dir = "l1dir";
    Path lev1path = createPath("/" + lev1dir);
    String lev1key = metadataManager.getOzoneDirKey(volumeName, bucketName, o3fs.pathToKey(lev1path));
    String lev2dir = "l2dir";
    Path lev2path = createPath("/" + lev1dir + "/" + lev2dir);
    String lev2key = metadataManager.getOzoneDirKey(volumeName, bucketName, o3fs.pathToKey(lev2path));
    String data = RandomStringUtils.randomAlphanumeric(stringLen);
    String filePath = RandomStringUtils.randomAlphanumeric(5);
    Path path = createPath("/" + lev1dir + "/" + lev2dir + "/" + filePath);
    String fileKey = metadataManager.getOzoneDirKey(volumeName, bucketName, o3fs.pathToKey(path));
    // verify that the prefix directories and the file do not already exist
    assertTrue(metadataManager.getKeyTable(getBucketLayout()).get(lev1key) == null);
    assertTrue(metadataManager.getKeyTable(getBucketLayout()).get(lev2key) == null);
    assertTrue(metadataManager.getKeyTable(getBucketLayout()).get(fileKey) == null);
    try (FSDataOutputStream stream = fs.create(path)) {
        stream.writeBytes(data);
    }
    assertEquals(statistics.getLong(StorageStatistics.CommonStatisticNames.OP_CREATE).longValue(), 1);
    assertEquals(statistics.getLong("objects_created").longValue(), 1);
    FileStatus status = fs.getFileStatus(path);
    assertEquals(statistics.getLong(StorageStatistics.CommonStatisticNames.OP_GET_FILE_STATUS).longValue(), 1);
    assertEquals(statistics.getLong("objects_query").longValue(), 1);
    // The timestamp of the newly created file should always be greater than
    // the time when the test was started
    assertTrue("Modification time has not been recorded: " + status, status.getModificationTime() > currentTime);
    assertFalse(status.isDirectory());
    assertEquals(FsPermission.getFileDefault(), status.getPermission());
    verifyOwnerGroup(status);
    FileStatus lev1status;
    FileStatus lev2status;
    // verify prefix directories got created when creating the file.
    assertTrue(metadataManager.getKeyTable(getBucketLayout()).get(lev1key).getKeyName().equals("l1dir/"));
    assertTrue(metadataManager.getKeyTable(getBucketLayout()).get(lev2key).getKeyName().equals("l1dir/l2dir/"));
    lev1status = getDirectoryStat(lev1path);
    lev2status = getDirectoryStat(lev2path);
    assertTrue((lev1status != null) && (lev2status != null));
    try (FSDataInputStream inputStream = fs.open(path)) {
        byte[] buffer = new byte[stringLen];
        // This read will not change the offset inside the file
        int readBytes = inputStream.read(0, buffer, 0, buffer.length);
        String out = new String(buffer, 0, buffer.length, UTF_8);
        assertEquals(data, out);
        assertEquals(readBytes, buffer.length);
        assertEquals(0, inputStream.getPos());
        // The following read will change the internal offset
        readBytes = inputStream.read(buffer, 0, buffer.length);
        out = new String(buffer, 0, buffer.length, UTF_8);
        assertEquals(data, out);
        assertEquals(readBytes, buffer.length);
        assertEquals(buffer.length, inputStream.getPos());
    }
    assertEquals(statistics.getLong(StorageStatistics.CommonStatisticNames.OP_OPEN).longValue(), 1);
    assertEquals(statistics.getLong("objects_read").longValue(), 1);
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
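
The test above derives OM database keys from o3fs paths and checks them in the key table. Below is a minimal sketch of that lookup pattern, assuming the usual o3fs path-to-key mapping (the absolute path minus its leading slash) and an already initialized OMMetadataManager; the class and method names (DirKeyLookup, dirEntryExists) are illustrative, not part of the Ozone API.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

public final class DirKeyLookup {

    private DirKeyLookup() {
    }

    /**
     * Returns true if the OM key table contains a directory entry for the
     * given filesystem path, e.g. "l1dir/" for the path "/l1dir".
     */
    public static boolean dirEntryExists(OMMetadataManager metadataManager,
            String volumeName, String bucketName, Path fsPath,
            BucketLayout layout) throws IOException {
        // o3fs keys are the absolute path without the leading '/'
        String relativeKey = fsPath.toUri().getPath().substring(1);
        // getOzoneDirKey appends the trailing '/' that marks a directory entry
        String dirKey = metadataManager.getOzoneDirKey(volumeName, bucketName, relativeKey);
        OmKeyInfo keyInfo = metadataManager.getKeyTable(layout).get(dirKey);
        return keyInfo != null;
    }
}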

Example 27 with OMMetadataManager

Use of org.apache.hadoop.ozone.om.OMMetadataManager in the Apache Ozone project.

From class TestOzoneFileSystemWithFSO, method testMultiLevelDirs:

@Test
public void testMultiLevelDirs() throws Exception {
    // reset metrics
    long numKeys = getCluster().getOzoneManager().getMetrics().getNumKeys();
    getCluster().getOzoneManager().getMetrics().decNumKeys(numKeys);
    Assert.assertEquals(0, getCluster().getOzoneManager().getMetrics().getNumKeys());
    // Op 1. create dir -> /d1/d2/d3/d4/
    // Op 2. create dir -> /d1/d2/d3/d4/d5
    // Op 3. create dir -> /d1/d2/d3/d4/d6
    Path parent = new Path("/d1/d2/d3/d4/");
    getFs().mkdirs(parent);
    OMMetadataManager omMgr = getCluster().getOzoneManager().getMetadataManager();
    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(omMgr.getBucketKey(getVolumeName(), getBucketName()));
    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
    final long volumeId = omMgr.getVolumeId(getVolumeName());
    final long bucketId = omMgr.getBucketId(getVolumeName(), getBucketName());
    ArrayList<String> dirKeys = new ArrayList<>();
    long d1ObjectID = verifyDirKey(volumeId, bucketId, omBucketInfo.getObjectID(), "d1", "/d1", dirKeys, omMgr);
    long d2ObjectID = verifyDirKey(volumeId, bucketId, d1ObjectID, "d2", "/d1/d2", dirKeys, omMgr);
    long d3ObjectID = verifyDirKey(volumeId, bucketId, d2ObjectID, "d3", "/d1/d2/d3", dirKeys, omMgr);
    long d4ObjectID = verifyDirKey(volumeId, bucketId, d3ObjectID, "d4", "/d1/d2/d3/d4", dirKeys, omMgr);
    Assert.assertEquals("Wrong OM numKeys metrics", 4, getCluster().getOzoneManager().getMetrics().getNumKeys());
    // create sub-dirs under same parent
    Path subDir5 = new Path("/d1/d2/d3/d4/d5");
    getFs().mkdirs(subDir5);
    Path subDir6 = new Path("/d1/d2/d3/d4/d6");
    getFs().mkdirs(subDir6);
    long d5ObjectID = verifyDirKey(volumeId, bucketId, d4ObjectID, "d5", "/d1/d2/d3/d4/d5", dirKeys, omMgr);
    long d6ObjectID = verifyDirKey(volumeId, bucketId, d4ObjectID, "d6", "/d1/d2/d3/d4/d6", dirKeys, omMgr);
    Assert.assertTrue("Wrong objectIds for sub-dirs[" + d5ObjectID + "/d5, " + d6ObjectID + "/d6] of same parent!", d5ObjectID != d6ObjectID);
    Assert.assertEquals("Wrong OM numKeys metrics", 6, getCluster().getOzoneManager().getMetrics().getNumKeys());
}
Also used : Path(org.apache.hadoop.fs.Path) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) ArrayList(java.util.ArrayList) Test(org.junit.Test)
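
verifyDirKey is a helper of the test class and is not shown here. The sketch below is a hypothetical equivalent, assuming the FSO directory table is keyed by /volumeId/bucketId/parentObjectId/dirName (the same shape as the file key built in Example 28) and that OmDirectoryInfo exposes getName() and getObjectID(); the class and method names are ours.

import java.io.IOException;

import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.junit.Assert;

public final class FsoDirCheck {

    private FsoDirCheck() {
    }

    /**
     * Looks up one directory level in the FSO directory table and returns its
     * objectID, which becomes the parent id for the next level down.
     */
    public static long verifyDirEntry(OMMetadataManager omMgr, long volumeId,
            long bucketId, long parentObjectId, String dirName) throws IOException {
        String dbKey = OzoneConsts.OM_KEY_PREFIX + volumeId
                + OzoneConsts.OM_KEY_PREFIX + bucketId
                + OzoneConsts.OM_KEY_PREFIX + parentObjectId
                + OzoneConsts.OM_KEY_PREFIX + dirName;
        OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
        Assert.assertNotNull("Directory entry missing for " + dbKey, dirInfo);
        Assert.assertEquals(dirName, dirInfo.getName());
        return dirInfo.getObjectID();
    }
}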

Example 28 with OMMetadataManager

Use of org.apache.hadoop.ozone.om.OMMetadataManager in the Apache Ozone project.

From class TestOzoneFileSystemWithFSO, method testCreateFile:

@Test
public void testCreateFile() throws Exception {
    // create file -> /d1/d2/file1
    Path parent = new Path("/d1/d2/");
    Path file = new Path(parent, "file1");
    FSDataOutputStream outputStream = getFs().create(file);
    String openFileKey = "";
    OMMetadataManager omMgr = getCluster().getOzoneManager().getMetadataManager();
    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(omMgr.getBucketKey(getVolumeName(), getBucketName()));
    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
    ArrayList<String> dirKeys = new ArrayList<>();
    final long volumeId = omMgr.getVolumeId(getVolumeName());
    final long bucketId = omMgr.getBucketId(getVolumeName(), getBucketName());
    long d1ObjectID = verifyDirKey(volumeId, bucketId, omBucketInfo.getObjectID(), "d1", "/d1", dirKeys, omMgr);
    long d2ObjectID = verifyDirKey(volumeId, bucketId, d1ObjectID, "d2", "/d1/d2", dirKeys, omMgr);
    openFileKey = OzoneConsts.OM_KEY_PREFIX + volumeId + OzoneConsts.OM_KEY_PREFIX + bucketId + OzoneConsts.OM_KEY_PREFIX + d2ObjectID + OzoneConsts.OM_KEY_PREFIX + file.getName();
    // trigger CommitKeyRequest
    outputStream.close();
    OmKeyInfo omKeyInfo = omMgr.getKeyTable(getBucketLayout()).get(openFileKey);
    Assert.assertNotNull("Invalid Key!", omKeyInfo);
    verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID);
    // wait for DB updates
    GenericTestUtils.waitFor(() -> {
        try {
            return omMgr.getOpenKeyTable(getBucketLayout()).isEmpty();
        } catch (IOException e) {
            LOG.error("DB failure!", e);
            Assert.fail("DB failure!");
            return false;
        }
    }, 1000, 120000);
}
Also used : Path(org.apache.hadoop.fs.Path) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) ArrayList(java.util.ArrayList) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) Test(org.junit.Test)
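
The openFileKey concatenation above encodes the FSO addressing scheme for files: /volumeId/bucketId/parentObjectId/fileName. The tiny helper below (illustrative, not an Ozone API) just gives that scheme a name.

import org.apache.hadoop.ozone.OzoneConsts;

public final class FsoKeyFormat {

    private FsoKeyFormat() {
    }

    /**
     * Builds the DB key of a file in a file-system-optimized (FSO) bucket:
     * /volumeId/bucketId/parentObjectId/fileName (OM_KEY_PREFIX is "/").
     */
    public static String fileDbKey(long volumeId, long bucketId,
            long parentObjectId, String fileName) {
        return OzoneConsts.OM_KEY_PREFIX + volumeId
                + OzoneConsts.OM_KEY_PREFIX + bucketId
                + OzoneConsts.OM_KEY_PREFIX + parentObjectId
                + OzoneConsts.OM_KEY_PREFIX + fileName;
    }
}

With it, the assignment above reduces to openFileKey = FsoKeyFormat.fileDbKey(volumeId, bucketId, d2ObjectID, file.getName()).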

Example 29 with OMMetadataManager

Use of org.apache.hadoop.ozone.om.OMMetadataManager in the Apache Ozone project.

From class TestNSSummaryEndpoint, method initializeNewOmMetadataManager:

/**
 * Create a new OM Metadata Manager instance with one user, one volume, and
 * two buckets.
 * @throws IOException if the OM DB cannot be created
 */
private static OMMetadataManager initializeNewOmMetadataManager(File omDbDir) throws IOException {
    OzoneConfiguration omConfiguration = new OzoneConfiguration();
    omConfiguration.set(OZONE_OM_DB_DIRS, omDbDir.getAbsolutePath());
    OMMetadataManager omMetadataManager = new OmMetadataManagerImpl(omConfiguration);
    String volumeKey = omMetadataManager.getVolumeKey(VOL);
    OmVolumeArgs args = OmVolumeArgs.newBuilder().setObjectID(VOL_OBJECT_ID).setVolume(VOL).setAdminName(TEST_USER).setOwnerName(TEST_USER).setQuotaInBytes(VOL_QUOTA).build();
    omMetadataManager.getVolumeTable().put(volumeKey, args);
    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder().setVolumeName(VOL).setBucketName(BUCKET_ONE).setObjectID(BUCKET_ONE_OBJECT_ID).setQuotaInBytes(BUCKET_ONE_QUOTA).build();
    OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder().setVolumeName(VOL).setBucketName(BUCKET_TWO).setObjectID(BUCKET_TWO_OBJECT_ID).setQuotaInBytes(BUCKET_TWO_QUOTA).build();
    String bucketKey = omMetadataManager.getBucketKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName());
    String bucketKey2 = omMetadataManager.getBucketKey(bucketInfo2.getVolumeName(), bucketInfo2.getBucketName());
    omMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
    omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
    return omMetadataManager;
}
Also used : OmMetadataManagerImpl(org.apache.hadoop.ozone.om.OmMetadataManagerImpl) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) OmVolumeArgs(org.apache.hadoop.ozone.om.helpers.OmVolumeArgs) ReconOMMetadataManager(org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration)
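
A short usage sketch for the fixture above, assuming it sits in the same test class next to initializeNewOmMetadataManager and that VOL, BUCKET_ONE and BUCKET_TWO are the test's constants; the method name verifySeedData is ours. It reads the seeded entries back through the same key helpers.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.junit.Assert;

// placed inside the test class, next to initializeNewOmMetadataManager(...)
private static void verifySeedData(File omDbDir) throws IOException {
    OMMetadataManager om = initializeNewOmMetadataManager(omDbDir);
    OmVolumeArgs volume = om.getVolumeTable().get(om.getVolumeKey(VOL));
    OmBucketInfo bucketOne = om.getBucketTable().get(om.getBucketKey(VOL, BUCKET_ONE));
    OmBucketInfo bucketTwo = om.getBucketTable().get(om.getBucketKey(VOL, BUCKET_TWO));
    // the volume and both buckets should be present in the freshly seeded DB
    Assert.assertNotNull(volume);
    Assert.assertNotNull(bucketOne);
    Assert.assertNotNull(bucketTwo);
}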

Example 30 with OMMetadataManager

Use of org.apache.hadoop.ozone.om.OMMetadataManager in the Apache Ozone project.

From class TestReconOmMetadataManagerImpl, method getOMMetadataManager:

/**
 * Get test OM metadata manager.
 * @return OMMetadataManager instance
 * @throws IOException if the OM DB cannot be created
 */
private OMMetadataManager getOMMetadataManager() throws IOException {
    // Create a new OM Metadata Manager instance + DB.
    File omDbDir = temporaryFolder.newFolder();
    OzoneConfiguration omConfiguration = new OzoneConfiguration();
    omConfiguration.set(OZONE_OM_DB_DIRS, omDbDir.getAbsolutePath());
    OMMetadataManager omMetadataManager = new OmMetadataManagerImpl(omConfiguration);
    // Create a volume + bucket + 2 keys.
    String volumeKey = omMetadataManager.getVolumeKey("sampleVol");
    OmVolumeArgs args = OmVolumeArgs.newBuilder().setVolume("sampleVol").setAdminName("TestUser").setOwnerName("TestUser").build();
    omMetadataManager.getVolumeTable().put(volumeKey, args);
    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder().setVolumeName("sampleVol").setBucketName("bucketOne").build();
    String bucketKey = omMetadataManager.getBucketKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName());
    omMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
    omMetadataManager.getKeyTable(getBucketLayout()).put("/sampleVol/bucketOne/key_one", new OmKeyInfo.Builder().setBucketName("bucketOne").setVolumeName("sampleVol").setKeyName("key_one").setReplicationConfig(StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)).build());
    omMetadataManager.getKeyTable(getBucketLayout()).put("/sampleVol/bucketOne/key_two", new OmKeyInfo.Builder().setBucketName("bucketOne").setVolumeName("sampleVol").setKeyName("key_two").setReplicationConfig(StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)).build());
    return omMetadataManager;
}
Also used : OmMetadataManagerImpl(org.apache.hadoop.ozone.om.OmMetadataManagerImpl) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) OmVolumeArgs(org.apache.hadoop.ozone.om.helpers.OmVolumeArgs) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) File(java.io.File)
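
A usage sketch for the fixture above, assuming getOzoneKey(...) produces the same "/volume/bucket/key" form that the put calls hard-code; the class and method names (SeededKeyCheck, verifySeededKeys) are illustrative.

import java.io.IOException;

import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.junit.Assert;

public final class SeededKeyCheck {

    private SeededKeyCheck() {
    }

    /** Reads back the two seeded keys and checks their key names. */
    public static void verifySeededKeys(OMMetadataManager om, BucketLayout layout)
            throws IOException {
        for (String keyName : new String[] {"key_one", "key_two"}) {
            // getOzoneKey builds "/sampleVol/bucketOne/<keyName>"
            String dbKey = om.getOzoneKey("sampleVol", "bucketOne", keyName);
            OmKeyInfo keyInfo = om.getKeyTable(layout).get(dbKey);
            Assert.assertNotNull("Missing seeded key: " + dbKey, keyInfo);
            Assert.assertEquals(keyName, keyInfo.getKeyName());
        }
    }
}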

Aggregations

OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager): 90
IOException (java.io.IOException): 53
OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse): 51
OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse): 47
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 40
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 33
OMMetrics (org.apache.hadoop.ozone.om.OMMetrics): 32
OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo): 30
AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger): 22
OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos): 19
Test (org.junit.Test): 19
KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs): 18
ArrayList (java.util.ArrayList): 17
HashMap (java.util.HashMap): 15
OmVolumeArgs (org.apache.hadoop.ozone.om.helpers.OmVolumeArgs): 15
ReconOMMetadataManager (org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager): 12
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 11
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 10
ReplicationConfig (org.apache.hadoop.hdds.client.ReplicationConfig): 8
OMMultiTenantManager (org.apache.hadoop.ozone.om.OMMultiTenantManager): 7