Search in sources:

Example 1 with OMMetadataManager

Use of org.apache.hadoop.ozone.om.OMMetadataManager in project ozone by apache.

From the class TestOzoneFileSystemWithFSO, method testCreateFile.

@Test
public void testCreateFile() throws Exception {
    // Op 1. create file -> /d1/d2/file1 (parent dirs are created implicitly)
    Path parent = new Path("/d1/d2/");
    Path file = new Path(parent, "file1");
    FSDataOutputStream outputStream = getFs().create(file);
    OMMetadataManager omMgr = getCluster().getOzoneManager().getMetadataManager();
    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(omMgr.getBucketKey(getVolumeName(), getBucketName()));
    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
    ArrayList<String> dirKeys = new ArrayList<>();
    // Walk the implicit directory chain, verifying each level exists and
    // chaining objectIDs: each child is keyed under its parent's objectID.
    long d1ObjectID = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1", dirKeys, omMgr);
    long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys, omMgr);
    // In FSO layout the file key is <parentObjectID>/<fileName>.
    String openFileKey = d2ObjectID + OzoneConsts.OM_KEY_PREFIX + file.getName();
    // trigger CommitKeyRequest
    outputStream.close();
    OmKeyInfo omKeyInfo = omMgr.getKeyTable(getBucketLayout()).get(openFileKey);
    Assert.assertNotNull("Invalid Key!", omKeyInfo);
    verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID);
    // wait for DB updates: the open-key entry is removed asynchronously
    // after commit, so poll until the open key table is empty.
    GenericTestUtils.waitFor(() -> {
        try {
            return omMgr.getOpenKeyTable(getBucketLayout()).isEmpty();
        } catch (IOException e) {
            LOG.error("DB failure!", e);
            Assert.fail("DB failure!");
            // Unreachable: Assert.fail throws, but the compiler needs a return.
            return false;
        }
    }, 1000, 120000);
}
Also used : Path(org.apache.hadoop.fs.Path) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) ArrayList(java.util.ArrayList) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) Test(org.junit.Test)

Example 2 with OMMetadataManager

Use of org.apache.hadoop.ozone.om.OMMetadataManager in project ozone by apache.

From the class TestOzoneFileSystemWithFSO, method testMultiLevelDirs.

@Test
public void testMultiLevelDirs() throws Exception {
    // Reset the numKeys metric so the assertions below start from zero.
    long existingKeys = getCluster().getOzoneManager().getMetrics().getNumKeys();
    getCluster().getOzoneManager().getMetrics().decNumKeys(existingKeys);
    Assert.assertEquals(0, getCluster().getOzoneManager().getMetrics().getNumKeys());
    // Op 1. create dir -> /d1/d2/d3/d4/
    // Op 2. create dir -> /d1/d2/d3/d4/d5
    // Op 3. create dir -> /d1/d2/d3/d4/d6
    Path dirPath = new Path("/d1/d2/d3/d4/");
    getFs().mkdirs(dirPath);
    OMMetadataManager omMgr = getCluster().getOzoneManager().getMetadataManager();
    String bucketKey = omMgr.getBucketKey(getVolumeName(), getBucketName());
    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(bucketKey);
    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
    ArrayList<String> dirKeys = new ArrayList<>();
    // Walk down the hierarchy, verifying each level and chaining objectIDs.
    long objIdD1 = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1", dirKeys, omMgr);
    long objIdD2 = verifyDirKey(objIdD1, "d2", "/d1/d2", dirKeys, omMgr);
    long objIdD3 = verifyDirKey(objIdD2, "d3", "/d1/d2/d3", dirKeys, omMgr);
    long objIdD4 = verifyDirKey(objIdD3, "d4", "/d1/d2/d3/d4", dirKeys, omMgr);
    Assert.assertEquals("Wrong OM numKeys metrics", 4, getCluster().getOzoneManager().getMetrics().getNumKeys());
    // create sub-dirs under same parent
    getFs().mkdirs(new Path("/d1/d2/d3/d4/d5"));
    getFs().mkdirs(new Path("/d1/d2/d3/d4/d6"));
    long objIdD5 = verifyDirKey(objIdD4, "d5", "/d1/d2/d3/d4/d5", dirKeys, omMgr);
    long objIdD6 = verifyDirKey(objIdD4, "d6", "/d1/d2/d3/d4/d6", dirKeys, omMgr);
    // Siblings must get distinct objectIDs even though they share a parent.
    Assert.assertTrue("Wrong objectIds for sub-dirs[" + objIdD5 + "/d5, " + objIdD6 + "/d6] of same parent!", objIdD5 != objIdD6);
    Assert.assertEquals("Wrong OM numKeys metrics", 6, getCluster().getOzoneManager().getMetrics().getNumKeys());
}
Also used : Path(org.apache.hadoop.fs.Path) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) ArrayList(java.util.ArrayList) Test(org.junit.Test)

Example 3 with OMMetadataManager

Use of org.apache.hadoop.ozone.om.OMMetadataManager in project ozone by apache.

From the class TestOzoneFileInterfaces, method testOzFsReadWrite.

@Test
public void testOzFsReadWrite() throws IOException {
    long currentTime = Time.now();
    int stringLen = 20;
    OMMetadataManager metadataManager = cluster.getOzoneManager().getMetadataManager();
    String lev1dir = "l1dir";
    Path lev1path = createPath("/" + lev1dir);
    String lev1key = metadataManager.getOzoneDirKey(volumeName, bucketName, o3fs.pathToKey(lev1path));
    String lev2dir = "l2dir";
    Path lev2path = createPath("/" + lev1dir + "/" + lev2dir);
    String lev2key = metadataManager.getOzoneDirKey(volumeName, bucketName, o3fs.pathToKey(lev2path));
    String data = RandomStringUtils.randomAlphanumeric(stringLen);
    String filePath = RandomStringUtils.randomAlphanumeric(5);
    Path path = createPath("/" + lev1dir + "/" + lev2dir + "/" + filePath);
    String fileKey = metadataManager.getOzoneDirKey(volumeName, bucketName, o3fs.pathToKey(path));
    // Verify the prefix directories and the file do not already exist.
    assertTrue(metadataManager.getKeyTable(getBucketLayout()).get(lev1key) == null);
    assertTrue(metadataManager.getKeyTable(getBucketLayout()).get(lev2key) == null);
    assertTrue(metadataManager.getKeyTable(getBucketLayout()).get(fileKey) == null);
    try (FSDataOutputStream stream = fs.create(path)) {
        stream.writeBytes(data);
    }
    // JUnit convention: expected value first, actual value second.
    assertEquals(1, statistics.getLong(StorageStatistics.CommonStatisticNames.OP_CREATE).longValue());
    assertEquals(1, statistics.getLong("objects_created").longValue());
    FileStatus status = fs.getFileStatus(path);
    assertEquals(1, statistics.getLong(StorageStatistics.CommonStatisticNames.OP_GET_FILE_STATUS).longValue());
    assertEquals(1, statistics.getLong("objects_query").longValue());
    // The timestamp of the newly created file should always be greater than
    // the time when the test was started
    assertTrue("Modification time has not been recorded: " + status, status.getModificationTime() > currentTime);
    assertFalse(status.isDirectory());
    assertEquals(FsPermission.getFileDefault(), status.getPermission());
    verifyOwnerGroup(status);
    FileStatus lev1status;
    FileStatus lev2status;
    // verify prefix directories got created when creating the file.
    assertTrue(metadataManager.getKeyTable(getBucketLayout()).get(lev1key).getKeyName().equals("l1dir/"));
    assertTrue(metadataManager.getKeyTable(getBucketLayout()).get(lev2key).getKeyName().equals("l1dir/l2dir/"));
    lev1status = getDirectoryStat(lev1path);
    lev2status = getDirectoryStat(lev2path);
    assertTrue((lev1status != null) && (lev2status != null));
    try (FSDataInputStream inputStream = fs.open(path)) {
        byte[] buffer = new byte[stringLen];
        // This positioned read will not change the offset inside the file.
        int readBytes = inputStream.read(0, buffer, 0, buffer.length);
        String out = new String(buffer, 0, buffer.length, UTF_8);
        assertEquals(data, out);
        assertEquals(buffer.length, readBytes);
        assertEquals(0, inputStream.getPos());
        // The following read will change the internal offset
        readBytes = inputStream.read(buffer, 0, buffer.length);
        // Re-decode the buffer: the second read overwrote it, so verify the
        // freshly read bytes rather than the stale string from the first read.
        out = new String(buffer, 0, buffer.length, UTF_8);
        assertEquals(data, out);
        assertEquals(buffer.length, readBytes);
        assertEquals(buffer.length, inputStream.getPos());
    }
    assertEquals(1, statistics.getLong(StorageStatistics.CommonStatisticNames.OP_OPEN).longValue());
    assertEquals(1, statistics.getLong("objects_read").longValue());
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)

Example 4 with OMMetadataManager

Use of org.apache.hadoop.ozone.om.OMMetadataManager in project ozone by apache.

From the class TestOzoneManagerDoubleBufferWithDummyResponse, method setup.

@Before
public void setup() throws IOException {
    // Point OM metadata at a fresh temporary directory for each test run.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(OZONE_METADATA_DIRS, folder.newFolder().getAbsolutePath());
    omMetadataManager = new OmMetadataManagerImpl(conf);
    // Record the last applied index reported through the Ratis snapshot hook.
    OzoneManagerRatisSnapshot ratisSnapshot = index -> {
        lastAppliedIndex = index.get(index.size() - 1);
    };
    doubleBuffer = new OzoneManagerDoubleBuffer.Builder()
        .setOmMetadataManager(omMetadataManager)
        .setOzoneManagerRatisSnapShot(ratisSnapshot)
        .enableRatis(true)
        .setIndexToTerm((val) -> term)
        .build();
}
Also used : OmMetadataManagerImpl(org.apache.hadoop.ozone.om.OmMetadataManagerImpl) TransactionInfo(org.apache.hadoop.hdds.utils.TransactionInfo) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) TRANSACTION_INFO_KEY(org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY) CleanupTableInfo(org.apache.hadoop.ozone.om.response.CleanupTableInfo) OMClientResponse(org.apache.hadoop.ozone.om.response.OMClientResponse) After(org.junit.After) OzoneManagerProtocolProtos(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos) BUCKET_TABLE(org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE) Before(org.junit.Before) OMResponse(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse) BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation) OZONE_METADATA_DIRS(org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) Assert.assertNotNull(org.junit.Assert.assertNotNull) CreateBucketResponse(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketResponse) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) Test(org.junit.Test) UUID(java.util.UUID) AtomicLong(java.util.concurrent.atomic.AtomicLong) Rule(org.junit.Rule) OmMetadataManagerImpl(org.apache.hadoop.ozone.om.OmMetadataManagerImpl) Time(org.apache.hadoop.util.Time) Assert(org.junit.Assert) TemporaryFolder(org.junit.rules.TemporaryFolder) GenericTestUtils.waitFor(org.apache.ozone.test.GenericTestUtils.waitFor) Assert.assertEquals(org.junit.Assert.assertEquals) OzoneManagerDoubleBufferMetrics(org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Before(org.junit.Before)

Example 5 with OMMetadataManager

Use of org.apache.hadoop.ozone.om.OMMetadataManager in project ozone by apache.

From the class TestFileSizeCountTask, method testReprocessAtScale.

@Test
public void testReprocessAtScale() throws IOException {
    // Build mocks for 2 volumes x 500 buckets x 42 keys per bucket,
    // plus the iterator hasNext() answer sequence (true per key, then false).
    List<OmKeyInfo> mockedKeys = new ArrayList<>();
    List<Boolean> hasNextAnswer = new ArrayList<>();
    for (int vol = 1; vol <= 2; vol++) {
        for (int bkt = 1; bkt <= 500; bkt++) {
            for (int key = 1; key <= 42; key++) {
                OmKeyInfo keyInfo = mock(OmKeyInfo.class);
                given(keyInfo.getKeyName()).willReturn("key" + key);
                given(keyInfo.getVolumeName()).willReturn("vol" + vol);
                given(keyInfo.getBucketName()).willReturn("bucket" + bkt);
                // One key per size bin: size is 2^(key+9) - 1.
                long fileSize = (long) Math.pow(2, key + 9) - 1L;
                given(keyInfo.getDataSize()).willReturn(fileSize);
                mockedKeys.add(keyInfo);
                hasNextAnswer.add(true);
            }
        }
    }
    hasNextAnswer.add(false);
    // Wire the mocked key table iterator into a mocked metadata manager.
    OMMetadataManager metadataManager = mock(OmMetadataManagerImpl.class);
    TypedTable<String, OmKeyInfo> mockKeyTable = mock(TypedTable.class);
    TypedTable.TypedTableIterator keyIterator = mock(TypedTable.TypedTableIterator.class);
    TypedTable.TypedKeyValue keyValue = mock(TypedTable.TypedKeyValue.class);
    when(mockKeyTable.iterator()).thenReturn(keyIterator);
    when(metadataManager.getKeyTable(getBucketLayout())).thenReturn(mockKeyTable);
    when(keyIterator.hasNext()).thenAnswer(AdditionalAnswers.returnsElementsOf(hasNextAnswer));
    when(keyIterator.next()).thenReturn(keyValue);
    when(keyValue.getValue()).thenAnswer(AdditionalAnswers.returnsElementsOf(mockedKeys));
    Pair<String, Boolean> result = fileSizeCountTask.reprocess(metadataManager);
    assertTrue(result.getRight());
    // 2 volumes * 500 buckets * 42 bins = 42000 rows
    assertEquals(42000, fileCountBySizeDao.count());
    Record3<String, String, Long> recordToFind = dslContext.newRecord(FILE_COUNT_BY_SIZE.VOLUME, FILE_COUNT_BY_SIZE.BUCKET, FILE_COUNT_BY_SIZE.FILE_SIZE).value1("vol1").value2("bucket1").value3(1024L);
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
    // file size upper bound for 100000L is 131072L (next highest power of 2)
    recordToFind.value1("vol1");
    recordToFind.value3(131072L);
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
    recordToFind.value2("bucket500");
    recordToFind.value3(Long.MAX_VALUE);
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
}
Also used : ArrayList(java.util.ArrayList) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) TypedTable(org.apache.hadoop.hdds.utils.db.TypedTable) AbstractReconSqlDBTest(org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest) Test(org.junit.Test)

Aggregations

OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager)79 IOException (java.io.IOException)45 OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse)43 OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse)40 OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo)32 OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo)30 OMException (org.apache.hadoop.ozone.om.exceptions.OMException)29 OMMetrics (org.apache.hadoop.ozone.om.OMMetrics)26 Test (org.junit.Test)22 AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger)19 OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos)19 ArrayList (java.util.ArrayList)17 KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs)17 OmVolumeArgs (org.apache.hadoop.ozone.om.helpers.OmVolumeArgs)13 ReconOMMetadataManager (org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager)12 OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)11 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)9 HashMap (java.util.HashMap)8 OmMetadataManagerImpl (org.apache.hadoop.ozone.om.OmMetadataManagerImpl)7 RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo)7