
Example 1 with RDBStore

Use of org.apache.hadoop.hdds.utils.db.RDBStore in the Apache Ozone project.

From the class TestReconWithOzoneManager, method testOmDBSyncing:

@Test
public void testOmDBSyncing() throws Exception {
    // add a vol, bucket and key
    addKeys(0, 1);
    // check if OM metadata has vol0/bucket0/key0 info
    String ozoneKey = metadataManager.getOzoneKey("vol0", "bucket0", "key0");
    OmKeyInfo keyInfo1 = metadataManager.getKeyTable(getBucketLayout()).get(ozoneKey);
    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> omKeyValueTableIterator = metadataManager.getKeyTable(getBucketLayout()).iterator();
    long omMetadataKeyCount = getTableKeyCount(omKeyValueTableIterator);
    // verify if OM has /vol0/bucket0/key0
    Assert.assertEquals("vol0", keyInfo1.getVolumeName());
    Assert.assertEquals("bucket0", keyInfo1.getBucketName());
    OzoneManagerServiceProviderImpl impl = (OzoneManagerServiceProviderImpl) cluster.getReconServer().getOzoneManagerServiceProvider();
    impl.syncDataFromOM();
    OzoneManagerSyncMetrics metrics = impl.getMetrics();
    // HTTP call to /api/containers
    String containerResponse = makeHttpCall(containerKeyServiceURL);
    long reconMetadataContainerCount = getReconContainerCount(containerResponse);
    // verify count of keys after full snapshot
    Assert.assertEquals(omMetadataKeyCount, reconMetadataContainerCount);
    // verify if Recon Metadata captures vol0/bucket0/key0 info in container0
    LinkedTreeMap containerResponseMap = getContainerResponseMap(containerResponse, 0);
    Assert.assertEquals(0, (long) (double) containerResponseMap.get("ContainerID"));
    Assert.assertEquals(1, (long) (double) containerResponseMap.get("NumberOfKeys"));
    // HTTP call to /api/task/status
    long omLatestSeqNumber = ((RDBStore) metadataManager.getStore()).getDb().getLatestSequenceNumber();
    String taskStatusResponse = makeHttpCall(taskStatusURL);
    long reconLatestSeqNumber = getReconTaskAttributeFromJson(taskStatusResponse, OmSnapshotRequest.name(), "lastUpdatedSeqNumber");
    // verify sequence number after full snapshot
    Assert.assertEquals(omLatestSeqNumber, reconLatestSeqNumber);
    Assert.assertEquals(0, metrics.getSequenceNumberLag().value());
    // add 4 keys to check for delta updates
    addKeys(1, 5);
    omKeyValueTableIterator = metadataManager.getKeyTable(getBucketLayout()).iterator();
    omMetadataKeyCount = getTableKeyCount(omKeyValueTableIterator);
    // update the next snapshot from om to verify delta updates
    impl.syncDataFromOM();
    // HTTP call to /api/containers
    containerResponse = makeHttpCall(containerKeyServiceURL);
    reconMetadataContainerCount = getReconContainerCount(containerResponse);
    // verify count of keys
    Assert.assertEquals(omMetadataKeyCount, reconMetadataContainerCount);
    // verify if Recon Metadata captures vol3/bucket3/key3 info in container3
    containerResponseMap = getContainerResponseMap(containerResponse, 3);
    Assert.assertEquals(3, (long) (double) containerResponseMap.get("ContainerID"));
    Assert.assertEquals(1, (long) (double) containerResponseMap.get("NumberOfKeys"));
    // HTTP call to /api/task/status
    omLatestSeqNumber = ((RDBStore) metadataManager.getStore()).getDb().getLatestSequenceNumber();
    taskStatusResponse = makeHttpCall(taskStatusURL);
    reconLatestSeqNumber = getReconTaskAttributeFromJson(taskStatusResponse, OmDeltaRequest.name(), "lastUpdatedSeqNumber");
    // verify sequence number after Delta Updates
    Assert.assertEquals(omLatestSeqNumber, reconLatestSeqNumber);
    Assert.assertEquals(0, metrics.getSequenceNumberLag().value());
    long beforeRestartSnapShotTimeStamp = getReconTaskAttributeFromJson(taskStatusResponse, OmSnapshotRequest.name(), "lastUpdatedTimestamp");
    // restart Recon
    cluster.restartReconServer();
    impl = (OzoneManagerServiceProviderImpl) cluster.getReconServer().getOzoneManagerServiceProvider();
    // add 5 more keys to OM
    addKeys(5, 10);
    omKeyValueTableIterator = metadataManager.getKeyTable(getBucketLayout()).iterator();
    omMetadataKeyCount = getTableKeyCount(omKeyValueTableIterator);
    // get the next snapshot from om
    impl.syncDataFromOM();
    // HTTP call to /api/containers
    containerResponse = makeHttpCall(containerKeyServiceURL);
    reconMetadataContainerCount = getReconContainerCount(containerResponse);
    // verify count of keys
    Assert.assertEquals(omMetadataKeyCount, reconMetadataContainerCount);
    // verify if Recon Metadata captures vol7/bucket7/key7 info in container7
    containerResponseMap = getContainerResponseMap(containerResponse, 7);
    Assert.assertEquals(7, (long) (double) containerResponseMap.get("ContainerID"));
    Assert.assertEquals(1, (long) (double) containerResponseMap.get("NumberOfKeys"));
    // HTTP call to /api/task/status
    omLatestSeqNumber = ((RDBStore) metadataManager.getStore()).getDb().getLatestSequenceNumber();
    taskStatusResponse = makeHttpCall(taskStatusURL);
    reconLatestSeqNumber = getReconTaskAttributeFromJson(taskStatusResponse, OmDeltaRequest.name(), "lastUpdatedSeqNumber");
    long afterRestartSnapShotTimeStamp = getReconTaskAttributeFromJson(taskStatusResponse, OmSnapshotRequest.name(), "lastUpdatedTimestamp");
    // verify only Delta updates were added to recon after restart.
    Assert.assertEquals(beforeRestartSnapShotTimeStamp, afterRestartSnapShotTimeStamp);
    // verify sequence number after Delta Updates
    Assert.assertEquals(omLatestSeqNumber, reconLatestSeqNumber);
    Assert.assertEquals(0, metrics.getSequenceNumberLag().value());
}
Also used: Table (org.apache.hadoop.hdds.utils.db.Table), LinkedTreeMap (com.google.gson.internal.LinkedTreeMap), OzoneManagerSyncMetrics (org.apache.hadoop.ozone.recon.metrics.OzoneManagerSyncMetrics), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), OzoneManagerServiceProviderImpl (org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl), RDBStore (org.apache.hadoop.hdds.utils.db.RDBStore), Test (org.junit.Test)
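
The helper getTableKeyCount is referenced above but not shown in this excerpt. A minimal sketch of what it could look like, assuming it simply walks the iterator and counts the entries (an illustration, not the project's actual implementation):

private long getTableKeyCount(TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator) throws IOException {
    // Walk the iterator to the end, counting every key/value entry.
    long keyCount = 0;
    while (iterator.hasNext()) {
        iterator.next();
        keyCount++;
    }
    // Close the iterator to release the underlying RocksDB resources.
    iterator.close();
    return keyCount;
}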

Example 2 with RDBStore

Use of org.apache.hadoop.hdds.utils.db.RDBStore in the Apache Ozone project.

From the class KeyManagerImpl, method getNextGreaterString:

private String getNextGreaterString(String volumeName, String bucketName, String keyPrefix) throws IOException {
    // Increment the last character of the string and return the new ozone key.
    Preconditions.checkArgument(!Strings.isNullOrEmpty(keyPrefix), "Key prefix is null or empty");
    CodecRegistry codecRegistry = ((RDBStore) metadataManager.getStore()).getCodecRegistry();
    byte[] keyPrefixInBytes = codecRegistry.asRawData(keyPrefix);
    keyPrefixInBytes[keyPrefixInBytes.length - 1]++;
    String nextPrefix = codecRegistry.asObject(keyPrefixInBytes, String.class);
    return metadataManager.getOzoneKey(volumeName, bucketName, nextPrefix);
}
Also used: CodecRegistry (org.apache.hadoop.hdds.utils.db.CodecRegistry), RDBStore (org.apache.hadoop.hdds.utils.db.RDBStore)
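
The incremented prefix returned by getNextGreaterString can serve as an exclusive upper bound when scanning all keys that share a prefix. A sketch of that idea, assuming only the Table/TableIterator API already used elsewhere on this page (the method countKeysInPrefixRange and its parameters are hypothetical):

private long countKeysInPrefixRange(Table<String, OmKeyInfo> keyTable, String startKey, String endKeyExclusive) throws IOException {
    long count = 0;
    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator = keyTable.iterator()) {
        // Position the iterator at the first key >= startKey.
        iterator.seek(startKey);
        while (iterator.hasNext()) {
            Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
            // Stop once the scan passes the "next greater string" bound.
            if (entry.getKey().compareTo(endKeyExclusive) >= 0) {
                break;
            }
            count++;
        }
    }
    return count;
}

Because the bound is produced by incrementing the last byte of the prefix, every key that actually starts with the prefix compares strictly less than it, which is the property such a range scan relies on.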

Example 3 with RDBStore

Use of org.apache.hadoop.hdds.utils.db.RDBStore in the Apache Ozone project.

From the class MockOzoneServiceProvider, method testGetAndApplyDeltaUpdatesFromOM:

@Test
public void testGetAndApplyDeltaUpdatesFromOM() throws Exception {
    // Writing two keys into a source OM DB and collecting the resulting
    // updates in a DBUpdatesWrapper.
    OMMetadataManager sourceOMMetadataMgr = initializeNewOmMetadataManager(temporaryFolder.newFolder());
    writeDataToOm(sourceOMMetadataMgr, "key_one");
    writeDataToOm(sourceOMMetadataMgr, "key_two");
    RocksDB rocksDB = ((RDBStore) sourceOMMetadataMgr.getStore()).getDb();
    TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(0L);
    DBUpdates dbUpdatesWrapper = new DBUpdates();
    while (transactionLogIterator.isValid()) {
        TransactionLogIterator.BatchResult result = transactionLogIterator.getBatch();
        result.writeBatch().markWalTerminationPoint();
        WriteBatch writeBatch = result.writeBatch();
        dbUpdatesWrapper.addWriteBatch(writeBatch.data(), result.sequenceNumber());
        transactionLogIterator.next();
    }
    // OM Service Provider's Metadata Manager.
    OMMetadataManager omMetadataManager = initializeNewOmMetadataManager(temporaryFolder.newFolder());
    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = new OzoneManagerServiceProviderImpl(configuration, getTestReconOmMetadataManager(omMetadataManager, temporaryFolder.newFolder()), getMockTaskController(), new ReconUtils(), getMockOzoneManagerClient(dbUpdatesWrapper));
    OMDBUpdatesHandler updatesHandler = new OMDBUpdatesHandler(omMetadataManager);
    ozoneManagerServiceProvider.getAndApplyDeltaUpdatesFromOM(0L, updatesHandler);
    OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics();
    assertEquals(4.0, metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0);
    assertEquals(1, metrics.getNumNonZeroDeltaRequests().value());
    // In this method, we have to assert the "GET" path and the "APPLY" path.
    // Assert GET path --> verify if the OMDBUpdatesHandler picked up the 4
    // events ( 1 Vol PUT + 1 Bucket PUT + 2 Key PUTs).
    assertEquals(4, updatesHandler.getEvents().size());
    // Assert APPLY path --> Verify if the OM service provider's RocksDB got
    // the changes.
    String fullKey = omMetadataManager.getOzoneKey("sampleVol", "bucketOne", "key_one");
    assertTrue(ozoneManagerServiceProvider.getOMMetadataManagerInstance().getKeyTable(getBucketLayout()).isExist(fullKey));
    fullKey = omMetadataManager.getOzoneKey("sampleVol", "bucketOne", "key_two");
    assertTrue(ozoneManagerServiceProvider.getOMMetadataManagerInstance().getKeyTable(getBucketLayout()).isExist(fullKey));
}
Also used: RocksDB (org.rocksdb.RocksDB), ReconUtils (org.apache.hadoop.ozone.recon.ReconUtils), DBUpdates (org.apache.hadoop.ozone.om.helpers.DBUpdates), OzoneManagerSyncMetrics (org.apache.hadoop.ozone.recon.metrics.OzoneManagerSyncMetrics), ReconOMMetadataManager (org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager), OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager), ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString), WriteBatch (org.rocksdb.WriteBatch), RDBStore (org.apache.hadoop.hdds.utils.db.RDBStore), TransactionLogIterator (org.rocksdb.TransactionLogIterator), OMDBUpdatesHandler (org.apache.hadoop.ozone.recon.tasks.OMDBUpdatesHandler), Test (org.junit.Test)
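
The "APPLY" path that this test exercises rests on RocksDB's ability to replay a serialized WriteBatch against another database. A minimal sketch of that underlying mechanism (an illustration of the RocksDB API, not the Recon implementation; replayBatches, targetRocksDB and collectedBatches are hypothetical names, and it additionally needs java.util.List, org.rocksdb.RocksDBException and org.rocksdb.WriteOptions):

private void replayBatches(RocksDB targetRocksDB, List<byte[]> collectedBatches) throws RocksDBException {
    try (WriteOptions writeOptions = new WriteOptions()) {
        for (byte[] batchData : collectedBatches) {
            // Reconstruct each batch from its serialized form and apply it in order.
            try (WriteBatch batch = new WriteBatch(batchData)) {
                targetRocksDB.write(writeOptions, batch);
            }
        }
    }
}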

Example 4 with RDBStore

Use of org.apache.hadoop.hdds.utils.db.RDBStore in the Apache Ozone project.

From the class MockOzoneServiceProvider, method testGetAndApplyDeltaUpdatesFromOMWithLimit:

@Test
public void testGetAndApplyDeltaUpdatesFromOMWithLimit() throws Exception {
    // Writing two keys into a source OM DB and collecting the resulting
    // updates in a DBUpdatesWrapper.
    OMMetadataManager sourceOMMetadataMgr = initializeNewOmMetadataManager(temporaryFolder.newFolder());
    writeDataToOm(sourceOMMetadataMgr, "key_one");
    writeDataToOm(sourceOMMetadataMgr, "key_two");
    RocksDB rocksDB = ((RDBStore) sourceOMMetadataMgr.getStore()).getDb();
    TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(0L);
    DBUpdates[] dbUpdatesWrapper = new DBUpdates[4];
    int index = 0;
    while (transactionLogIterator.isValid()) {
        TransactionLogIterator.BatchResult result = transactionLogIterator.getBatch();
        result.writeBatch().markWalTerminationPoint();
        WriteBatch writeBatch = result.writeBatch();
        dbUpdatesWrapper[index] = new DBUpdates();
        dbUpdatesWrapper[index].addWriteBatch(writeBatch.data(), result.sequenceNumber());
        index++;
        transactionLogIterator.next();
    }
    // OM Service Provider's Metadata Manager.
    OMMetadataManager omMetadataManager = initializeNewOmMetadataManager(temporaryFolder.newFolder());
    OzoneConfiguration withLimitConfiguration = new OzoneConfiguration(configuration);
    withLimitConfiguration.setLong(RECON_OM_DELTA_UPDATE_LIMIT, 1);
    withLimitConfiguration.setLong(RECON_OM_DELTA_UPDATE_LOOP_LIMIT, 3);
    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = new OzoneManagerServiceProviderImpl(withLimitConfiguration, getTestReconOmMetadataManager(omMetadataManager, temporaryFolder.newFolder()), getMockTaskController(), new ReconUtils(), getMockOzoneManagerClientWith4Updates(dbUpdatesWrapper[0], dbUpdatesWrapper[1], dbUpdatesWrapper[2], dbUpdatesWrapper[3]));
    OMDBUpdatesHandler updatesHandler = new OMDBUpdatesHandler(omMetadataManager);
    ozoneManagerServiceProvider.getAndApplyDeltaUpdatesFromOM(0L, updatesHandler);
    OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics();
    assertEquals(1.0, metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0);
    assertEquals(3, metrics.getNumNonZeroDeltaRequests().value());
    // In this method, we have to assert the "GET" path and the "APPLY" path.
    // Assert GET path --> verify if the OMDBUpdatesHandler picked up the first
    // 3 of 4 events ( 1 Vol PUT + 1 Bucket PUT + 2 Key PUTs).
    assertEquals(3, updatesHandler.getEvents().size());
    // Assert APPLY path --> Verify if the OM service provider's RocksDB got
    // the first 3 changes, last change not applied.
    String fullKey = omMetadataManager.getOzoneKey("sampleVol", "bucketOne", "key_one");
    assertTrue(ozoneManagerServiceProvider.getOMMetadataManagerInstance().getKeyTable(getBucketLayout()).isExist(fullKey));
    fullKey = omMetadataManager.getOzoneKey("sampleVol", "bucketOne", "key_two");
    assertFalse(ozoneManagerServiceProvider.getOMMetadataManagerInstance().getKeyTable(getBucketLayout()).isExist(fullKey));
}
Also used: RocksDB (org.rocksdb.RocksDB), ReconUtils (org.apache.hadoop.ozone.recon.ReconUtils), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString), TransactionLogIterator (org.rocksdb.TransactionLogIterator), DBCheckpoint (org.apache.hadoop.hdds.utils.db.DBCheckpoint), DBUpdates (org.apache.hadoop.ozone.om.helpers.DBUpdates), OzoneManagerSyncMetrics (org.apache.hadoop.ozone.recon.metrics.OzoneManagerSyncMetrics), ReconOMMetadataManager (org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager), OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager), WriteBatch (org.rocksdb.WriteBatch), RDBStore (org.apache.hadoop.hdds.utils.db.RDBStore), OMDBUpdatesHandler (org.apache.hadoop.ozone.recon.tasks.OMDBUpdatesHandler), Test (org.junit.Test)
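
With RECON_OM_DELTA_UPDATE_LIMIT set to 1 and RECON_OM_DELTA_UPDATE_LOOP_LIMIT set to 3, the sync loop issues three delta requests of one update each, so only the first three of the four prepared events reach Recon, which is exactly what the assertions check. The helper getMockOzoneManagerClientWith4Updates is not shown in this excerpt; a plausible sketch using Mockito's consecutive stubbing, assuming the client type org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol exposes a getDBUpdates call and that mock, when and any are statically imported (the exact signature is an assumption, not confirmed here):

private OzoneManagerProtocol getMockOzoneManagerClientWith4Updates(DBUpdates u1, DBUpdates u2, DBUpdates u3, DBUpdates u4) throws IOException {
    OzoneManagerProtocol omClientMock = mock(OzoneManagerProtocol.class);
    // Each successive delta request receives the next prepared batch of updates,
    // which is what lets the loop limit stop after three of the four batches.
    when(omClientMock.getDBUpdates(any())).thenReturn(u1, u2, u3, u4);
    return omClientMock;
}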

Example 5 with RDBStore

Use of org.apache.hadoop.hdds.utils.db.RDBStore in the Apache Ozone project.

From the class TestOMDBUpdatesHandler, method testDelete:

@Test
public void testDelete() throws Exception {
    OzoneConfiguration configuration = createNewTestPath();
    OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration);
    OzoneConfiguration conf2 = createNewTestPath();
    OmMetadataManagerImpl metaMgrCopy = new OmMetadataManagerImpl(conf2);
    // Write 1 volume, 1 key into source and target OM DBs.
    String volumeKey = metaMgr.getVolumeKey("sampleVol");
    String nonExistVolumeKey = metaMgr.getVolumeKey("nonExistingVolume");
    OmVolumeArgs args = OmVolumeArgs.newBuilder().setVolume("sampleVol").setAdminName("bilbo").setOwnerName("bilbo").build();
    metaMgr.getVolumeTable().put(volumeKey, args);
    metaMgrCopy.getVolumeTable().put(volumeKey, args);
    OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one");
    metaMgr.getKeyTable(getBucketLayout()).put("/sampleVol/bucketOne/key_one", omKeyInfo);
    metaMgrCopy.getKeyTable(getBucketLayout()).put("/sampleVol/bucketOne/key_one", omKeyInfo);
    // Delete the volume and key from target DB.
    metaMgr.getKeyTable(getBucketLayout()).delete("/sampleVol/bucketOne/key_one");
    metaMgr.getVolumeTable().delete(volumeKey);
    // Delete a non-existing volume and key
    metaMgr.getKeyTable(getBucketLayout()).delete("/sampleVol/bucketOne/key_two");
    metaMgr.getVolumeTable().delete(metaMgr.getVolumeKey("nonExistingVolume"));
    RDBStore rdbStore = (RDBStore) metaMgr.getStore();
    RocksDB rocksDB = rdbStore.getDb();
    TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(3);
    List<byte[]> writeBatches = new ArrayList<>();
    while (transactionLogIterator.isValid()) {
        TransactionLogIterator.BatchResult result = transactionLogIterator.getBatch();
        result.writeBatch().markWalTerminationPoint();
        WriteBatch writeBatch = result.writeBatch();
        writeBatches.add(writeBatch.data());
        transactionLogIterator.next();
    }
    // OMDBUpdatesHandler has access to target DB. So it has the volume and
    // key.
    OMDBUpdatesHandler omdbUpdatesHandler = new OMDBUpdatesHandler(metaMgrCopy);
    for (byte[] data : writeBatches) {
        WriteBatch writeBatch = new WriteBatch(data);
        writeBatch.iterate(omdbUpdatesHandler);
    }
    List<OMDBUpdateEvent> events = omdbUpdatesHandler.getEvents();
    assertEquals(4, events.size());
    OMDBUpdateEvent keyEvent = events.get(0);
    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, keyEvent.getAction());
    assertEquals("/sampleVol/bucketOne/key_one", keyEvent.getKey());
    assertEquals(omKeyInfo, keyEvent.getValue());
    OMDBUpdateEvent volEvent = events.get(1);
    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, volEvent.getAction());
    assertEquals(volumeKey, volEvent.getKey());
    assertNotNull(volEvent.getValue());
    OmVolumeArgs volumeInfo = (OmVolumeArgs) volEvent.getValue();
    assertEquals("sampleVol", volumeInfo.getVolume());
    // Assert the values of non existent keys are set to null.
    OMDBUpdateEvent nonExistKey = events.get(2);
    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, nonExistKey.getAction());
    assertEquals("/sampleVol/bucketOne/key_two", nonExistKey.getKey());
    assertNull(nonExistKey.getValue());
    OMDBUpdateEvent nonExistVolume = events.get(3);
    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, nonExistVolume.getAction());
    assertEquals(nonExistVolumeKey, nonExistVolume.getKey());
    assertNull(nonExistVolume.getValue());
}
Also used: RocksDB (org.rocksdb.RocksDB), OmVolumeArgs (org.apache.hadoop.ozone.om.helpers.OmVolumeArgs), ArrayList (java.util.ArrayList), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), TransactionLogIterator (org.rocksdb.TransactionLogIterator), OmMetadataManagerImpl (org.apache.hadoop.ozone.om.OmMetadataManagerImpl), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), WriteBatch (org.rocksdb.WriteBatch), RDBStore (org.apache.hadoop.hdds.utils.db.RDBStore), Test (org.junit.Test)
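
A note on the hard-coded starting point: getUpdatesSince(3) is used because the two earlier PUT operations (the volume and the key) presumably occupy sequence numbers 1 and 2 in this database, so starting at 3 yields exactly the four DELETE operations, matching assertEquals(4, events.size()). A sketch of how that starting point could be captured without a magic number, assuming the RocksDB handle is obtained before the deletes (an illustration, not the test's actual code):

// Record the latest sequence number right after the PUTs ...
long seqAfterPuts = rocksDB.getLatestSequenceNumber();
// ... perform the deletes, then read only the updates that follow.
TransactionLogIterator deletesOnlyIterator = rocksDB.getUpdatesSince(seqAfterPuts + 1);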

Aggregations

RDBStore (org.apache.hadoop.hdds.utils.db.RDBStore): 8
Test (org.junit.Test): 6
RocksDB (org.rocksdb.RocksDB): 5
WriteBatch (org.rocksdb.WriteBatch): 5
TransactionLogIterator (org.rocksdb.TransactionLogIterator): 4
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 3
DBUpdates (org.apache.hadoop.ozone.om.helpers.DBUpdates): 3
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 3
OzoneManagerSyncMetrics (org.apache.hadoop.ozone.recon.metrics.OzoneManagerSyncMetrics): 3
ArrayList (java.util.ArrayList): 2
DBCheckpoint (org.apache.hadoop.hdds.utils.db.DBCheckpoint): 2
OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager): 2
OmMetadataManagerImpl (org.apache.hadoop.ozone.om.OmMetadataManagerImpl): 2
OmVolumeArgs (org.apache.hadoop.ozone.om.helpers.OmVolumeArgs): 2
ReconUtils (org.apache.hadoop.ozone.recon.ReconUtils): 2
ReconOMMetadataManager (org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager): 2
OMDBUpdatesHandler (org.apache.hadoop.ozone.recon.tasks.OMDBUpdatesHandler): 2
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
LinkedTreeMap (com.google.gson.internal.LinkedTreeMap): 1