Search in sources :

Example 1 with DebugDurableDataLogWrapper

Use of io.pravega.segmentstore.storage.DebugDurableDataLogWrapper in the pravega project.

From the class DurableDataLogRepairCommand, the method execute:

/**
 * Repairs a disabled Original DurableLog for the container id given as the single command argument.
 * Workflow: 1) back the Original Log up into a reserved Backup Log (or reuse an existing one),
 * 2) collect edit operations (add/delete/replace) interactively from the admin, 3) write the
 * edited contents into a reserved Repair Log, and 4) overwrite the Original Log's metadata so it
 * points at the edited contents. The Original Log must be disabled before this command can run,
 * and it must be re-enabled manually afterwards.
 *
 * @throws Exception if any step of the backup/edit/metadata-overwrite workflow fails.
 */
@Override
public void execute() throws Exception {
    ensureArgCount(1);
    int containerId = getIntArg(0);
    // Build the BookKeeper configuration from the command state, pointing at the service's ZooKeeper.
    val bkConfig = getCommandArgs().getState().getConfigBuilder().include(BookKeeperConfig.builder().with(BookKeeperConfig.ZK_ADDRESS, getServiceConfig().getZkURL())).build().getConfig(BookKeeperConfig::builder);
    @Cleanup val zkClient = createZKClient();
    @Cleanup DurableDataLogFactory dataLogFactory = new BookKeeperLogFactory(bkConfig, zkClient, getCommandArgs().getState().getExecutor());
    dataLogFactory.initialize();
    // Open the Original Log in read-only mode.
    @Cleanup val originalDataLog = dataLogFactory.createDebugLogWrapper(containerId);
    // Check if the Original Log is disabled; repairs are only allowed on disabled logs.
    if (originalDataLog.fetchMetadata().isEnabled()) {
        output("Original DurableLog is enabled. Repairs can only be done on disabled logs, exiting.");
        return;
    }
    // Make sure that the reserved id for Backup log is free before making any further progress.
    boolean createNewBackupLog = true;
    if (existsBackupLog(dataLogFactory)) {
        output("We found data in the Backup log, probably from a previous repair operation (or someone else running the same command at the same time). " + "You have three options: 1) Delete existing Backup Log and start a new repair process, " + "2) Keep existing Backup Log and re-use it for the current repair (i.e., skip creating a new Backup Log), " + "3) Quit.");
        switch(getIntUserInput("Select an option: [1|2|3]")) {
            case 1:
                // Delete everything related to the old Backup Log.
                try (DebugDurableDataLogWrapper backupDataLogDebugLogWrapper = dataLogFactory.createDebugLogWrapper(dataLogFactory.getBackupLogId())) {
                    backupDataLogDebugLogWrapper.deleteDurableLogMetadata();
                }
                break;
            case 2:
                // Keeping existing Backup Log, so not creating a new one.
                createNewBackupLog = false;
                break;
            default:
                // Any other answer (including 3) aborts without touching the existing Backup Log.
                output("Not doing anything with existing Backup Log this time.");
                return;
        }
    }
    // Create a new Backup Log if there wasn't any or if we removed the existing one.
    if (createNewBackupLog) {
        createBackupLog(dataLogFactory, containerId, originalDataLog);
    }
    // Validate the backup and remember how many operations it holds for the post-edit sanity check.
    int backupLogReadOperations = validateBackupLog(dataLogFactory, containerId, originalDataLog, createNewBackupLog);
    // Get user input of operations to skip, replace, or delete.
    List<LogEditOperation> durableLogEdits = getDurableLogEditsFromUser();
    // Show the edits to be committed to the original durable log so the user can confirm.
    output("The following edits will be used to edit the Original Log:");
    durableLogEdits.forEach(e -> output(e.toString()));
    output("Original DurableLog has been backed up correctly. Ready to apply admin-provided changes to the Original Log.");
    if (!confirmContinue()) {
        // Safe to abort here: the Backup Log persists and can be reused on the next run.
        output("Not editing Original DurableLog this time. A Backup Log has been left during the process and you " + "will find it the next time this command gets executed.");
        return;
    }
    // Ensure that the Repair Log is going to start from a clean state.
    output("Deleting existing metadata from Repair Log (if any)");
    try (val editedLogWrapper = dataLogFactory.createDebugLogWrapper(dataLogFactory.getRepairLogId())) {
        editedLogWrapper.deleteDurableLogMetadata();
    } catch (DurableDataLogException e) {
        if (e.getCause() instanceof KeeperException.NoNodeException) {
            // No ZK node means there is no stale Repair Log metadata; this is the normal first-run path.
            output("Repair Log does not exist, so nothing to delete.");
        } else {
            outputError("Error happened while attempting to cleanup Repair Log metadata.");
            outputException(e);
        }
    }
    // Create an EditingLogProcessor that will write the edited contents into the Repair Log.
    // NOTE(review): the DebugDurableDataLogWrapper created inline below is not closed, only the
    // read-only DurableDataLog it returns — confirm the wrapper holds no resources of its own.
    try (DurableDataLog editedDataLog = dataLogFactory.createDurableDataLog(dataLogFactory.getRepairLogId());
        EditingLogProcessor logEditState = new EditingLogProcessor(editedDataLog, durableLogEdits, getCommandArgs().getState().getExecutor());
        DurableDataLog backupDataLog = dataLogFactory.createDebugLogWrapper(dataLogFactory.getBackupLogId()).asReadOnly()) {
        editedDataLog.initialize(TIMEOUT);
        // Replay the Backup Log through the edit processor; it writes the edited stream into the Repair Log.
        readDurableDataLogWithCustomCallback(logEditState, dataLogFactory.getBackupLogId(), backupDataLog);
        Preconditions.checkState(!logEditState.isFailed);
        // After the editing has completed, we need to disable it before the metadata overwrite.
        editedDataLog.disable();
    } catch (Exception ex) {
        outputError("There have been errors while creating the edited version of the DurableLog.");
        outputException(ex);
        throw ex;
    }
    // Validate the contents of the newly created Repair Log.
    int editedDurableLogOperations = validateRepairLog(dataLogFactory, backupLogReadOperations, durableLogEdits);
    // Overwrite the original DurableLog metadata with the edited DurableLog metadata.
    try (val editedLogWrapper = dataLogFactory.createDebugLogWrapper(dataLogFactory.getRepairLogId())) {
        output("Original DurableLog Metadata: " + originalDataLog.fetchMetadata());
        output("Edited DurableLog Metadata: " + editedLogWrapper.fetchMetadata());
        originalDataLog.forceMetadataOverWrite(editedLogWrapper.fetchMetadata());
        output("New Original DurableLog Metadata (after replacement): " + originalDataLog.fetchMetadata());
    }
    // Read the edited contents that are now reachable from the original log id.
    try (val editedLogWrapper = dataLogFactory.createDebugLogWrapper(dataLogFactory.getRepairLogId())) {
        int finalEditedLogReadOps = readDurableDataLogWithCustomCallback((op, list) -> output("Original Log Operations after repair: " + op), containerId, editedLogWrapper.asReadOnly());
        output("Original DurableLog operations read (after editing): " + finalEditedLogReadOps);
        // The operation count must match the pre-overwrite validation; a mismatch means the overwrite corrupted the log.
        Preconditions.checkState(editedDurableLogOperations == finalEditedLogReadOps, "Repair Log operations not matching before (" + editedDurableLogOperations + ") and after the metadata overwrite (" + finalEditedLogReadOps + ")");
    } catch (Exception ex) {
        // Best-effort verification: a read failure here is reported but does not undo the repair.
        outputError("Problem reading Original DurableLog after editing.");
        outputException(ex);
    }
    output("Process completed successfully! (You still need to enable the Durable Log so Pravega can use it)");
}
Also used : lombok.val(lombok.val) DurableDataLog(io.pravega.segmentstore.storage.DurableDataLog) BookKeeperConfig(io.pravega.segmentstore.storage.impl.bookkeeper.BookKeeperConfig) DurableDataLogFactory(io.pravega.segmentstore.storage.DurableDataLogFactory) Cleanup(lombok.Cleanup) DataLogInitializationException(io.pravega.segmentstore.storage.DataLogInitializationException) DurableDataLogException(io.pravega.segmentstore.storage.DurableDataLogException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) DurableDataLogException(io.pravega.segmentstore.storage.DurableDataLogException) DebugDurableDataLogWrapper(io.pravega.segmentstore.storage.DebugDurableDataLogWrapper) BookKeeperLogFactory(io.pravega.segmentstore.storage.impl.bookkeeper.BookKeeperLogFactory) KeeperException(org.apache.zookeeper.KeeperException)

Example 2 with DebugDurableDataLogWrapper

Use of io.pravega.segmentstore.storage.DebugDurableDataLogWrapper in the pravega project.

From the class DataRecoveryTest, the method testDurableLogRepairCommandExpectedLogOutput:

/**
 * End-to-end test of the DurableLog repair command: writes events to a stream backed by a
 * BookKeeper DurableLog, shuts the cluster down, disables the log, then drives the repair
 * command with scripted user input (2 adds, 1 delete, 1 replace) and asserts that the edited
 * log's operations are shifted/substituted exactly as expected relative to the original.
 */
@Test
public void testDurableLogRepairCommandExpectedLogOutput() throws Exception {
    int instanceId = 0;
    int bookieCount = 3;
    int containerCount = 1;
    // Spin up an in-process BookKeeper + Pravega cluster for the test.
    @Cleanup TestUtils.PravegaRunner pravegaRunner = new TestUtils.PravegaRunner(bookieCount, containerCount);
    pravegaRunner.startBookKeeperRunner(instanceId);
    val bkConfig = BookKeeperConfig.builder().with(BookKeeperConfig.ZK_ADDRESS, "localhost:" + pravegaRunner.getBookKeeperRunner().getBkPort()).with(BookKeeperConfig.BK_LEDGER_PATH, pravegaRunner.getBookKeeperRunner().getLedgerPath()).with(BookKeeperConfig.ZK_METADATA_PATH, pravegaRunner.getBookKeeperRunner().getLogMetaNamespace()).with(BookKeeperConfig.BK_ENSEMBLE_SIZE, 1).with(BookKeeperConfig.BK_WRITE_QUORUM_SIZE, 1).with(BookKeeperConfig.BK_ACK_QUORUM_SIZE, 1).build();
    this.factory = new BookKeeperLogFactory(bkConfig, pravegaRunner.getBookKeeperRunner().getZkClient().get(), this.executorService());
    pravegaRunner.startControllerAndSegmentStore(this.storageFactory, this.factory);
    String streamName = "testDataRecoveryCommand";
    TestUtils.createScopeStream(pravegaRunner.getControllerRunner().getController(), SCOPE, streamName, config);
    try (val clientRunner = new TestUtils.ClientRunner(pravegaRunner.getControllerRunner(), SCOPE)) {
        // Write events to the streams.
        TestUtils.writeEvents(streamName, clientRunner.getClientFactory());
    }
    // Shut down services, we assume that the cluster is in very bad shape in this test.
    pravegaRunner.shutDownControllerRunner();
    pravegaRunner.shutDownSegmentStoreRunner();
    // set Pravega properties for the test
    STATE.set(new AdminCommandState());
    Properties pravegaProperties = new Properties();
    pravegaProperties.setProperty("pravegaservice.container.count", "1");
    pravegaProperties.setProperty("pravegaservice.storage.impl.name", "FILESYSTEM");
    pravegaProperties.setProperty("pravegaservice.storage.layout", "ROLLING_STORAGE");
    pravegaProperties.setProperty("pravegaservice.zk.connect.uri", "localhost:" + pravegaRunner.getBookKeeperRunner().getBkPort());
    pravegaProperties.setProperty("bookkeeper.ledger.path", pravegaRunner.getBookKeeperRunner().getLedgerPath());
    pravegaProperties.setProperty("bookkeeper.zk.metadata.path", pravegaRunner.getBookKeeperRunner().getLogMetaNamespace());
    pravegaProperties.setProperty("pravegaservice.clusterName", "pravega0");
    pravegaProperties.setProperty("filesystem.root", this.baseDir.getAbsolutePath());
    STATE.get().getConfigBuilder().include(pravegaProperties);
    // Execute basic command workflow for repairing DurableLog.
    CommandArgs args = new CommandArgs(List.of("0"), STATE.get());
    // Spy the command so the interactive prompts can be scripted with Mockito stubs below.
    DurableDataLogRepairCommand command = Mockito.spy(new DurableDataLogRepairCommand(args));
    this.factory = new BookKeeperLogFactory(bkConfig, pravegaRunner.getBookKeeperRunner().getZkClient().get(), this.executorService());
    this.factory.initialize();
    // First, keep all the Operations of Container 0 in this list, so we can compare with the modified one.
    List<Operation> originalOperations = new ArrayList<>();
    @Cleanup DebugDurableDataLogWrapper wrapper = this.factory.createDebugLogWrapper(0);
    command.readDurableDataLogWithCustomCallback((op, entry) -> originalOperations.add(op), 0, wrapper.asReadOnly());
    // Disable Original Log first.
    System.setIn(new ByteArrayInputStream("yes".getBytes()));
    TestUtils.executeCommand("bk disable 0", STATE.get());
    // Second, add 2 operations, delete 1 operation, replace 1 operation.
    // NOTE: the three ordered doReturn chains below script the command's interactive prompts
    // (confirmations, numeric ids/sequence numbers, and operation names). Their order must match
    // the command's prompt sequence exactly — reordering any stub changes which edit is applied.
    Mockito.doReturn(true).doReturn(true).doReturn(false).doReturn(true).doReturn(true).doReturn(false).doReturn(false).doReturn(true).when(command).confirmContinue();
    Mockito.doReturn(900L).doReturn(901L).doReturn(901L).doReturn(1L).doReturn(123L).doReturn(2L).doReturn(123L).doReturn(903L).doReturn(3L).doReturn(123L).doReturn(905L).doReturn(4L).doReturn(123L).when(command).getLongUserInput(Mockito.any());
    Mockito.doReturn("delete").doReturn("add").doReturn("DeleteSegmentOperation").doReturn("DeleteSegmentOperation").doReturn("replace").doReturn("DeleteSegmentOperation").doReturn("add").doReturn("StreamSegmentSealOperation").when(command).getStringUserInput(Mockito.any());
    command.execute();
    // Re-read the (now repaired) log and collect its operations for comparison.
    List<Operation> originalOperationsEdited = new ArrayList<>();
    @Cleanup DebugDurableDataLogWrapper wrapperEdited = this.factory.createDebugLogWrapper(0);
    command.readDurableDataLogWithCustomCallback((op, entry) -> originalOperationsEdited.add(op), 0, wrapperEdited.asReadOnly());
    // The edits shift indices in the edited log; e.g. the original OP-905 now sits at index 907.
    // OP-905 (now 907)
    for (int i = 899; i < 910; i++) {
        // Sequence numbers will defer between the original and edited logs. To do equality comparisons between
        // Operations in both logs, reset the sequence numbers (other fields should be the same).
        originalOperations.get(i).resetSequenceNumber(0);
        originalOperationsEdited.get(i).resetSequenceNumber(0);
    }
    // Index 899 was replaced; 900 and 903/905 were inserted; everything after is shifted accordingly.
    Assert.assertNotEquals(originalOperations.get(899), originalOperationsEdited.get(899));
    Assert.assertTrue(originalOperationsEdited.get(899) instanceof DeleteSegmentOperation);
    Assert.assertTrue(originalOperationsEdited.get(900) instanceof DeleteSegmentOperation);
    Assert.assertEquals(originalOperations.get(900).toString(), originalOperationsEdited.get(901).toString());
    Assert.assertEquals(originalOperations.get(901).toString(), originalOperationsEdited.get(902).toString());
    Assert.assertTrue(originalOperationsEdited.get(903) instanceof DeleteSegmentOperation);
    Assert.assertEquals(originalOperations.get(903).toString(), originalOperationsEdited.get(904).toString());
    Assert.assertTrue(originalOperationsEdited.get(905) instanceof StreamSegmentSealOperation);
    Assert.assertEquals(originalOperations.get(904).toString(), originalOperationsEdited.get(906).toString());
    this.factory.close();
}
Also used : lombok.val(lombok.val) CommandArgs(io.pravega.cli.admin.CommandArgs) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) ArrayList(java.util.ArrayList) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) UpdateAttributesOperation(io.pravega.segmentstore.server.logs.operations.UpdateAttributesOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) Properties(java.util.Properties) Cleanup(lombok.Cleanup) TestUtils(io.pravega.cli.admin.utils.TestUtils) DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) DebugDurableDataLogWrapper(io.pravega.segmentstore.storage.DebugDurableDataLogWrapper) ByteArrayInputStream(java.io.ByteArrayInputStream) AdminCommandState(io.pravega.cli.admin.AdminCommandState) BookKeeperLogFactory(io.pravega.segmentstore.storage.impl.bookkeeper.BookKeeperLogFactory) Test(org.junit.Test)

Aggregations

DebugDurableDataLogWrapper (io.pravega.segmentstore.storage.DebugDurableDataLogWrapper)2 BookKeeperLogFactory (io.pravega.segmentstore.storage.impl.bookkeeper.BookKeeperLogFactory)2 Cleanup (lombok.Cleanup)2 lombok.val (lombok.val)2 AdminCommandState (io.pravega.cli.admin.AdminCommandState)1 CommandArgs (io.pravega.cli.admin.CommandArgs)1 TestUtils (io.pravega.cli.admin.utils.TestUtils)1 SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties)1 DeleteSegmentOperation (io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation)1 MergeSegmentOperation (io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation)1 MetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation)1 Operation (io.pravega.segmentstore.server.logs.operations.Operation)1 StorageMetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation)1 StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation)1 StreamSegmentMapOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation)1 StreamSegmentSealOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation)1 StreamSegmentTruncateOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation)1 UpdateAttributesOperation (io.pravega.segmentstore.server.logs.operations.UpdateAttributesOperation)1 DataLogInitializationException (io.pravega.segmentstore.storage.DataLogInitializationException)1 DurableDataLog (io.pravega.segmentstore.storage.DurableDataLog)1