
Example 1 with CommandStatus

Use of org.apache.hadoop.ozone.protocol.commands.CommandStatus in project ozone by apache.

From class TestReportPublisher, method testCommandStatusPublisher. The test verifies that the publisher reports only statuses that are no longer PENDING.

@Test
public void testCommandStatusPublisher() throws InterruptedException {
    StateContext dummyContext = Mockito.mock(StateContext.class);
    ReportPublisher publisher = new CommandStatusReportPublisher();
    final Map<Long, CommandStatus> cmdStatusMap = new ConcurrentHashMap<>();
    when(dummyContext.getCommandStatusMap()).thenReturn(cmdStatusMap);
    // 'config' is the configuration field initialized by the test class.
    publisher.setConf(config);
    ScheduledExecutorService executorService = HadoopExecutors.newScheduledThreadPool(1,
        new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("Unit test ReportManager Thread - %d").build());
    publisher.init(dummyContext, executorService);
    // With no statuses registered yet, there is nothing to report.
    Assert.assertNull(((CommandStatusReportPublisher) publisher).getReport());
    // Insert two status objects into the state context map, then get the report.
    CommandStatus obj1 = CommandStatus.CommandStatusBuilder.newBuilder()
        .setCmdId(HddsIdFactory.getLongId())
        .setType(Type.deleteBlocksCommand)
        .setStatus(Status.PENDING)
        .build();
    CommandStatus obj2 = CommandStatus.CommandStatusBuilder.newBuilder()
        .setCmdId(HddsIdFactory.getLongId())
        .setType(Type.closeContainerCommand)
        .setStatus(Status.EXECUTED)
        .build();
    cmdStatusMap.put(obj1.getCmdId(), obj1);
    cmdStatusMap.put(obj2.getCmdId(), obj2);
    // Commands whose status is still PENDING are not sent, so only obj2
    // appears in the report.
    Assert.assertEquals("Should publish report with 1 status object", 1,
        ((CommandStatusReportPublisher) publisher).getReport().getCmdStatusCount());
    executorService.shutdown();
}
Also used: ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) CommandStatus(org.apache.hadoop.ozone.protocol.commands.CommandStatus) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Test(org.junit.Test)
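
Examples 1 and 3 together imply a simple lifecycle: a status enters the map as PENDING, a command handler flips it once the work is done, and the next report drains it. The sketch below models that flow with plain JDK types; the Status enum and the drain loop are simplified stand-ins for the Ozone protos and for CommandStatusReportPublisher#getReport, not the real API.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class CommandStatusLifecycleSketch {

    // Simplified stand-in for the proto Status enum carried by CommandStatus.
    enum Status { PENDING, EXECUTED, FAILED }

    public static void main(String[] args) {
        Map<Long, Status> statusMap = new ConcurrentHashMap<>();
        statusMap.put(1L, Status.PENDING);  // handler has not finished yet
        statusMap.put(2L, Status.PENDING);

        // A handler finishing its command flips the status, as processCmd
        // does in Example 2 via cmdStatus.setStatus(status).
        statusMap.put(2L, Status.EXECUTED);

        // Drain pass: report and remove everything that is no longer PENDING.
        List<Long> reported = new ArrayList<>();
        statusMap.keySet().iterator().forEachRemaining(key -> {
            if (statusMap.get(key) != Status.PENDING) {
                reported.add(key);
                statusMap.remove(key);
            }
        });

        System.out.println(reported);   // [2]
        System.out.println(statusMap);  // {1=PENDING}, waits for its handler
    }
}

The PENDING entry survives the drain, which is exactly what the assertion in Example 1 checks: two statuses go in, but only one is published.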

Example 2 with CommandStatus

Use of org.apache.hadoop.ozone.protocol.commands.CommandStatus in project ozone by apache.

From class DeleteBlocksCommandHandler, method processCmd. The handler fans each DeletedBlocksTransaction out to an executor, waits for all tasks, and records the outcome in the command's CommandStatus.

private void processCmd(DeleteCmdInfo cmd) {
    LOG.debug("Processing block deletion command.");
    ContainerBlocksDeletionACKProto blockDeletionACK = null;
    long startTime = Time.monotonicNow();
    boolean cmdExecuted = false;
    try {
        // Move blocks to the deleting state. This is only a metadata update;
        // the actual deletion happens later in a separate recycling thread.
        List<DeletedBlocksTransaction> containerBlocks =
            cmd.getCmd().blocksTobeDeleted();
        DeletedContainerBlocksSummary summary =
            DeletedContainerBlocksSummary.getFrom(containerBlocks);
        LOG.info("Start to delete container blocks, TXIDs={}, numOfContainers={}, numOfBlocks={}",
            summary.getTxIDSummary(), summary.getNumOfContainers(),
            summary.getNumOfBlocks());
        ContainerBlocksDeletionACKProto.Builder resultBuilder =
            ContainerBlocksDeletionACKProto.newBuilder();
        List<Future<?>> futures = new ArrayList<>();
        for (DeletedBlocksTransaction tx : containerBlocks) {
            futures.add(executor.submit(new ProcessTransactionTask(tx, resultBuilder)));
        }
        // Wait for all deletion tasks to finish before building the ACK.
        futures.forEach(f -> {
            try {
                f.get();
            } catch (InterruptedException | ExecutionException e) {
                LOG.error("Task failed.", e);
                // Restore the interrupt flag so callers can still observe it.
                Thread.currentThread().interrupt();
            }
        });
        resultBuilder.setDnId(cmd.getContext().getParent()
            .getDatanodeDetails().getUuid().toString());
        blockDeletionACK = resultBuilder.build();
        // TODO Or we should wait until the blocks are actually deleted?
        if (!containerBlocks.isEmpty()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Sending following block deletion ACK to SCM");
                for (DeleteBlockTransactionResult result : blockDeletionACK.getResultsList()) {
                    LOG.debug("{} : {}", result.getTxID(), result.getSuccess());
                }
            }
        }
        cmdExecuted = true;
    } finally {
        final ContainerBlocksDeletionACKProto deleteAck = blockDeletionACK;
        final boolean status = cmdExecuted;
        Consumer<CommandStatus> statusUpdater = (cmdStatus) -> {
            cmdStatus.setStatus(status);
            ((DeleteBlockCommandStatus) cmdStatus).setBlocksDeletionAck(deleteAck);
        };
        updateCommandStatus(cmd.getContext(), cmd.getCmd(), statusUpdater, LOG);
        long endTime = Time.monotonicNow();
        totalTime += endTime - startTime;
        invocationCount++;
    }
}
Also used: ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) DeletedContainerBlocksSummary(org.apache.hadoop.ozone.container.common.helpers.DeletedContainerBlocksSummary) ContainerBlocksDeletionACKProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto) BlockUtils(org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) LoggerFactory(org.slf4j.LoggerFactory) DeleteBlockCommandStatus(org.apache.hadoop.ozone.protocol.commands.DeleteBlockCommandStatus) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) ArrayList(java.util.ArrayList) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) SCMCommandProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) DatanodeStore(org.apache.hadoop.ozone.container.metadata.DatanodeStore) Future(java.util.concurrent.Future) ConfigurationSource(org.apache.hadoop.hdds.conf.ConfigurationSource) DeleteBlocksCommand(org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand) ChunkInfoList(org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList) OzoneContainer(org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer) DeleteBlockTransactionResult(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult) DatanodeStoreSchemaTwoImpl(org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) ExecutorService(java.util.concurrent.ExecutorService) CONTAINER_NOT_FOUND(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND) BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation) Logger(org.slf4j.Logger) IOException(java.io.IOException) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) DeletedBlocksTransaction(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction) OzoneConsts(org.apache.hadoop.ozone.OzoneConsts) Daemon(org.apache.hadoop.util.Daemon) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) List(java.util.List) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) Table(org.apache.hadoop.hdds.utils.db.Table) CommandStatus(org.apache.hadoop.ozone.protocol.commands.CommandStatus) Time(org.apache.hadoop.util.Time) SCMConnectionManager(org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager) SCMCommand(org.apache.hadoop.ozone.protocol.commands.SCMCommand) SCHEMA_V2(org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2) SCHEMA_V1(org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1)
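
processCmd uses a common submit-then-drain pattern: every transaction becomes a task, and the handler blocks on each Future before assembling the ACK. Here is that pattern in isolation, a minimal sketch with plain JDK types in which the lambda stands in for ProcessTransactionTask:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitAndDrainSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        List<Future<?>> futures = new ArrayList<>();
        for (int tx = 0; tx < 8; tx++) {
            final int id = tx;
            // Stand-in for ProcessTransactionTask: handle one transaction.
            futures.add(executor.submit(() ->
                System.out.println("processed transaction " + id)));
        }
        // Block until every task has finished (or failed), exactly as
        // processCmd does before building the deletion ACK.
        for (Future<?> f : futures) {
            try {
                f.get();
            } catch (InterruptedException e) {
                // Restore the flag; swallowing it would hide the interrupt.
                Thread.currentThread().interrupt();
            } catch (ExecutionException e) {
                System.err.println("task failed: " + e.getCause());
            }
        }
        executor.shutdown();
    }
}

Splitting the catch clauses, unlike the combined catch in processCmd, makes it explicit that only an interrupt needs the flag restored; an ExecutionException just means the task itself threw.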

Example 3 with CommandStatus

Use of org.apache.hadoop.ozone.protocol.commands.CommandStatus in project ozone by apache.

From class CommandStatusReportPublisher, method getReport. Each call drains every non-PENDING status from the state context map into a report proto, or returns null when there is nothing to send.

@Override
protected CommandStatusReportsProto getReport() {
    Map<Long, CommandStatus> map = this.getContext().getCommandStatusMap();
    Iterator<Long> iterator = map.keySet().iterator();
    CommandStatusReportsProto.Builder builder = CommandStatusReportsProto.newBuilder();
    iterator.forEachRemaining(key -> {
        CommandStatus cmdStatus = map.get(key);
        // CommandHandler will change its status when it works on this command.
        if (!cmdStatus.getStatus().equals(Status.PENDING)) {
            builder.addCmdStatus(cmdStatus.getProtoBufMessage());
            map.remove(key);
        }
    });
    return builder.getCmdStatusCount() > 0 ? builder.build() : null;
}
Also used: CommandStatusReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto) CommandStatus(org.apache.hadoop.ozone.protocol.commands.CommandStatus)
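
The remove-while-iterating in getReport works only because the status map is a ConcurrentHashMap, whose iterators are weakly consistent; the same drain over a plain HashMap fails fast. A small self-contained demonstration of the difference (plain JDK, no Ozone types):

import java.util.ConcurrentModificationException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class DrainWhileIterating {
    public static void main(String[] args) {
        // ConcurrentHashMap: weakly consistent iterators tolerate removal.
        Map<Long, String> safe = new ConcurrentHashMap<>();
        safe.put(1L, "PENDING");
        safe.put(2L, "EXECUTED");
        safe.put(3L, "PENDING");
        safe.keySet().iterator().forEachRemaining(key -> {
            if (!"PENDING".equals(safe.get(key))) {
                safe.remove(key);  // no exception
            }
        });
        System.out.println(safe);  // only the PENDING entries remain

        // HashMap: fail-fast iterator; draining mid-iteration blows up.
        Map<Long, String> unsafe = new HashMap<>();
        unsafe.put(1L, "PENDING");
        unsafe.put(2L, "EXECUTED");
        unsafe.put(3L, "PENDING");
        try {
            unsafe.keySet().iterator().forEachRemaining(key -> {
                if (!"PENDING".equals(unsafe.get(key))) {
                    unsafe.remove(key);
                }
            });
        } catch (ConcurrentModificationException e) {
            System.out.println("HashMap drain failed: " + e);
        }
    }
}

Note that HashMap's fail-fast behavior is only best-effort: depending on which entry is removed, the exception is not guaranteed, which is exactly why the publisher relies on a ConcurrentHashMap instead.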

Example 4 with CommandStatus

Use of org.apache.hadoop.ozone.protocol.commands.CommandStatus in project ozone by apache.

From class TestEndPoint, method testHeartbeatWithCommandStatusReport. The test checks that commands delivered in a heartbeat response are registered in the state context as PENDING statuses.

@Test
public void testHeartbeatWithCommandStatusReport() throws Exception {
    DatanodeDetails dataNode = randomDatanodeDetails();
    try (EndpointStateMachine rpcEndPoint = createEndpoint(SCMTestUtils.getConf(), serverAddress, 1000)) {
        // Add some scmCommands for heartbeat response
        addScmCommands();
        SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
            .setDatanodeDetails(dataNode.getProtoBufMessage())
            .setNodeReport(HddsTestUtils.createNodeReport(
                Arrays.asList(getStorageReports(dataNode.getUuid())),
                Arrays.asList(getMetadataStorageReports(dataNode.getUuid()))))
            .build();
        SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint().sendHeartbeat(request);
        assertNotNull(responseProto);
        assertEquals(3, responseProto.getCommandsCount());
        assertEquals(0, scmServerImpl.getCommandStatusReportCount());
        // Send heartbeat again from heartbeat endpoint task
        final StateContext stateContext = heartbeatTaskHelper(serverAddress, 3000);
        Map<Long, CommandStatus> map = stateContext.getCommandStatusMap();
        assertNotNull(map);
        assertEquals("Should have 1 objects", 1, map.size());
        assertTrue(map.containsKey(3L));
        assertEquals(Type.deleteBlocksCommand, map.get(3L).getType());
        assertEquals(Status.PENDING, map.get(3L).getStatus());
        scmServerImpl.clearScmCommandRequests();
    }
}
Also used: EndpointStateMachine(org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine) SCMHeartbeatRequestProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) SCMHeartbeatResponseProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) CommandStatus(org.apache.hadoop.ozone.protocol.commands.CommandStatus) Test(org.junit.Test)

Aggregations

CommandStatus (org.apache.hadoop.ozone.protocol.commands.CommandStatus): 4
StateContext (org.apache.hadoop.ozone.container.common.statemachine.StateContext): 3
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 2
IOException (java.io.IOException): 1
ArrayList (java.util.ArrayList): 1
List (java.util.List): 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1
ExecutionException (java.util.concurrent.ExecutionException): 1
ExecutorService (java.util.concurrent.ExecutorService): 1
Future (java.util.concurrent.Future): 1
LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue): 1
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 1
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 1
TimeUnit (java.util.concurrent.TimeUnit): 1
Consumer (java.util.function.Consumer): 1
ConfigurationSource (org.apache.hadoop.hdds.conf.ConfigurationSource): 1
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 1
MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails): 1
ContainerType (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType): 1
CONTAINER_NOT_FOUND (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND): 1