
Example 1 with ContainerType

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType in project ozone by apache.

From the class HddsDispatcher, method dispatchRequest.

@SuppressWarnings("methodlength")
private ContainerCommandResponseProto dispatchRequest(ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) {
    Preconditions.checkNotNull(msg);
    if (LOG.isTraceEnabled()) {
        LOG.trace("Command {}, trace ID: {} ", msg.getCmdType(), msg.getTraceID());
    }
    AuditAction action = ContainerCommandRequestPBHelper.getAuditAction(msg.getCmdType());
    EventType eventType = getEventType(msg);
    Map<String, String> params = ContainerCommandRequestPBHelper.getAuditParams(msg);
    ContainerType containerType;
    ContainerCommandResponseProto responseProto = null;
    long startTime = System.currentTimeMillis();
    Type cmdType = msg.getCmdType();
    long containerID = msg.getContainerID();
    metrics.incContainerOpsMetrics(cmdType);
    Container container = getContainer(containerID);
    boolean isWriteStage = (cmdType == Type.WriteChunk && dispatcherContext != null && dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.WRITE_DATA);
    boolean isWriteCommitStage = (cmdType == Type.WriteChunk && dispatcherContext != null && dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.COMMIT_DATA);
    try {
        validateToken(msg);
    } catch (IOException ioe) {
        StorageContainerException sce = new StorageContainerException("Block token verification failed. " + ioe.getMessage(), ioe, ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED);
        return ContainerUtils.logAndReturnError(LOG, sce, msg);
    }
    // If the command is executed outside of Ratis, the default write stage
    // is WriteChunkStage.COMBINED.
    boolean isCombinedStage = cmdType == Type.WriteChunk && (dispatcherContext == null || dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.COMBINED);
    Map<Long, Long> container2BCSIDMap = null;
    if (dispatcherContext != null) {
        container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap();
    }
    if (isWriteCommitStage) {
        // Check whether the container ID exists in the loaded snapshot file.
        // If it does not, this is a datanode restart and we are reapplying a
        // transaction that was not captured in the snapshot.
        // Just add the container to the map, and remove it from the missing
        // container set, as it might have been added there during "init".
        Preconditions.checkNotNull(container2BCSIDMap);
        if (container != null && container2BCSIDMap.get(containerID) == null) {
            container2BCSIDMap.put(containerID, container.getBlockCommitSequenceId());
            getMissingContainerSet().remove(containerID);
        }
    }
    if (getMissingContainerSet().contains(containerID)) {
        StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " has been lost and cannot be recreated on this DataNode", ContainerProtos.Result.CONTAINER_MISSING);
        audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
        return ContainerUtils.logAndReturnError(LOG, sce, msg);
    }
    if (cmdType != Type.CreateContainer) {
        /**
         * CreateContainer should happen only as part of the WRITE_DATA phase
         * of WriteChunk.
         */
        if (container == null && ((isWriteStage || isCombinedStage) || cmdType == Type.PutSmallFile)) {
            // If the container does not exist, create one for WriteChunk and
            // PutSmallFile requests.
            responseProto = createContainer(msg);
            if (responseProto.getResult() != Result.SUCCESS) {
                StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " creation failed", responseProto.getResult());
                audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
                return ContainerUtils.logAndReturnError(LOG, sce, msg);
            }
            Preconditions.checkArgument(isWriteStage && container2BCSIDMap != null || dispatcherContext == null);
            if (container2BCSIDMap != null) {
                // Add this container to the map of containers created in the
                // pipeline, with the initial BCSID recorded as 0.
                container2BCSIDMap.putIfAbsent(containerID, 0L);
            }
            container = getContainer(containerID);
        }
        // If the container is not found, return an error.
        if (container == null) {
            StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " does not exist", ContainerProtos.Result.CONTAINER_NOT_FOUND);
            audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
            return ContainerUtils.logAndReturnError(LOG, sce, msg);
        }
        containerType = getContainerType(container);
    } else {
        if (!msg.hasCreateContainer()) {
            audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception("MALFORMED_REQUEST"));
            return malformedRequest(msg);
        }
        containerType = msg.getCreateContainer().getContainerType();
    }
    // Check whether the operation is a write before trying to send a
    // CloseContainerAction.
    if (!HddsUtils.isReadOnly(msg)) {
        sendCloseContainerActionIfNeeded(container);
    }
    Handler handler = getHandler(containerType);
    if (handler == null) {
        StorageContainerException ex = new StorageContainerException("Invalid " + "ContainerType " + containerType, ContainerProtos.Result.CONTAINER_INTERNAL_ERROR);
        // log failure
        audit(action, eventType, params, AuditEventStatus.FAILURE, ex);
        return ContainerUtils.logAndReturnError(LOG, ex, msg);
    }
    responseProto = handler.handle(msg, container, dispatcherContext);
    if (responseProto != null) {
        metrics.incContainerOpsLatencies(cmdType, System.currentTimeMillis() - startTime);
        // If the request is of write type and the container operation is
        // unsuccessful, the applyTransaction on the container failed. All
        // subsequent transactions on the container should fail, and hence the
        // replica will be marked unhealthy here. In this case, a close
        // container action will be sent to SCM to close the container.
        // ApplyTransaction called on a closed container will fail with a
        // closed container exception; in such cases, ignore the exception.
        // If the container is already marked unhealthy, there is no need to
        // change the state here.
        Result result = responseProto.getResult();
        if (cmdType == Type.CreateContainer && result == Result.SUCCESS && dispatcherContext != null) {
            Preconditions.checkNotNull(dispatcherContext.getContainer2BCSIDMap());
            container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0));
        }
        if (!HddsUtils.isReadOnly(msg) && !canIgnoreException(result)) {
            if (container == null) {
                throw new NullPointerException("Error on creating containers " + result + " " + responseProto.getMessage());
            }
            // For the container to be moved to the unhealthy state here, it
            // can only be in OPEN or CLOSING state.
            State containerState = container.getContainerData().getState();
            Preconditions.checkState(containerState == State.OPEN || containerState == State.CLOSING);
            // Mark and persist the container state as unhealthy.
            try {
                handler.markContainerUnhealthy(container);
                LOG.info("Marked Container UNHEALTHY, ContainerID: {}", containerID);
            } catch (IOException ioe) {
                // Just log the error here if marking the container fails;
                // return the actual failure response to the client.
                LOG.error("Failed to mark container " + containerID + " UNHEALTHY. ", ioe);
            }
            // In any case, the in-memory state of the container should be UNHEALTHY.
            Preconditions.checkArgument(container.getContainerData().getState() == State.UNHEALTHY);
            sendCloseContainerActionIfNeeded(container);
        }
        if (result == Result.SUCCESS) {
            updateBCSID(container, dispatcherContext, cmdType);
            audit(action, eventType, params, AuditEventStatus.SUCCESS, null);
        } else {
            audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception(responseProto.getMessage()));
        }
        return responseProto;
    } else {
        // log failure
        audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception("UNSUPPORTED_REQUEST"));
        return unsupportedRequest(msg);
    }
}
Also used : ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) IOException(java.io.IOException) InvalidContainerStateException(org.apache.hadoop.hdds.scm.container.common.helpers.InvalidContainerStateException) ServiceException(com.google.protobuf.ServiceException) ContainerNotOpenException(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) Result(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result) AuditAction(org.apache.hadoop.ozone.audit.AuditAction) AuditLoggerType(org.apache.hadoop.ozone.audit.AuditLoggerType) Type(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) State(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State)
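
For orientation, here is a minimal sketch of what a caller of this dispatch path looks like. It assumes an already-built HddsDispatcher named dispatcher, a DatanodeDetails named datanodeDetails, and an SLF4J LOG; the builder calls mirror the ContainerProtos types used above, but this is an illustrative sketch, not a verified minimal request.

// Minimal sketch (assumes "dispatcher", "datanodeDetails", and "LOG" exist,
// plus the ContainerProtos imports listed above).
ContainerCommandRequestProto request = ContainerCommandRequestProto.newBuilder()
    .setCmdType(Type.CreateContainer)
    .setContainerID(1L)
    // the datanode UUID is a required request field
    .setDatanodeUuid(datanodeDetails.getUuidString())
    .setCreateContainer(ContainerProtos.CreateContainerRequestProto.newBuilder()
        .setContainerType(ContainerType.KeyValueContainer))
    .build();
// dispatch() performs the validation shown in dispatchRequest() above and
// routes the request to the handler registered for the container type.
ContainerCommandResponseProto response = dispatcher.dispatch(request, null);
if (response.getResult() != Result.SUCCESS) {
    LOG.warn("CreateContainer failed: {}", response.getMessage());
}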

Example 2 with ContainerType

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType in project ozone by apache.

From the class TestHddsDispatcher, method testContainerCloseActionWhenFull.

@Test
public void testContainerCloseActionWhenFull() throws IOException {
    String testDir = GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(HDDS_DATANODE_DIR_KEY, testDir);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
    DatanodeDetails dd = randomDatanodeDetails();
    MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    try {
        UUID scmId = UUID.randomUUID();
        ContainerSet containerSet = new ContainerSet();
        DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
        StateContext context = Mockito.mock(StateContext.class);
        Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
        Mockito.when(context.getParent()).thenReturn(stateMachine);
        KeyValueContainerData containerData = new KeyValueContainerData(1L, layout, (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), dd.getUuidString());
        Container container = new KeyValueContainer(containerData, conf);
        container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString());
        containerSet.addContainer(container);
        ContainerMetrics metrics = ContainerMetrics.create(conf);
        Map<ContainerType, Handler> handlers = Maps.newHashMap();
        for (ContainerType containerType : ContainerType.values()) {
            handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
        }
        HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
        hddsDispatcher.setClusterId(scmId.toString());
        ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseOne.getResult());
        verify(context, times(0)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
        containerData.setBytesUsed(Double.valueOf(StorageUnit.MB.toBytes(950)).longValue());
        ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseTwo.getResult());
        verify(context, times(1)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
    } finally {
        volumeSet.shutdown();
        ContainerMetrics.remove();
        FileUtils.deleteDirectory(new File(testDir));
    }
}
Also used : RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) ContainerAction(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) UUID(java.util.UUID) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) File(java.io.File) Test(org.junit.Test)
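
The second dispatch above triggers a container action because setting bytesUsed to 950 MB pushes the 1 GB container past the close threshold (hdds.container.close.threshold, which defaults to 0.9). Below is a sketch of the check the dispatcher performs before sending the close action; the method and field names are assumptions, not the exact implementation:

// Sketch of the near-full check behind sendCloseContainerActionIfNeeded();
// names are assumed, the real logic lives in HddsDispatcher.
private boolean isContainerFull(Container container) {
    long maxSize = container.getContainerData().getMaxSize();
    long bytesUsed = container.getContainerData().getBytesUsed();
    // With the default threshold of 0.9, 950 MB of a 1 GB container triggers
    // the close action, while the nearly empty container after the first
    // write does not.
    return bytesUsed >= containerCloseThreshold * maxSize;
}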

Example 3 with ContainerType

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType in project ozone by apache.

From the class TestHddsDispatcher, method createDispatcher.

/**
 * Creates an HddsDispatcher instance with the given info.
 * @param dd datanode details.
 * @param scmId SCM UUID.
 * @param conf configuration to be used.
 * @return HddsDispatcher instance.
 * @throws IOException
 */
private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException {
    ContainerSet containerSet = new ContainerSet();
    VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerType containerType : ContainerType.values()) {
        handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
    }
    HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
    hddsDispatcher.setClusterId(scmId.toString());
    return hddsDispatcher;
}
Also used : ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) VolumeSet(org.apache.hadoop.ozone.container.common.volume.VolumeSet)
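
A sketch of how a test could use this helper, reusing the getWriteChunkRequest helper, the randomDatanodeDetails fixture, and the testDir configuration shown in Example 2; this is illustrative wiring rather than a quoted test:

// Illustrative usage of createDispatcher (assumes the TestHddsDispatcher
// fixtures shown elsewhere on this page).
DatanodeDetails dd = randomDatanodeDetails();
UUID scmId = UUID.randomUUID();
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HDDS_DATANODE_DIR_KEY, testDir);
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
HddsDispatcher dispatcher = createDispatcher(dd, scmId, conf);
ContainerCommandResponseProto response =
    dispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());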

Example 4 with ContainerType

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType in project ozone by apache.

From the class TestKeyValueHandler, method setup.

@Before
public void setup() throws StorageContainerException {
    // Create mock HddsDispatcher and KeyValueHandler.
    handler = Mockito.mock(KeyValueHandler.class);
    HashMap<ContainerType, Handler> handlers = new HashMap<>();
    handlers.put(ContainerType.KeyValueContainer, handler);
    dispatcher = new HddsDispatcher(new OzoneConfiguration(), Mockito.mock(ContainerSet.class), Mockito.mock(VolumeSet.class), handlers, Mockito.mock(StateContext.class), Mockito.mock(ContainerMetrics.class), Mockito.mock(TokenVerifier.class));
}
Also used : HashMap(java.util.HashMap) ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) HddsDispatcher(org.apache.hadoop.ozone.container.common.impl.HddsDispatcher) Before(org.junit.Before)
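
With KeyValueHandler mocked, a test can assert that the dispatcher routes KeyValueContainer commands to it. The sketch below mirrors the proto usage from Example 1, and the handle signature matches its use there (handler.handle(msg, container, dispatcherContext)); the exact stubbing a real test needs is an assumption:

// Sketch: verify that dispatch() hands a KeyValueContainer command to the
// mocked handler (request fields as in Example 1; verification is assumed).
ContainerCommandRequestProto createRequest = ContainerCommandRequestProto.newBuilder()
    .setCmdType(ContainerProtos.Type.CreateContainer)
    .setContainerID(1L)
    .setDatanodeUuid(UUID.randomUUID().toString())
    .setCreateContainer(ContainerProtos.CreateContainerRequestProto.newBuilder()
        .setContainerType(ContainerType.KeyValueContainer))
    .build();
dispatcher.dispatch(createRequest, null);
Mockito.verify(handler, Mockito.times(1))
    .handle(Mockito.any(), Mockito.any(), Mockito.any());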

Example 5 with ContainerType

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType in project ozone by apache.

From the class ClosedContainerReplicator, method initializeReplicationSupervisor.

@NotNull
private void initializeReplicationSupervisor(ConfigurationSource conf) throws IOException {
    String fakeDatanodeUuid = datanode;
    if (fakeDatanodeUuid.isEmpty()) {
        fakeDatanodeUuid = UUID.randomUUID().toString();
    }
    ContainerSet containerSet = new ContainerSet();
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    MutableVolumeSet volumeSet = new MutableVolumeSet(fakeDatanodeUuid, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    Map<ContainerType, Handler> handlers = new HashMap<>();
    for (ContainerType containerType : ContainerType.values()) {
        final Handler handler = Handler.getHandlerForContainerType(containerType, conf, fakeDatanodeUuid, containerSet, volumeSet, metrics, containerReplicaProto -> {
        });
        handler.setClusterID(UUID.randomUUID().toString());
        handlers.put(containerType, handler);
    }
    ContainerController controller = new ContainerController(containerSet, handlers);
    ContainerReplicator replicator = new DownloadAndImportReplicator(containerSet, controller, new SimpleContainerDownloader(conf, null), new TarContainerPacker());
    supervisor = new ReplicationSupervisor(containerSet, replicator, 10);
}
Also used : ReplicationSupervisor(org.apache.hadoop.ozone.container.replication.ReplicationSupervisor) HashMap(java.util.HashMap) DownloadAndImportReplicator(org.apache.hadoop.ozone.container.replication.DownloadAndImportReplicator) ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) ContainerController(org.apache.hadoop.ozone.container.ozoneimpl.ContainerController) SimpleContainerDownloader(org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader) TarContainerPacker(org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) ContainerReplicator(org.apache.hadoop.ozone.container.replication.ContainerReplicator) NotNull(org.jetbrains.annotations.NotNull)
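
Once the supervisor is built, replication work is submitted to it as tasks. A short sketch follows; the ReplicationTask constructor arguments and the getSourceDatanodes helper are assumptions for illustration, not quoted from this class:

// Sketch: queue replication of a closed container (ReplicationTask's
// constructor and the getSourceDatanodes() helper are assumed here).
List<DatanodeDetails> sources = getSourceDatanodes(containerId);
supervisor.addTask(new ReplicationTask(containerId, sources));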

Aggregations

ContainerType (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType): 7
Handler (org.apache.hadoop.ozone.container.common.interfaces.Handler): 7
ContainerMetrics (org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics): 3
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 3
MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet): 3
IOException (java.io.IOException): 2
HashMap (java.util.HashMap): 2
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 2
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto): 2
State (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State): 2
Type (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type): 2
ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException): 2
InvalidContainerStateException (org.apache.hadoop.hdds.scm.container.common.helpers.InvalidContainerStateException): 2
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 2
AuditAction (org.apache.hadoop.ozone.audit.AuditAction): 2
AuditLoggerType (org.apache.hadoop.ozone.audit.AuditLoggerType): 2
DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine): 2
StateContext (org.apache.hadoop.ozone.container.common.statemachine.StateContext): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
ServiceException (com.google.protobuf.ServiceException): 1