Example 1 with Result

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result in project ozone by apache.

The example below comes from the class HddsDispatcher, method dispatchRequest.

@SuppressWarnings("methodlength")
private ContainerCommandResponseProto dispatchRequest(ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) {
    Preconditions.checkNotNull(msg);
    if (LOG.isTraceEnabled()) {
        LOG.trace("Command {}, trace ID: {} ", msg.getCmdType(), msg.getTraceID());
    }
    AuditAction action = ContainerCommandRequestPBHelper.getAuditAction(msg.getCmdType());
    EventType eventType = getEventType(msg);
    Map<String, String> params = ContainerCommandRequestPBHelper.getAuditParams(msg);
    ContainerType containerType;
    ContainerCommandResponseProto responseProto = null;
    long startTime = System.currentTimeMillis();
    Type cmdType = msg.getCmdType();
    long containerID = msg.getContainerID();
    metrics.incContainerOpsMetrics(cmdType);
    Container container = getContainer(containerID);
    boolean isWriteStage = (cmdType == Type.WriteChunk && dispatcherContext != null && dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.WRITE_DATA);
    boolean isWriteCommitStage = (cmdType == Type.WriteChunk && dispatcherContext != null && dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.COMMIT_DATA);
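    // Validate the request's block token up front; a verification failure is
    // mapped to BLOCK_TOKEN_VERIFICATION_FAILED and returned to the client.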
    try {
        validateToken(msg);
    } catch (IOException ioe) {
        StorageContainerException sce = new StorageContainerException("Block token verification failed. " + ioe.getMessage(), ioe, ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED);
        return ContainerUtils.logAndReturnError(LOG, sce, msg);
    }
    // If the command is executed through a path other than Ratis, the default
    // write stage is WriteChunkStage.COMBINED.
    boolean isCombinedStage = cmdType == Type.WriteChunk && (dispatcherContext == null || dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.COMBINED);
    Map<Long, Long> container2BCSIDMap = null;
    if (dispatcherContext != null) {
        container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap();
    }
    if (isWriteCommitStage) {
        // Check if the container ID exists in the loaded snapshot file. If it
        // does not, this is a restart of the datanode and we are reapplying a
        // transaction that was not captured in the snapshot.
        // Just add it to the container2BCSID map and remove it from the
        // missing container set, as it might have been added to that set
        // during "init".
        Preconditions.checkNotNull(container2BCSIDMap);
        if (container != null && container2BCSIDMap.get(containerID) == null) {
            container2BCSIDMap.put(containerID, container.getBlockCommitSequenceId());
            getMissingContainerSet().remove(containerID);
        }
    }
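    // Requests targeting a container that this datanode knows to be missing
    // are rejected with CONTAINER_MISSING; such containers are never
    // recreated here.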
    if (getMissingContainerSet().contains(containerID)) {
        StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " has been lost and cannot be recreated on this DataNode", ContainerProtos.Result.CONTAINER_MISSING);
        audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
        return ContainerUtils.logAndReturnError(LOG, sce, msg);
    }
    if (cmdType != Type.CreateContainer) {
        /**
         * Create Container should happen only as part of Write_Data phase of
         * writeChunk.
         */
        if (container == null && ((isWriteStage || isCombinedStage) || cmdType == Type.PutSmallFile)) {
            // If container does not exist, create one for WriteChunk and
            // PutSmallFile request
            responseProto = createContainer(msg);
            if (responseProto.getResult() != Result.SUCCESS) {
                StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " creation failed", responseProto.getResult());
                audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
                return ContainerUtils.logAndReturnError(LOG, sce, msg);
            }
            Preconditions.checkArgument(isWriteStage && container2BCSIDMap != null || dispatcherContext == null);
            if (container2BCSIDMap != null) {
                // adds this container to list of containers created in the pipeline
                // with initial BCSID recorded as 0.
                container2BCSIDMap.putIfAbsent(containerID, 0L);
            }
            container = getContainer(containerID);
        }
        // if container not found return error
        if (container == null) {
            StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " does not exist", ContainerProtos.Result.CONTAINER_NOT_FOUND);
            audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
            return ContainerUtils.logAndReturnError(LOG, sce, msg);
        }
        containerType = getContainerType(container);
    } else {
        if (!msg.hasCreateContainer()) {
            audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception("MALFORMED_REQUEST"));
            return malformedRequest(msg);
        }
        containerType = msg.getCreateContainer().getContainerType();
    }
    // For write (non-read-only) requests, check whether a CloseContainerAction
    // needs to be sent before the command is handled.
    if (!HddsUtils.isReadOnly(msg)) {
        sendCloseContainerActionIfNeeded(container);
    }
    Handler handler = getHandler(containerType);
    if (handler == null) {
        StorageContainerException ex = new StorageContainerException("Invalid " + "ContainerType " + containerType, ContainerProtos.Result.CONTAINER_INTERNAL_ERROR);
        // log failure
        audit(action, eventType, params, AuditEventStatus.FAILURE, ex);
        return ContainerUtils.logAndReturnError(LOG, ex, msg);
    }
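    // Delegate the actual command processing to the handler registered for
    // this container type.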
    responseProto = handler.handle(msg, container, dispatcherContext);
    if (responseProto != null) {
        metrics.incContainerOpsLatencies(cmdType, System.currentTimeMillis() - startTime);
        // If the request is of Write Type and the container operation
        // is unsuccessful, it implies the applyTransaction on the container
        // failed. All subsequent transactions on the container should fail and
        // hence replica will be marked unhealthy here. In this case, a close
        // container action will be sent to SCM to close the container.
        // ApplyTransaction called on closed Container will fail with Closed
        // container exception. In such cases, ignore the exception here.
        // If the container is already marked unhealthy, no need to change the
        // state here.
        Result result = responseProto.getResult();
        if (cmdType == Type.CreateContainer && result == Result.SUCCESS && dispatcherContext != null) {
            Preconditions.checkNotNull(dispatcherContext.getContainer2BCSIDMap());
            container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0));
        }
        if (!HddsUtils.isReadOnly(msg) && !canIgnoreException(result)) {
            if (container == null) {
                throw new NullPointerException("Error on creating containers " + result + " " + responseProto.getMessage());
            }
            // For container to be moved to unhealthy state here, the container can
            // only be in open or closing state.
            State containerState = container.getContainerData().getState();
            Preconditions.checkState(containerState == State.OPEN || containerState == State.CLOSING);
            // mark and persist the container state to be unhealthy
            try {
                handler.markContainerUnhealthy(container);
                LOG.info("Marked Container UNHEALTHY, ContainerID: {}", containerID);
            } catch (IOException ioe) {
                // Just log the error here in case marking the container fails;
                // return the actual failure response to the client.
                LOG.error("Failed to mark container " + containerID + " UNHEALTHY. ", ioe);
            }
            // In any case, the in-memory state of the container should now be unhealthy.
            Preconditions.checkArgument(container.getContainerData().getState() == State.UNHEALTHY);
            sendCloseContainerActionIfNeeded(container);
        }
        if (result == Result.SUCCESS) {
            updateBCSID(container, dispatcherContext, cmdType);
            audit(action, eventType, params, AuditEventStatus.SUCCESS, null);
        } else {
            audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception(responseProto.getMessage()));
        }
        return responseProto;
    } else {
        // log failure
        audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception("UNSUPPORTED_REQUEST"));
        return unsupportedRequest(msg);
    }
}
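
For reference, here is a minimal, self-contained sketch of the pattern the example above relies on: building a ContainerCommandResponseProto that carries an explicit ContainerProtos.Result and branching on that result afterwards. Only the protobuf-generated types and builder setters (setCmdType, setTraceID, setResult, setMessage) are real API; the class name, method name, and sample values in the sketch (ResultUsageSketch, buildResponse, the container ID in the message) are illustrative assumptions, not part of HddsDispatcher.

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;

// Illustrative sketch only: constructs a response carrying a Result code and
// inspects it the way dispatchRequest does after handler.handle(...) returns.
public final class ResultUsageSketch {

    // Builds a response for the given command type with an explicit Result code.
    static ContainerCommandResponseProto buildResponse(Type cmdType, Result result, String message) {
        return ContainerCommandResponseProto.newBuilder()
            .setCmdType(cmdType)
            .setTraceID("trace-0")
            .setResult(result)
            .setMessage(message)
            .build();
    }

    public static void main(String[] args) {
        ContainerCommandResponseProto response = buildResponse(Type.WriteChunk,
            Result.CONTAINER_NOT_FOUND, "ContainerID 42 does not exist");

        // Callers branch on the Result enum: SUCCESS leads to BCSID updates and
        // a success audit entry, anything else is audited as a failure.
        if (response.getResult() == Result.SUCCESS) {
            System.out.println("operation succeeded");
        } else {
            System.out.println("operation failed: " + response.getResult() + " - " + response.getMessage());
        }
    }
}
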
Also used :
ContainerType (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType)
Handler (org.apache.hadoop.ozone.container.common.interfaces.Handler)
IOException (java.io.IOException)
InvalidContainerStateException (org.apache.hadoop.hdds.scm.container.common.helpers.InvalidContainerStateException)
ServiceException (com.google.protobuf.ServiceException)
ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException)
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException)
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)
Result (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result)
AuditAction (org.apache.hadoop.ozone.audit.AuditAction)
AuditLoggerType (org.apache.hadoop.ozone.audit.AuditLoggerType)
Type (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type)
Container (org.apache.hadoop.ozone.container.common.interfaces.Container)
State (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State)

Aggregations

ServiceException (com.google.protobuf.ServiceException) 1
IOException (java.io.IOException) 1
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) 1
State (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State) 1
ContainerType (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) 1
Result (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result) 1
Type (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type) 1
ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException) 1
InvalidContainerStateException (org.apache.hadoop.hdds.scm.container.common.helpers.InvalidContainerStateException) 1
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) 1
AuditAction (org.apache.hadoop.ozone.audit.AuditAction) 1
AuditLoggerType (org.apache.hadoop.ozone.audit.AuditLoggerType) 1
Container (org.apache.hadoop.ozone.container.common.interfaces.Container) 1
Handler (org.apache.hadoop.ozone.container.common.interfaces.Handler) 1