Example 1 with Type

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type in project ozone by apache.

From class HddsDispatcher, method dispatchRequest.

@SuppressWarnings("methodlength")
private ContainerCommandResponseProto dispatchRequest(ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) {
    Preconditions.checkNotNull(msg);
    if (LOG.isTraceEnabled()) {
        LOG.trace("Command {}, trace ID: {} ", msg.getCmdType(), msg.getTraceID());
    }
    AuditAction action = ContainerCommandRequestPBHelper.getAuditAction(msg.getCmdType());
    EventType eventType = getEventType(msg);
    Map<String, String> params = ContainerCommandRequestPBHelper.getAuditParams(msg);
    ContainerType containerType;
    ContainerCommandResponseProto responseProto = null;
    long startTime = System.currentTimeMillis();
    Type cmdType = msg.getCmdType();
    long containerID = msg.getContainerID();
    metrics.incContainerOpsMetrics(cmdType);
    Container container = getContainer(containerID);
    boolean isWriteStage = (cmdType == Type.WriteChunk && dispatcherContext != null && dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.WRITE_DATA);
    boolean isWriteCommitStage = (cmdType == Type.WriteChunk && dispatcherContext != null && dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.COMMIT_DATA);
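    // Context: under Ratis, a WriteChunk request is split into stages (see
    // Examples 3 and 4 below): WRITE_DATA persists the chunk payload via
    // writeStateMachineData, COMMIT_DATA records block metadata during
    // applyTransaction, and COMBINED covers the non-Ratis path where both
    // happen in one call.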
    try {
        validateToken(msg);
    } catch (IOException ioe) {
        StorageContainerException sce = new StorageContainerException("Block token verification failed. " + ioe.getMessage(), ioe, ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED);
        return ContainerUtils.logAndReturnError(LOG, sce, msg);
    }
    // If the command is executed outside of Ratis, the default write stage
    // is WriteChunkStage.COMBINED.
    boolean isCombinedStage = cmdType == Type.WriteChunk && (dispatcherContext == null || dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.COMBINED);
    Map<Long, Long> container2BCSIDMap = null;
    if (dispatcherContext != null) {
        container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap();
    }
    if (isWriteCommitStage) {
        // Check whether the container ID exists in the loaded snapshot file.
        // If it does not, this is a datanode restart and we are reapplying a
        // transaction that was not captured in the snapshot. Just add the
        // container to the map, and remove it from the missing container set
        // as it might have been added there during "init".
        Preconditions.checkNotNull(container2BCSIDMap);
        if (container != null && container2BCSIDMap.get(containerID) == null) {
            container2BCSIDMap.put(containerID, container.getBlockCommitSequenceId());
            getMissingContainerSet().remove(containerID);
        }
    }
    if (getMissingContainerSet().contains(containerID)) {
        StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " has been lost and cannot be recreated on this DataNode", ContainerProtos.Result.CONTAINER_MISSING);
        audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
        return ContainerUtils.logAndReturnError(LOG, sce, msg);
    }
    if (cmdType != Type.CreateContainer) {
        // CreateContainer should happen only as part of the WRITE_DATA phase
        // of WriteChunk.
        if (container == null && ((isWriteStage || isCombinedStage) || cmdType == Type.PutSmallFile)) {
            // If container does not exist, create one for WriteChunk and
            // PutSmallFile request
            responseProto = createContainer(msg);
            if (responseProto.getResult() != Result.SUCCESS) {
                StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " creation failed", responseProto.getResult());
                audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
                return ContainerUtils.logAndReturnError(LOG, sce, msg);
            }
            Preconditions.checkArgument(isWriteStage && container2BCSIDMap != null || dispatcherContext == null);
            if (container2BCSIDMap != null) {
                // adds this container to list of containers created in the pipeline
                // with initial BCSID recorded as 0.
                container2BCSIDMap.putIfAbsent(containerID, 0L);
            }
            container = getContainer(containerID);
        }
        // if container not found return error
        if (container == null) {
            StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " does not exist", ContainerProtos.Result.CONTAINER_NOT_FOUND);
            audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
            return ContainerUtils.logAndReturnError(LOG, sce, msg);
        }
        containerType = getContainerType(container);
    } else {
        if (!msg.hasCreateContainer()) {
            audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception("MALFORMED_REQUEST"));
            return malformedRequest(msg);
        }
        containerType = msg.getCreateContainer().getContainerType();
    }
    // write before trying to send CloseContainerAction.
    if (!HddsUtils.isReadOnly(msg)) {
        sendCloseContainerActionIfNeeded(container);
    }
    Handler handler = getHandler(containerType);
    if (handler == null) {
        StorageContainerException ex = new StorageContainerException("Invalid " + "ContainerType " + containerType, ContainerProtos.Result.CONTAINER_INTERNAL_ERROR);
        // log failure
        audit(action, eventType, params, AuditEventStatus.FAILURE, ex);
        return ContainerUtils.logAndReturnError(LOG, ex, msg);
    }
    responseProto = handler.handle(msg, container, dispatcherContext);
    if (responseProto != null) {
        metrics.incContainerOpsLatencies(cmdType, System.currentTimeMillis() - startTime);
        // If the request is of Write Type and the container operation
        // is unsuccessful, it implies the applyTransaction on the container
        // failed. All subsequent transactions on the container should fail and
        // hence replica will be marked unhealthy here. In this case, a close
        // container action will be sent to SCM to close the container.
        // ApplyTransaction called on closed Container will fail with Closed
        // container exception. In such cases, ignore the exception here
        // If the container is already marked unhealthy, no need to change the
        // state here.
        Result result = responseProto.getResult();
        if (cmdType == Type.CreateContainer && result == Result.SUCCESS && dispatcherContext != null) {
            Preconditions.checkNotNull(dispatcherContext.getContainer2BCSIDMap());
            container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0));
        }
        if (!HddsUtils.isReadOnly(msg) && !canIgnoreException(result)) {
            if (container == null) {
                throw new NullPointerException("Error on creating containers " + result + " " + responseProto.getMessage());
            }
            // For container to be moved to unhealthy state here, the container can
            // only be in open or closing state.
            State containerState = container.getContainerData().getState();
            Preconditions.checkState(containerState == State.OPEN || containerState == State.CLOSING);
            // mark and persist the container state to be unhealthy
            try {
                handler.markContainerUnhealthy(container);
                LOG.info("Marked Container UNHEALTHY, ContainerID: {}", containerID);
            } catch (IOException ioe) {
                // just log the error here in case marking the container fails,
                // Return the actual failure response to the client
                LOG.error("Failed to mark container " + containerID + " UNHEALTHY. ", ioe);
            }
            // in any case, the in memory state of the container should be unhealthy
            Preconditions.checkArgument(container.getContainerData().getState() == State.UNHEALTHY);
            sendCloseContainerActionIfNeeded(container);
        }
        if (result == Result.SUCCESS) {
            updateBCSID(container, dispatcherContext, cmdType);
            audit(action, eventType, params, AuditEventStatus.SUCCESS, null);
        } else {
            audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception(responseProto.getMessage()));
        }
        return responseProto;
    } else {
        // log failure
        audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception("UNSUPPORTED_REQUEST"));
        return unsupportedRequest(msg);
    }
}
Also used : ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) IOException(java.io.IOException) InvalidContainerStateException(org.apache.hadoop.hdds.scm.container.common.helpers.InvalidContainerStateException) ServiceException(com.google.protobuf.ServiceException) ContainerNotOpenException(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) Result(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result) AuditAction(org.apache.hadoop.ozone.audit.AuditAction) AuditLoggerType(org.apache.hadoop.ozone.audit.AuditLoggerType) Type(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) State(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State)
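
For context, this private method is reached through the dispatcher's public dispatch entry point. A minimal sketch of issuing a ReadContainer command, assuming an already constructed and initialized HddsDispatcher (the dispatcher variable, datanodeUuid, and container ID below are hypothetical):

ContainerCommandRequestProto request = ContainerCommandRequestProto.newBuilder()
        .setCmdType(Type.ReadContainer)
        .setContainerID(1L)            // hypothetical container ID
        .setDatanodeUuid(datanodeUuid) // UUID of the target datanode
        .setReadContainer(ContainerProtos.ReadContainerRequestProto.getDefaultInstance())
        .build();
// A null DispatcherContext indicates the request did not arrive through Ratis.
ContainerCommandResponseProto response = dispatcher.dispatch(request, null);
if (response.getResult() != Result.SUCCESS) {
    LOG.warn("ReadContainer failed: {}", response.getMessage());
}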

Example 2 with Type

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type in project ozone by apache.

From class ContainerCommandRequestPBHelper, method getAuditParams.

public static Map<String, String> getAuditParams(ContainerCommandRequestProto msg) {
    Map<String, String> auditParams = new TreeMap<>();
    Type cmdType = msg.getCmdType();
    String containerID = String.valueOf(msg.getContainerID());
    switch(cmdType) {
        case CreateContainer:
            auditParams.put("containerID", containerID);
            auditParams.put("containerType", msg.getCreateContainer().getContainerType().toString());
            return auditParams;
        case ReadContainer:
            auditParams.put("containerID", containerID);
            return auditParams;
        case UpdateContainer:
            auditParams.put("containerID", containerID);
            auditParams.put("forceUpdate", String.valueOf(msg.getUpdateContainer().getForceUpdate()));
            return auditParams;
        case DeleteContainer:
            auditParams.put("containerID", containerID);
            auditParams.put("forceDelete", String.valueOf(msg.getDeleteContainer().getForceDelete()));
            return auditParams;
        case ListContainer:
            auditParams.put("startContainerID", containerID);
            auditParams.put("count", String.valueOf(msg.getListContainer().getCount()));
            return auditParams;
        case PutBlock:
            try {
                auditParams.put("blockData", BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData()).toString());
            } catch (IOException ex) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Encountered error parsing BlockData from protobuf: " + ex.getMessage());
                }
                return null;
            }
            return auditParams;
        case GetBlock:
            auditParams.put("blockData", BlockID.getFromProtobuf(msg.getGetBlock().getBlockID()).toString());
            return auditParams;
        case DeleteBlock:
            auditParams.put("blockData", BlockID.getFromProtobuf(msg.getDeleteBlock().getBlockID()).toString());
            return auditParams;
        case ListBlock:
            auditParams.put("startLocalID", String.valueOf(msg.getListBlock().getStartLocalID()));
            auditParams.put("count", String.valueOf(msg.getListBlock().getCount()));
            return auditParams;
        case ReadChunk:
            auditParams.put("blockData", BlockID.getFromProtobuf(msg.getReadChunk().getBlockID()).toString());
            return auditParams;
        case DeleteChunk:
            auditParams.put("blockData", BlockID.getFromProtobuf(msg.getDeleteChunk().getBlockID()).toString());
            return auditParams;
        case WriteChunk:
            auditParams.put("blockData", BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID()).toString());
            return auditParams;
        case ListChunk:
            auditParams.put("blockData", BlockID.getFromProtobuf(msg.getListChunk().getBlockID()).toString());
            auditParams.put("prevChunkName", msg.getListChunk().getPrevChunkName());
            auditParams.put("count", String.valueOf(msg.getListChunk().getCount()));
            return auditParams;
        // CompactChunk operation
        case CompactChunk:
            return null;
        case PutSmallFile:
            try {
                auditParams.put("blockData", BlockData.getFromProtoBuf(msg.getPutSmallFile().getBlock().getBlockData()).toString());
            } catch (IOException ex) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Encountered error parsing BlockData from protobuf: " + ex.getMessage());
                }
            }
            return auditParams;
        case GetSmallFile:
            auditParams.put("blockData", BlockID.getFromProtobuf(msg.getGetSmallFile().getBlock().getBlockID()).toString());
            return auditParams;
        case CloseContainer:
            auditParams.put("containerID", containerID);
            return auditParams;
        case GetCommittedBlockLength:
            auditParams.put("blockData", BlockID.getFromProtobuf(msg.getGetCommittedBlockLength().getBlockID()).toString());
            return auditParams;
        default:
            LOG.debug("Invalid command type - {}", cmdType);
            return null;
    }
}
Also used : Type(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type) IOException(java.io.IOException) TreeMap(java.util.TreeMap)
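
Note that getAuditParams returns null both for unknown command types and when BlockData parsing fails for PutBlock, so callers must null-check the result. A brief usage sketch, where AUDIT is an AuditLogger and buildAuditMessage is a hypothetical helper that assembles an AuditMessage from the action and parameters:

AuditAction action = ContainerCommandRequestPBHelper.getAuditAction(msg.getCmdType());
Map<String, String> params = ContainerCommandRequestPBHelper.getAuditParams(msg);
if (params != null) {
    // The TreeMap keeps parameters sorted by key in the emitted audit record.
    AUDIT.logWriteSuccess(buildAuditMessage(action, params));
}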

Example 3 with Type

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type in project ozone by apache.

From class ContainerStateMachine, method applyTransaction.

/*
   * ApplyTransaction calls in Ratis are sequential.
   */
@Override
public CompletableFuture<Message> applyTransaction(TransactionContext trx) {
    long index = trx.getLogEntry().getIndex();
    try {
        // Remove the stateMachine data once both followers have caught up. If
        // any one of the followers is behind, the pending queue will max out
        // at the configurable limit on pending request size and count, then
        // block, and the client will back off as a result.
        removeStateMachineDataIfNeeded(index);
        // If we are not waiting for both followers to catch up, drop the
        // cached state machine data for this entry as soon as it is applied.
        if (!waitOnBothFollowers) {
            stateMachineDataCache.removeIf(k -> k >= index);
        }
        DispatcherContext.Builder builder = new DispatcherContext.Builder().setTerm(trx.getLogEntry().getTerm()).setLogIndex(index);
        long applyTxnStartTime = Time.monotonicNowNanos();
        applyTransactionSemaphore.acquire();
        metrics.incNumApplyTransactionsOps();
        ContainerCommandRequestProto requestProto = getContainerCommandRequestProto(gid, trx.getStateMachineLogEntry().getLogData());
        Type cmdType = requestProto.getCmdType();
        // Make sure that in write chunk, the user data is not set
        if (cmdType == Type.WriteChunk) {
            Preconditions.checkArgument(requestProto.getWriteChunk().getData().isEmpty());
            builder.setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA);
        }
        if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile || cmdType == Type.PutBlock || cmdType == Type.CreateContainer) {
            builder.setContainer2BCSIDMap(container2BCSIDMap);
        }
        CompletableFuture<Message> applyTransactionFuture = new CompletableFuture<>();
        final Consumer<Exception> exceptionHandler = e -> {
            LOG.error("gid {} : ApplyTransaction failed. cmd {} logIndex " + "{} exception {}", gid, requestProto.getCmdType(), index, e);
            stateMachineHealthy.compareAndSet(true, false);
            metrics.incNumApplyTransactionsFails();
            applyTransactionFuture.completeExceptionally(e);
        };
        // Ensure the command gets executed in a separate thread from the
        // stateMachineUpdater thread, which is calling applyTransaction here.
        final CompletableFuture<ContainerCommandResponseProto> future = submitTask(requestProto, builder, exceptionHandler);
        future.thenApply(r -> {
            if (trx.getServerRole() == RaftPeerRole.LEADER && trx.getStateMachineContext() != null) {
                long startTime = (long) trx.getStateMachineContext();
                metrics.incPipelineLatency(cmdType, (Time.monotonicNowNanos() - startTime) / 1000000L);
            }
            // Any failure other than CONTAINER_NOT_OPEN or CLOSED_CONTAINER_IO
            // marks the replica's state machine unhealthy.
            if (r.getResult() != ContainerProtos.Result.SUCCESS && r.getResult() != ContainerProtos.Result.CONTAINER_NOT_OPEN && r.getResult() != ContainerProtos.Result.CLOSED_CONTAINER_IO) {
                StorageContainerException sce = new StorageContainerException(r.getMessage(), r.getResult());
                LOG.error("gid {} : ApplyTransaction failed. cmd {} logIndex {} msg : " + "{} Container Result: {}", gid, r.getCmdType(), index, r.getMessage(), r.getResult());
                metrics.incNumApplyTransactionsFails();
                // Since applyTransaction completes exceptionally here, the
                // exception will be caught in the stateMachineUpdater in Ratis
                // before any further snapshot is taken, and the Ratis server
                // will shut down.
                applyTransactionFuture.completeExceptionally(sce);
                stateMachineHealthy.compareAndSet(true, false);
                ratisServer.handleApplyTransactionFailure(gid, trx.getServerRole());
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("gid {} : ApplyTransaction completed. cmd {} logIndex {} msg : " + "{} Container Result: {}", gid, r.getCmdType(), index, r.getMessage(), r.getResult());
                }
                applyTransactionFuture.complete(r::toByteString);
                if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile) {
                    metrics.incNumBytesCommittedCount(requestProto.getWriteChunk().getChunkData().getLen());
                }
                // Update the last applied term/index only if the state machine
                // has not seen failures before.
                if (isStateMachineHealthy()) {
                    final Long previous = applyTransactionCompletionMap.put(index, trx.getLogEntry().getTerm());
                    Preconditions.checkState(previous == null);
                    updateLastApplied();
                }
            }
            return applyTransactionFuture;
        }).whenComplete((r, t) -> {
            if (t != null) {
                stateMachineHealthy.set(false);
                LOG.error("gid {} : ApplyTransaction failed. cmd {} logIndex " + "{} exception {}", gid, requestProto.getCmdType(), index, t);
            }
            applyTransactionSemaphore.release();
            metrics.recordApplyTransactionCompletion(Time.monotonicNowNanos() - applyTxnStartTime);
        });
        return applyTransactionFuture;
    } catch (InterruptedException e) {
        metrics.incNumApplyTransactionsFails();
        Thread.currentThread().interrupt();
        return completeExceptionally(e);
    } catch (IOException e) {
        metrics.incNumApplyTransactionsFails();
        return completeExceptionally(e);
    }
}
Also used : ScmConfigKeys(org.apache.hadoop.hdds.scm.ScmConfigKeys) Arrays(java.util.Arrays) DatanodeRatisServerConfig(org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig) TermIndex(org.apache.ratis.server.protocol.TermIndex) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) TransactionContext(org.apache.ratis.statemachine.TransactionContext) LoggerFactory(org.slf4j.LoggerFactory) InvalidProtocolBufferException(org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException) DatanodeConfiguration(org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration) ConfigurationSource(org.apache.hadoop.hdds.conf.ConfigurationSource) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) Map(java.util.Map) RaftStorage(org.apache.ratis.server.storage.RaftStorage) SingleFileSnapshotInfo(org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo) StateMachineException(org.apache.ratis.protocol.exceptions.StateMachineException) ResourceLimitCache(org.apache.hadoop.hdds.utils.ResourceLimitCache) LogEntryProto(org.apache.ratis.proto.RaftProtos.LogEntryProto) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) BufferUtils(org.apache.hadoop.ozone.common.utils.BufferUtils) Container2BCSIDMapProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Container2BCSIDMapProto) SimpleStateMachineStorage(org.apache.ratis.statemachine.impl.SimpleStateMachineStorage) StateMachineLogEntryProto(org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) List(java.util.List) StorageUnit(org.apache.hadoop.hdds.conf.StorageUnit) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) BaseStateMachine(org.apache.ratis.statemachine.impl.BaseStateMachine) OzoneConfigKeys(org.apache.hadoop.ozone.OzoneConfigKeys) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ContainerController(org.apache.hadoop.ozone.container.ozoneimpl.ContainerController) RoleInfoProto(org.apache.ratis.proto.RaftProtos.RoleInfoProto) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) RaftGroupMemberId(org.apache.ratis.protocol.RaftGroupMemberId) RaftLog(org.apache.ratis.server.raftlog.RaftLog) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ReadChunkRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkRequestProto) CheckedSupplier(org.apache.ratis.util.function.CheckedSupplier) CompletableFuture(java.util.concurrent.CompletableFuture) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) ConcurrentMap(java.util.concurrent.ConcurrentMap) RaftGroupId(org.apache.ratis.protocol.RaftGroupId) Message(org.apache.ratis.protocol.Message) Type(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type) StateMachineStorage(org.apache.ratis.statemachine.StateMachineStorage) HddsUtils(org.apache.hadoop.hdds.HddsUtils) ContainerCommandRequestMessage(org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage) WriteChunkRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto) ContainerDispatcher(org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher) ExecutorService(java.util.concurrent.ExecutorService) ContainerNotOpenException(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException) OutputStream(java.io.OutputStream) TaskQueue(org.apache.ratis.util.TaskQueue) Logger(org.slf4j.Logger) Semaphore(java.util.concurrent.Semaphore) RaftPeerId(org.apache.ratis.protocol.RaftPeerId) FileOutputStream(java.io.FileOutputStream) IOException(java.io.IOException) RaftClientRequest(org.apache.ratis.protocol.RaftClientRequest) FileInputStream(java.io.FileInputStream) File(java.io.File) TextFormat(org.apache.ratis.thirdparty.com.google.protobuf.TextFormat) Consumer(java.util.function.Consumer) RaftPeerRole(org.apache.ratis.proto.RaftProtos.RaftPeerRole) Cache(org.apache.hadoop.hdds.utils.Cache) Time(org.apache.hadoop.util.Time) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) RaftServer(org.apache.ratis.server.RaftServer) ReadChunkResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto)
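
The acquire/release pair on applyTransactionSemaphore above bounds the number of in-flight applyTransaction calls, providing back-pressure on the stateMachineUpdater thread. Stripped of Ozone specifics, the pattern is roughly the following self-contained sketch (class and names hypothetical; the real limit comes from configuration):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;
import java.util.function.Supplier;

final class BoundedApplier {
    private final Semaphore gate;

    BoundedApplier(int maxPending) {
        this.gate = new Semaphore(maxPending);
    }

    <T> CompletableFuture<T> apply(Supplier<CompletableFuture<T>> task)
            throws InterruptedException {
        // Blocks the calling thread once maxPending tasks are in flight.
        gate.acquire();
        CompletableFuture<T> future = task.get(); // hand off to a separate executor
        // Release on completion, whether the task succeeded or failed.
        future.whenComplete((result, error) -> gate.release());
        return future;
    }
}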

Example 4 with Type

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type in project ozone by apache.

From class ContainerStateMachine, method write.

/*
   * writeStateMachineData calls are not synchronized with each other,
   * nor with applyTransaction.
   */
@Override
public CompletableFuture<Message> write(LogEntryProto entry) {
    try {
        metrics.incNumWriteStateMachineOps();
        long writeStateMachineStartTime = Time.monotonicNowNanos();
        ContainerCommandRequestProto requestProto = getContainerCommandRequestProto(gid, entry.getStateMachineLogEntry().getLogData());
        WriteChunkRequestProto writeChunk = WriteChunkRequestProto.newBuilder(requestProto.getWriteChunk()).setData(getStateMachineData(entry.getStateMachineLogEntry())).build();
        requestProto = ContainerCommandRequestProto.newBuilder(requestProto).setWriteChunk(writeChunk).build();
        Type cmdType = requestProto.getCmdType();
        // CreateContainer will happen as a part of writeChunk only.
        switch(cmdType) {
            case WriteChunk:
                return handleWriteChunk(requestProto, entry.getIndex(), entry.getTerm(), writeStateMachineStartTime);
            default:
                throw new IllegalStateException("Cmd Type:" + cmdType + " should not have state machine data");
        }
    } catch (IOException e) {
        metrics.incNumWriteStateMachineFails();
        return completeExceptionally(e);
    }
}
Also used : WriteChunkRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto) Type(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) IOException(java.io.IOException)
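
Both this method and applyTransaction in Example 3 return completeExceptionally(e) on failure. That helper is not shown on this page, but presumably it just wraps the exception in an already-failed future, along these lines:

private static CompletableFuture<Message> completeExceptionally(Exception e) {
    final CompletableFuture<Message> future = new CompletableFuture<>();
    future.completeExceptionally(e);
    return future;
}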

Example 5 with Type

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type in project ozone by apache.

From class HddsDispatcher, method validateContainerCommand.

/**
 * This will be called as a part of creating the log entry during
 * startTransaction in Ratis on the leader node. In such cases, if the
 * container is not in open state for writing we should just fail.
 * Leader will propagate the exception to client.
 * @param msg  container command proto
 * @throws StorageContainerException in case the container is not open for
 *         write requests, or is in an invalid state for read requests.
 */
@Override
public void validateContainerCommand(ContainerCommandRequestProto msg) throws StorageContainerException {
    long containerID = msg.getContainerID();
    Container container = getContainer(containerID);
    if (container == null) {
        return;
    }
    ContainerType containerType = container.getContainerType();
    Type cmdType = msg.getCmdType();
    AuditAction action = ContainerCommandRequestPBHelper.getAuditAction(cmdType);
    EventType eventType = getEventType(msg);
    Map<String, String> params = ContainerCommandRequestPBHelper.getAuditParams(msg);
    Handler handler = getHandler(containerType);
    if (handler == null) {
        StorageContainerException ex = new StorageContainerException("Invalid " + "ContainerType " + containerType, ContainerProtos.Result.CONTAINER_INTERNAL_ERROR);
        audit(action, eventType, params, AuditEventStatus.FAILURE, ex);
        throw ex;
    }
    State containerState = container.getContainerState();
    if (!HddsUtils.isReadOnly(msg) && containerState != State.OPEN) {
        switch(cmdType) {
            case CreateContainer:
                // Create Container is idempotent. There is nothing to validate.
                break;
            case CloseContainer:
                // CloseContainer is allowed even when the container is no
                // longer open, since it may transition out of OPEN while the
                // command is in flight. Nothing to validate here.
                break;
            default:
                // if the container is not open, no updates can happen. Just throw
                // an exception
                ContainerNotOpenException cex = new ContainerNotOpenException("Container " + containerID + " in " + containerState + " state");
                audit(action, eventType, params, AuditEventStatus.FAILURE, cex);
                throw cex;
        }
    } else if (HddsUtils.isReadOnly(msg) && containerState == State.INVALID) {
        InvalidContainerStateException iex = new InvalidContainerStateException("Container " + containerID + " in " + containerState + " state");
        audit(action, eventType, params, AuditEventStatus.FAILURE, iex);
        throw iex;
    }
    try {
        validateToken(msg);
    } catch (IOException ioe) {
        throw new StorageContainerException("Block token verification failed. " + ioe.getMessage(), ioe, ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED);
    }
}
Also used : ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) IOException(java.io.IOException) ContainerNotOpenException(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException) AuditAction(org.apache.hadoop.ozone.audit.AuditAction) InvalidContainerStateException(org.apache.hadoop.hdds.scm.container.common.helpers.InvalidContainerStateException) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) AuditLoggerType(org.apache.hadoop.ozone.audit.AuditLoggerType) Type(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type) State(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException)
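
Since validateContainerCommand signals problems by throwing rather than returning a status, a caller on the Ratis leader would wrap it in a try/catch before appending the log entry. A hypothetical sketch (the completeExceptionally failure path is borrowed from Example 4):

try {
    dispatcher.validateContainerCommand(request);
} catch (StorageContainerException e) {
    // Covers the ContainerNotOpenException and InvalidContainerStateException
    // subclasses as well as block token verification failures; fail the
    // transaction so the leader propagates the error back to the client.
    return completeExceptionally(e);
}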

Aggregations

IOException (java.io.IOException) 5
Type (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type) 5
ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException) 3
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) 3
ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) 2
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) 2
State (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State) 2
ContainerType (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) 2
WriteChunkRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto) 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 1
Preconditions (com.google.common.base.Preconditions) 1
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder) 1
ServiceException (com.google.protobuf.ServiceException) 1
File (java.io.File) 1
FileInputStream (java.io.FileInputStream) 1
FileOutputStream (java.io.FileOutputStream) 1
OutputStream (java.io.OutputStream) 1
Arrays (java.util.Arrays) 1
Collection (java.util.Collection) 1
List (java.util.List) 1