use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType in project ozone by apache.
the class HddsDispatcher method dispatchRequest.
@SuppressWarnings("methodlength")
private ContainerCommandResponseProto dispatchRequest(ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) {
Preconditions.checkNotNull(msg);
if (LOG.isTraceEnabled()) {
LOG.trace("Command {}, trace ID: {} ", msg.getCmdType(), msg.getTraceID());
}
AuditAction action = ContainerCommandRequestPBHelper.getAuditAction(msg.getCmdType());
EventType eventType = getEventType(msg);
Map<String, String> params = ContainerCommandRequestPBHelper.getAuditParams(msg);
ContainerType containerType;
ContainerCommandResponseProto responseProto = null;
long startTime = System.currentTimeMillis();
Type cmdType = msg.getCmdType();
long containerID = msg.getContainerID();
metrics.incContainerOpsMetrics(cmdType);
Container container = getContainer(containerID);
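// WriteChunk requests replicated through Ratis are applied in two stages: WRITE_DATA persists the chunk data, COMMIT_DATA updates the block metadata.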
boolean isWriteStage = (cmdType == Type.WriteChunk && dispatcherContext != null && dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.WRITE_DATA);
boolean isWriteCommitStage = (cmdType == Type.WriteChunk && dispatcherContext != null && dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.COMMIT_DATA);
try {
validateToken(msg);
} catch (IOException ioe) {
StorageContainerException sce = new StorageContainerException("Block token verification failed. " + ioe.getMessage(), ioe, ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED);
return ContainerUtils.logAndReturnError(LOG, sce, msg);
}
// If the command is executed outside of Ratis, the default write stage
// is WriteChunkStage.COMBINED.
boolean isCombinedStage = cmdType == Type.WriteChunk && (dispatcherContext == null || dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.COMBINED);
Map<Long, Long> container2BCSIDMap = null;
if (dispatcherContext != null) {
container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap();
}
if (isWriteCommitStage) {
// Check whether the container ID exists in the loaded snapshot file. If it
// does not, this is a datanode restart and we are reapplying a transaction
// that was not captured in the snapshot.
// Just add the container to the map, and remove it from the missing
// container set as it might have been added there during "init".
Preconditions.checkNotNull(container2BCSIDMap);
if (container != null && container2BCSIDMap.get(containerID) == null) {
container2BCSIDMap.put(containerID, container.getBlockCommitSequenceId());
getMissingContainerSet().remove(containerID);
}
}
if (getMissingContainerSet().contains(containerID)) {
StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " has been lost and and cannot be recreated on this DataNode", ContainerProtos.Result.CONTAINER_MISSING);
audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
return ContainerUtils.logAndReturnError(LOG, sce, msg);
}
if (cmdType != Type.CreateContainer) {
/**
* Create Container should happen only as part of Write_Data phase of
* writeChunk.
*/
if (container == null && ((isWriteStage || isCombinedStage) || cmdType == Type.PutSmallFile)) {
// If container does not exist, create one for WriteChunk and
// PutSmallFile request
responseProto = createContainer(msg);
if (responseProto.getResult() != Result.SUCCESS) {
StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " creation failed", responseProto.getResult());
audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
return ContainerUtils.logAndReturnError(LOG, sce, msg);
}
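// A container may only be created here in the Ratis WRITE_DATA stage (where the container-to-BCSID map is available) or outside Ratis (no dispatcher context).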
Preconditions.checkArgument(isWriteStage && container2BCSIDMap != null || dispatcherContext == null);
if (container2BCSIDMap != null) {
// adds this container to list of containers created in the pipeline
// with initial BCSID recorded as 0.
container2BCSIDMap.putIfAbsent(containerID, 0L);
}
container = getContainer(containerID);
}
// if container not found return error
if (container == null) {
StorageContainerException sce = new StorageContainerException("ContainerID " + containerID + " does not exist", ContainerProtos.Result.CONTAINER_NOT_FOUND);
audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
return ContainerUtils.logAndReturnError(LOG, sce, msg);
}
containerType = getContainerType(container);
} else {
if (!msg.hasCreateContainer()) {
audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception("MALFORMED_REQUEST"));
return malformedRequest(msg);
}
containerType = msg.getCreateContainer().getContainerType();
}
// Small performance optimization: only check whether a CloseContainerAction
// needs to be sent for write operations.
if (!HddsUtils.isReadOnly(msg)) {
sendCloseContainerActionIfNeeded(container);
}
Handler handler = getHandler(containerType);
if (handler == null) {
StorageContainerException ex = new StorageContainerException("Invalid " + "ContainerType " + containerType, ContainerProtos.Result.CONTAINER_INTERNAL_ERROR);
// log failure
audit(action, eventType, params, AuditEventStatus.FAILURE, ex);
return ContainerUtils.logAndReturnError(LOG, ex, msg);
}
responseProto = handler.handle(msg, container, dispatcherContext);
if (responseProto != null) {
metrics.incContainerOpsLatencies(cmdType, System.currentTimeMillis() - startTime);
// If the request is a write and the container operation is unsuccessful,
// the applyTransaction on the container failed. All subsequent transactions
// on the container should fail as well, so the replica is marked unhealthy
// here and a close container action is sent to SCM to close the container.
// ApplyTransaction called on an already closed container fails with a
// closed-container exception; such results are ignored here. If the
// container is already marked unhealthy, there is no need to change the
// state.
Result result = responseProto.getResult();
if (cmdType == Type.CreateContainer && result == Result.SUCCESS && dispatcherContext != null) {
Preconditions.checkNotNull(dispatcherContext.getContainer2BCSIDMap());
container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0));
}
if (!HddsUtils.isReadOnly(msg) && !canIgnoreException(result)) {
if (container == null) {
throw new NullPointerException("Error on creating containers " + result + " " + responseProto.getMessage());
}
// For container to be moved to unhealthy state here, the container can
// only be in open or closing state.
State containerState = container.getContainerData().getState();
Preconditions.checkState(containerState == State.OPEN || containerState == State.CLOSING);
// mark and persist the container state to be unhealthy
try {
handler.markContainerUnhealthy(container);
LOG.info("Marked Container UNHEALTHY, ContainerID: {}", containerID);
} catch (IOException ioe) {
// If marking the container unhealthy fails, just log the error here and
// return the actual failure response to the client.
LOG.error("Failed to mark container " + containerID + " UNHEALTHY. ", ioe);
}
// in any case, the in memory state of the container should be unhealthy
Preconditions.checkArgument(container.getContainerData().getState() == State.UNHEALTHY);
sendCloseContainerActionIfNeeded(container);
}
if (result == Result.SUCCESS) {
updateBCSID(container, dispatcherContext, cmdType);
audit(action, eventType, params, AuditEventStatus.SUCCESS, null);
} else {
audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception(responseProto.getMessage()));
}
return responseProto;
} else {
// log failure
audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception("UNSUPPORTED_REQUEST"));
return unsupportedRequest(msg);
}
}
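For context, dispatchRequest is invoked from the public dispatch method. Below is a minimal sketch of issuing a CreateContainer command; the builder setters are assumed from the standard ContainerCommandRequestProto definition, and the datanode UUID is a placeholder.
// Sketch (assumed builder API): build a CreateContainer request and hand it to the dispatcher.
ContainerCommandRequestProto request = ContainerCommandRequestProto.newBuilder()
    .setCmdType(Type.CreateContainer)
    .setContainerID(containerID)
    .setDatanodeUuid(datanodeUuid)  // placeholder datanode UUID string
    .setCreateContainer(ContainerProtos.CreateContainerRequestProto.newBuilder()
        .setContainerType(ContainerType.KeyValueContainer))
    .build();
ContainerCommandResponseProto response = dispatcher.dispatch(request, null);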
use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType in project ozone by apache.
the class TestHddsDispatcher method testContainerCloseActionWhenFull.
@Test
public void testContainerCloseActionWhenFull() throws IOException {
String testDir = GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HDDS_DATANODE_DIR_KEY, testDir);
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
DatanodeDetails dd = randomDatanodeDetails();
MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
try {
UUID scmId = UUID.randomUUID();
ContainerSet containerSet = new ContainerSet();
DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
StateContext context = Mockito.mock(StateContext.class);
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
Mockito.when(context.getParent()).thenReturn(stateMachine);
KeyValueContainerData containerData = new KeyValueContainerData(1L, layout, (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), dd.getUuidString());
Container container = new KeyValueContainer(containerData, conf);
container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString());
containerSet.addContainer(container);
ContainerMetrics metrics = ContainerMetrics.create(conf);
Map<ContainerType, Handler> handlers = Maps.newHashMap();
for (ContainerType containerType : ContainerType.values()) {
handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
}
HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
hddsDispatcher.setClusterId(scmId.toString());
ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseOne.getResult());
verify(context, times(0)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
containerData.setBytesUsed(Double.valueOf(StorageUnit.MB.toBytes(950)).longValue());
ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseTwo.getResult());
verify(context, times(1)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
} finally {
volumeSet.shutdown();
ContainerMetrics.remove();
FileUtils.deleteDirectory(new File(testDir));
}
}
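The second write triggers the close action because used space crosses the container close threshold (950 MB of a 1 GB container). A rough sketch of the fullness check behind sendCloseContainerActionIfNeeded is shown below; the threshold value (roughly 0.9 by default) and the helper name are assumptions, not the dispatcher's actual code.
// Sketch only: approximate form of the fullness check that triggers the close action.
// containerCloseThreshold is a configured ratio (assumed default around 0.9).
private boolean isNearlyFull(ContainerData data, double containerCloseThreshold) {
  double usedRatio = 1.0 * data.getBytesUsed() / data.getMaxSize();
  return usedRatio >= containerCloseThreshold;
}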
use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType in project ozone by apache.
the class TestHddsDispatcher method createDispatcher.
/**
* Creates an HddsDispatcher instance with the given parameters.
* @param dd datanode details.
* @param scmId SCM UUID.
* @param conf configuration to use.
* @return HddsDispatcher instance.
* @throws IOException if the dispatcher cannot be initialized.
*/
private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException {
ContainerSet containerSet = new ContainerSet();
VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
StateContext context = Mockito.mock(StateContext.class);
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
Mockito.when(context.getParent()).thenReturn(stateMachine);
ContainerMetrics metrics = ContainerMetrics.create(conf);
Map<ContainerType, Handler> handlers = Maps.newHashMap();
for (ContainerType containerType : ContainerType.values()) {
handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
}
HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
hddsDispatcher.setClusterId(scmId.toString());
return hddsDispatcher;
}
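A typical use of this helper follows the same pattern as the test above; in this sketch, getWriteChunkRequest is the test class's own request builder and testDir is a temporary directory as in the other test.
// Sketch: build a dispatcher with the helper and submit a WriteChunk request.
DatanodeDetails dd = randomDatanodeDetails();
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HDDS_DATANODE_DIR_KEY, testDir);  // testDir: a temporary directory, as above
HddsDispatcher dispatcher = createDispatcher(dd, UUID.randomUUID(), conf);
ContainerCommandResponseProto response =
    dispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());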
use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType in project ozone by apache.
the class TestKeyValueHandler method setup.
@Before
public void setup() throws StorageContainerException {
// Create mock HddsDispatcher and KeyValueHandler.
handler = Mockito.mock(KeyValueHandler.class);
HashMap<ContainerType, Handler> handlers = new HashMap<>();
handlers.put(ContainerType.KeyValueContainer, handler);
dispatcher = new HddsDispatcher(new OzoneConfiguration(), Mockito.mock(ContainerSet.class), Mockito.mock(VolumeSet.class), handlers, Mockito.mock(StateContext.class), Mockito.mock(ContainerMetrics.class), Mockito.mock(TokenVerifier.class));
}
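With the KeyValue handler mocked, a test can dispatch a request and then assert that it was routed to that handler. A hedged sketch of such a verification:
// Sketch: after dispatching a request for a KeyValueContainer, verify the mocked
// handler received it. Matchers are left fully generic because the container
// argument may be null for commands such as CreateContainer.
Mockito.verify(handler, Mockito.times(1))
    .handle(Mockito.any(), Mockito.any(), Mockito.any());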
use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType in project ozone by apache.
the class ClosedContainerReplicator method initializeReplicationSupervisor.
@NotNull
private void initializeReplicationSupervisor(ConfigurationSource conf) throws IOException {
String fakeDatanodeUuid = datanode;
if (fakeDatanodeUuid.isEmpty()) {
fakeDatanodeUuid = UUID.randomUUID().toString();
}
ContainerSet containerSet = new ContainerSet();
ContainerMetrics metrics = ContainerMetrics.create(conf);
MutableVolumeSet volumeSet = new MutableVolumeSet(fakeDatanodeUuid, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
Map<ContainerType, Handler> handlers = new HashMap<>();
for (ContainerType containerType : ContainerType.values()) {
final Handler handler = Handler.getHandlerForContainerType(containerType, conf, fakeDatanodeUuid, containerSet, volumeSet, metrics, containerReplicaProto -> {
});
handler.setClusterID(UUID.randomUUID().toString());
handlers.put(containerType, handler);
}
ContainerController controller = new ContainerController(containerSet, handlers);
ContainerReplicator replicator = new DownloadAndImportReplicator(containerSet, controller, new SimpleContainerDownloader(conf, null), new TarContainerPacker());
supervisor = new ReplicationSupervisor(containerSet, replicator, 10);
}
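Once the supervisor is created, replication work is queued as tasks. The sketch below assumes a ReplicationTask constructor taking a container ID and a list of source datanodes; getSourceDatanodes is a hypothetical helper for looking up replicas to copy from.
// Sketch: queue a container replication task on the supervisor.
// ReplicationTask(containerId, sources) is assumed from the replication API;
// getSourceDatanodes is a hypothetical lookup of source replicas.
List<DatanodeDetails> sources = getSourceDatanodes(containerId);
supervisor.addTask(new ReplicationTask(containerId, sources));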