Use of org.apache.hadoop.ozone.container.common.interfaces.Handler in project ozone by apache.
In class TestContainerMetrics, the method testContainerMetrics:
@Test
public void testContainerMetrics() throws Exception {
XceiverServerGrpc server = null;
XceiverClientGrpc client = null;
long containerID = ContainerTestHelper.getTestContainerID();
String path = GenericTestUtils.getRandomizedTempPath();
try {
final int interval = 1;
Pipeline pipeline = MockPipeline.createSingleNodePipeline();
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, interval);
DatanodeDetails datanodeDetails = randomDatanodeDetails();
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path);
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);
VolumeSet volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
ContainerSet containerSet = new ContainerSet();
DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
StateContext context = Mockito.mock(StateContext.class);
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
Mockito.when(context.getParent()).thenReturn(stateMachine);
ContainerMetrics metrics = ContainerMetrics.create(conf);
Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
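// The final lambda argument to getHandlerForContainerType is the callback used
// to send incremental container reports; these tests pass a no-op consumer
// because no SCM is involved.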
for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, c -> {
}));
}
HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
dispatcher.setClusterId(UUID.randomUUID().toString());
server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null);
client = new XceiverClientGrpc(pipeline, conf);
server.start();
client.connect();
// Create container
ContainerCommandRequestProto request = ContainerTestHelper.getCreateContainerRequest(containerID, pipeline);
ContainerCommandResponseProto response = client.sendCommand(request);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
// Write Chunk
BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024, null);
response = client.sendCommand(writeChunkRequest);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
// Read Chunk
ContainerProtos.ContainerCommandRequestProto readChunkRequest = ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest.getWriteChunk());
response = client.sendCommand(readChunkRequest);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
MetricsRecordBuilder containerMetrics = getMetrics("StorageContainerMetrics");
assertCounter("NumOps", 3L, containerMetrics);
assertCounter("numCreateContainer", 1L, containerMetrics);
assertCounter("numWriteChunk", 1L, containerMetrics);
assertCounter("numReadChunk", 1L, containerMetrics);
assertCounter("bytesWriteChunk", 1024L, containerMetrics);
assertCounter("bytesReadChunk", 1024L, containerMetrics);
String sec = interval + "s";
Thread.sleep((interval + 1) * 1000);
assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics);
// Check VolumeIOStats metrics
List<HddsVolume> volumes = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
HddsVolume hddsVolume = volumes.get(0);
MetricsRecordBuilder volumeIOMetrics = getMetrics(hddsVolume.getVolumeIOStats().getMetricsSourceName());
assertCounter("ReadBytes", 1024L, volumeIOMetrics);
assertCounter("ReadOpCount", 1L, volumeIOMetrics);
assertCounter("WriteBytes", 1024L, volumeIOMetrics);
assertCounter("WriteOpCount", 1L, volumeIOMetrics);
} finally {
if (client != null) {
client.close();
}
if (server != null) {
server.stop();
}
// clean up volume dir
File file = new File(path);
if (file.exists()) {
FileUtil.fullyDelete(file);
}
}
}
Use of org.apache.hadoop.ozone.container.common.interfaces.Handler in project ozone by apache.
In class TestSecureContainerServer, the method createDispatcher:
private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException {
ContainerSet containerSet = new ContainerSet();
conf.set(HDDS_DATANODE_DIR_KEY, Paths.get(TEST_DIR, "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString());
conf.set(OZONE_METADATA_DIRS, TEST_DIR);
VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
StateContext context = Mockito.mock(StateContext.class);
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
Mockito.when(context.getParent()).thenReturn(stateMachine);
ContainerMetrics metrics = ContainerMetrics.create(conf);
Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, dd.getUuid().toString(), containerSet, volumeSet, metrics, c -> {
}));
}
HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, TokenVerifier.create(new SecurityConfig(conf), caClient));
hddsDispatcher.setClusterId(scmId.toString());
return hddsDispatcher;
}
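The secure dispatcher built above is served over gRPC the same way as in the other examples; a minimal sketch of that wiring (assumed, reusing the method's own parameters and the caClient field already referenced above):
HddsDispatcher dispatcher = createDispatcher(dd, scmId, conf);
// With block tokens enabled in conf, the TokenVerifier created above rejects
// requests that do not carry a valid token before they reach a Handler.
XceiverServerGrpc server = new XceiverServerGrpc(dd, conf, dispatcher, caClient);
server.start();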
Use of org.apache.hadoop.ozone.container.common.interfaces.Handler in project ozone by apache.
In class TestContainerServer, the method testClientServerWithContainerDispatcher:
@Test
public void testClientServerWithContainerDispatcher() throws Exception {
XceiverServerGrpc server = null;
XceiverClientGrpc client = null;
UUID scmId = UUID.randomUUID();
try {
Pipeline pipeline = MockPipeline.createSingleNodePipeline();
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
ContainerSet containerSet = new ContainerSet();
VolumeSet volumeSet = mock(MutableVolumeSet.class);
ContainerMetrics metrics = ContainerMetrics.create(conf);
Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
DatanodeDetails datanodeDetails = randomDatanodeDetails();
DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
StateContext context = Mockito.mock(StateContext.class);
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
Mockito.when(context.getParent()).thenReturn(stateMachine);
for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, c -> {
}));
}
HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
dispatcher.setClusterId(scmId.toString());
dispatcher.init();
server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, caClient);
client = new XceiverClientGrpc(pipeline, conf);
server.start();
client.connect();
ContainerCommandRequestProto request = ContainerTestHelper.getCreateContainerRequest(ContainerTestHelper.getTestContainerID(), pipeline);
ContainerCommandResponseProto response = client.sendCommand(request);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
} finally {
if (client != null) {
client.close();
}
if (server != null) {
server.stop();
}
}
}
Use of org.apache.hadoop.ozone.container.common.interfaces.Handler in project ozone by apache.
In class ClosedContainerReplicator, the method initializeReplicationSupervisor:
@NotNull
private void initializeReplicationSupervisor(ConfigurationSource conf) throws IOException {
String fakeDatanodeUuid = datanode;
if (fakeDatanodeUuid.isEmpty()) {
fakeDatanodeUuid = UUID.randomUUID().toString();
}
ContainerSet containerSet = new ContainerSet();
ContainerMetrics metrics = ContainerMetrics.create(conf);
MutableVolumeSet volumeSet = new MutableVolumeSet(fakeDatanodeUuid, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
Map<ContainerType, Handler> handlers = new HashMap<>();
for (ContainerType containerType : ContainerType.values()) {
final Handler handler = Handler.getHandlerForContainerType(containerType, conf, fakeDatanodeUuid, containerSet, volumeSet, metrics, containerReplicaProto -> {
});
handler.setClusterID(UUID.randomUUID().toString());
handlers.put(containerType, handler);
}
ContainerController controller = new ContainerController(containerSet, handlers);
ContainerReplicator replicator = new DownloadAndImportReplicator(containerSet, controller, new SimpleContainerDownloader(conf, null), new TarContainerPacker());
supervisor = new ReplicationSupervisor(containerSet, replicator, 10);
}
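Once the supervisor is built, the replicator queues one task per closed container to re-import. A minimal sketch of that step, assuming a ReplicationTask constructor that takes the container ID and the list of source datanodes (the values below are hypothetical):
long containerId = 1L;
List<DatanodeDetails> sources = Collections.singletonList(MockDatanodeDetails.randomDatanodeDetails());
// Each queued task is picked up by the supervisor's worker pool, which
// delegates the download and import to the DownloadAndImportReplicator above.
supervisor.addTask(new ReplicationTask(containerId, sources));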
Use of org.apache.hadoop.ozone.container.common.interfaces.Handler in project ozone by apache.
In class HddsDispatcher, the method validateContainerCommand:
/**
* This will be called as part of creating the log entry during
* startTransaction in Ratis on the leader node. In such cases, if the
* container is not in an open state for writing, we should just fail.
* The leader will propagate the exception to the client.
* @param msg container command proto
* @throws StorageContainerException if the container is not in an open state
* for write requests, or is in an invalid state for read requests.
*/
@Override
public void validateContainerCommand(ContainerCommandRequestProto msg) throws StorageContainerException {
long containerID = msg.getContainerID();
Container container = getContainer(containerID);
if (container == null) {
return;
}
ContainerType containerType = container.getContainerType();
Type cmdType = msg.getCmdType();
AuditAction action = ContainerCommandRequestPBHelper.getAuditAction(cmdType);
EventType eventType = getEventType(msg);
Map<String, String> params = ContainerCommandRequestPBHelper.getAuditParams(msg);
Handler handler = getHandler(containerType);
if (handler == null) {
StorageContainerException ex = new StorageContainerException("Invalid " + "ContainerType " + containerType, ContainerProtos.Result.CONTAINER_INTERNAL_ERROR);
audit(action, eventType, params, AuditEventStatus.FAILURE, ex);
throw ex;
}
State containerState = container.getContainerState();
if (!HddsUtils.isReadOnly(msg) && containerState != State.OPEN) {
switch(cmdType) {
case CreateContainer:
// Create Container is idempotent. There is nothing to validate.
break;
case CloseContainer:
// Close is allowed even when the container is not open; the state
// transition is handled during execution. Nothing to validate here.
break;
default:
// if the container is not open, no updates can happen. Just throw
// an exception
ContainerNotOpenException cex = new ContainerNotOpenException("Container " + containerID + " in " + containerState + " state");
audit(action, eventType, params, AuditEventStatus.FAILURE, cex);
throw cex;
}
} else if (HddsUtils.isReadOnly(msg) && containerState == State.INVALID) {
InvalidContainerStateException iex = new InvalidContainerStateException("Container " + containerID + " in " + containerState + " state");
audit(action, eventType, params, AuditEventStatus.FAILURE, iex);
throw iex;
}
try {
validateToken(msg);
} catch (IOException ioe) {
throw new StorageContainerException("Block token verification failed. " + ioe.getMessage(), ioe, ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED);
}
}
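To illustrate the write-path branch above, a short hypothetical sketch of what a caller sees when the target container is no longer OPEN (the request is built with the same test helper used in the earlier examples; ContainerNotOpenException is a subtype of StorageContainerException):
ContainerCommandRequestProto writeRequest = ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024, null);
try {
dispatcher.validateContainerCommand(writeRequest);
} catch (ContainerNotOpenException e) {
// Expected once the container has left the OPEN state; on the Ratis leader
// this exception is propagated back to the client.
}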