use of org.apache.hadoop.ozone.container.common.impl.HddsDispatcher in project ozone by apache.
the class TestValidateBCSIDOnRestart method testValidateBCSIDOnDnRestart.
@Test
public void testValidateBCSIDOnDnRestart() throws Exception {
  OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName)
      .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
  // First write and flush creates a container in the datanode
  key.write("ratis".getBytes(UTF_8));
  key.flush();
  key.write("ratis".getBytes(UTF_8));
  KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
  List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
  Assert.assertEquals(1, locationInfoList.size());
  OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
  HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
  ContainerData containerData = dn.getDatanodeStateMachine().getContainer().getContainerSet()
      .getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
  Assert.assertTrue(containerData instanceof KeyValueContainerData);
  KeyValueContainerData keyValueContainerData = (KeyValueContainerData) containerData;
  key.close();
  long containerID = omKeyLocationInfo.getContainerID();
  int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
  // Delete the container's on-disk data (including its DB file) and drop it
  // from the in-memory container set, simulating data loss on the datanode.
  FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath()));
  HddsDatanodeService dnService = cluster.getHddsDatanodes().get(index);
  OzoneContainer ozoneContainer = dnService.getDatanodeStateMachine().getContainer();
  ozoneContainer.getContainerSet().removeContainer(containerID);
  ContainerStateMachine stateMachine = (ContainerStateMachine) TestHelper
      .getStateMachine(cluster.getHddsDatanodes().get(index), omKeyLocationInfo.getPipeline());
  SimpleStateMachineStorage storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
  stateMachine.takeSnapshot();
  Path parentPath = storage.findLatestSnapshot().getFile().getPath();
  stateMachine.buildMissingContainerSet(parentPath.toFile());
  // Since the snapshot threshold is set to 1 and transactions have been
  // applied, we should see snapshot files on disk.
  Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
  // Make sure the missing container set is not empty and contains the
  // container that was deleted above.
  HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher();
  Assert.assertFalse(dispatcher.getMissingContainerSet().isEmpty());
  Assert.assertTrue(dispatcher.getMissingContainerSet().contains(containerID));
  // Write a new key
  key = objectStore.getVolume(volumeName).getBucket(bucketName)
      .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
  // First write and flush creates a container in the datanode
  key.write("ratis1".getBytes(UTF_8));
  key.flush();
  groupOutputStream = (KeyOutputStream) key.getOutputStream();
  locationInfoList = groupOutputStream.getLocationInfoList();
  Assert.assertEquals(1, locationInfoList.size());
  omKeyLocationInfo = locationInfoList.get(0);
  key.close();
  containerID = omKeyLocationInfo.getContainerID();
  dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
  containerData = dn.getDatanodeStateMachine().getContainer().getContainerSet()
      .getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
  Assert.assertTrue(containerData instanceof KeyValueContainerData);
  keyValueContainerData = (KeyValueContainerData) containerData;
  ReferenceCountedDB db = BlockUtils.getDB(keyValueContainerData, conf);
  // Overwrite the BCSID for the container in RocksDB, thereby inducing
  // corruption.
  db.getStore().getMetadataTable().put(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, 0L);
  db.decrementReference();
  // After the restart there will be a mismatch between the BCSID tracked on
  // the Ratis side and the value in RocksDB, and hence the container should
  // be marked unhealthy.
  index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
  cluster.restartHddsDatanode(dn.getDatanodeDetails(), false);
  // Make sure the container is marked unhealthy
  Assert.assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY,
      cluster.getHddsDatanodes().get(index).getDatanodeStateMachine().getContainer()
          .getContainerSet().getContainer(containerID).getContainerState());
}
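As a side note, the induced corruption could also be confirmed before the restart by reading the value back with the same BlockUtils/Table calls used above. A minimal sketch; the read-back is not part of the original test, and it assumes the metadata table maps String keys to Long values as the put above implies:

// Hypothetical read-back of the corrupted BCSID (not in the original test).
ReferenceCountedDB verifyDb = BlockUtils.getDB(keyValueContainerData, conf);
Long bcsId = verifyDb.getStore().getMetadataTable().get(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID);
Assert.assertEquals(Long.valueOf(0L), bcsId);
verifyDb.decrementReference();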
use of org.apache.hadoop.ozone.container.common.impl.HddsDispatcher in project ozone by apache.
the class TestHandler method setup.
@Before
public void setup() throws Exception {
  this.conf = new OzoneConfiguration();
  this.containerSet = Mockito.mock(ContainerSet.class);
  this.volumeSet = Mockito.mock(MutableVolumeSet.class);
  DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class);
  DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
  StateContext context = Mockito.mock(StateContext.class);
  Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
  Mockito.when(context.getParent()).thenReturn(stateMachine);
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
  // Register a handler for every container type, each wired to a no-op
  // incremental container report (ICR) sender.
  for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
    handlers.put(containerType,
        Handler.getHandlerForContainerType(containerType, conf,
            context.getParent().getDatanodeDetails().getUuidString(),
            containerSet, volumeSet, metrics, TestHddsDispatcher.NO_OP_ICR_SENDER));
  }
  this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, null, metrics, null);
}
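With this setup in place, TestHandler's assertions boil down to checking that each container type resolves to the expected handler. A sketch of that check, assuming HddsDispatcher exposes the getHandler(ContainerType) accessor the real test relies on:

Handler kvHandler = dispatcher.getHandler(ContainerProtos.ContainerType.KeyValueContainer);
Assert.assertTrue("Incorrect handler for KeyValueContainer", kvHandler instanceof KeyValueHandler);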
use of org.apache.hadoop.ozone.container.common.impl.HddsDispatcher in project ozone by apache.
the class TestKeyValueHandler method setup.
@Before
public void setup() throws StorageContainerException {
  // Create a mock KeyValueHandler and register it with a real HddsDispatcher.
  handler = Mockito.mock(KeyValueHandler.class);
  HashMap<ContainerType, Handler> handlers = new HashMap<>();
  handlers.put(ContainerType.KeyValueContainer, handler);
  dispatcher = new HddsDispatcher(new OzoneConfiguration(),
      Mockito.mock(ContainerSet.class), Mockito.mock(VolumeSet.class), handlers,
      Mockito.mock(StateContext.class), Mockito.mock(ContainerMetrics.class),
      Mockito.mock(TokenVerifier.class));
}
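Because the handler here is a Mockito mock, tests built on this setup typically route a request through KeyValueHandler.dispatchRequest and then verify that the matching handle method was invoked. A hedged sketch; the container ID and UUID values are illustrative, not taken from the original:

// Illustrative request; field values are placeholders.
ContainerProtos.ContainerCommandRequestProto createContainerRequest =
    ContainerProtos.ContainerCommandRequestProto.newBuilder()
        .setCmdType(ContainerProtos.Type.CreateContainer)
        .setContainerID(1L)
        .setDatanodeUuid(UUID.randomUUID().toString())
        .setCreateContainer(ContainerProtos.CreateContainerRequestProto.getDefaultInstance())
        .build();
KeyValueContainer container = Mockito.mock(KeyValueContainer.class);
KeyValueHandler.dispatchRequest(handler, createContainerRequest, container, null);
Mockito.verify(handler, Mockito.times(1))
    .handleCreateContainer(Mockito.any(ContainerProtos.ContainerCommandRequestProto.class), Mockito.any());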
use of org.apache.hadoop.ozone.container.common.impl.HddsDispatcher in project ozone by apache.
the class TestContainerMetrics method testContainerMetrics.
@Test
public void testContainerMetrics() throws Exception {
  XceiverServerGrpc server = null;
  XceiverClientGrpc client = null;
  long containerID = ContainerTestHelper.getTestContainerID();
  String path = GenericTestUtils.getRandomizedTempPath();
  try {
    final int interval = 1;
    Pipeline pipeline = MockPipeline.createSingleNodePipeline();
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
        pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
    conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, interval);
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);
    VolumeSet volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(), conf,
        null, StorageVolume.VolumeType.DATA_VOLUME, null);
    ContainerSet containerSet = new ContainerSet();
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
      handlers.put(containerType,
          Handler.getHandlerForContainerType(containerType, conf,
              context.getParent().getDatanodeDetails().getUuidString(),
              containerSet, volumeSet, metrics, c -> { }));
    }
    HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
    dispatcher.setClusterId(UUID.randomUUID().toString());
    server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null);
    client = new XceiverClientGrpc(pipeline, conf);
    server.start();
    client.connect();
    // Create container
    ContainerCommandRequestProto request = ContainerTestHelper.getCreateContainerRequest(containerID, pipeline);
    ContainerCommandResponseProto response = client.sendCommand(request);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    // Write chunk
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
        ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024, null);
    response = client.sendCommand(writeChunkRequest);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    // Read back the chunk that was just written
    ContainerProtos.ContainerCommandRequestProto readChunkRequest =
        ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest.getWriteChunk());
    response = client.sendCommand(readChunkRequest);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    // One create, one write and one read should be reflected in the
    // dispatcher-level metrics.
    MetricsRecordBuilder containerMetrics = getMetrics("StorageContainerMetrics");
    assertCounter("NumOps", 3L, containerMetrics);
    assertCounter("numCreateContainer", 1L, containerMetrics);
    assertCounter("numWriteChunk", 1L, containerMetrics);
    assertCounter("numReadChunk", 1L, containerMetrics);
    assertCounter("bytesWriteChunk", 1024L, containerMetrics);
    assertCounter("bytesReadChunk", 1024L, containerMetrics);
    String sec = interval + "s";
    // Wait for at least one percentile window to roll over before checking
    // the quantile gauges.
    Thread.sleep((interval + 1) * 1000);
    assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics);
    // Check VolumeIOStats metrics
    List<HddsVolume> volumes = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
    HddsVolume hddsVolume = volumes.get(0);
    MetricsRecordBuilder volumeIOMetrics = getMetrics(hddsVolume.getVolumeIOStats().getMetricsSourceName());
    assertCounter("ReadBytes", 1024L, volumeIOMetrics);
    assertCounter("ReadOpCount", 1L, volumeIOMetrics);
    assertCounter("WriteBytes", 1024L, volumeIOMetrics);
    assertCounter("WriteOpCount", 1L, volumeIOMetrics);
  } finally {
    if (client != null) {
      client.close();
    }
    if (server != null) {
      server.stop();
    }
    // Clean up the volume directory
    File file = new File(path);
    if (file.exists()) {
      FileUtil.fullyDelete(file);
    }
  }
}
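The unqualified getMetrics, assertCounter and assertQuantileGauges calls above come from Hadoop's MetricsAsserts test helper; the static imports this snippet assumes are:

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;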
use of org.apache.hadoop.ozone.container.common.impl.HddsDispatcher in project ozone by apache.
the class TestSecureContainerServer method createDispatcher.
private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
    OzoneConfiguration conf) throws IOException {
  ContainerSet containerSet = new ContainerSet();
  conf.set(HDDS_DATANODE_DIR_KEY,
      Paths.get(TEST_DIR, "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString());
  conf.set(OZONE_METADATA_DIRS, TEST_DIR);
  VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null,
      StorageVolume.VolumeType.DATA_VOLUME, null);
  DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
  StateContext context = Mockito.mock(StateContext.class);
  Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
  Mockito.when(context.getParent()).thenReturn(stateMachine);
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
  for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
    handlers.put(containerType,
        Handler.getHandlerForContainerType(containerType, conf, dd.getUuid().toString(),
            containerSet, volumeSet, metrics, c -> { }));
  }
  // Unlike the earlier examples, this dispatcher gets a real TokenVerifier
  // so that block tokens are validated in the secure-server tests.
  HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet,
      handlers, context, metrics, TokenVerifier.create(new SecurityConfig(conf), caClient));
  hddsDispatcher.setClusterId(scmId.toString());
  return hddsDispatcher;
}
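For context, a hedged sketch of how a factory like this is typically consumed, mirroring the server wiring shown in TestContainerMetrics above (this usage is not part of the original helper):

// Wire the dispatcher into a gRPC container server; dd, scmId, conf and
// caClient are assumed to be in scope as in TestSecureContainerServer.
HddsDispatcher dispatcher = createDispatcher(dd, scmId, conf);
XceiverServerGrpc server = new XceiverServerGrpc(dd, conf, dispatcher, caClient);
server.start();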