use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.
the class TestContainerReplicationEndToEnd method testContainerReplication.
/**
* The test simulates end-to-end container replication.
*/
@Test
public void testContainerReplication() throws Exception {
String keyName = "testContainerReplication";
OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName).createKey(keyName, 0, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
byte[] testData = "ratis".getBytes(UTF_8);
// First write and flush creates a container in the datanode
key.write(testData);
key.flush();
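// Peek at the client-side output stream to learn which container and pipeline the key was written to.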
KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
Assert.assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
long containerID = omKeyLocationInfo.getContainerID();
PipelineID pipelineID = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID)).getPipelineID();
Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(pipelineID);
key.close();
HddsProtos.LifeCycleState containerState = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID)).getState();
LoggerFactory.getLogger(TestContainerReplicationEndToEnd.class).info("Current Container State is {}", containerState);
if ((containerState != HddsProtos.LifeCycleState.CLOSING) && (containerState != HddsProtos.LifeCycleState.CLOSED)) {
cluster.getStorageContainerManager().getContainerManager().updateContainerState(ContainerID.valueOf(containerID), HddsProtos.LifeCycleEvent.FINALIZE);
}
// wait for the container to move to the CLOSING state in SCM
Thread.sleep(2 * containerReportInterval);
DatanodeDetails oldReplicaNode = pipeline.getFirstNode();
// now move the container to the CLOSED state on the datanode.
XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
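// Build a CloseContainer command and send it through the client directly, rather than waiting for SCM to issue the close.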
ContainerProtos.ContainerCommandRequestProto.Builder request = ContainerProtos.ContainerCommandRequestProto.newBuilder();
request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
request.setCmdType(ContainerProtos.Type.CloseContainer);
request.setContainerID(containerID);
request.setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
xceiverClient.sendCommand(request.build());
// wait for container to move to closed state in SCM
Thread.sleep(2 * containerReportInterval);
Assert.assertEquals(HddsProtos.LifeCycleState.CLOSED, cluster.getStorageContainerManager().getContainerInfo(containerID).getState());
// shutdown the replica node
cluster.shutdownHddsDatanode(oldReplicaNode);
// the container is now under-replicated, so a new replica will be created on a different datanode
HddsDatanodeService dnService = null;
for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
Predicate<DatanodeDetails> p = i -> i.getUuid().equals(dn.getDatanodeDetails().getUuid());
if (pipeline.getNodes().stream().noneMatch(p)) {
dnService = dn;
}
}
Assert.assertNotNull(dnService);
final HddsDatanodeService newReplicaNode = dnService;
// wait for the container to get replicated
GenericTestUtils.waitFor(() -> {
return newReplicaNode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID) != null;
}, 500, 100000);
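// the replicated copy must carry a non-zero block commit sequence id, showing the committed data arrived with the replica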
Assert.assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerData().getBlockCommitSequenceId() > 0);
// wait for SCM to update the replica map
Thread.sleep(5 * containerReportInterval);
// shut down all the original pipeline nodes so the key can only be read from the new replica
for (DatanodeDetails dn : pipeline.getNodes()) {
cluster.shutdownHddsDatanode(dn);
}
// This will try to read the data from the dn to which the container got
// replicated after the container got closed.
TestHelper.validateData(keyName, testData, objectStore, volumeName, bucketName);
}
use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.
the class TestContainerStateMachineFailures method testApplyTransactionFailure.
@Test
public void testApplyTransactionFailure() throws Exception {
OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName).createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
// First write and flush creates a container in the datanode
key.write("ratis".getBytes(UTF_8));
key.flush();
key.write("ratis".getBytes(UTF_8));
KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
Assert.assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
ContainerData containerData = dn.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
Assert.assertTrue(containerData instanceof KeyValueContainerData);
KeyValueContainerData keyValueContainerData = (KeyValueContainerData) containerData;
key.close();
ContainerStateMachine stateMachine = (ContainerStateMachine) TestHelper.getStateMachine(cluster.getHddsDatanodes().get(index), omKeyLocationInfo.getPipeline());
SimpleStateMachineStorage storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
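// take a snapshot now so there is a known latest-snapshot file to compare against after the failure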
stateMachine.takeSnapshot();
Path parentPath = storage.findLatestSnapshot().getFile().getPath();
// Since the snapshot threshold is set to 1 and applyTransaction calls have
// occurred, snapshots should be present
Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
FileInfo snapshot = storage.findLatestSnapshot().getFile();
Assert.assertNotNull(snapshot);
long containerID = omKeyLocationInfo.getContainerID();
// delete the container db file
FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath()));
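// with the container directory gone, applying the close transaction on the datanode should fail and mark the container UNHEALTHY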
Pipeline pipeline = cluster.getStorageContainerLocationClient().getContainerWithPipeline(containerID).getPipeline();
XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
ContainerProtos.ContainerCommandRequestProto.Builder request = ContainerProtos.ContainerCommandRequestProto.newBuilder();
request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
request.setCmdType(ContainerProtos.Type.CloseContainer);
request.setContainerID(containerID);
request.setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
try {
xceiverClient.sendCommand(request.build());
Assert.fail("Expected exception not thrown");
} catch (IOException e) {
// Exception should be thrown
}
// Make sure the container is marked unhealthy
Assert.assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY, dn.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerState());
try {
// try to take a new snapshot; it should fail because the state machine is unhealthy
stateMachine.takeSnapshot();
} catch (IOException ioe) {
Assert.assertTrue(ioe instanceof StateMachineException);
}
if (snapshot.getPath().toFile().exists()) {
// Make sure the latest snapshot is the same as the previous one
try {
FileInfo latestSnapshot = storage.findLatestSnapshot().getFile();
Assert.assertEquals(snapshot.getPath(), latestSnapshot.getPath());
} catch (Throwable e) {
Assert.assertFalse(snapshot.getPath().toFile().exists());
}
}
// when the pipeline is removed, the Ratis group dir, including the snapshot, is deleted
LambdaTestUtils.await(5000, 500, () -> (!snapshot.getPath().toFile().exists()));
}
use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.
the class TestContainerStateMachineFailures method testApplyTransactionIdempotencyWithClosedContainer.
@Test
@Flaky("HDDS-6115")
public void testApplyTransactionIdempotencyWithClosedContainer() throws Exception {
OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName).createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
// First write and flush creates a container in the datanode
key.write("ratis".getBytes(UTF_8));
key.flush();
key.write("ratis".getBytes(UTF_8));
KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
Assert.assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
ContainerData containerData = dn.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
Assert.assertTrue(containerData instanceof KeyValueContainerData);
key.close();
ContainerStateMachine stateMachine = (ContainerStateMachine) TestHelper.getStateMachine(dn, omKeyLocationInfo.getPipeline());
SimpleStateMachineStorage storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
Path parentPath = storage.findLatestSnapshot().getFile().getPath();
stateMachine.takeSnapshot();
Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
FileInfo snapshot = storage.findLatestSnapshot().getFile();
Assert.assertNotNull(snapshot);
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerLocationClient().getContainerWithPipeline(containerID).getPipeline();
XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
ContainerProtos.ContainerCommandRequestProto.Builder request = ContainerProtos.ContainerCommandRequestProto.newBuilder();
request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
request.setCmdType(ContainerProtos.Type.CloseContainer);
request.setContainerID(containerID);
request.setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
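// the close should succeed here; re-applying the close transaction on an already closed container must be idempotent and leave the state machine healthy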
try {
xceiverClient.sendCommand(request.build());
} catch (IOException e) {
Assert.fail("Exception should not be thrown");
}
Assert.assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, TestHelper.getDatanodeService(omKeyLocationInfo, cluster).getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerState());
Assert.assertTrue(stateMachine.isStateMachineHealthy());
try {
stateMachine.takeSnapshot();
} catch (IOException ioe) {
Assert.fail("Exception should not be thrown");
}
FileInfo latestSnapshot = storage.findLatestSnapshot().getFile();
Assert.assertNotEquals(snapshot.getPath(), latestSnapshot.getPath());
}
use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.
the class TestReadRetries method testPutKeyAndGetKeyThreeNodes.
@Test
public void testPutKeyAndGetKeyThreeNodes() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String value = "sample value";
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
String keyName = "a/b/c/" + UUID.randomUUID().toString();
OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
KeyOutputStream groupOutputStream = (KeyOutputStream) out.getOutputStream();
XceiverClientFactory factory = groupOutputStream.getXceiverClientFactory();
out.write(value.getBytes(UTF_8));
out.close();
// First, confirm the key info from the client matches the info in OM.
OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
builder.setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true);
OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()).getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
long containerID = keyInfo.getContainerID();
long localID = keyInfo.getLocalID();
OzoneKeyDetails keyDetails = bucket.getKey(keyName);
Assert.assertEquals(keyName, keyDetails.getName());
List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
Assert.assertEquals(1, keyLocations.size());
Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
// Make sure the data size matches.
Assert.assertEquals(value.getBytes(UTF_8).length, keyLocations.get(0).getLength());
ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
DatanodeDetails datanodeDetails = datanodes.get(0);
Assert.assertNotNull(datanodeDetails);
XceiverClientSpi clientSpi = factory.acquireClient(pipeline);
Assert.assertTrue(clientSpi instanceof XceiverClientRatis);
XceiverClientRatis ratisClient = (XceiverClientRatis) clientSpi;
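// wait until the write is committed on all pipeline nodes, so every replica holds the data before any node is shut down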
ratisClient.watchForCommit(keyInfo.getBlockCommitSequenceId());
// shutdown the datanode
cluster.shutdownHddsDatanode(datanodeDetails);
// try to read, this should be successful
readKey(bucket, keyName, value);
// read intermediate directory
verifyIntermediateDir(bucket, "a/b/c");
// shutdown the second datanode
datanodeDetails = datanodes.get(1);
cluster.shutdownHddsDatanode(datanodeDetails);
// we should still be able to read via the Standalone protocol
// try to read
readKey(bucket, keyName, value);
// shutdown the 3rd datanode
datanodeDetails = datanodes.get(2);
cluster.shutdownHddsDatanode(datanodeDetails);
try {
// try to read
readKey(bucket, keyName, value);
fail("Expected exception not thrown");
} catch (IOException e) {
// it should throw an IOException as none of the servers
// are available
}
factory.releaseClient(clientSpi, false);
}
use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.
the class TestSecureContainerServer method runTestClientServer.
private static void runTestClientServer(
    int numDatanodes,
    CheckedBiConsumer<Pipeline, OzoneConfiguration, IOException> initConf,
    CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi, IOException> createClient,
    CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi, IOException> createServer,
    CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer,
    Consumer<Pipeline> stopServer) throws Exception {
final List<XceiverServerSpi> servers = new ArrayList<>();
final Pipeline pipeline = MockPipeline.createPipeline(numDatanodes);
initConf.accept(pipeline, CONF);
for (DatanodeDetails dn : pipeline.getNodes()) {
final XceiverServerSpi s = createServer.apply(dn, CONF);
servers.add(s);
s.start();
initServer.accept(dn, pipeline);
}
try (XceiverClientSpi client = createClient.apply(pipeline, CONF)) {
client.connect();
long containerID = getTestContainerID();
BlockID blockID = getTestBlockID(containerID);
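// creating a container without presenting a valid token must fail verification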
assertFailsTokenVerification(client, getCreateContainerRequest(containerID, pipeline));
// create the container
ContainerProtocolCalls.createContainer(client, containerID, getToken(ContainerID.valueOf(containerID)));
Token<OzoneBlockTokenIdentifier> token = blockTokenSecretManager.generateToken(blockID, EnumSet.allOf(AccessModeProto.class), RandomUtils.nextLong());
String encodedToken = token.encodeToUrlString();
ContainerCommandRequestProto.Builder writeChunk = newWriteChunkRequestBuilder(pipeline, blockID, 1024, 0);
assertRequiresToken(client, encodedToken, writeChunk);
ContainerCommandRequestProto.Builder putBlock = newPutBlockRequestBuilder(pipeline, writeChunk.getWriteChunk());
assertRequiresToken(client, encodedToken, putBlock);
ContainerCommandRequestProto.Builder readChunk = newReadChunkRequestBuilder(pipeline, writeChunk.getWriteChunk());
assertRequiresToken(client, encodedToken, readChunk);
ContainerCommandRequestProto.Builder getBlock = newGetBlockRequestBuilder(pipeline, putBlock.getPutBlock());
assertRequiresToken(client, encodedToken, getBlock);
ContainerCommandRequestProto.Builder getCommittedBlockLength = newGetCommittedBlockLengthBuilder(pipeline, putBlock.getPutBlock());
assertRequiresToken(client, encodedToken, getCommittedBlockLength);
} finally {
stopServer.accept(pipeline);
servers.forEach(XceiverServerSpi::stop);
}
}
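The examples above share a common XceiverClientSpi pattern: acquire a client for a pipeline, build a ContainerCommandRequestProto, send it, and release the client. Below is a minimal sketch of that pattern, reusing names from the tests above (xceiverClientManager, pipeline, and containerID are assumed to be set up as in those tests); note that the first two tests acquire a client but never release it, a step the sketch adds back.
// Minimal sketch: xceiverClientManager, pipeline, and containerID are assumed
// to be set up as in the tests above.
XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
try {
  // Assemble a container command; CloseContainer is used here as in the tests above.
  ContainerProtos.ContainerCommandRequestProto request = ContainerProtos.ContainerCommandRequestProto.newBuilder()
      .setDatanodeUuid(pipeline.getFirstNode().getUuidString())
      .setCmdType(ContainerProtos.Type.CloseContainer)
      .setContainerID(containerID)
      .setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
      .build();
  client.sendCommand(request);
} finally {
  // Return the client to the manager; the boolean indicates whether the cached
  // connection should be invalidated rather than reused (the tests pass false).
  xceiverClientManager.releaseClient(client, false);
}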