Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache:
the class TestCommitWatcher, method testReleaseBuffers.
@Test
public void testReleaseBuffers() throws Exception {
  int capacity = 2;
  BufferPool bufferPool = new BufferPool(chunkSize, capacity);
  XceiverClientManager clientManager = new XceiverClientManager(conf);
  ContainerWithPipeline container = storageContainerLocationClient
      .allocateContainer(HddsProtos.ReplicationType.RATIS,
          HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
  Pipeline pipeline = container.getPipeline();
  long containerId = container.getContainerInfo().getContainerID();
  XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
  Assert.assertEquals(1, xceiverClient.getRefcount());
  Assert.assertTrue(xceiverClient instanceof XceiverClientRatis);
  XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
  CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient);
  BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
  List<XceiverClientReply> replies = new ArrayList<>();
  long length = 0;
  List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>>
      futures = new ArrayList<>();
  for (int i = 0; i < capacity; i++) {
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
        ContainerTestHelper.getWriteChunkRequest(pipeline, blockID,
            chunkSize, null);
    // add the data to the buffer pool
    final ChunkBuffer byteBuffer = bufferPool.allocateBuffer(0);
    byteBuffer.put(writeChunkRequest.getWriteChunk().getData());
    ratisClient.sendCommandAsync(writeChunkRequest);
    ContainerProtos.ContainerCommandRequestProto putBlockRequest =
        ContainerTestHelper.getPutBlockRequest(pipeline,
            writeChunkRequest.getWriteChunk());
    XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
    final List<ChunkBuffer> bufferList = singletonList(byteBuffer);
    length += byteBuffer.position();
    // once the putBlock response arrives, map its Ratis log index to the
    // buffers it covers so the watcher can release them on commit
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
        reply.getResponse().thenApply(v -> {
          watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList);
          return v;
        });
    futures.add(future);
    watcher.getFutureMap().put(length, future);
    replies.add(reply);
  }
  Assert.assertEquals(2, replies.size());
  // wait on the 1st putBlock to complete
  CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future1 =
      futures.get(0);
  CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future2 =
      futures.get(1);
  future1.get();
  Assert.assertNotNull(watcher.getFutureMap().get((long) chunkSize));
  Assert.assertEquals(future1, watcher.getFutureMap().get((long) chunkSize));
  // wait on the 2nd putBlock to complete
  future2.get();
  Assert.assertNotNull(watcher.getFutureMap().get((long) 2 * chunkSize));
  Assert.assertEquals(future2,
      watcher.getFutureMap().get((long) 2 * chunkSize));
  Assert.assertEquals(2, watcher.getCommitIndex2flushedDataMap().size());
  watcher.watchOnFirstIndex();
  Assert.assertFalse(watcher.getCommitIndex2flushedDataMap()
      .containsKey(replies.get(0).getLogIndex()));
  // the map keys are Long, so cast to avoid autoboxing chunkSize to Integer
  Assert.assertFalse(watcher.getFutureMap().containsKey((long) chunkSize));
  Assert.assertTrue(watcher.getTotalAckDataLength() >= chunkSize);
  watcher.watchOnLastIndex();
  Assert.assertFalse(watcher.getCommitIndex2flushedDataMap()
      .containsKey(replies.get(1).getLogIndex()));
  Assert.assertFalse(watcher.getFutureMap().containsKey((long) 2 * chunkSize));
  Assert.assertEquals(2 * chunkSize, watcher.getTotalAckDataLength());
  Assert.assertTrue(watcher.getFutureMap().isEmpty());
  Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().isEmpty());
}
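The pattern worth lifting out of this test is the asynchronous two-step write: each chunk goes out via sendCommandAsync, and the putBlock reply's future is chained so the CommitWatcher learns which buffers belong to which Ratis log index. A minimal sketch of that chaining, where buildWriteChunk and buildPutBlock are hypothetical request builders standing in for ContainerTestHelper:

// Sketch of the async write-then-commit-tracking pattern shown above.
// buildWriteChunk/buildPutBlock are hypothetical builders; client is an
// XceiverClientRatis acquired from an XceiverClientManager.
client.sendCommandAsync(buildWriteChunk(blockID, data));
XceiverClientReply reply = client.sendCommandAsync(buildPutBlock(blockID));
reply.getResponse().thenApply(response -> {
  // record the buffers against the log index; the CommitWatcher frees
  // them once a watch confirms the index is replicated on all datanodes
  watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList);
  return response;
});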
Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache:
the class TestContainerStateMachineFailures, method testApplyTransactionIdempotencyWithClosedContainer.
@Test
public void testApplyTransactionIdempotencyWithClosedContainer()
    throws Exception {
  OzoneOutputStream key = objectStore.getVolume(volumeName)
      .getBucket(bucketName)
      .createKey("ratis", 1024, ReplicationType.RATIS,
          ReplicationFactor.ONE, new HashMap<>());
  // First write and flush creates a container in the datanode
  key.write("ratis".getBytes(UTF_8));
  key.flush();
  key.write("ratis".getBytes(UTF_8));
  KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
  List<OmKeyLocationInfo> locationInfoList =
      groupOutputStream.getLocationInfoList();
  Assert.assertEquals(1, locationInfoList.size());
  OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
  HddsDatanodeService dn =
      TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
  ContainerData containerData = dn.getDatanodeStateMachine().getContainer()
      .getContainerSet().getContainer(omKeyLocationInfo.getContainerID())
      .getContainerData();
  Assert.assertTrue(containerData instanceof KeyValueContainerData);
  key.close();
  ContainerStateMachine stateMachine = (ContainerStateMachine)
      TestHelper.getStateMachine(dn, omKeyLocationInfo.getPipeline());
  SimpleStateMachineStorage storage =
      (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
  Path parentPath = storage.findLatestSnapshot().getFile().getPath();
  stateMachine.takeSnapshot();
  Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
  FileInfo snapshot = storage.findLatestSnapshot().getFile();
  Assert.assertNotNull(snapshot);
  long containerID = omKeyLocationInfo.getContainerID();
  Pipeline pipeline = cluster.getStorageContainerLocationClient()
      .getContainerWithPipeline(containerID).getPipeline();
  XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
  // build a CloseContainer command and send it directly to the datanode
  ContainerProtos.ContainerCommandRequestProto.Builder request =
      ContainerProtos.ContainerCommandRequestProto.newBuilder();
  request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
  request.setCmdType(ContainerProtos.Type.CloseContainer);
  request.setContainerID(containerID);
  request.setCloseContainer(
      ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
  try {
    xceiverClient.sendCommand(request.build());
  } catch (IOException e) {
    Assert.fail("Exception should not be thrown");
  }
  Assert.assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
      TestHelper.getDatanodeService(omKeyLocationInfo, cluster)
          .getDatanodeStateMachine().getContainer().getContainerSet()
          .getContainer(containerID).getContainerState());
  Assert.assertTrue(stateMachine.isStateMachineHealthy());
  // taking a snapshot on a healthy state machine should still succeed
  try {
    stateMachine.takeSnapshot();
  } catch (IOException ioe) {
    Assert.fail("Exception should not be thrown");
  }
  FileInfo latestSnapshot = storage.findLatestSnapshot().getFile();
  Assert.assertNotEquals(snapshot.getPath(), latestSnapshot.getPath());
}
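One thing this test glosses over is that the acquired client is never released; in non-test code the acquire/send/release triple is normally balanced. A minimal sketch of the synchronous command lifecycle, reusing the request built above and the surrounding xceiverClientManager:

// Minimal sketch: acquire a client for the pipeline, send the command
// synchronously, and always release the client back to the manager.
XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
try {
  client.sendCommand(request.build());  // throws IOException on failure
} finally {
  xceiverClientManager.releaseClient(client, false);
}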
Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache:
the class TestContainerStateMachineFailures, method testApplyTransactionFailure.
@Test
public void testApplyTransactionFailure() throws Exception {
  OzoneOutputStream key = objectStore.getVolume(volumeName)
      .getBucket(bucketName)
      .createKey("ratis", 1024, ReplicationType.RATIS,
          ReplicationFactor.ONE, new HashMap<>());
  // First write and flush creates a container in the datanode
  key.write("ratis".getBytes(UTF_8));
  key.flush();
  key.write("ratis".getBytes(UTF_8));
  KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
  List<OmKeyLocationInfo> locationInfoList =
      groupOutputStream.getLocationInfoList();
  Assert.assertEquals(1, locationInfoList.size());
  OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
  HddsDatanodeService dn =
      TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
  int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
  ContainerData containerData = dn.getDatanodeStateMachine().getContainer()
      .getContainerSet().getContainer(omKeyLocationInfo.getContainerID())
      .getContainerData();
  Assert.assertTrue(containerData instanceof KeyValueContainerData);
  KeyValueContainerData keyValueContainerData =
      (KeyValueContainerData) containerData;
  key.close();
  ContainerStateMachine stateMachine = (ContainerStateMachine)
      TestHelper.getStateMachine(cluster.getHddsDatanodes().get(index),
          omKeyLocationInfo.getPipeline());
  SimpleStateMachineStorage storage =
      (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
  stateMachine.takeSnapshot();
  Path parentPath = storage.findLatestSnapshot().getFile().getPath();
  // Since the snapshot threshold is set to 1 and transactions have been
  // applied, we should see snapshots on disk
  Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
  FileInfo snapshot = storage.findLatestSnapshot().getFile();
  Assert.assertNotNull(snapshot);
  long containerID = omKeyLocationInfo.getContainerID();
  // delete the container directory, including the container db file
  FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath()));
  Pipeline pipeline = cluster.getStorageContainerLocationClient()
      .getContainerWithPipeline(containerID).getPipeline();
  XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
  ContainerProtos.ContainerCommandRequestProto.Builder request =
      ContainerProtos.ContainerCommandRequestProto.newBuilder();
  request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
  request.setCmdType(ContainerProtos.Type.CloseContainer);
  request.setContainerID(containerID);
  request.setCloseContainer(
      ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
  try {
    xceiverClient.sendCommand(request.build());
    Assert.fail("Expected exception not thrown");
  } catch (IOException e) {
    // expected: the close fails because the container data was deleted
  }
  // Make sure the container is marked unhealthy
  Assert.assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY,
      dn.getDatanodeStateMachine().getContainer().getContainerSet()
          .getContainer(containerID).getContainerState());
  try {
    // try to take a new snapshot, ideally it should just fail
    stateMachine.takeSnapshot();
  } catch (IOException ioe) {
    Assert.assertTrue(ioe instanceof StateMachineException);
  }
  if (snapshot.getPath().toFile().exists()) {
    // Make sure the latest snapshot is same as the previous one
    try {
      FileInfo latestSnapshot = storage.findLatestSnapshot().getFile();
      Assert.assertEquals(snapshot.getPath(), latestSnapshot.getPath());
    } catch (Throwable e) {
      Assert.assertFalse(snapshot.getPath().toFile().exists());
    }
  }
  // When the pipeline is removed, the Ratis group directory, including
  // the snapshot, will be deleted
  LambdaTestUtils.await(5000, 500,
      () -> !snapshot.getPath().toFile().exists());
}
Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache:
the class TestHDDSUpgrade, method createTestContainers.
/*
 * Helper function for container creation.
 */
private void createTestContainers() throws IOException {
  XceiverClientManager xceiverClientManager = new XceiverClientManager(conf);
  ContainerInfo ci1 =
      scmContainerManager.allocateContainer(RATIS_THREE, "Owner1");
  Pipeline ratisPipeline1 =
      scmPipelineManager.getPipeline(ci1.getPipelineID());
  scmPipelineManager.openPipeline(ratisPipeline1.getId());
  XceiverClientSpi client1 =
      xceiverClientManager.acquireClient(ratisPipeline1);
  ContainerProtocolCalls.createContainer(client1, ci1.getContainerID(), null);
  xceiverClientManager.releaseClient(client1, false);
}
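Unlike the test snippets above, this helper balances acquireClient with releaseClient. The manager appears to cache one client per pipeline and reference-count acquisitions (compare the getRefcount() assertion in TestCommitWatcher); a sketch of that assumed caching behavior:

// Sketch, assuming XceiverClientManager caches a single client per
// pipeline and reference-counts acquisitions; every acquireClient must
// be paired with a releaseClient (false = keep the client cached).
XceiverClientSpi c1 = xceiverClientManager.acquireClient(ratisPipeline1);
XceiverClientSpi c2 = xceiverClientManager.acquireClient(ratisPipeline1);
Assert.assertEquals(2, c2.getRefcount());  // assumption: shared instance
xceiverClientManager.releaseClient(c1, false);
xceiverClientManager.releaseClient(c2, false);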
Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache:
the class TestOzoneClientRetriesOnExceptionFlushDelay, method testGroupMismatchExceptionHandling.
@Test
public void testGroupMismatchExceptionHandling() throws Exception {
  String keyName = getKeyName();
  // make sure flush will sync data.
  int dataLength = maxFlushSize + chunkSize;
  OzoneOutputStream key =
      createKey(keyName, ReplicationType.RATIS, dataLength);
  // write more data than one chunk
  byte[] data1 = ContainerTestHelper
      .getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  long containerID =
      keyOutputStream.getStreamEntries().get(0).getBlockID().getContainerID();
  Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueOf(containerID));
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
  xceiverClient.sendCommand(
      ContainerTestHelper.getCreateContainerRequest(containerID, pipeline));
  xceiverClientManager.releaseClient(xceiverClient, false);
  key.write(data1);
  OutputStream stream =
      keyOutputStream.getStreamEntries().get(0).getOutputStream();
  Assert.assertTrue(stream instanceof BlockOutputStream);
  BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
  // close the pipeline so the pending flush hits a GroupMismatchException
  TestHelper.waitForPipelineClose(key, cluster, false);
  key.flush();
  Assert.assertTrue(HddsClientUtils.checkForException(
      blockOutputStream.getIoException()) instanceof GroupMismatchException);
  // the failed pipeline must land in the exclude list and the key must
  // have been reallocated to a second block/pipeline
  Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
      .contains(pipeline.getId()));
  Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
  key.close();
  Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
  validateData(keyName, data1);
}
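The closing assertions capture the client's retry contract: the stream's IOException unwraps to a GroupMismatchException, the dead pipeline lands in the exclude list, and a second stream entry appears for the reallocated block. A condensed sketch of that contract; addPipeline is an assumption about the ExcludeList mutator KeyOutputStream uses internally (the test itself only reads getPipelineIds()):

// Sketch of the retry contract exercised above; KeyOutputStream performs
// these steps internally when a flush fails with a group mismatch.
Throwable cause = HddsClientUtils.checkForException(
    blockOutputStream.getIoException());
if (cause instanceof GroupMismatchException) {
  // exclude the dead pipeline so the next block allocation avoids it
  keyOutputStream.getExcludeList().addPipeline(pipeline.getId());
}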