Use of org.apache.hadoop.hdds.client.BlockID in project ozone by apache.
From the class TestFailureHandlingByClient, the method testContainerExclusionWithClosedContainerException.
@Test
public void testContainerExclusionWithClosedContainerException() throws Exception {
  startCluster();
  String keyName = UUID.randomUUID().toString();
  OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, blockSize);
  String data = ContainerTestHelper.getFixedLengthString(keyString, chunkSize);

  // get the name of a valid container
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  List<BlockOutputStreamEntry> streamEntryList = keyOutputStream.getStreamEntries();

  // assert that exactly one block is preallocated
  Assert.assertEquals(1, streamEntryList.size());
  key.write(data.getBytes(UTF_8));
  key.flush();
  long containerId = streamEntryList.get(0).getBlockID().getContainerID();
  BlockID blockId = streamEntryList.get(0).getBlockID();
  List<Long> containerIdList = new ArrayList<>();
  containerIdList.add(containerId);

  // the wait below fails the test if the container does not get closed
  TestHelper.waitForContainerClose(cluster, containerIdList.toArray(new Long[0]));

  // This write will hit ClosedContainerException, and the container will be
  // added to the exclude list.
  key.write(data.getBytes(UTF_8));
  key.flush();
  Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds()
      .contains(ContainerID.valueOf(containerId)));
  Assert.assertTrue(keyOutputStream.getExcludeList().getDatanodes().isEmpty());
  Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds().isEmpty());

  // The close will just write to the buffer.
  key.close();
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .build();
  OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);

  // Make sure a new block was allocated for the second write.
  Assert.assertNotEquals(
      keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0).getBlockID(),
      blockId);
  Assert.assertEquals(2 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
  validateData(keyName, data.concat(data).getBytes(UTF_8));
}
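As a side note, here is a minimal sketch of what the BlockID captured above carries (the numeric values are made up for illustration): it pairs a container ID with a block-local ID, and applying ContainerID.valueOf to its container part yields the form that the exclude-list assertion checks.

  // Sketch with assumed values: BlockID = (containerID, localID).
  BlockID sampleBlock = new BlockID(1L, 100L);
  ContainerID excluded = ContainerID.valueOf(sampleBlock.getContainerID());
  // 'excluded' matches the entries asserted via getExcludeList().getContainerIds().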
Use of org.apache.hadoop.hdds.client.BlockID in project ozone by apache.
From the class TestContainerStateMachineIdempotency, the method testContainerStateMachineIdempotency.
@Test
public void testContainerStateMachineIdempotency() throws Exception {
  ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(
      HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
  long containerID = container.getContainerInfo().getContainerID();
  Pipeline pipeline = container.getPipeline();
  XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
  try {
    // create the container, then call createContainer again: creation must be idempotent
    ContainerProtocolCalls.createContainer(client, containerID, null);
    ContainerProtocolCalls.createContainer(client, containerID, null);
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
    byte[] data = RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8);
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
        ContainerTestHelper.getWriteChunkRequest(container.getPipeline(), blockID,
            data.length, null);
    client.sendCommand(writeChunkRequest);
    // make the same write chunk request again, without requesting overwrite
    client.sendCommand(writeChunkRequest);
    // now explicitly make a putBlock request for the block
    ContainerProtos.ContainerCommandRequestProto putKeyRequest =
        ContainerTestHelper.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
    client.sendCommand(putKeyRequest).getPutBlock();
    // send the putBlock again
    client.sendCommand(putKeyRequest);
    // close the container twice; the second close must also succeed
    ContainerProtocolCalls.closeContainer(client, containerID, null);
    ContainerProtocolCalls.closeContainer(client, containerID, null);
  } catch (IOException ioe) {
    Assert.fail("Container operation failed: " + ioe);
  } finally {
    // release the client even if an assertion fails
    xceiverClientManager.releaseClient(client, false);
  }
}
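Because create, writeChunk, putBlock, and close are all shown to tolerate replays of the identical request, a caller can safely retry the same request proto on transient errors. The helper below is a hypothetical sketch, not part of Ozone; it assumes only the XceiverClientSpi.sendCommand call used above.

  // Hypothetical retry wrapper (illustration only): replaying the identical
  // request is safe because the container state machine is idempotent.
  private static void sendWithRetry(XceiverClientSpi client,
      ContainerProtos.ContainerCommandRequestProto request, int attempts)
      throws IOException {
    IOException last = null;
    for (int i = 0; i < attempts; i++) {
      try {
        client.sendCommand(request);
        return;
      } catch (IOException e) {
        last = e; // transient failure: retry with the same request
      }
    }
    throw (last != null) ? last : new IOException("attempts must be >= 1");
  }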
Use of org.apache.hadoop.hdds.client.BlockID in project ozone by apache.
From the class OMRequestTestUtils, the method addKeyLocationInfo.
/**
 * Adds one block to {@code keyInfo} with the provided offset and length.
 */
public static void addKeyLocationInfo(OmKeyInfo keyInfo, long offset, long keyLength)
    throws IOException {
  Pipeline pipeline = Pipeline.newBuilder()
      .setState(Pipeline.PipelineState.OPEN)
      .setId(PipelineID.randomId())
      .setReplicationConfig(keyInfo.getReplicationConfig())
      .setNodes(new ArrayList<>())
      .build();
  OmKeyLocationInfo locationInfo = new OmKeyLocationInfo.Builder()
      .setBlockID(new BlockID(100L, 1000L))
      .setOffset(offset)
      .setLength(keyLength)
      .setPipeline(pipeline)
      .build();
  keyInfo.appendNewBlocks(Collections.singletonList(locationInfo), false);
}
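A hypothetical call site, to show the expected arguments (the keyInfo instance and the sizes are assumptions for illustration):

  // Hypothetical usage in a test: append one 4 KB block starting at offset 0.
  OMRequestTestUtils.addKeyLocationInfo(keyInfo, 0L, 4096L);
  // keyInfo now carries one more OmKeyLocationInfo, pinned to BlockID (100, 1000).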
Use of org.apache.hadoop.hdds.client.BlockID in project ozone by apache.
From the class TestKeyManagerUnit, the method sortDatanodes.
@Test
public void sortDatanodes() throws Exception {
  // GIVEN
  String client = "anyhost";
  int pipelineCount = 3;
  int keysPerPipeline = 5;
  OmKeyInfo[] keyInfos = new OmKeyInfo[pipelineCount * keysPerPipeline];
  List<List<String>> expectedSortDatanodesInvocations = new ArrayList<>();
  Map<Pipeline, List<DatanodeDetails>> expectedSortedNodes = new HashMap<>();
  int ki = 0;
  for (int p = 0; p < pipelineCount; p++) {
    final Pipeline pipeline = MockPipeline.createPipeline(3);
    final List<String> nodes = pipeline.getNodes().stream()
        .map(DatanodeDetails::getUuidString)
        .collect(toList());
    expectedSortDatanodesInvocations.add(nodes);
    final List<DatanodeDetails> sortedNodes = pipeline.getNodes().stream()
        .sorted(comparing(DatanodeDetails::getUuidString))
        .collect(toList());
    expectedSortedNodes.put(pipeline, sortedNodes);
    when(blockClient.sortDatanodes(nodes, client)).thenReturn(sortedNodes);
    for (int i = 1; i <= keysPerPipeline; i++) {
      OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder()
          .setBlockID(new BlockID(i, 1L))
          .setPipeline(pipeline)
          .setOffset(0)
          .setLength(256000)
          .build();
      OmKeyInfo keyInfo = new OmKeyInfo.Builder()
          .setOmKeyLocationInfos(Arrays.asList(
              new OmKeyLocationInfoGroup(0, emptyList()),
              new OmKeyLocationInfoGroup(1, singletonList(keyLocationInfo))))
          .build();
      keyInfos[ki++] = keyInfo;
    }
  }

  // WHEN
  keyManager.sortDatanodes(client, keyInfos);

  // THEN: verify all key info locations got updated
  for (OmKeyInfo keyInfo : keyInfos) {
    OmKeyLocationInfoGroup locations = keyInfo.getLatestVersionLocations();
    Assert.assertNotNull(locations);
    for (OmKeyLocationInfo locationInfo : locations.getLocationList()) {
      Pipeline pipeline = locationInfo.getPipeline();
      List<DatanodeDetails> expectedOrder = expectedSortedNodes.get(pipeline);
      Assert.assertEquals(expectedOrder, pipeline.getNodesInOrder());
    }
  }
  // expect exactly one sortDatanodes invocation per pipeline, despite five keys each
  for (List<String> nodes : expectedSortDatanodesInvocations) {
    verify(blockClient).sortDatanodes(nodes, client);
  }
}
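A note on the verification step: Mockito's verify(mock) defaults to an expected count of one, so the final loop can also be written with the count explicit, which makes the one-invocation-per-pipeline expectation obvious. This sketch assumes nothing beyond the test's own imports plus org.mockito.Mockito.times.

  // Equivalent verification with the expected invocation count made explicit
  // (verify(mock) defaults to times(1)):
  for (List<String> nodes : expectedSortDatanodesInvocations) {
    verify(blockClient, times(1)).sortDatanodes(nodes, client);
  }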
Use of org.apache.hadoop.hdds.client.BlockID in project ozone by apache.
From the class ScmBlockLocationTestingClient, the method deleteKeyBlocks.
@Override
public List<DeleteBlockGroupResult> deleteKeyBlocks(List<BlockGroup> keyBlocksInfoList)
    throws IOException {
  List<DeleteBlockGroupResult> results = new ArrayList<>();
  Result result;
  for (BlockGroup keyBlocks : keyBlocksInfoList) {
    // collect per-group results; a fresh list per group keeps earlier groups'
    // blocks from leaking into the next DeleteBlockGroupResult
    List<DeleteBlockResult> blockResultList = new ArrayList<>();
    for (BlockID blockKey : keyBlocks.getBlockIDList()) {
      currentCall++;
      switch (this.failCallsFrequency) {
        case 0:
          // 0 means never fail
          result = success;
          numBlocksDeleted++;
          break;
        case 1:
          // 1 means always fail
          result = unknownFailure;
          break;
        default:
          // otherwise fail every failCallsFrequency-th call
          if (currentCall % this.failCallsFrequency == 0) {
            result = unknownFailure;
          } else {
            result = success;
            numBlocksDeleted++;
          }
      }
      blockResultList.add(new DeleteBlockResult(blockKey, result));
    }
    results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(), blockResultList));
  }
  return results;
}
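To make the failure-injection cadence concrete, here is a standalone illustration (plain Java, demo values assumed): failCallsFrequency = 0 never fails, 1 always fails, and any N greater than 1 fails every N-th call.

  public final class FailureCadenceDemo {
    public static void main(String[] args) {
      int failCallsFrequency = 3; // assumed value for the demo
      for (int currentCall = 1; currentCall <= 6; currentCall++) {
        // mirrors the switch above: 0 = never fail, 1 = always fail,
        // N > 1 = fail every N-th call
        boolean fails = failCallsFrequency == 1
            || (failCallsFrequency > 1 && currentCall % failCallsFrequency == 0);
        System.out.println("call " + currentCall + " -> "
            + (fails ? "unknownFailure" : "success"));
      }
    }
  }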