use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo in project ozone by apache.
the class TestKeyManagerUnit method testLookupFileWithDnFailure.
@Test
public void testLookupFileWithDnFailure() throws IOException {
  final DatanodeDetails dnOne = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dnTwo = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dnThree = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dnFour = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dnFive = MockDatanodeDetails.randomDatanodeDetails();
  final DatanodeDetails dnSix = MockDatanodeDetails.randomDatanodeDetails();
  final Pipeline pipelineOne = Pipeline.newBuilder()
      .setId(PipelineID.randomId())
      .setReplicationConfig(RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
      .setState(Pipeline.PipelineState.OPEN)
      .setLeaderId(dnOne.getUuid())
      .setNodes(Arrays.asList(dnOne, dnTwo, dnThree))
      .build();
  final Pipeline pipelineTwo = Pipeline.newBuilder()
      .setId(PipelineID.randomId())
      .setReplicationConfig(RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
      .setState(Pipeline.PipelineState.OPEN)
      .setLeaderId(dnFour.getUuid())
      .setNodes(Arrays.asList(dnFour, dnFive, dnSix))
      .build();
  List<Long> containerIDs = new ArrayList<>();
  containerIDs.add(1L);
  // SCM now reports the container on pipelineTwo, simulating datanode failures
  // that moved the replicas off pipelineOne.
  List<ContainerWithPipeline> cps = new ArrayList<>();
  ContainerInfo ci = Mockito.mock(ContainerInfo.class);
  when(ci.getContainerID()).thenReturn(1L);
  cps.add(new ContainerWithPipeline(ci, pipelineTwo));
  when(containerClient.getContainerWithPipelineBatch(containerIDs)).thenReturn(cps);
  final OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder()
      .setVolume("volumeOne")
      .setAdminName("admin")
      .setOwnerName("admin")
      .build();
  OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs);
  final OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
      .setVolumeName("volumeOne")
      .setBucketName("bucketOne")
      .build();
  OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo);
  // The key is stored with the (now stale) pipelineOne location.
  final OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder()
      .setBlockID(new BlockID(1L, 1L))
      .setPipeline(pipelineOne)
      .setOffset(0)
      .setLength(256000)
      .build();
  final OmKeyInfo keyInfo = new OmKeyInfo.Builder()
      .setVolumeName("volumeOne")
      .setBucketName("bucketOne")
      .setKeyName("keyOne")
      .setOmKeyLocationInfos(singletonList(new OmKeyLocationInfoGroup(0, singletonList(keyLocationInfo))))
      .setCreationTime(Time.now())
      .setModificationTime(Time.now())
      .setDataSize(256000)
      .setReplicationConfig(RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
      .setAcls(Collections.emptyList())
      .build();
  OMRequestTestUtils.addKeyToOM(metadataManager, keyInfo);
  final OmKeyArgs.Builder keyArgs = new OmKeyArgs.Builder()
      .setVolumeName("volumeOne")
      .setBucketName("bucketOne")
      .setKeyName("keyOne");
  // lookupFile should refresh the block location from SCM and return pipelineTwo.
  final OmKeyInfo newKeyInfo = keyManager.lookupFile(keyArgs.build(), "test");
  final OmKeyLocationInfo newBlockLocation = newKeyInfo.getLatestVersionLocations()
      .getBlocksLatestVersionOnly().get(0);
  Assert.assertEquals(1L, newBlockLocation.getContainerID());
  Assert.assertEquals(1L, newBlockLocation.getBlockID().getLocalID());
  Assert.assertEquals(pipelineTwo.getId(), newBlockLocation.getPipeline().getId());
  Assert.assertTrue(newBlockLocation.getPipeline().getNodes().contains(dnFour));
  Assert.assertTrue(newBlockLocation.getPipeline().getNodes().contains(dnFive));
  Assert.assertTrue(newBlockLocation.getPipeline().getNodes().contains(dnSix));
}
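The two pipelines above differ only in their nodes and leader. If this setup were factored out, a small helper along the following lines could build them; this is a hypothetical refactoring sketch, using only the builder APIs already shown in the test:

// Hypothetical helper, not part of the test; assumes the Pipeline builder API above.
private static Pipeline newOpenRatisPipeline(DatanodeDetails... nodes) {
  return Pipeline.newBuilder()
      .setId(PipelineID.randomId())
      .setReplicationConfig(RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
      .setState(Pipeline.PipelineState.OPEN)
      .setLeaderId(nodes[0].getUuid()) // treat the first node as leader
      .setNodes(Arrays.asList(nodes))
      .build();
}
// Usage: final Pipeline pipelineOne = newOpenRatisPipeline(dnOne, dnTwo, dnThree);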
use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo in project ozone by apache.
the class ContainerMapper method parseOmDB.
/**
 * Generates the Container ID to Blocks and BlockDetails mapping.
 * @param configuration {@link OzoneConfiguration}
 * @return Map<Long, List<Map<Long, BlockIdDetails>>>,
 * a map of ContainerId -> (BlockId, block details)
 * @throws IOException if the OM DB location is not configured
 */
public Map<Long, List<Map<Long, BlockIdDetails>>> parseOmDB(OzoneConfiguration configuration) throws IOException {
  String path = configuration.get(OZONE_OM_DB_DIRS);
  if (path == null || path.isEmpty()) {
    throw new IOException(OZONE_OM_DB_DIRS + " should be set");
  } else {
    Table keyTable = getMetaTable(configuration);
    Map<Long, List<Map<Long, BlockIdDetails>>> dataMap = new HashMap<>();
    if (keyTable != null) {
      try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> keyValueTableIterator = keyTable.iterator()) {
        while (keyValueTableIterator.hasNext()) {
          Table.KeyValue<String, OmKeyInfo> keyValue = keyValueTableIterator.next();
          OmKeyInfo omKeyInfo = keyValue.getValue();
          // Round-trip through protobuf to work on a detached copy of the key info.
          byte[] value = omKeyInfo.getProtobuf(true, CURRENT_VERSION).toByteArray();
          OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(OzoneManagerProtocolProtos.KeyInfo.parseFrom(value));
          for (OmKeyLocationInfoGroup keyLocationInfoGroup : keyInfo.getKeyLocationVersions()) {
            List<OmKeyLocationInfo> keyLocationInfo = keyLocationInfoGroup.getLocationList();
            for (OmKeyLocationInfo keyLocation : keyLocationInfo) {
              BlockIdDetails blockIdDetails = new BlockIdDetails();
              Map<Long, BlockIdDetails> innerMap = new HashMap<>();
              long containerID = keyLocation.getBlockID().getContainerID();
              long blockID = keyLocation.getBlockID().getLocalID();
              blockIdDetails.setBucketName(keyInfo.getBucketName());
              blockIdDetails.setBlockVol(keyInfo.getVolumeName());
              blockIdDetails.setKeyName(keyInfo.getKeyName());
              // Append this block's entry to the container's list, creating it on first use.
              List<Map<Long, BlockIdDetails>> innerList = new ArrayList<>();
              innerMap.put(blockID, blockIdDetails);
              if (dataMap.containsKey(containerID)) {
                innerList = dataMap.get(containerID);
              }
              innerList.add(innerMap);
              dataMap.put(containerID, innerList);
            }
          }
        }
      }
    }
    return dataMap;
  }
}
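A minimal usage sketch for the returned mapping follows. It is hypothetical: the BlockIdDetails getter name is assumed to mirror the setters used above, and instantiating ContainerMapper directly is an assumption.

// Hypothetical usage sketch; getter name is assumed to mirror the setters above.
ContainerMapper mapper = new ContainerMapper();
Map<Long, List<Map<Long, BlockIdDetails>>> dataMap = mapper.parseOmDB(configuration);
for (Map.Entry<Long, List<Map<Long, BlockIdDetails>>> entry : dataMap.entrySet()) {
  for (Map<Long, BlockIdDetails> blockMap : entry.getValue()) {
    for (Map.Entry<Long, BlockIdDetails> block : blockMap.entrySet()) {
      System.out.println("container " + entry.getKey() + ", block " + block.getKey()
          + " -> key " + block.getValue().getKeyName());
    }
  }
}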
use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo in project ozone by apache.
the class TestOmKeyInfoCodec method getKeyInfo.
private OmKeyInfo getKeyInfo(int chunkNum) {
  List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
  Pipeline pipeline = HddsTestUtils.getRandomPipeline();
  for (int i = 0; i < chunkNum; i++) {
    BlockID blockID = new BlockID(i, i);
    OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder()
        .setBlockID(blockID)
        .setPipeline(pipeline)
        .build();
    omKeyLocationInfoList.add(keyLocationInfo);
  }
  OmKeyLocationInfoGroup omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
  return new OmKeyInfo.Builder()
      .setCreationTime(Time.now())
      .setModificationTime(Time.now())
      .setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE))
      .setVolumeName(VOLUME)
      .setBucketName(BUCKET)
      .setKeyName(KEYNAME)
      .setObjectID(Time.now())
      .setUpdateID(Time.now())
      .setDataSize(100)
      .setOmKeyLocationInfos(Collections.singletonList(omKeyLocationInfoGroup))
      .build();
}
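A fixture like this is typically exercised by a serialize/deserialize round trip through the codec. A hedged sketch follows; the OmKeyInfoCodec(boolean) constructor flag and the toPersistedFormat/fromPersistedFormat pair are assumptions about the Codec API, not confirmed by the snippet above:

// Sketch of a codec round trip; constructor flag and Codec method names are assumptions.
OmKeyInfo original = getKeyInfo(5);
OmKeyInfoCodec codec = new OmKeyInfoCodec(true); // boolean flag meaning is assumed
byte[] persisted = codec.toPersistedFormat(original);
OmKeyInfo decoded = codec.fromPersistedFormat(persisted);
Assert.assertEquals(original.getKeyName(), decoded.getKeyName());
Assert.assertEquals(original.getKeyLocationVersions().size(), decoded.getKeyLocationVersions().size());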
use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo in project ozone by apache.
the class TestContainerReplicationEndToEnd method testContainerReplication.
/**
* The test simulates end-to-end container replication.
*/
@Test
public void testContainerReplication() throws Exception {
  String keyName = "testContainerReplication";
  OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName)
      .createKey(keyName, 0, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
  byte[] testData = "ratis".getBytes(UTF_8);
  // First write and flush creates a container in the datanode
  key.write(testData);
  key.flush();
  KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
  List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
  Assert.assertEquals(1, locationInfoList.size());
  OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
  long containerID = omKeyLocationInfo.getContainerID();
  PipelineID pipelineID = cluster.getStorageContainerManager().getContainerManager()
      .getContainer(ContainerID.valueOf(containerID)).getPipelineID();
  Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(pipelineID);
  key.close();
  HddsProtos.LifeCycleState containerState = cluster.getStorageContainerManager().getContainerManager()
      .getContainer(ContainerID.valueOf(containerID)).getState();
  LoggerFactory.getLogger(TestContainerReplicationEndToEnd.class).info("Current Container State is {}", containerState);
  if ((containerState != HddsProtos.LifeCycleState.CLOSING) && (containerState != HddsProtos.LifeCycleState.CLOSED)) {
    cluster.getStorageContainerManager().getContainerManager()
        .updateContainerState(ContainerID.valueOf(containerID), HddsProtos.LifeCycleEvent.FINALIZE);
  }
  // wait for the container to move to the CLOSING state in SCM
  Thread.sleep(2 * containerReportInterval);
  DatanodeDetails oldReplicaNode = pipeline.getFirstNode();
  // now move the container to the CLOSED state on the datanode
  XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
  ContainerProtos.ContainerCommandRequestProto.Builder request = ContainerProtos.ContainerCommandRequestProto.newBuilder();
  request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
  request.setCmdType(ContainerProtos.Type.CloseContainer);
  request.setContainerID(containerID);
  request.setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
  xceiverClient.sendCommand(request.build());
  // wait for the container to move to the CLOSED state in SCM
  Thread.sleep(2 * containerReportInterval);
  Assert.assertEquals(HddsProtos.LifeCycleState.CLOSED, cluster.getStorageContainerManager().getContainerInfo(containerID).getState());
  // shut down the replica node
  cluster.shutdownHddsDatanode(oldReplicaNode);
  // now the container is under-replicated and will be moved to a different dn
  HddsDatanodeService dnService = null;
  for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
    Predicate<DatanodeDetails> p = i -> i.getUuid().equals(dn.getDatanodeDetails().getUuid());
    if (pipeline.getNodes().stream().noneMatch(p)) {
      dnService = dn;
    }
  }
  Assert.assertNotNull(dnService);
  final HddsDatanodeService newReplicaNode = dnService;
  // wait for the container to get replicated
  GenericTestUtils.waitFor(() -> {
    return newReplicaNode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID) != null;
  }, 500, 100000);
  Assert.assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer().getContainerSet()
      .getContainer(containerID).getContainerData().getBlockCommitSequenceId() > 0);
  // wait for SCM to update the replica map
  Thread.sleep(5 * containerReportInterval);
  // shut down the original pipeline nodes, then read the key again
  for (DatanodeDetails dn : pipeline.getNodes()) {
    cluster.shutdownHddsDatanode(dn);
  }
  // This will try to read the data from the dn to which the container got
  // replicated after the container got closed.
  TestHelper.validateData(keyName, testData, objectStore, volumeName, bucketName);
}
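The fixed Thread.sleep waits above are timing-sensitive. A hedged alternative sketch, reusing the GenericTestUtils.waitFor call the test already makes, polls SCM for the expected container state instead:

// Sketch: poll SCM for the CLOSED state rather than sleeping a fixed interval.
// Assumes getContainerInfo may throw a checked exception inside the lambda.
GenericTestUtils.waitFor(() -> {
  try {
    return cluster.getStorageContainerManager().getContainerInfo(containerID).getState()
        == HddsProtos.LifeCycleState.CLOSED;
  } catch (Exception e) {
    return false;
  }
}, 500, (int) (2 * containerReportInterval));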
use of org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo in project ozone by apache.
the class TestDiscardPreallocatedBlocks method testDiscardPreallocatedBlocks.
@Test
public void testDiscardPreallocatedBlocks() throws Exception {
  String keyName = getKeyName();
  OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 2 * blockSize);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  // With the initial size provided, it should have preallocated 2 blocks
  Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
  long containerID1 = keyOutputStream.getStreamEntries().get(0).getBlockID().getContainerID();
  long containerID2 = keyOutputStream.getStreamEntries().get(1).getBlockID().getContainerID();
  Assert.assertEquals(containerID1, containerID2);
  String dataString = ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
  byte[] data = dataString.getBytes(UTF_8);
  key.write(data);
  List<OmKeyLocationInfo> locationInfos = new ArrayList<>(keyOutputStream.getLocationInfoList());
  List<BlockOutputStreamEntry> locationStreamInfos = new ArrayList<>(keyOutputStream.getStreamEntries());
  long containerID = locationInfos.get(0).getContainerID();
  ContainerInfo container = cluster.getStorageContainerManager().getContainerManager()
      .getContainer(ContainerID.valueOf(containerID));
  Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager()
      .getPipeline(container.getPipelineID());
  List<DatanodeDetails> datanodes = pipeline.getNodes();
  Assert.assertEquals(3, datanodes.size());
  waitForContainerClose(key);
  dataString = ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
  data = dataString.getBytes(UTF_8);
  key.write(data);
  Assert.assertEquals(3, keyOutputStream.getStreamEntries().size());
  // The 1st block has been written. Now that the containers are closed, the 2nd
  // preallocated block is removed from the list and a new block is allocated.
  Assert.assertEquals(locationInfos.get(0).getBlockID(), keyOutputStream.getLocationInfoList().get(0).getBlockID());
  Assert.assertNotEquals(locationStreamInfos.get(1).getBlockID(), keyOutputStream.getLocationInfoList().get(1).getBlockID());
  key.close();
}
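The expected entry count right after createKey follows from simple arithmetic on the size hint. A short worked sketch; that preallocation is exactly ceil(sizeHint / blockSize) streams is an assumption consistent with the assertions above:

// Worked arithmetic behind the "2 preallocated blocks" assertion above
// (assumed policy: preallocate ceil(sizeHint / blockSize) block streams).
long sizeHint = 2 * blockSize;
long expectedPreallocated = (sizeHint + blockSize - 1) / blockSize; // (2b + b - 1) / b == 2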