Usage of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project:
class TestDatanodeUpgradeToScmHA, method startPreFinalizedDatanode.
/// CLUSTER OPERATIONS ///
/**
 * Boots a datanode at the first (pre-finalized) layout version and invokes
 * the version endpoint task so it obtains the cluster ID and SCM ID.
 *
 * The datanode state machine daemon is deliberately left unstarted, which
 * greatly speeds up the tests. That means heartbeats and pre-finalize
 * upgrade actions are unavailable, but these tests need neither.
 */
public void startPreFinalizedDatanode() throws Exception {
    // Point metadata at the test temp folder and write layout storage
    // pinned to the initial layout version.
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFolder.getRoot().getAbsolutePath());
    String datanodeUuid = UUID.randomUUID().toString();
    DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(
        conf, datanodeUuid, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
    layoutStorage.initialize();

    // Construct the datanode and confirm it reports the initial MLV.
    DatanodeDetails details = ContainerTestUtils.createDatanodeDetails();
    DatanodeStateMachine machine = new DatanodeStateMachine(details, conf, null, null, null);
    Assert.assertEquals(
        HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(),
        machine.getLayoutVersionManager().getMetadataLayoutVersion());

    dsm = machine;
    callVersionEndpointTask();
}
Usage of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project:
class TestDatanodeUpgradeToScmHA, method restartDatanode.
/**
 * Stops the current datanode state machine and starts a replacement using
 * the same identity and configuration, then asserts the restarted node
 * reports the expected metadata layout version.
 *
 * @param expectedMlv metadata layout version the restarted datanode must report
 */
public void restartDatanode(int expectedMlv) throws Exception {
    // Keep the node's identity so the restart reuses it.
    DatanodeDetails details = dsm.getDatanodeDetails();
    dsm.close();

    // Bring up the replacement with the identical configuration.
    dsm = new DatanodeStateMachine(details, conf, null, null, null);
    Assert.assertEquals(expectedMlv,
        dsm.getLayoutVersionManager().getMetadataLayoutVersion());
    callVersionEndpointTask();
}
Usage of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project:
class TestSimpleContainerDownloader, method testGetContainerDataFromReplicasHappyPath.
/**
 * Happy path: with predefined failures configured to leave the first
 * replica usable, the downloader fetches container data from that replica.
 */
@Test
public void testGetContainerDataFromReplicasHappyPath() throws Exception {
    // GIVEN a replica list and a downloader with predefined failures.
    List<DatanodeDetails> replicas = createDatanodes();
    SimpleContainerDownloader downloader = createDownloaderWithPredefinedFailures(true);

    // WHEN container data is fetched for container 1.
    final Path downloaded = downloader.getContainerDataFromReplicas(1L, replicas);

    // THEN the data originates from the first replica in the list.
    Assert.assertEquals(replicas.get(0).getUuidString(), downloaded.toString());
}
Usage of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project:
class RpcClient, method getKeysEveryReplicas.
/**
 * For each block (location) of the latest version of the given key, opens a
 * separate OzoneInputStream against every datanode in that block's pipeline,
 * so the caller can read each replica individually.
 *
 * NOTE(review): this method mutates the shared keyInfo/keyLocationInfo
 * objects (setPipeline / setKeyLocationVersions) while iterating; the final
 * state of keyInfo reflects the last datanode processed. The returned
 * streams are open and must be closed by the caller. If createInputStream
 * throws mid-loop, streams opened earlier are not closed here — TODO confirm
 * whether callers guard against this.
 *
 * @param volumeName volume containing the key
 * @param bucketName bucket containing the key
 * @param keyName name of the key to read
 * @return map of block location to (datanode to open stream on that replica)
 * @throws IOException if the OM lookup or stream creation fails
 */
@Override
public Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> getKeysEveryReplicas(String volumeName, String bucketName, String keyName) throws IOException {
// LinkedHashMap preserves block order as returned by the OM.
Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> result = new LinkedHashMap<>();
// Validate inputs before issuing the OM lookup.
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
Preconditions.checkNotNull(keyName);
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true).setSortDatanodesInPipeline(topologyAwareReadEnabled).build();
OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
// Only the latest version's blocks are read.
List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
for (OmKeyLocationInfo keyLocationInfo : keyLocationInfos) {
Map<DatanodeDetails, OzoneInputStream> blocks = new HashMap<>();
// Snapshot the original pipeline: setPipeline below overwrites it on
// the shared keyLocationInfo, so node iteration must use this copy.
Pipeline pipelineBefore = keyLocationInfo.getPipeline();
List<DatanodeDetails> datanodes = pipelineBefore.getNodes();
for (DatanodeDetails dn : datanodes) {
// Build a single-node pipeline so the stream reads only this replica.
List<DatanodeDetails> nodes = new ArrayList<>();
nodes.add(dn);
Pipeline pipeline = new Pipeline.Builder(pipelineBefore).setNodes(nodes).setId(PipelineID.randomId()).build();
keyLocationInfo.setPipeline(pipeline);
// Rebuild keyInfo's location list to contain just this block, so
// createInputStream opens a stream over this block/replica only.
List<OmKeyLocationInfo> keyLocationInfoList = new ArrayList<>();
keyLocationInfoList.add(keyLocationInfo);
OmKeyLocationInfoGroup keyLocationInfoGroup = new OmKeyLocationInfoGroup(0, keyLocationInfoList);
List<OmKeyLocationInfoGroup> keyLocationInfoGroups = new ArrayList<>();
keyLocationInfoGroups.add(keyLocationInfoGroup);
keyInfo.setKeyLocationVersions(keyLocationInfoGroups);
OzoneInputStream is = createInputStream(keyInfo, Function.identity());
blocks.put(dn, is);
}
result.put(keyLocationInfo, blocks);
}
return result;
}
Usage of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project:
class TestReplicatedFileChecksumHelper, method testOneBlock.
/**
 * Verifies the file checksum of a key made of a single block: the helper
 * must produce an MD5-of-MD5-of-CRC32 (Gzip) checksum and record exactly
 * one key location.
 */
@Test
public void testOneBlock() throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    RpcClient rpcClient = Mockito.mock(RpcClient.class);

    // One-node pipeline (CLOSED) backing the single block.
    List<DatanodeDetails> datanodes =
        Arrays.asList(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build());
    Pipeline pipeline = Pipeline.newBuilder()
        .setId(PipelineID.randomId())
        .setReplicationConfig(
            RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE))
        .setState(Pipeline.PipelineState.CLOSED)
        .setNodes(datanodes)
        .build();

    // Stub the transport layer: every datanode command receives a canned
    // valid response.
    XceiverClientGrpc stubbedClient = new XceiverClientGrpc(pipeline, conf) {
        @Override
        public XceiverClientReply sendCommandAsync(
            ContainerProtos.ContainerCommandRequestProto request, DatanodeDetails dn) {
            return buildValidResponse();
        }
    };
    XceiverClientFactory clientFactory = Mockito.mock(XceiverClientFactory.class);
    when(clientFactory.acquireClientForReadData(ArgumentMatchers.any())).thenReturn(stubbedClient);
    when(rpcClient.getXceiverClientManager()).thenReturn(clientFactory);

    // The mocked OM returns a key whose single location sits in the
    // pipeline built above.
    OzoneManagerProtocol omProtocol = Mockito.mock(OzoneManagerProtocol.class);
    when(rpcClient.getOzoneManagerClient()).thenReturn(omProtocol);
    OmKeyLocationInfo location = new OmKeyLocationInfo.Builder()
        .setPipeline(pipeline)
        .setBlockID(new BlockID(1, 1))
        .build();
    List<OmKeyLocationInfo> locations = Arrays.asList(location);
    OmKeyInfo keyInfo = new OmKeyInfo.Builder()
        .setVolumeName(null)
        .setBucketName(null)
        .setKeyName(null)
        .setOmKeyLocationInfos(
            Collections.singletonList(new OmKeyLocationInfoGroup(0, locations)))
        .setCreationTime(Time.now())
        .setModificationTime(Time.now())
        .setDataSize(0)
        .setReplicationConfig(
            RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
        .setFileEncryptionInfo(null)
        .setAcls(null)
        .build();
    when(omProtocol.lookupKey(ArgumentMatchers.any())).thenReturn(keyInfo);

    OzoneVolume volume = Mockito.mock(OzoneVolume.class);
    when(volume.getName()).thenReturn("vol1");
    OzoneBucket bucket = Mockito.mock(OzoneBucket.class);
    when(bucket.getName()).thenReturn("bucket1");

    // Compute the checksum over the mocked single-block key.
    ReplicatedFileChecksumHelper helper =
        new ReplicatedFileChecksumHelper(volume, bucket, "dummy", 10, rpcClient);
    helper.compute();

    FileChecksum checksum = helper.getFileChecksum();
    assertTrue(checksum instanceof MD5MD5CRC32GzipFileChecksum);
    assertEquals(1, helper.getKeyLocationInfoList().size());
}
End of aggregated usage examples.