Example 1 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in project ozone by apache.

From class TestDatanodeUpgradeToScmHA, method startPreFinalizedDatanode.

/// CLUSTER OPERATIONS ///
/**
 * Starts the datanode with the first layout version, and calls the version
 * endpoint task to get cluster ID and SCM ID.
 *
 * The daemon for the datanode state machine is not started in this test,
 * which greatly speeds up execution. As a result there is no heartbeat
 * functionality and no pre-finalize upgrade actions, but neither is
 * needed for these tests.
 */
public void startPreFinalizedDatanode() throws Exception {
    // Set layout version.
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFolder.getRoot().getAbsolutePath());
    DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(conf,
        UUID.randomUUID().toString(),
        HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
    layoutStorage.initialize();
    // Build and start the datanode.
    DatanodeDetails dd = ContainerTestUtils.createDatanodeDetails();
    DatanodeStateMachine newDsm = new DatanodeStateMachine(dd, conf, null, null, null);
    int actualMlv = newDsm.getLayoutVersionManager().getMetadataLayoutVersion();
    Assert.assertEquals(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), actualMlv);
    dsm = newDsm;
    callVersionEndpointTask();
}
Also used: DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), DatanodeLayoutStorage (org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage), DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine)
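
The helper ContainerTestUtils.createDatanodeDetails() is project test code whose body is not shown here. A minimal stand-in can be written with the DatanodeDetails builder API that Example 5 below also uses; the following sketch is hypothetical and assumes only the setUuid setter shown there.

// Hypothetical stand-in for ContainerTestUtils.createDatanodeDetails():
// a DatanodeDetails carrying only a random UUID.
static DatanodeDetails createDatanodeDetails() {
    return DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID())
        .build();
}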

Example 2 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in project ozone by apache.

From class TestDatanodeUpgradeToScmHA, method restartDatanode.

public void restartDatanode(int expectedMlv) throws Exception {
    // Stop existing datanode.
    DatanodeDetails dd = dsm.getDatanodeDetails();
    dsm.close();
    // Start new datanode with the same configuration.
    dsm = new DatanodeStateMachine(dd, conf, null, null, null);
    int mlv = dsm.getLayoutVersionManager().getMetadataLayoutVersion();
    Assert.assertEquals(expectedMlv, mlv);
    callVersionEndpointTask();
}
Also used: DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine)
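
Callers pass the metadata layout version they expect after the restart. For instance, before finalization the restarted datanode should still report the initial layout version; a hypothetical call site, reusing HDDSLayoutFeature.INITIAL_VERSION from Example 1:

// Restart and verify the datanode still reports the pre-finalized
// metadata layout version.
restartDatanode(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());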

Example 3 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in project ozone by apache.

From class TestSimpleContainerDownloader, method testGetContainerDataFromReplicasHappyPath.

@Test
public void testGetContainerDataFromReplicasHappyPath() throws Exception {
    // GIVEN
    List<DatanodeDetails> datanodes = createDatanodes();
    SimpleContainerDownloader downloader = createDownloaderWithPredefinedFailures(true);
    // WHEN
    final Path result = downloader.getContainerDataFromReplicas(1L, datanodes);
    // THEN
    Assert.assertEquals(datanodes.get(0).getUuidString(), result.toString());
}
Also used: Path (java.nio.file.Path), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails), Test (org.junit.Test)
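
createDatanodes() and createDownloaderWithPredefinedFailures(true) are private helpers of the test class and are not shown here. A plausible sketch of the first, assuming it simply builds a few mock replicas with MockDatanodeDetails.randomDatanodeDetails (which appears in the aggregation list below):

// Hypothetical reconstruction: a fixed set of mock replicas for the
// downloader to choose from.
private List<DatanodeDetails> createDatanodes() {
    List<DatanodeDetails> datanodes = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        datanodes.add(MockDatanodeDetails.randomDatanodeDetails());
    }
    return datanodes;
}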

Example 4 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in project ozone by apache.

From class RpcClient, method getKeysEveryReplicas.

@Override
public Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> getKeysEveryReplicas(String volumeName, String bucketName, String keyName) throws IOException {
    Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> result = new LinkedHashMap<>();
    verifyVolumeName(volumeName);
    verifyBucketName(bucketName);
    Preconditions.checkNotNull(keyName);
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .setSortDatanodesInPipeline(topologyAwareReadEnabled)
        .build();
    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
    List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
    for (OmKeyLocationInfo keyLocationInfo : keyLocationInfos) {
        Map<DatanodeDetails, OzoneInputStream> blocks = new HashMap<>();
        Pipeline pipelineBefore = keyLocationInfo.getPipeline();
        List<DatanodeDetails> datanodes = pipelineBefore.getNodes();
        for (DatanodeDetails dn : datanodes) {
            List<DatanodeDetails> nodes = new ArrayList<>();
            nodes.add(dn);
            Pipeline pipeline = new Pipeline.Builder(pipelineBefore)
                .setNodes(nodes)
                .setId(PipelineID.randomId())
                .build();
            keyLocationInfo.setPipeline(pipeline);
            List<OmKeyLocationInfo> keyLocationInfoList = new ArrayList<>();
            keyLocationInfoList.add(keyLocationInfo);
            OmKeyLocationInfoGroup keyLocationInfoGroup = new OmKeyLocationInfoGroup(0, keyLocationInfoList);
            List<OmKeyLocationInfoGroup> keyLocationInfoGroups = new ArrayList<>();
            keyLocationInfoGroups.add(keyLocationInfoGroup);
            keyInfo.setKeyLocationVersions(keyLocationInfoGroups);
            OzoneInputStream is = createInputStream(keyInfo, Function.identity());
            blocks.put(dn, is);
        }
        result.put(keyLocationInfo, blocks);
    }
    return result;
}
Also used: OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), CacheBuilder (com.google.common.cache.CacheBuilder), ArrayList (java.util.ArrayList), OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), OmKeyLocationInfoGroup (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo), Map (java.util.Map)
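
The returned map holds one OzoneInputStream per replica of each block, so a caller can read the same data independently from every datanode, for example to check replica consistency. A usage sketch; client stands for an RpcClient instance and the volume, bucket, and key names are placeholders:

// Read every replica of a key independently. Each stream is backed by a
// single-node pipeline, so it reads from exactly one datanode.
Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas =
    client.getKeysEveryReplicas("vol1", "bucket1", "key1");
for (Map<DatanodeDetails, OzoneInputStream> block : replicas.values()) {
    for (Map.Entry<DatanodeDetails, OzoneInputStream> replica : block.entrySet()) {
        try (OzoneInputStream in = replica.getValue()) {
            byte[] buffer = new byte[4096];
            int read = in.read(buffer);
            // ... compare buffer contents across replicas ...
        }
    }
}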

Example 5 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in project ozone by apache.

From class TestReplicatedFileChecksumHelper, method testOneBlock.

@Test
public void testOneBlock() throws IOException {
    // test the file checksum of a file with one block.
    OzoneConfiguration conf = new OzoneConfiguration();
    RpcClient mockRpcClient = Mockito.mock(RpcClient.class);
    List<DatanodeDetails> dns = Arrays.asList(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build());
    Pipeline pipeline = Pipeline.newBuilder()
        .setId(PipelineID.randomId())
        .setReplicationConfig(
            RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE))
        .setState(Pipeline.PipelineState.CLOSED)
        .setNodes(dns)
        .build();
    XceiverClientGrpc xceiverClientGrpc = new XceiverClientGrpc(pipeline, conf) {

        @Override
        public XceiverClientReply sendCommandAsync(ContainerProtos.ContainerCommandRequestProto request, DatanodeDetails dn) {
            return buildValidResponse();
        }
    };
    XceiverClientFactory factory = Mockito.mock(XceiverClientFactory.class);
    when(factory.acquireClientForReadData(ArgumentMatchers.any())).thenReturn(xceiverClientGrpc);
    when(mockRpcClient.getXceiverClientManager()).thenReturn(factory);
    OzoneManagerProtocol om = Mockito.mock(OzoneManagerProtocol.class);
    when(mockRpcClient.getOzoneManagerClient()).thenReturn(om);
    BlockID blockID = new BlockID(1, 1);
    OmKeyLocationInfo omKeyLocationInfo = new OmKeyLocationInfo.Builder()
        .setPipeline(pipeline)
        .setBlockID(blockID)
        .build();
    List<OmKeyLocationInfo> omKeyLocationInfoList = Arrays.asList(omKeyLocationInfo);
    OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
        .setVolumeName(null)
        .setBucketName(null)
        .setKeyName(null)
        .setOmKeyLocationInfos(Collections.singletonList(
            new OmKeyLocationInfoGroup(0, omKeyLocationInfoList)))
        .setCreationTime(Time.now())
        .setModificationTime(Time.now())
        .setDataSize(0)
        .setReplicationConfig(
            RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
        .setFileEncryptionInfo(null)
        .setAcls(null)
        .build();
    when(om.lookupKey(ArgumentMatchers.any())).thenReturn(omKeyInfo);
    OzoneVolume mockVolume = Mockito.mock(OzoneVolume.class);
    when(mockVolume.getName()).thenReturn("vol1");
    OzoneBucket bucket = Mockito.mock(OzoneBucket.class);
    when(bucket.getName()).thenReturn("bucket1");
    ReplicatedFileChecksumHelper helper = new ReplicatedFileChecksumHelper(mockVolume, bucket, "dummy", 10, mockRpcClient);
    helper.compute();
    FileChecksum fileChecksum = helper.getFileChecksum();
    assertTrue(fileChecksum instanceof MD5MD5CRC32GzipFileChecksum);
    assertEquals(1, helper.getKeyLocationInfoList().size());
}
Also used: OzoneManagerProtocol (org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), MD5MD5CRC32GzipFileChecksum (org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum), MockXceiverClientFactory (org.apache.hadoop.ozone.client.MockXceiverClientFactory), XceiverClientFactory (org.apache.hadoop.hdds.scm.XceiverClientFactory), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), FileChecksum (org.apache.hadoop.fs.FileChecksum), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket), OmKeyLocationInfoGroup (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), XceiverClientGrpc (org.apache.hadoop.hdds.scm.XceiverClientGrpc), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), BlockID (org.apache.hadoop.hdds.client.BlockID), RpcClient (org.apache.hadoop.ozone.client.rpc.RpcClient), Test (org.junit.Test)
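
After compute(), the helper exposes the aggregated result through getFileChecksum(). A short usage sketch using only the public Hadoop FileChecksum API (getAlgorithmName and getLength):

// For a replicated key the result is an MD5-of-MD5-of-CRC32 checksum,
// directly comparable with checksums computed by HDFS.
FileChecksum checksum = helper.getFileChecksum();
System.out.println(checksum.getAlgorithmName()
    + " (" + checksum.getLength() + " bytes): " + checksum);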

Aggregations

DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 358 uses
Test (org.junit.Test): 203 uses
MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails): 108 uses
ArrayList (java.util.ArrayList): 84 uses
MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails): 77 uses
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 65 uses
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 62 uses
IOException (java.io.IOException): 48 uses
UUID (java.util.UUID): 43 uses
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 43 uses
MockDatanodeDetails.createDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails): 38 uses
HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos): 32 uses
Map (java.util.Map): 27 uses
HashMap (java.util.HashMap): 26 uses
ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID): 25 uses
List (java.util.List): 24 uses
File (java.io.File): 23 uses
NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager): 21 uses
NodeNotFoundException (org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException): 21 uses
NodeStatus (org.apache.hadoop.hdds.scm.node.NodeStatus): 20 uses