
Example 6 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project.

From class TestXceiverClientGrpc, method testRandomFirstNodeIsCommandTarget.

@Test(timeout = 5000)
public void testRandomFirstNodeIsCommandTarget() throws IOException {
    final ArrayList<DatanodeDetails> allDNs = new ArrayList<>(dns);
    // Using a new client for each call, keep invoking getBlock until every
    // DN in the pipeline has been the command target at least once. The
    // 5 second timeout fails the test if random first-node selection
    // is not happening.
    while (allDNs.size() > 0) {
        XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) {

            @Override
            public XceiverClientReply sendCommandAsync(ContainerProtos.ContainerCommandRequestProto request, DatanodeDetails dn) {
                allDNs.remove(dn);
                return buildValidResponse();
            }
        };
        invokeXceiverClientGetBlock(client);
    }
}
Also used: DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails), XceiverClientGrpc (org.apache.hadoop.hdds.scm.XceiverClientGrpc), ArrayList (java.util.ArrayList), Test (org.junit.Test)
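
The test relies on two helpers that the snippet does not show: buildValidResponse() and invokeXceiverClientGetBlock(). A minimal sketch of what buildValidResponse() could look like, assuming it only needs to hand back an already-completed, successful GetBlock reply (the body below is illustrative, not necessarily the project's exact code):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.XceiverClientReply;

// Illustrative sketch: wrap a pre-completed, successful GetBlock response
// so the client under test proceeds without contacting a real datanode.
private XceiverClientReply buildValidResponse() {
    ContainerProtos.ContainerCommandResponseProto resp =
        ContainerProtos.ContainerCommandResponseProto.newBuilder()
            .setCmdType(ContainerProtos.Type.GetBlock)
            .setResult(ContainerProtos.Result.SUCCESS)
            .build();
    return new XceiverClientReply(CompletableFuture.completedFuture(resp));
}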

Example 7 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project.

From class TestXceiverClientGrpc, method testFirstNodeIsCorrectWithTopologyForCommandTarget.

@Test
public void testFirstNodeIsCorrectWithTopologyForCommandTarget() throws IOException {
    final Set<DatanodeDetails> seenDNs = new HashSet<>();
    conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
    // With topology-aware reads enabled, the closest DN should always be
    // picked as the command target, so assert that only a single DN is
    // ever seen after 100 calls.
    for (int i = 0; i < 100; i++) {
        XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) {

            @Override
            public XceiverClientReply sendCommandAsync(ContainerProtos.ContainerCommandRequestProto request, DatanodeDetails dn) {
                seenDNs.add(dn);
                return buildValidResponse();
            }
        };
        invokeXceiverClientGetBlock(client);
    }
    Assert.assertEquals(1, seenDNs.size());
}
Also used: DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails), XceiverClientGrpc (org.apache.hadoop.hdds.scm.XceiverClientGrpc), HashSet (java.util.HashSet), Test (org.junit.Test)
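
Both of these tests drive the client through invokeXceiverClientGetBlock(), whose body is also not shown. A plausible sketch, assuming it issues a getBlock for an arbitrary block ID via ContainerProtocolCalls (the container/local IDs and the null token are placeholders):

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

// Illustrative sketch: issue a getBlock for a fixed, made-up block ID.
// The overridden sendCommandAsync in the tests intercepts the request,
// so the IDs never reach a real datanode.
private void invokeXceiverClientGetBlock(XceiverClientSpi client)
        throws IOException {
    ContainerProtocolCalls.getBlock(client,
        ContainerProtos.DatanodeBlockID.newBuilder()
            .setContainerID(1)
            .setLocalID(1)
            .build(),
        null);
}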

Example 8 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project.

From class TestDecommissionAndMaintenance, method testContainerIsReplicatedWhenAllNodesGotoMaintenance.

@Test
// When all nodes hosting a container's replicas go into maintenance, extra
// replicas must be created to keep the data available; when the nodes
// return, the excess replicas should be removed.
public void testContainerIsReplicatedWhenAllNodesGotoMaintenance() throws Exception {
    // Generate some data on the empty cluster to create some containers
    generateData(20, "key", ReplicationFactor.THREE, ReplicationType.RATIS);
    // Locate any container and collect the datanodes hosting its replicas
    final ContainerInfo container = waitForAndReturnContainer();
    Set<ContainerReplica> replicas = getContainerReplicas(container);
    List<DatanodeDetails> forMaintenance = new ArrayList<>();
    replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails()));
    scmClient.startMaintenanceNodes(forMaintenance.stream().map(d -> getDNHostAndPort(d)).collect(Collectors.toList()), 0);
    // Ensure all 3 DNs go to maintenance
    for (DatanodeDetails dn : forMaintenance) {
        waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE);
    }
    // There should now be 5-6 replicas of the container we are tracking
    Set<ContainerReplica> newReplicas = cm.getContainerReplicas(container.containerID());
    assertTrue(newReplicas.size() >= 5);
    scmClient.recommissionNodes(forMaintenance.stream().map(d -> getDNHostAndPort(d)).collect(Collectors.toList()));
    // Ensure all 3 DNs return to service
    for (DatanodeDetails dn : forMaintenance) {
        waitForDnToReachOpState(dn, IN_SERVICE);
    }
    waitForContainerReplicas(container, 3);
}
Also used: ContainerReplica (org.apache.hadoop.hdds.scm.container.ContainerReplica), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo), ArrayList (java.util.ArrayList), Test (org.junit.Test)
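
The maintenance and recommission calls pass each datanode as a "host:port" string via the helper getDNHostAndPort(), not shown in the snippet. A minimal sketch, assuming the address is the hostname plus the first advertised port of the DatanodeDetails:

import org.apache.hadoop.hdds.protocol.DatanodeDetails;

// Illustrative sketch: format a datanode as "host:port" for the
// decommission/maintenance admin APIs, using its first advertised port.
private static String getDNHostAndPort(DatanodeDetails dn) {
    return dn.getHostName() + ":" + dn.getPorts().get(0).getValue();
}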

Example 9 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project.

From class TestDecommissionAndMaintenance, method testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned.

@Test
// A node which is hosting an open pipeline can be decommissioned and later
// be recommissioned.
public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned() throws Exception {
    // Generate some data on the empty cluster to create some containers
    generateData(20, "key", ReplicationFactor.THREE, ReplicationType.RATIS);
    // Locate any container and find its open pipeline
    final ContainerInfo container = waitForAndReturnContainer();
    Pipeline pipeline = pm.getPipeline(container.getPipelineID());
    assertEquals(Pipeline.PipelineState.OPEN, pipeline.getPipelineState());
    Set<ContainerReplica> replicas = getContainerReplicas(container);
    final DatanodeDetails toDecommission = getOneDNHostingReplica(replicas);
    scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(toDecommission)));
    waitForDnToReachOpState(toDecommission, DECOMMISSIONED);
    // Ensure one node transitioned to DECOMMISSIONED
    List<DatanodeDetails> decomNodes = nm.getNodes(DECOMMISSIONED, HEALTHY);
    assertEquals(1, decomNodes.size());
    // Should now be 4 replicas online as the DN is still alive but
    // in the DECOMMISSIONED state.
    waitForContainerReplicas(container, 4);
    // Stop the decommissioned DN
    int dnIndex = cluster.getHddsDatanodeIndex(toDecommission);
    cluster.shutdownHddsDatanode(toDecommission);
    waitForDnToReachHealthState(toDecommission, DEAD);
    // Now the decommissioned node is dead, we should have
    // 3 replicas for the tracked container.
    waitForContainerReplicas(container, 3);
    cluster.restartHddsDatanode(dnIndex, true);
    scmClient.recommissionNodes(Arrays.asList(getDNHostAndPort(toDecommission)));
    waitForDnToReachOpState(toDecommission, IN_SERVICE);
    waitForDnToReachPersistedOpState(toDecommission, IN_SERVICE);
}
Also used: ContainerReplica (org.apache.hadoop.hdds.scm.container.ContainerReplica), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), Test (org.junit.Test)
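
The waitFor* helpers poll SCM state until the node reaches the expected condition. A sketch of waitForDnToReachOpState(), assuming the test's NodeManager field nm and Hadoop's GenericTestUtils.waitFor polling utility (import paths and timeout values are assumptions):

import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.apache.hadoop.ozone.test.GenericTestUtils;

// Illustrative sketch: poll every 200 ms, for up to 30 s, until the
// node's operational state matches the expected one.
private void waitForDnToReachOpState(DatanodeDetails dn,
        HddsProtos.NodeOperationalState expected)
        throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(() -> {
        try {
            return nm.getNodeStatus(dn).getOperationalState() == expected;
        } catch (NodeNotFoundException e) {
            // Node not registered with SCM yet; keep polling.
            return false;
        }
    }, 200, 30000);
}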

Example 10 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project.

From class ChunkKeyHandler, method execute.

@Override
protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException {
    containerOperationClient = new ContainerOperationClient(createOzoneConfiguration());
    xceiverClientManager = containerOperationClient.getXceiverClientManager();
    ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient();
    address.ensureKeyAddress();
    JsonElement element;
    JsonObject result = new JsonObject();
    String volumeName = address.getVolumeName();
    String bucketName = address.getBucketName();
    String keyName = address.getKeyName();
    List<ContainerProtos.ChunkInfo> tempchunks = null;
    List<ChunkDetails> chunkDetailsList = new ArrayList<ChunkDetails>();
    HashSet<String> chunkPaths = new HashSet<>();
    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true).build();
    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
    // The OM is queried for the key's locations to obtain the containerID
    // and localID pertaining to the given key.
    List<OmKeyLocationInfo> locationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
    // for zero-sized key
    if (locationInfos.isEmpty()) {
        System.out.println("No Key Locations Found");
        return;
    }
    ContainerLayoutVersion containerLayoutVersion = ContainerLayoutVersion.getConfiguredVersion(getConf());
    JsonArray responseArrayList = new JsonArray();
    for (OmKeyLocationInfo keyLocation : locationInfos) {
        ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
        ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
        long containerId = keyLocation.getContainerID();
        chunkPaths.clear();
        Pipeline pipeline = keyLocation.getPipeline();
        if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
            pipeline = Pipeline.newBuilder(pipeline).setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)).build();
        }
        xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
        // The datanode is queried for chunk information. Querying the OM,
        // SCM and datanode together yields the chunk location information.
        ContainerProtos.DatanodeBlockID datanodeBlockID = keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
        // doing a getBlock on all nodes
        HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses = null;
        try {
            responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, datanodeBlockID, keyLocation.getToken());
        } catch (InterruptedException e) {
            LOG.error("Execution interrupted due to " + e);
            Thread.currentThread().interrupt();
            // Bail out: responses is still null and cannot be iterated below.
            return;
        }
        JsonArray responseFromAllNodes = new JsonArray();
        for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> entry : responses.entrySet()) {
            JsonObject jsonObj = new JsonObject();
            if (entry.getValue() == null) {
                LOG.error("Cant execute getBlock on this node");
                continue;
            }
            tempchunks = entry.getValue().getBlockData().getChunksList();
            ContainerProtos.ContainerDataProto containerData = containerOperationClient.readContainer(keyLocation.getContainerID(), keyLocation.getPipeline());
            for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
                String fileName = containerLayoutVersion.getChunkFile(new File(getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), ChunkInfo.getFromProtoBuf(chunkInfo)).toString();
                chunkPaths.add(fileName);
                ChunkDetails chunkDetails = new ChunkDetails();
                chunkDetails.setChunkName(fileName);
                chunkDetails.setChunkOffset(chunkInfo.getOffset());
                chunkDetailsList.add(chunkDetails);
            }
            containerChunkInfoVerbose.setContainerPath(containerData.getContainerPath());
            containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
            containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
            containerChunkInfo.setFiles(chunkPaths);
            containerChunkInfo.setPipelineID(keyLocation.getPipeline().getId().getId());
            Gson gson = new GsonBuilder().create();
            if (isVerbose()) {
                element = gson.toJsonTree(containerChunkInfoVerbose);
            } else {
                element = gson.toJsonTree(containerChunkInfo);
            }
            jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName());
            jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress());
            jsonObj.addProperty("Container-ID", containerId);
            jsonObj.addProperty("Block-ID", keyLocation.getLocalID());
            jsonObj.add("Locations", element);
            responseFromAllNodes.add(jsonObj);
        }
        // Release the client once, matching the single acquire above,
        // rather than once per datanode response.
        xceiverClientManager.releaseClientForReadData(xceiverClient, false);
        responseArrayList.add(responseFromAllNodes);
    }
    result.add("KeyLocations", responseArrayList);
    Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
    String prettyJson = gson2.toJson(result);
    System.out.println(prettyJson);
}
Also used: ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), ContainerLayoutVersion (org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion), ArrayList (java.util.ArrayList), JsonObject (com.google.gson.JsonObject), Gson (com.google.gson.Gson), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), HashSet (java.util.HashSet), ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), GsonBuilder (com.google.gson.GsonBuilder), OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs), ContainerOperationClient (org.apache.hadoop.hdds.scm.cli.ContainerOperationClient), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), JsonArray (com.google.gson.JsonArray), JsonElement (com.google.gson.JsonElement), HashMap (java.util.HashMap), Map (java.util.Map), File (java.io.File)
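
One detail worth highlighting in this handler is the client lifecycle: every acquireClientForReadData() must be balanced by exactly one releaseClientForReadData(), which is easiest to guarantee with try/finally. A condensed sketch of the pattern used above (variable names are illustrative):

// Illustrative sketch: balance each acquire with exactly one release,
// even if getBlockFromAllNodes throws.
XceiverClientSpi client =
    xceiverClientManager.acquireClientForReadData(pipeline);
try {
    Map<DatanodeDetails, ContainerProtos.GetBlockResponseProto> replies =
        ContainerProtocolCalls.getBlockFromAllNodes(
            client, datanodeBlockID, token);
    // ... inspect replies per datanode ...
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
} finally {
    xceiverClientManager.releaseClientForReadData(client, false);
}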

Aggregations

DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 358 uses
Test (org.junit.Test): 203 uses
MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails): 108 uses
ArrayList (java.util.ArrayList): 84 uses
MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails): 77 uses
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 65 uses
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 62 uses
IOException (java.io.IOException): 48 uses
UUID (java.util.UUID): 43 uses
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 43 uses
MockDatanodeDetails.createDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails): 38 uses
HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos): 32 uses
Map (java.util.Map): 27 uses
HashMap (java.util.HashMap): 26 uses
ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID): 25 uses
List (java.util.List): 24 uses
File (java.io.File): 23 uses
NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager): 21 uses
NodeNotFoundException (org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException): 21 uses
NodeStatus (org.apache.hadoop.hdds.scm.node.NodeStatus): 20 uses