Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in project ozone by apache.
From the class TestXceiverClientGrpc, method testRandomFirstNodeIsCommandTarget.
@Test(timeout = 5000)
public void testRandomFirstNodeIsCommandTarget() throws IOException {
  final ArrayList<DatanodeDetails> allDNs = new ArrayList<>(dns);
  // Create a new client for each call and keep issuing getBlock until every
  // DN in the pipeline has been the command target at least once. The 5
  // second timeout fails the test if that random first-node selection
  // is not happening.
  while (allDNs.size() > 0) {
    XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) {
      @Override
      public XceiverClientReply sendCommandAsync(
          ContainerProtos.ContainerCommandRequestProto request,
          DatanodeDetails dn) {
        allDNs.remove(dn);
        return buildValidResponse();
      }
    };
    invokeXceiverClientGetBlock(client);
  }
}
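The test above leans on two helpers that are not part of the snippet: buildValidResponse() and invokeXceiverClientGetBlock(). A minimal sketch of what they could look like follows; the exact protobuf fields and the ContainerProtocolCalls.getBlock signature are assumptions and may differ between Ozone versions.

// Sketch only: helper shapes are assumptions, not the exact Ozone test code.
private XceiverClientReply buildValidResponse() {
  // Wrap a successful GetBlock response in an already-completed future.
  ContainerProtos.ContainerCommandResponseProto resp =
      ContainerProtos.ContainerCommandResponseProto.newBuilder()
          .setCmdType(ContainerProtos.Type.GetBlock)
          .setResult(ContainerProtos.Result.SUCCESS)
          .build();
  return new XceiverClientReply(CompletableFuture.completedFuture(resp));
}

private void invokeXceiverClientGetBlock(XceiverClientSpi client)
    throws IOException {
  // Issue a GetBlock for an arbitrary block ID; the overridden
  // sendCommandAsync in the test intercepts it before any real datanode
  // would be contacted.
  ContainerProtocolCalls.getBlock(client,
      ContainerProtos.DatanodeBlockID.newBuilder()
          .setContainerID(1)
          .setLocalID(1)
          .build(),
      null);
}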
Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in project ozone by apache.
From the class TestXceiverClientGrpc, method testFirstNodeIsCorrectWithTopologyForCommandTarget.
@Test
public void testFirstNodeIsCorrectWithTopologyForCommandTarget()
    throws IOException {
  final Set<DatanodeDetails> seenDNs = new HashSet<>();
  conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
  // With topology-aware reads enabled, the closest DN should be picked as the
  // command target on every call, so only a single DN is ever seen after
  // 100 calls.
  for (int i = 0; i < 100; i++) {
    XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) {
      @Override
      public XceiverClientReply sendCommandAsync(
          ContainerProtos.ContainerCommandRequestProto request,
          DatanodeDetails dn) {
        seenDNs.add(dn);
        return buildValidResponse();
      }
    };
    invokeXceiverClientGetBlock(client);
  }
  Assert.assertEquals(1, seenDNs.size());
}
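Both XceiverClientGrpc tests assume a dns list, a pipeline, and a conf prepared in the test fixture. A plausible setup using Ozone's mock test helpers is sketched below; MockDatanodeDetails and MockPipeline are assumptions about what the surrounding test class uses.

// Sketch only: fixture assumed by the two tests above.
private List<DatanodeDetails> dns;
private Pipeline pipeline;
private OzoneConfiguration conf;

@Before
public void setup() {
  conf = new OzoneConfiguration();
  dns = new ArrayList<>();
  dns.add(MockDatanodeDetails.randomDatanodeDetails());
  dns.add(MockDatanodeDetails.randomDatanodeDetails());
  dns.add(MockDatanodeDetails.randomDatanodeDetails());
  // Build a three-node pipeline over the mock datanodes.
  pipeline = MockPipeline.createPipeline(dns);
}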
Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in project ozone by apache.
From the class TestDecommissionAndMaintenance, method testContainerIsReplicatedWhenAllNodesGotoMaintenance.
@Test
// When every node hosting a replica of a container goes into maintenance,
// additional replicas should be created to keep the data available. When the
// nodes return, the excess replicas should be removed.
public void testContainerIsReplicatedWhenAllNodesGotoMaintenance()
    throws Exception {
  // Generate some data on the empty cluster to create some containers
  generateData(20, "key", ReplicationFactor.THREE, ReplicationType.RATIS);
  // Locate any container in the cluster to track through the test
  final ContainerInfo container = waitForAndReturnContainer();
  Set<ContainerReplica> replicas = getContainerReplicas(container);
  List<DatanodeDetails> forMaintenance = new ArrayList<>();
  replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails()));
  scmClient.startMaintenanceNodes(forMaintenance.stream()
      .map(d -> getDNHostAndPort(d))
      .collect(Collectors.toList()), 0);
  // Ensure all 3 DNs go to maintenance
  for (DatanodeDetails dn : forMaintenance) {
    waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE);
  }
  // There should now be 5-6 replicas of the container we are tracking
  Set<ContainerReplica> newReplicas =
      cm.getContainerReplicas(container.containerID());
  assertTrue(newReplicas.size() >= 5);
  scmClient.recommissionNodes(forMaintenance.stream()
      .map(d -> getDNHostAndPort(d))
      .collect(Collectors.toList()));
  // Ensure all 3 DNs return to IN_SERVICE
  for (DatanodeDetails dn : forMaintenance) {
    waitForDnToReachOpState(dn, IN_SERVICE);
  }
  // The excess replicas should then be removed, leaving 3 again
  waitForContainerReplicas(container, 3);
}
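startMaintenanceNodes and recommissionNodes take host:port strings rather than DatanodeDetails, hence the getDNHostAndPort mapping. A minimal sketch of that helper follows; which Port.Name the admin protocol actually expects is an assumption.

// Sketch only: the port name used here is an assumption.
private String getDNHostAndPort(DatanodeDetails dn) {
  return dn.getHostName() + ":"
      + dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue();
}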
Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in project ozone by apache.
From the class TestDecommissionAndMaintenance, method testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned.
@Test
// A node hosting replicas on an open pipeline can be decommissioned, and can
// later be recommissioned.
public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned()
    throws Exception {
  // Generate some data on the empty cluster to create some containers
  generateData(20, "key", ReplicationFactor.THREE, ReplicationType.RATIS);
  // Locate any container and find its open pipeline
  final ContainerInfo container = waitForAndReturnContainer();
  Pipeline pipeline = pm.getPipeline(container.getPipelineID());
  assertEquals(Pipeline.PipelineState.OPEN, pipeline.getPipelineState());
  Set<ContainerReplica> replicas = getContainerReplicas(container);
  final DatanodeDetails toDecommission = getOneDNHostingReplica(replicas);
  scmClient.decommissionNodes(
      Arrays.asList(getDNHostAndPort(toDecommission)));
  waitForDnToReachOpState(toDecommission, DECOMMISSIONED);
  // Ensure exactly one node reached the DECOMMISSIONED state
  List<DatanodeDetails> decomNodes = nm.getNodes(DECOMMISSIONED, HEALTHY);
  assertEquals(1, decomNodes.size());
  // Should now be 4 replicas online as the DN is still alive but
  // in the DECOMMISSIONED state.
  waitForContainerReplicas(container, 4);
  // Stop the decommissioned DN
  int dnIndex = cluster.getHddsDatanodeIndex(toDecommission);
  cluster.shutdownHddsDatanode(toDecommission);
  waitForDnToReachHealthState(toDecommission, DEAD);
  // Now the decommissioned node is dead, we should have
  // 3 replicas for the tracked container.
  waitForContainerReplicas(container, 3);
  cluster.restartHddsDatanode(dnIndex, true);
  scmClient.recommissionNodes(
      Arrays.asList(getDNHostAndPort(toDecommission)));
  waitForDnToReachOpState(toDecommission, IN_SERVICE);
  waitForDnToReachPersistedOpState(toDecommission, IN_SERVICE);
}
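The decommission and maintenance tests block on waitForDnToReach... helpers that poll SCM's node manager (nm) until a node reports the expected state. A minimal sketch of one of them is shown below, assuming a GenericTestUtils.waitFor style polling loop; the intervals and the getNodeStatus accessor are assumptions.

// Sketch only: polling intervals and accessors are assumptions.
private void waitForDnToReachOpState(DatanodeDetails dn,
    HddsProtos.NodeOperationalState expected)
    throws TimeoutException, InterruptedException {
  GenericTestUtils.waitFor(() -> {
    try {
      return nm.getNodeStatus(dn).getOperationalState() == expected;
    } catch (NodeNotFoundException e) {
      // Node not yet registered with SCM; keep polling.
      return false;
    }
  }, 200, 30000);
}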
Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in project ozone by apache.
From the class ChunkKeyHandler, method execute.
@Override
protected void execute(OzoneClient client, OzoneAddress address)
    throws IOException, OzoneClientException {
  containerOperationClient =
      new ContainerOperationClient(createOzoneConfiguration());
  xceiverClientManager = containerOperationClient.getXceiverClientManager();
  ozoneManagerClient =
      client.getObjectStore().getClientProxy().getOzoneManagerClient();
  address.ensureKeyAddress();
  JsonElement element;
  JsonObject result = new JsonObject();
  String volumeName = address.getVolumeName();
  String bucketName = address.getBucketName();
  String keyName = address.getKeyName();
  List<ContainerProtos.ChunkInfo> tempchunks = null;
  List<ChunkDetails> chunkDetailsList = new ArrayList<ChunkDetails>();
  HashSet<String> chunkPaths = new HashSet<>();
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .build();
  OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
  // The OM is queried for the key's locations, which give the containerID
  // and localID pertaining to the given key.
  List<OmKeyLocationInfo> locationInfos =
      keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
  // Handle a zero-sized key, which has no locations.
  if (locationInfos.isEmpty()) {
    System.out.println("No Key Locations Found");
    return;
  }
  ContainerLayoutVersion containerLayoutVersion =
      ContainerLayoutVersion.getConfiguredVersion(getConf());
  JsonArray responseArrayList = new JsonArray();
  for (OmKeyLocationInfo keyLocation : locationInfos) {
    ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
    ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
    long containerId = keyLocation.getContainerID();
    chunkPaths.clear();
    Pipeline pipeline = keyLocation.getPipeline();
    if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
      pipeline = Pipeline.newBuilder(pipeline)
          .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
          .build();
    }
    xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
    // Each datanode is queried for its chunk information; combining the OM,
    // SCM and datanode responses gives the full chunk location information.
    ContainerProtos.DatanodeBlockID datanodeBlockID =
        keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
    // Do a getBlock on all the nodes in the pipeline.
    HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto>
        responses = null;
    try {
      responses = ContainerProtocolCalls.getBlockFromAllNodes(
          xceiverClient, datanodeBlockID, keyLocation.getToken());
    } catch (InterruptedException e) {
      LOG.error("Execution interrupted due to " + e);
      Thread.currentThread().interrupt();
    }
    JsonArray responseFromAllNodes = new JsonArray();
    for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto>
        entry : responses.entrySet()) {
      JsonObject jsonObj = new JsonObject();
      if (entry.getValue() == null) {
        LOG.error("Can't execute getBlock on this node");
        continue;
      }
      tempchunks = entry.getValue().getBlockData().getChunksList();
      ContainerProtos.ContainerDataProto containerData =
          containerOperationClient.readContainer(
              keyLocation.getContainerID(), keyLocation.getPipeline());
      for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
        String fileName = containerLayoutVersion.getChunkFile(
            new File(getChunkLocationPath(containerData.getContainerPath())),
            keyLocation.getBlockID(),
            ChunkInfo.getFromProtoBuf(chunkInfo)).toString();
        chunkPaths.add(fileName);
        ChunkDetails chunkDetails = new ChunkDetails();
        chunkDetails.setChunkName(fileName);
        chunkDetails.setChunkOffset(chunkInfo.getOffset());
        chunkDetailsList.add(chunkDetails);
      }
      containerChunkInfoVerbose.setContainerPath(
          containerData.getContainerPath());
      containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
      containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
      containerChunkInfo.setFiles(chunkPaths);
      containerChunkInfo.setPipelineID(
          keyLocation.getPipeline().getId().getId());
      Gson gson = new GsonBuilder().create();
      if (isVerbose()) {
        element = gson.toJsonTree(containerChunkInfoVerbose);
      } else {
        element = gson.toJsonTree(containerChunkInfo);
      }
      jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName());
      jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress());
      jsonObj.addProperty("Container-ID", containerId);
      jsonObj.addProperty("Block-ID", keyLocation.getLocalID());
      jsonObj.add("Locations", element);
      responseFromAllNodes.add(jsonObj);
      xceiverClientManager.releaseClientForReadData(xceiverClient, false);
    }
    responseArrayList.add(responseFromAllNodes);
  }
  result.add("KeyLocations", responseArrayList);
  Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
  String prettyJson = gson2.toJson(result);
  System.out.println(prettyJson);
}
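The per-datanode map returned by getBlockFromAllNodes is what lets the handler label each block location with the owning datanode's hostname and IP. A simplified sketch of how such a fan-out can be built on top of XceiverClientSpi.sendCommandOnAllNodes is shown below; the exact ContainerProtocolCalls signature and the required request fields are assumptions that differ between Ozone versions.

// Simplified sketch of a per-datanode GetBlock fan-out; the real
// ContainerProtocolCalls.getBlockFromAllNodes may differ across versions.
public static Map<DatanodeDetails, ContainerProtos.GetBlockResponseProto>
    getBlockFromAllNodes(XceiverClientSpi client,
        ContainerProtos.DatanodeBlockID blockID,
        Token<OzoneBlockTokenIdentifier> token)
        throws IOException, InterruptedException {
  ContainerProtos.ContainerCommandRequestProto.Builder builder =
      ContainerProtos.ContainerCommandRequestProto.newBuilder()
          .setCmdType(ContainerProtos.Type.GetBlock)
          .setContainerID(blockID.getContainerID())
          .setDatanodeUuid(client.getPipeline().getFirstNode().getUuidString())
          .setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
              .setBlockID(blockID));
  if (token != null) {
    builder.setEncodedToken(token.encodeToUrlString());
  }
  // Send the same request to every datanode in the pipeline and key the
  // responses by DatanodeDetails, which is what ChunkKeyHandler iterates.
  Map<DatanodeDetails, ContainerProtos.ContainerCommandResponseProto> raw =
      client.sendCommandOnAllNodes(builder.build());
  Map<DatanodeDetails, ContainerProtos.GetBlockResponseProto> result =
      new HashMap<>();
  raw.forEach((dn, resp) -> result.put(dn, resp.getGetBlock()));
  return result;
}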