Example 1 with DatanodeCommandProto

Use of org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto in project hadoop by apache.

The example is the sendHeartbeat method of the class DatanodeProtocolClientSideTranslatorPB. It builds a HeartbeatRequestProto from the caller's arguments, sends it over RPC, and converts each DatanodeCommandProto in the response back into a DatanodeCommand. A hedged caller-side sketch follows the listing.

@Override
public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
        StorageReport[] reports, long cacheCapacity, long cacheUsed,
        int xmitsInProgress, int xceiverCount, int failedVolumes,
        VolumeFailureSummary volumeFailureSummary,
        boolean requestFullBlockReportLease,
        @Nonnull SlowPeerReports slowPeers) throws IOException {
    HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
            .setRegistration(PBHelper.convert(registration))
            .setXmitsInProgress(xmitsInProgress)
            .setXceiverCount(xceiverCount)
            .setFailedVolumes(failedVolumes)
            .setRequestFullBlockReportLease(requestFullBlockReportLease);
    builder.addAllReports(PBHelperClient.convertStorageReports(reports));
    // Cache capacity/usage are optional fields; only set them when non-zero.
    if (cacheCapacity != 0) {
        builder.setCacheCapacity(cacheCapacity);
    }
    if (cacheUsed != 0) {
        builder.setCacheUsed(cacheUsed);
    }
    if (volumeFailureSummary != null) {
        builder.setVolumeFailureSummary(PBHelper.convertVolumeFailureSummary(volumeFailureSummary));
    }
    if (slowPeers.haveSlowPeers()) {
        builder.addAllSlowPeers(PBHelper.convertSlowPeerInfo(slowPeers));
    }
    HeartbeatResponseProto resp;
    try {
        resp = rpcProxy.sendHeartbeat(NULL_CONTROLLER, builder.build());
    } catch (ServiceException se) {
        throw ProtobufHelper.getRemoteException(se);
    }
    // Convert each DatanodeCommandProto in the response back to a DatanodeCommand.
    DatanodeCommand[] cmds = new DatanodeCommand[resp.getCmdsList().size()];
    int index = 0;
    for (DatanodeCommandProto p : resp.getCmdsList()) {
        cmds[index] = PBHelper.convert(p);
        index++;
    }
    RollingUpgradeStatus rollingUpdateStatus = null;
    // Use v2 semantics if available.
    if (resp.hasRollingUpgradeStatusV2()) {
        rollingUpdateStatus = PBHelperClient.convert(resp.getRollingUpgradeStatusV2());
    } else if (resp.hasRollingUpgradeStatus()) {
        rollingUpdateStatus = PBHelperClient.convert(resp.getRollingUpgradeStatus());
    }
    return new HeartbeatResponse(cmds, PBHelper.convert(resp.getHaStatus()),
            rollingUpdateStatus, resp.getFullBlockReportLeaseId());
}
Also used:
- com.google.protobuf.ServiceException
- org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus
- org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto
- org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto
- org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto
- org.apache.hadoop.hdfs.server.protocol.DatanodeCommand
- org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse
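
For context, a heartbeat round trip looks roughly like the sketch below from the caller's side. This is a minimal illustration, not code from the Hadoop tree: the translator and registration are assumed to have been created during DataNode startup, SlowPeerReports.EMPTY_REPORT is assumed to be the "no slow peers" constant, and HeartbeatResponse.getCommands() is assumed to return the converted command array, so verify these names against the actual classes.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

// Sketch only: the shape of one heartbeat round trip through the translator.
class HeartbeatSketch {
    static void heartbeatOnce(DatanodeProtocolClientSideTranslatorPB translator,
            DatanodeRegistration registration,
            StorageReport[] reports) throws IOException {
        HeartbeatResponse resp = translator.sendHeartbeat(
                registration,
                reports,
                0L,     // cacheCapacity: zero, so the optional proto field is omitted
                0L,     // cacheUsed: likewise omitted when zero
                0,      // xmitsInProgress
                4,      // xceiverCount
                0,      // failedVolumes
                null,   // no VolumeFailureSummary to attach
                false,  // not requesting a full block report lease
                SlowPeerReports.EMPTY_REPORT);  // assumed "no slow peers" constant

        // By the time the response is returned, every DatanodeCommandProto has
        // already been converted back to a DatanodeCommand by the translator.
        for (DatanodeCommand cmd : resp.getCommands()) {
            // react to the NameNode's command here
        }
    }
}

Keeping the protobuf types (DatanodeCommandProto, HeartbeatRequestProto, HeartbeatResponseProto) inside the translator means callers only ever deal with the server-side domain classes such as DatanodeCommand and HeartbeatResponse.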
