Search in sources :

Example 16 with NodeControllerInfo

use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.

the class IndexingScheduler method loadIPAddressToNCMap.

/**
     * Load the IP-address-to-NC map from the NCNameToNCInfoMap
     *
     * @param ncNameToNcInfos
     * @throws HyracksException
     */
private void loadIPAddressToNCMap(Map<String, NodeControllerInfo> ncNameToNcInfos) throws HyracksException {
    try {
        NCs = new String[ncNameToNcInfos.size()];
        ipToNcMapping.clear();
        ncNameToIndex.clear();
        int i = 0;
        // build the IP address to NC map
        for (Map.Entry<String, NodeControllerInfo> entry : ncNameToNcInfos.entrySet()) {
            String ipAddr = InetAddress.getByAddress(entry.getValue().getNetworkAddress().lookupIpAddress()).getHostAddress();
            List<String> matchedNCs = ipToNcMapping.get(ipAddr);
            if (matchedNCs == null) {
                matchedNCs = new ArrayList<String>();
                ipToNcMapping.put(ipAddr, matchedNCs);
            }
            matchedNCs.add(entry.getKey());
            NCs[i] = entry.getKey();
            i++;
        }
        // set up the NC name to index mapping
        for (i = 0; i < NCs.length; i++) {
            ncNameToIndex.put(NCs[i], i);
        }
    } catch (Exception e) {
        throw new HyracksException(e);
    }
}
Also used: NodeControllerInfo(org.apache.hyracks.api.client.NodeControllerInfo) HyracksException(org.apache.hyracks.api.exceptions.HyracksException) HashMap(java.util.HashMap) Map(java.util.Map) HyracksDataException(org.apache.hyracks.api.exceptions.HyracksDataException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException)
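
Both schedulers share the idiom above: resolve each NC's data address to an IP string, bucket NC names under that IP, then give every NC a stable array index. A minimal, self-contained sketch of that grouping, assuming plain host strings in place of Hyracks' NetworkAddress (all names and addresses here are illustrative):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class IpGroupingSketch {
    public static void main(String[] args) {
        // Hypothetical NC-name -> IP pairs standing in for ncNameToNcInfos.
        Map<String, String> ncToIp = new HashMap<>();
        ncToIp.put("nc1", "10.0.0.1");
        ncToIp.put("nc2", "10.0.0.1"); // two NCs on the same machine
        ncToIp.put("nc3", "10.0.0.2");
        Map<String, List<String>> ipToNcMapping = new HashMap<>();
        Map<String, Integer> ncNameToIndex = new HashMap<>();
        String[] ncs = new String[ncToIp.size()];
        int i = 0;
        for (Map.Entry<String, String> entry : ncToIp.entrySet()) {
            // computeIfAbsent replaces the get/null-check/put dance in the original.
            ipToNcMapping.computeIfAbsent(entry.getValue(), k -> new ArrayList<>()).add(entry.getKey());
            ncs[i++] = entry.getKey();
        }
        for (i = 0; i < ncs.length; i++) {
            ncNameToIndex.put(ncs[i], i);
        }
        System.out.println(ipToNcMapping); // e.g. {10.0.0.1=[nc1, nc2], 10.0.0.2=[nc3]}
        System.out.println(ncNameToIndex); // e.g. {nc1=0, nc2=1, nc3=2}
    }
}

The IP-level bucketing is what lets the schedulers treat several NCs on one machine as equally local to a block stored there.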

Example 17 with NodeControllerInfo

use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.

the class Scheduler method loadIPAddressToNCMap.

/**
     * Load the IP-address-to-NC map from the NCNameToNCInfoMap
     *
     * @param ncNameToNcInfos
     * @throws HyracksException
     */
private void loadIPAddressToNCMap(Map<String, NodeControllerInfo> ncNameToNcInfos) throws HyracksException {
    try {
        NCs = new String[ncNameToNcInfos.size()];
        ipToNcMapping.clear();
        ncNameToIndex.clear();
        int i = 0;
        // build the IP address to NC map
        for (Map.Entry<String, NodeControllerInfo> entry : ncNameToNcInfos.entrySet()) {
            String ipAddr = InetAddress.getByAddress(entry.getValue().getNetworkAddress().lookupIpAddress()).getHostAddress();
            List<String> matchedNCs = ipToNcMapping.get(ipAddr);
            if (matchedNCs == null) {
                matchedNCs = new ArrayList<String>();
                ipToNcMapping.put(ipAddr, matchedNCs);
            }
            matchedNCs.add(entry.getKey());
            NCs[i] = entry.getKey();
            i++;
        }
        // set up the NC name to index mapping
        for (i = 0; i < NCs.length; i++) {
            ncNameToIndex.put(NCs[i], i);
        }
    } catch (Exception e) {
        throw new HyracksException(e);
    }
}
Also used: NodeControllerInfo(org.apache.hyracks.api.client.NodeControllerInfo) HyracksException(org.apache.hyracks.api.exceptions.HyracksException) HashMap(java.util.HashMap) Map(java.util.Map) HyracksDataException(org.apache.hyracks.api.exceptions.HyracksDataException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException)
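
One detail worth noting in both copies of the method: the IP is resolved with InetAddress.getByAddress(byte[]), which constructs the address from the raw bytes returned by lookupIpAddress() and performs no DNS lookup. A tiny sketch of that resolution, assuming an IPv4 byte array:

import java.net.InetAddress;

public class ResolveSketch {
    public static void main(String[] args) throws Exception {
        // getByAddress validates the length (4 or 16 bytes) but never touches DNS.
        byte[] raw = { 10, 0, 0, 1 };
        String ip = InetAddress.getByAddress(raw).getHostAddress();
        System.out.println(ip); // 10.0.0.1
    }
}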

Example 18 with NodeControllerInfo

use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.

the class SchedulerTest method testSchedulerSmallerHDFS.

/**
     * Test the case where the HDFS cluster is smaller than the Hyracks cluster
     *
     * @throws Exception
     */
public void testSchedulerSmallerHDFS() throws Exception {
    Map<String, NodeControllerInfo> ncNameToNcInfos = TestUtils.generateNodeControllerInfo(6, "nc", "10.0.0.", 5099, 5098, 5097);
    List<InputSplit> fileSplits = new ArrayList<>();
    fileSplits.add(new FileSplit(new Path("part-1"), 0, 0, new String[] { "10.0.0.1", "10.0.0.2", "10.0.0.3" }));
    fileSplits.add(new FileSplit(new Path("part-2"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-3"), 0, 0, new String[] { "10.0.0.4", "10.0.0.5", "10.0.0.3" }));
    fileSplits.add(new FileSplit(new Path("part-4"), 0, 0, new String[] { "10.0.0.2", "10.0.0.1", "10.0.0.3" }));
    fileSplits.add(new FileSplit(new Path("part-5"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-6"), 0, 0, new String[] { "10.0.0.2", "10.0.0.3", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-7"), 0, 0, new String[] { "10.0.0.1", "10.0.0.2", "10.0.0.3" }));
    fileSplits.add(new FileSplit(new Path("part-8"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-9"), 0, 0, new String[] { "10.0.0.4", "10.0.0.5", "10.0.0.1" }));
    fileSplits.add(new FileSplit(new Path("part-10"), 0, 0, new String[] { "10.0.0.2", "10.0.0.1", "10.0.0.2" }));
    fileSplits.add(new FileSplit(new Path("part-11"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-12"), 0, 0, new String[] { "10.0.0.2", "10.0.0.3", "10.0.0.5" }));
    Scheduler scheduler = new Scheduler(ncNameToNcInfos);
    String[] locationConstraints = scheduler.getLocationConstraints(fileSplits);
    String[] expectedResults = new String[] { "nc1", "nc4", "nc4", "nc1", "nc3", "nc2", "nc2", "nc3", "nc5", "nc6", "nc5", "nc6" };
    for (int i = 0; i < locationConstraints.length; i++) {
        Assert.assertEquals(locationConstraints[i], expectedResults[i]);
    }
}
Also used: Path(org.apache.hadoop.fs.Path) NodeControllerInfo(org.apache.hyracks.api.client.NodeControllerInfo) ArrayList(java.util.ArrayList) FileSplit(org.apache.hadoop.mapreduce.lib.input.FileSplit) InputSplit(org.apache.hadoop.mapreduce.InputSplit)
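
The returned constraints are index-aligned with the input splits, so locality is easy to spot-check. countLocalAssignments below is a hypothetical helper, not part of the test; it assumes the convention suggested by generateNodeControllerInfo(6, "nc", "10.0.0.", ...) that node "ncN" is reachable at "10.0.0.N":

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.mapreduce.InputSplit;

// Hypothetical helper: how many splits were scheduled on a node that hosts a replica?
static int countLocalAssignments(List<InputSplit> splits, String[] constraints) throws Exception {
    int local = 0;
    for (int i = 0; i < constraints.length; i++) {
        String ip = "10.0.0." + constraints[i].substring(2); // "nc4" -> "10.0.0.4"
        if (Arrays.asList(splits.get(i).getLocations()).contains(ip)) {
            local++;
        }
    }
    return local;
}

Under that assumption, ten of the twelve expected assignments are data-local; "nc6", whose address appears in no split, absorbs the remaining two so that every NC ends up with exactly two splits.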

Example 19 with NodeControllerInfo

use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.

the class ClusterControllerIPCI method deliverIncomingMessage.

@Override
public void deliverIncomingMessage(final IIPCHandle handle, long mid, long rmid, Object payload, Exception exception) {
    CCNCFunctions.Function fn = (Function) payload;
    switch(fn.getFunctionId()) {
        case REGISTER_NODE:
            CCNCFunctions.RegisterNodeFunction rnf = (CCNCFunctions.RegisterNodeFunction) fn;
            ccs.getWorkQueue().schedule(new RegisterNodeWork(ccs, rnf.getNodeRegistration()));
            break;
        case UNREGISTER_NODE:
            CCNCFunctions.UnregisterNodeFunction unf = (CCNCFunctions.UnregisterNodeFunction) fn;
            ccs.getWorkQueue().schedule(new UnregisterNodeWork(ccs.getNodeManager(), unf.getNodeId()));
            break;
        case NODE_HEARTBEAT:
            CCNCFunctions.NodeHeartbeatFunction nhf = (CCNCFunctions.NodeHeartbeatFunction) fn;
            ccs.getWorkQueue().schedule(new NodeHeartbeatWork(ccs, nhf.getNodeId(), nhf.getHeartbeatData()));
            break;
        case NOTIFY_JOBLET_CLEANUP:
            CCNCFunctions.NotifyJobletCleanupFunction njcf = (CCNCFunctions.NotifyJobletCleanupFunction) fn;
            ccs.getWorkQueue().schedule(new JobletCleanupNotificationWork(ccs, njcf.getJobId(), njcf.getNodeId()));
            break;
        case NOTIFY_DEPLOY_BINARY:
            CCNCFunctions.NotifyDeployBinaryFunction ndbf = (CCNCFunctions.NotifyDeployBinaryFunction) fn;
            ccs.getWorkQueue().schedule(new NotifyDeployBinaryWork(ccs, ndbf.getDeploymentId(), ndbf.getNodeId(), ndbf.getDeploymentStatus()));
            break;
        case REPORT_PROFILE:
            CCNCFunctions.ReportProfileFunction rpf = (CCNCFunctions.ReportProfileFunction) fn;
            ccs.getWorkQueue().schedule(new ReportProfilesWork(ccs.getJobManager(), rpf.getProfiles()));
            break;
        case NOTIFY_TASK_COMPLETE:
            CCNCFunctions.NotifyTaskCompleteFunction ntcf = (CCNCFunctions.NotifyTaskCompleteFunction) fn;
            ccs.getWorkQueue().schedule(new TaskCompleteWork(ccs, ntcf.getJobId(), ntcf.getTaskId(), ntcf.getNodeId(), ntcf.getStatistics()));
            break;
        case NOTIFY_TASK_FAILURE:
            CCNCFunctions.NotifyTaskFailureFunction ntff = (CCNCFunctions.NotifyTaskFailureFunction) fn;
            ccs.getWorkQueue().schedule(new TaskFailureWork(ccs, ntff.getJobId(), ntff.getTaskId(), ntff.getNodeId(), ntff.getExceptions()));
            break;
        case DISTRIBUTED_JOB_FAILURE:
            CCNCFunctions.ReportDistributedJobFailureFunction rdjf = (CCNCFunctions.ReportDistributedJobFailureFunction) fn;
            ccs.getWorkQueue().schedule(new DistributedJobFailureWork(rdjf.getJobId(), rdjf.getNodeId()));
            break;
        case REGISTER_PARTITION_PROVIDER:
            CCNCFunctions.RegisterPartitionProviderFunction rppf = (CCNCFunctions.RegisterPartitionProviderFunction) fn;
            ccs.getWorkQueue().schedule(new RegisterPartitionAvailibilityWork(ccs, rppf.getPartitionDescriptor()));
            break;
        case REGISTER_PARTITION_REQUEST:
            CCNCFunctions.RegisterPartitionRequestFunction rprf = (CCNCFunctions.RegisterPartitionRequestFunction) fn;
            ccs.getWorkQueue().schedule(new RegisterPartitionRequestWork(ccs, rprf.getPartitionRequest()));
            break;
        case REGISTER_RESULT_PARTITION_LOCATION:
            CCNCFunctions.RegisterResultPartitionLocationFunction rrplf = (CCNCFunctions.RegisterResultPartitionLocationFunction) fn;
            ccs.getWorkQueue().schedule(new RegisterResultPartitionLocationWork(ccs, rrplf.getJobId(), rrplf.getResultSetId(), rrplf.getOrderedResult(), rrplf.getEmptyResult(), rrplf.getPartition(), rrplf.getNPartitions(), rrplf.getNetworkAddress()));
            break;
        case REPORT_RESULT_PARTITION_WRITE_COMPLETION:
            CCNCFunctions.ReportResultPartitionWriteCompletionFunction rrpwc = (CCNCFunctions.ReportResultPartitionWriteCompletionFunction) fn;
            ccs.getWorkQueue().schedule(new ReportResultPartitionWriteCompletionWork(ccs, rrpwc.getJobId(), rrpwc.getResultSetId(), rrpwc.getPartition()));
            break;
        case REPORT_RESULT_PARTITION_FAILURE:
            CCNCFunctions.ReportResultPartitionFailureFunction rrpf = (CCNCFunctions.ReportResultPartitionFailureFunction) fn;
            ccs.getWorkQueue().schedule(new ReportResultPartitionFailureWork(ccs, rrpf.getJobId(), rrpf.getResultSetId(), rrpf.getPartition()));
            break;
        case SEND_APPLICATION_MESSAGE:
            CCNCFunctions.SendApplicationMessageFunction rsf = (CCNCFunctions.SendApplicationMessageFunction) fn;
            ccs.getWorkQueue().schedule(new ApplicationMessageWork(ccs, rsf.getMessage(), rsf.getDeploymentId(), rsf.getNodeId()));
            break;
        case GET_NODE_CONTROLLERS_INFO:
            ccs.getWorkQueue().schedule(new GetNodeControllersInfoWork(ccs.getNodeManager(), new IResultCallback<Map<String, NodeControllerInfo>>() {

                @Override
                public void setValue(Map<String, NodeControllerInfo> result) {
                    new IPCResponder<CCNCFunctions.GetNodeControllersInfoResponseFunction>(handle, -1).setValue(new CCNCFunctions.GetNodeControllersInfoResponseFunction(result));
                }

                @Override
                public void setException(Exception e) {
                }
            }));
            break;
        case STATE_DUMP_RESPONSE:
            CCNCFunctions.StateDumpResponseFunction dsrf = (StateDumpResponseFunction) fn;
            ccs.getWorkQueue().schedule(new NotifyStateDumpResponse(ccs, dsrf.getNodeId(), dsrf.getStateDumpId(), dsrf.getState()));
            break;
        case SHUTDOWN_RESPONSE:
            CCNCFunctions.ShutdownResponseFunction sdrf = (ShutdownResponseFunction) fn;
            ccs.getWorkQueue().schedule(new NotifyShutdownWork(ccs, sdrf.getNodeId()));
            break;
        case THREAD_DUMP_RESPONSE:
            CCNCFunctions.ThreadDumpResponseFunction tdrf = (CCNCFunctions.ThreadDumpResponseFunction) fn;
            ccs.getWorkQueue().schedule(new NotifyThreadDumpResponse(ccs, tdrf.getRequestId(), tdrf.getThreadDumpJSON()));
            break;
        default:
            LOGGER.warning("Unknown function: " + fn.getFunctionId());
    }
}
Also used: NotifyStateDumpResponse(org.apache.hyracks.control.cc.work.NotifyStateDumpResponse) RegisterPartitionAvailibilityWork(org.apache.hyracks.control.cc.work.RegisterPartitionAvailibilityWork) NodeControllerInfo(org.apache.hyracks.api.client.NodeControllerInfo) Function(org.apache.hyracks.control.common.ipc.CCNCFunctions.Function) IResultCallback(org.apache.hyracks.control.common.work.IResultCallback) NotifyThreadDumpResponse(org.apache.hyracks.control.cc.work.NotifyThreadDumpResponse) ApplicationMessageWork(org.apache.hyracks.control.cc.work.ApplicationMessageWork) ReportProfilesWork(org.apache.hyracks.control.cc.work.ReportProfilesWork) ShutdownResponseFunction(org.apache.hyracks.control.common.ipc.CCNCFunctions.ShutdownResponseFunction) ReportResultPartitionWriteCompletionWork(org.apache.hyracks.control.cc.work.ReportResultPartitionWriteCompletionWork) NotifyShutdownWork(org.apache.hyracks.control.cc.work.NotifyShutdownWork) NodeHeartbeatWork(org.apache.hyracks.control.cc.work.NodeHeartbeatWork) UnregisterNodeWork(org.apache.hyracks.control.cc.work.UnregisterNodeWork) GetNodeControllersInfoWork(org.apache.hyracks.control.cc.work.GetNodeControllersInfoWork) Map(java.util.Map) ReportResultPartitionFailureWork(org.apache.hyracks.control.cc.work.ReportResultPartitionFailureWork) CCNCFunctions(org.apache.hyracks.control.common.ipc.CCNCFunctions) NotifyDeployBinaryWork(org.apache.hyracks.control.cc.work.NotifyDeployBinaryWork) StateDumpResponseFunction(org.apache.hyracks.control.common.ipc.CCNCFunctions.StateDumpResponseFunction) RegisterResultPartitionLocationWork(org.apache.hyracks.control.cc.work.RegisterResultPartitionLocationWork) TaskFailureWork(org.apache.hyracks.control.cc.work.TaskFailureWork) RegisterNodeWork(org.apache.hyracks.control.cc.work.RegisterNodeWork) DistributedJobFailureWork(org.apache.hyracks.control.cc.work.DistributedJobFailureWork) JobletCleanupNotificationWork(org.apache.hyracks.control.cc.work.JobletCleanupNotificationWork) TaskCompleteWork(org.apache.hyracks.control.cc.work.TaskCompleteWork) RegisterPartitionRequestWork(org.apache.hyracks.control.cc.work.RegisterPartitionRequestWork)
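
Most branches above are fire-and-forget: cast the payload, schedule a work item, return. GET_NODE_CONTROLLERS_INFO is the lone request/response case, so it threads an IResultCallback into the work item and lets an IPCResponder ship the answer back over the same handle. Below is a stripped-down sketch of that shape using hypothetical stand-in types, not the real Hyracks IPC classes (Java 10+ for Map.copyOf):

import java.util.Map;
import java.util.concurrent.CompletableFuture;

// Hypothetical stand-in for Hyracks' IResultCallback: one success path, one failure path.
interface ResultCallback<T> {
    void setValue(T result);
    void setException(Exception e);
}

public class CallbackSketch {
    // The "work item" computes its result off the dispatch thread and pushes it
    // into the callback, mirroring GetNodeControllersInfoWork + IPCResponder.
    static void getInfoAsync(Map<String, String> registry, ResultCallback<Map<String, String>> cb) {
        CompletableFuture.runAsync(() -> {
            try {
                cb.setValue(Map.copyOf(registry)); // snapshot, as in getNodeControllerInfoMap()
            } catch (Exception e) {
                cb.setException(e);
            }
        });
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<Map<String, String>> reply = new CompletableFuture<>();
        getInfoAsync(Map.of("nc1", "10.0.0.1"), new ResultCallback<>() {
            @Override
            public void setValue(Map<String, String> result) {
                reply.complete(result);
            }

            @Override
            public void setException(Exception e) {
                reply.completeExceptionally(e);
            }
        });
        System.out.println(reply.get()); // {nc1=10.0.0.1}
    }
}

Note that the real code's setException body is empty, so an error on this path is silently dropped; the sketch forwards it instead.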

Example 20 with NodeControllerInfo

use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.

the class NodeManager method getNodeControllerInfoMap.

@Override
public Map<String, NodeControllerInfo> getNodeControllerInfoMap() {
    Map<String, NodeControllerInfo> result = new LinkedHashMap<>();
    for (Map.Entry<String, NodeControllerState> e : nodeRegistry.entrySet()) {
        NodeControllerState ncState = e.getValue();
        result.put(e.getKey(), new NodeControllerInfo(e.getKey(), NodeStatus.ALIVE, ncState.getDataPort(), ncState.getDatasetPort(), ncState.getMessagingPort(), ncState.getCapacity().getCores()));
    }
    return result;
}
Also used: NodeControllerInfo(org.apache.hyracks.api.client.NodeControllerInfo) NodeControllerState(org.apache.hyracks.control.cc.NodeControllerState) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map)
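
getNodeControllerInfoMap copies the registry into a fresh LinkedHashMap on every call instead of exposing nodeRegistry itself, so callers get a stable snapshot that later registrations or removals cannot mutate. A minimal sketch of the same copy-on-read idiom, with a hypothetical NodeInfo record standing in for NodeControllerInfo (Java 16+):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class SnapshotSketch {
    // Hypothetical stand-in for a few of NodeControllerInfo's fields.
    record NodeInfo(String nodeId, String status, int cores) {}

    private final Map<String, NodeInfo> registry = new ConcurrentHashMap<>();

    Map<String, NodeInfo> snapshot() {
        // Fresh map per call: later changes to 'registry' are invisible to the caller.
        Map<String, NodeInfo> result = new LinkedHashMap<>();
        registry.forEach((id, info) -> result.put(id, new NodeInfo(id, "ALIVE", info.cores())));
        return result;
    }

    public static void main(String[] args) {
        SnapshotSketch mgr = new SnapshotSketch();
        mgr.registry.put("nc1", new NodeInfo("nc1", "ALIVE", 8));
        Map<String, NodeInfo> snap = mgr.snapshot();
        mgr.registry.put("nc2", new NodeInfo("nc2", "ALIVE", 4)); // not visible in 'snap'
        System.out.println(snap.keySet()); // [nc1]
    }
}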

Aggregations

NodeControllerInfo (org.apache.hyracks.api.client.NodeControllerInfo)21 HashMap (java.util.HashMap)8 Path (org.apache.hadoop.fs.Path)8 Map (java.util.Map)6 ArrayList (java.util.ArrayList)5 InputSplit (org.apache.hadoop.mapred.InputSplit)5 NetworkAddress (org.apache.hyracks.api.comm.NetworkAddress)5 ClusterTopology (org.apache.hyracks.api.topology.ClusterTopology)5 FileSplit (org.apache.hadoop.mapred.FileSplit)4 InputSplit (org.apache.hadoop.mapreduce.InputSplit)4 FileSplit (org.apache.hadoop.mapreduce.lib.input.FileSplit)4 AlgebricksAbsolutePartitionConstraint (org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint)4 Test (org.junit.Test)4 HyracksException (org.apache.hyracks.api.exceptions.HyracksException)3 ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)2 ArrayNode (com.fasterxml.jackson.databind.node.ArrayNode)2 ObjectNode (com.fasterxml.jackson.databind.node.ObjectNode)2 IOException (java.io.IOException)2 UnknownHostException (java.net.UnknownHostException)2 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)2