Search in sources :

Example 11 with NodeControllerInfo

use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.

From the class SchedulerTest, the method testSchedulerLargerHDFS:

/**
 * Test the case where the HDFS cluster is larger than the Hyracks cluster.
 *
 * @throws Exception if scheduling fails
 */
public void testSchedulerLargerHDFS() throws Exception {
    // Six Hyracks NCs (nc1..nc6) on 10.0.0.1..10.0.0.6; the splits below also
    // reference 10.0.0.7, which has no corresponding NC (HDFS > Hyracks).
    Map<String, NodeControllerInfo> ncNameToNcInfos = TestUtils.generateNodeControllerInfo(6, "nc", "10.0.0.", 5099, 5098, 5097);
    List<InputSplit> fileSplits = new ArrayList<>();
    fileSplits.add(new FileSplit(new Path("part-1"), 0, 0, new String[] { "10.0.0.1", "10.0.0.2", "10.0.0.3" }));
    fileSplits.add(new FileSplit(new Path("part-2"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-3"), 0, 0, new String[] { "10.0.0.4", "10.0.0.5", "10.0.0.6" }));
    fileSplits.add(new FileSplit(new Path("part-4"), 0, 0, new String[] { "10.0.0.2", "10.0.0.1", "10.0.0.6" }));
    fileSplits.add(new FileSplit(new Path("part-5"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-6"), 0, 0, new String[] { "10.0.0.2", "10.0.0.3", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-7"), 0, 0, new String[] { "10.0.0.1", "10.0.0.2", "10.0.0.3" }));
    fileSplits.add(new FileSplit(new Path("part-8"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-9"), 0, 0, new String[] { "10.0.0.4", "10.0.0.5", "10.0.0.6" }));
    fileSplits.add(new FileSplit(new Path("part-10"), 0, 0, new String[] { "10.0.0.2", "10.0.0.1", "10.0.0.6" }));
    fileSplits.add(new FileSplit(new Path("part-11"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.7" }));
    fileSplits.add(new FileSplit(new Path("part-12"), 0, 0, new String[] { "10.0.0.2", "10.0.0.3", "10.0.0.5" }));
    Scheduler scheduler = new Scheduler(ncNameToNcInfos);
    String[] locationConstraints = scheduler.getLocationConstraints(fileSplits);
    String[] expectedResults = new String[] { "nc1", "nc4", "nc6", "nc1", "nc4", "nc2", "nc2", "nc3", "nc6", "nc5", "nc3", "nc5" };
    // Guard against a silent pass if the scheduler returns fewer (or more)
    // constraints than expected — the loop below only covers the actual length.
    Assert.assertEquals(expectedResults.length, locationConstraints.length);
    for (int i = 0; i < locationConstraints.length; i++) {
        // JUnit convention: expected value first, actual value second, so a
        // failure message reads correctly.
        Assert.assertEquals(expectedResults[i], locationConstraints[i]);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NodeControllerInfo(org.apache.hyracks.api.client.NodeControllerInfo) ArrayList(java.util.ArrayList) FileSplit(org.apache.hadoop.mapreduce.lib.input.FileSplit) InputSplit(org.apache.hadoop.mapreduce.InputSplit)

Example 12 with NodeControllerInfo

use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.

From the class TestUtils, the method generateNodeControllerInfo:

/**
 * Builds a map of node controller infos for a synthetic cluster of the given
 * size. Node ids are {@code ncNamePrefix + i} and addresses are
 * {@code addressPrefix + i} for i in [1, numberOfNodes]; every node is marked
 * ALIVE and reports 2 available cores.
 */
public static Map<String, NodeControllerInfo> generateNodeControllerInfo(int numberOfNodes, String ncNamePrefix, String addressPrefix, int netPort, int dataPort, int messagingPort) {
    Map<String, NodeControllerInfo> infos = new HashMap<>();
    for (int node = 1; node <= numberOfNodes; node++) {
        final String id = ncNamePrefix + node;
        final String address = addressPrefix + node;
        final NetworkAddress net = new NetworkAddress(address, netPort);
        final NetworkAddress data = new NetworkAddress(address, dataPort);
        final NetworkAddress messaging = new NetworkAddress(address, messagingPort);
        infos.put(id, new NodeControllerInfo(id, NodeStatus.ALIVE, net, data, messaging, 2));
    }
    return infos;
}
Also used : NetworkAddress(org.apache.hyracks.api.comm.NetworkAddress) HashMap(java.util.HashMap) NodeControllerInfo(org.apache.hyracks.api.client.NodeControllerInfo)

Example 13 with NodeControllerInfo

use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.

From the class APIFramework, the method chooseLocations:

// Chooses the location constraints: the storage partitions themselves when the
// parallelism hint asks for storage parallelism and the cluster has enough
// cores to host them; otherwise a computed set of locations honoring the
// user-specified number of cores.
private static AlgebricksAbsolutePartitionConstraint chooseLocations(IClusterInfoCollector clusterInfoCollector, int parallelismHint, AlgebricksAbsolutePartitionConstraint storageLocations) throws AlgebricksException {
    try {
        Map<String, NodeControllerInfo> ncMap = clusterInfoCollector.getNodeControllerInfos();
        // Total cores across the whole cluster bounds how many storage
        // partitions can be served directly.
        int availableCores = getTotalNumCores(ncMap);
        boolean useStorageParallelism = parallelismHint == CompilerProperties.COMPILER_PARALLELISM_AS_STORAGE
                && storageLocations.getLocations().length <= availableCores;
        return useStorageParallelism ? storageLocations : getComputationLocations(ncMap, parallelismHint);
    } catch (HyracksException e) {
        // Surface cluster-info failures as compiler-level exceptions.
        throw new AlgebricksException(e);
    }
}
Also used : NodeControllerInfo(org.apache.hyracks.api.client.NodeControllerInfo) AlgebricksException(org.apache.hyracks.algebricks.common.exceptions.AlgebricksException) HyracksException(org.apache.hyracks.api.exceptions.HyracksException) AlgebricksPartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksPartitionConstraint) AlgebricksAbsolutePartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint)

Example 14 with NodeControllerInfo

use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.

From the class APIFrameworkTest, the method testChooseLocations:

@Test
public void testChooseLocations() throws Exception {
    // Mocks a cluster info collector backed by two NCs with one core each,
    // so the cluster exposes a total of 2 available cores.
    IClusterInfoCollector clusterInfoCollector = mock(IClusterInfoCollector.class);
    Map<String, NodeControllerInfo> map = new HashMap<>();
    NodeControllerInfo nc1Info = mock(NodeControllerInfo.class);
    when(nc1Info.getNumAvailableCores()).thenReturn(1);
    NodeControllerInfo nc2Info = mock(NodeControllerInfo.class);
    when(nc2Info.getNumAvailableCores()).thenReturn(1);
    map.put("nc1", nc1Info);
    map.put("nc2", nc2Info);
    when(clusterInfoCollector.getNodeControllerInfos()).thenReturn(map);
    // Creates an APIFramework.
    APIFramework apiFramework = new APIFramework(mock(ILangCompilationProvider.class));
    // Storage locations larger than the available cores (3 > 2): the result
    // falls back to computation locations capped by the core count.
    AlgebricksAbsolutePartitionConstraint storageLocations = new AlgebricksAbsolutePartitionConstraint(new String[] { "node1", "node1", "node2" });
    Assert.assertEquals(2, invokeChooseLocations(apiFramework, clusterInfoCollector, storageLocations).getLocations().length);
    // Storage locations exactly matching the available cores (2 == 2).
    storageLocations = new AlgebricksAbsolutePartitionConstraint(new String[] { "node1", "node2" });
    Assert.assertEquals(2, invokeChooseLocations(apiFramework, clusterInfoCollector, storageLocations).getLocations().length);
    // Storage locations smaller than the available cores (1 < 2).
    storageLocations = new AlgebricksAbsolutePartitionConstraint(new String[] { "node1" });
    Assert.assertEquals(1, invokeChooseLocations(apiFramework, clusterInfoCollector, storageLocations).getLocations().length);
    // Verifies the number of calls on clusterInfoCollector.getNodeControllerInfos() in
    // APIFramework.chooseLocations(...).
    verify(clusterInfoCollector, times(3)).getNodeControllerInfos();
}

// Invokes the private APIFramework.chooseLocations(...) via reflection with the
// storage-parallelism hint, centralizing the reflective signature string that
// was previously duplicated three times above.
private static AlgebricksAbsolutePartitionConstraint invokeChooseLocations(APIFramework apiFramework,
        IClusterInfoCollector clusterInfoCollector, AlgebricksAbsolutePartitionConstraint storageLocations)
        throws Exception {
    return (AlgebricksAbsolutePartitionConstraint) PA.invokeMethod(apiFramework,
            "chooseLocations(" + IClusterInfoCollector.class.getName() + ",int,"
                    + AlgebricksAbsolutePartitionConstraint.class.getName() + ")",
            clusterInfoCollector, CompilerProperties.COMPILER_PARALLELISM_AS_STORAGE, storageLocations);
}
Also used : HashMap(java.util.HashMap) AlgebricksAbsolutePartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint) NodeControllerInfo(org.apache.hyracks.api.client.NodeControllerInfo) ILangCompilationProvider(org.apache.asterix.compiler.provider.ILangCompilationProvider) IClusterInfoCollector(org.apache.hyracks.api.client.IClusterInfoCollector) Test(org.junit.Test)

Example 15 with NodeControllerInfo

use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.

From the class ConnectorApiServletTest, the method testFormResponseObject:

@Test
public void testFormResponseObject() throws Exception {
    // Builds the servlet against the integration test's CC application context.
    ConnectorApiServlet let = new ConnectorApiServlet(new ConcurrentHashMap<>(), new String[] { "/" }, (ICcApplicationContext) ExecutionTestUtil.integrationUtil.cc.getApplicationContext());
    ObjectMapper om = new ObjectMapper();
    ObjectNode actualResponse = om.createObjectNode();
    FileSplit[] splits = new FileSplit[2];
    splits[0] = new ManagedFileSplit("asterix_nc1", "foo1");
    splits[1] = new ManagedFileSplit("asterix_nc2", "foo2");
    Map<String, NodeControllerInfo> nodeMap = new HashMap<>();
    NodeControllerInfo mockInfo1 = mock(NodeControllerInfo.class);
    NodeControllerInfo mockInfo2 = mock(NodeControllerInfo.class);
    // Sets up mock returns: each NC resolves to a distinct IP so the response
    // "splits" entries can be told apart.
    when(mockInfo1.getNetworkAddress()).thenReturn(new NetworkAddress("127.0.0.1", 3099));
    when(mockInfo2.getNetworkAddress()).thenReturn(new NetworkAddress("127.0.0.2", 3099));
    String[] fieldNames = new String[] { "a1", "a2" };
    IAType[] fieldTypes = new IAType[] { BuiltinType.ABOOLEAN, BuiltinType.ADAYTIMEDURATION };
    ARecordType recordType = new ARecordType("record", fieldNames, fieldTypes, true);
    String primaryKey = "a1";
    // Calls the private ConnectorApiServlet.formResponseObject via reflection,
    // filling actualResponse in place.
    nodeMap.put("asterix_nc1", mockInfo1);
    nodeMap.put("asterix_nc2", mockInfo2);
    PA.invokeMethod(let, "formResponseObject(" + ObjectNode.class.getName() + ", " + FileSplit.class.getName() + "[], " + ARecordType.class.getName() + ", " + String.class.getName() + ", boolean, " + Map.class.getName() + ")", actualResponse, splits, recordType, primaryKey, true, nodeMap);
    // Constructs expected response.
    ObjectNode expectedResponse = om.createObjectNode();
    expectedResponse.put("temp", true);
    expectedResponse.put("keys", primaryKey);
    expectedResponse.set("type", recordType.toJSON());
    ArrayNode splitsArray = om.createArrayNode();
    ObjectNode element1 = om.createObjectNode();
    element1.put("ip", "127.0.0.1");
    element1.put("path", splits[0].getPath());
    ObjectNode element2 = om.createObjectNode();
    element2.put("ip", "127.0.0.2");
    element2.put("path", splits[1].getPath());
    splitsArray.add(element1);
    splitsArray.add(element2);
    expectedResponse.set("splits", splitsArray);
    // Checks results — JUnit convention puts the expected value first so a
    // failure message labels the two sides correctly.
    Assert.assertEquals(expectedResponse.toString(), actualResponse.toString());
}
Also used : ObjectNode(com.fasterxml.jackson.databind.node.ObjectNode) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ManagedFileSplit(org.apache.hyracks.api.io.ManagedFileSplit) FileSplit(org.apache.hyracks.api.io.FileSplit) ManagedFileSplit(org.apache.hyracks.api.io.ManagedFileSplit) NetworkAddress(org.apache.hyracks.api.comm.NetworkAddress) NodeControllerInfo(org.apache.hyracks.api.client.NodeControllerInfo) ArrayNode(com.fasterxml.jackson.databind.node.ArrayNode) ConnectorApiServlet(org.apache.asterix.api.http.server.ConnectorApiServlet) ARecordType(org.apache.asterix.om.types.ARecordType) HashMap(java.util.HashMap) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) IAType(org.apache.asterix.om.types.IAType) Test(org.junit.Test) SqlppExecutionTest(org.apache.asterix.test.runtime.SqlppExecutionTest)

Aggregations

NodeControllerInfo (org.apache.hyracks.api.client.NodeControllerInfo)21 HashMap (java.util.HashMap)8 Path (org.apache.hadoop.fs.Path)8 Map (java.util.Map)6 ArrayList (java.util.ArrayList)5 InputSplit (org.apache.hadoop.mapred.InputSplit)5 NetworkAddress (org.apache.hyracks.api.comm.NetworkAddress)5 ClusterTopology (org.apache.hyracks.api.topology.ClusterTopology)5 FileSplit (org.apache.hadoop.mapred.FileSplit)4 InputSplit (org.apache.hadoop.mapreduce.InputSplit)4 FileSplit (org.apache.hadoop.mapreduce.lib.input.FileSplit)4 AlgebricksAbsolutePartitionConstraint (org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint)4 Test (org.junit.Test)4 HyracksException (org.apache.hyracks.api.exceptions.HyracksException)3 ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)2 ArrayNode (com.fasterxml.jackson.databind.node.ArrayNode)2 ObjectNode (com.fasterxml.jackson.databind.node.ObjectNode)2 IOException (java.io.IOException)2 UnknownHostException (java.net.UnknownHostException)2 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)2