Use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.
The class SchedulerTest, method testSchedulerLargerHDFS.
/**
 * Test the case where the HDFS cluster is larger than the Hyracks cluster.
*
* @throws Exception
*/
public void testSchedulerLargerHDFS() throws Exception {
    Map<String, NodeControllerInfo> ncNameToNcInfos =
            TestUtils.generateNodeControllerInfo(6, "nc", "10.0.0.", 5099, 5098, 5097);
    List<InputSplit> fileSplits = new ArrayList<>();
    fileSplits.add(new FileSplit(new Path("part-1"), 0, 0, new String[] { "10.0.0.1", "10.0.0.2", "10.0.0.3" }));
    fileSplits.add(new FileSplit(new Path("part-2"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-3"), 0, 0, new String[] { "10.0.0.4", "10.0.0.5", "10.0.0.6" }));
    fileSplits.add(new FileSplit(new Path("part-4"), 0, 0, new String[] { "10.0.0.2", "10.0.0.1", "10.0.0.6" }));
    fileSplits.add(new FileSplit(new Path("part-5"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-6"), 0, 0, new String[] { "10.0.0.2", "10.0.0.3", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-7"), 0, 0, new String[] { "10.0.0.1", "10.0.0.2", "10.0.0.3" }));
    fileSplits.add(new FileSplit(new Path("part-8"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.5" }));
    fileSplits.add(new FileSplit(new Path("part-9"), 0, 0, new String[] { "10.0.0.4", "10.0.0.5", "10.0.0.6" }));
    fileSplits.add(new FileSplit(new Path("part-10"), 0, 0, new String[] { "10.0.0.2", "10.0.0.1", "10.0.0.6" }));
    fileSplits.add(new FileSplit(new Path("part-11"), 0, 0, new String[] { "10.0.0.3", "10.0.0.4", "10.0.0.7" }));
    fileSplits.add(new FileSplit(new Path("part-12"), 0, 0, new String[] { "10.0.0.2", "10.0.0.3", "10.0.0.5" }));
    Scheduler scheduler = new Scheduler(ncNameToNcInfos);
    String[] locationConstraints = scheduler.getLocationConstraints(fileSplits);
    String[] expectedResults =
            new String[] { "nc1", "nc4", "nc6", "nc1", "nc4", "nc2", "nc2", "nc3", "nc6", "nc5", "nc3", "nc5" };
    for (int i = 0; i < locationConstraints.length; i++) {
        Assert.assertEquals(locationConstraints[i], expectedResults[i]);
    }
}
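In a real job, the scheduler's output typically becomes an absolute location constraint for the HDFS read operator, so that each partition runs on the NC holding (or nearest to) its split. A minimal sketch, assuming the standard Hyracks constraint helper; the operator and job specification here are stand-ins, not part of the test above:

import org.apache.hyracks.api.constraints.PartitionConstraintHelper;
import org.apache.hyracks.api.dataflow.IOperatorDescriptor;
import org.apache.hyracks.api.job.JobSpecification;

// Sketch only: pin each partition of the reader to the NC the scheduler chose for the
// corresponding split; the surrounding job wiring is omitted.
static void pinToScheduledLocations(JobSpecification spec, IOperatorDescriptor hdfsReader,
        String[] locationConstraints) {
    PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, hdfsReader, locationConstraints);
}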
Use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.
The class TestUtils, method generateNodeControllerInfo.
public static Map<String, NodeControllerInfo> generateNodeControllerInfo(int numberOfNodes, String ncNamePrefix,
        String addressPrefix, int netPort, int dataPort, int messagingPort) {
    Map<String, NodeControllerInfo> ncNameToNcInfos = new HashMap<>();
    for (int i = 1; i <= numberOfNodes; i++) {
        String ncId = ncNamePrefix + i;
        String ncAddress = addressPrefix + i;
        ncNameToNcInfos.put(ncId,
                new NodeControllerInfo(ncId, NodeStatus.ALIVE, new NetworkAddress(ncAddress, netPort),
                        new NetworkAddress(ncAddress, dataPort), new NetworkAddress(ncAddress, messagingPort), 2));
    }
    return ncNameToNcInfos;
}
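Usage is straightforward: the call in SchedulerTest above produces ids nc1 through nc6 at addresses 10.0.0.1 through 10.0.0.6, each advertising the hard-coded two available cores. A quick sketch using only accessors already exercised elsewhere on this page:

// Builds a 6-node mock cluster and spot-checks one entry.
Map<String, NodeControllerInfo> infos = TestUtils.generateNodeControllerInfo(6, "nc", "10.0.0.", 5099, 5098, 5097);
Assert.assertEquals(6, infos.size());
Assert.assertEquals(2, infos.get("nc3").getNumAvailableCores());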
Use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.
The class APIFramework, method chooseLocations.
// Chooses the location constraints, i.e., whether to use storage parallelism or a user-specified number
// of cores.
private static AlgebricksAbsolutePartitionConstraint chooseLocations(IClusterInfoCollector clusterInfoCollector,
        int parallelismHint, AlgebricksAbsolutePartitionConstraint storageLocations) throws AlgebricksException {
    try {
        Map<String, NodeControllerInfo> ncMap = clusterInfoCollector.getNodeControllerInfos();
        // Gets the total number of cores in the cluster.
        int totalNumCores = getTotalNumCores(ncMap);
        // If storage parallelism is requested and does not exceed the total number of cores, use the
        // storage locations. Otherwise, we will use all available cores.
        if (parallelismHint == CompilerProperties.COMPILER_PARALLELISM_AS_STORAGE
                && storageLocations.getLocations().length <= totalNumCores) {
            return storageLocations;
        }
        return getComputationLocations(ncMap, parallelismHint);
    } catch (HyracksException e) {
        throw new AlgebricksException(e);
    }
}
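The helper getTotalNumCores is referenced but not shown above. A minimal sketch, assuming only the NodeControllerInfo.getNumAvailableCores() accessor used elsewhere on this page; the actual APIFramework implementation may differ in detail:

// Sketch: sums each node controller's reported available cores across the cluster.
private static int getTotalNumCores(Map<String, NodeControllerInfo> ncMap) {
    int sum = 0;
    for (NodeControllerInfo info : ncMap.values()) {
        sum += info.getNumAvailableCores();
    }
    return sum;
}

getComputationLocations(ncMap, parallelismHint) then distributes the requested parallelism over the available nodes; the fallback assertions in the test below depend on that behavior.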
Use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.
The class APIFrameworkTest, method testChooseLocations.
@Test
public void testChooseLocations() throws Exception {
    // Mocks the cluster info collector.
    IClusterInfoCollector clusterInfoCollector = mock(IClusterInfoCollector.class);
    // Constructs mocked cluster nodes: two NCs with one core each (two cores total).
    Map<String, NodeControllerInfo> map = new HashMap<>();
    NodeControllerInfo nc1Info = mock(NodeControllerInfo.class);
    when(nc1Info.getNumAvailableCores()).thenReturn(1);
    NodeControllerInfo nc2Info = mock(NodeControllerInfo.class);
    when(nc2Info.getNumAvailableCores()).thenReturn(1);
    String nc1 = "nc1";
    String nc2 = "nc2";
    map.put(nc1, nc1Info);
    map.put(nc2, nc2Info);
    when(clusterInfoCollector.getNodeControllerInfos()).thenReturn(map);
    // Creates an APIFramework.
    APIFramework apiFramework = new APIFramework(mock(ILangCompilationProvider.class));
    // Tests storage locations larger than the cluster's total core count (3 > 2): falls back to cores.
    AlgebricksAbsolutePartitionConstraint storageLocations =
            new AlgebricksAbsolutePartitionConstraint(new String[] { "node1", "node1", "node2" });
    AlgebricksAbsolutePartitionConstraint computationLocations =
            (AlgebricksAbsolutePartitionConstraint) PA.invokeMethod(apiFramework,
                    "chooseLocations(" + IClusterInfoCollector.class.getName() + ",int,"
                            + AlgebricksAbsolutePartitionConstraint.class.getName() + ")",
                    clusterInfoCollector, CompilerProperties.COMPILER_PARALLELISM_AS_STORAGE, storageLocations);
    Assert.assertTrue(computationLocations.getLocations().length == 2);
    // Tests storage locations that fit the cluster exactly (2 <= 2): storage locations are kept.
    storageLocations = new AlgebricksAbsolutePartitionConstraint(new String[] { "node1", "node2" });
    computationLocations = (AlgebricksAbsolutePartitionConstraint) PA.invokeMethod(apiFramework,
            "chooseLocations(" + IClusterInfoCollector.class.getName() + ",int,"
                    + AlgebricksAbsolutePartitionConstraint.class.getName() + ")",
            clusterInfoCollector, CompilerProperties.COMPILER_PARALLELISM_AS_STORAGE, storageLocations);
    Assert.assertTrue(computationLocations.getLocations().length == 2);
    // Tests storage locations smaller than the cluster (1 <= 2): storage locations are kept.
    storageLocations = new AlgebricksAbsolutePartitionConstraint(new String[] { "node1" });
    computationLocations = (AlgebricksAbsolutePartitionConstraint) PA.invokeMethod(apiFramework,
            "chooseLocations(" + IClusterInfoCollector.class.getName() + ",int,"
                    + AlgebricksAbsolutePartitionConstraint.class.getName() + ")",
            clusterInfoCollector, CompilerProperties.COMPILER_PARALLELISM_AS_STORAGE, storageLocations);
    Assert.assertTrue(computationLocations.getLocations().length == 1);
    // Verifies the number of calls on clusterInfoCollector.getNodeControllerInfos() in
    // APIFramework.chooseLocations(...).
    verify(clusterInfoCollector, times(3)).getNodeControllerInfos();
}
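Only the COMPILER_PARALLELISM_AS_STORAGE path is exercised above. As a hedged variation, not part of the original test, a concrete positive hint should skip the storage shortcut and always go through getComputationLocations; the resulting partition count is an assumption about that helper's behavior, so no assertion is made here. If appended inside the test method above:

// Hypothetical extra case: a fixed hint of 2 bypasses the storage-locations shortcut entirely.
storageLocations = new AlgebricksAbsolutePartitionConstraint(new String[] { "node1" });
computationLocations = (AlgebricksAbsolutePartitionConstraint) PA.invokeMethod(apiFramework,
        "chooseLocations(" + IClusterInfoCollector.class.getName() + ",int,"
                + AlgebricksAbsolutePartitionConstraint.class.getName() + ")",
        clusterInfoCollector, 2, storageLocations);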
Use of org.apache.hyracks.api.client.NodeControllerInfo in project asterixdb by apache.
The class ConnectorApiServletTest, method testFormResponseObject.
@Test
public void testFormResponseObject() throws Exception {
    ConnectorApiServlet let = new ConnectorApiServlet(new ConcurrentHashMap<>(), new String[] { "/" },
            (ICcApplicationContext) ExecutionTestUtil.integrationUtil.cc.getApplicationContext());
    ObjectMapper om = new ObjectMapper();
    ObjectNode actualResponse = om.createObjectNode();
    FileSplit[] splits = new FileSplit[2];
    splits[0] = new ManagedFileSplit("asterix_nc1", "foo1");
    splits[1] = new ManagedFileSplit("asterix_nc2", "foo2");
    Map<String, NodeControllerInfo> nodeMap = new HashMap<>();
    NodeControllerInfo mockInfo1 = mock(NodeControllerInfo.class);
    NodeControllerInfo mockInfo2 = mock(NodeControllerInfo.class);
    // Sets up mock returns.
    when(mockInfo1.getNetworkAddress()).thenReturn(new NetworkAddress("127.0.0.1", 3099));
    when(mockInfo2.getNetworkAddress()).thenReturn(new NetworkAddress("127.0.0.2", 3099));
    String[] fieldNames = new String[] { "a1", "a2" };
    IAType[] fieldTypes = new IAType[] { BuiltinType.ABOOLEAN, BuiltinType.ADAYTIMEDURATION };
    ARecordType recordType = new ARecordType("record", fieldNames, fieldTypes, true);
    String primaryKey = "a1";
    nodeMap.put("asterix_nc1", mockInfo1);
    nodeMap.put("asterix_nc2", mockInfo2);
    // Calls ConnectorApiServlet.formResponseObject.
    PA.invokeMethod(let,
            "formResponseObject(" + ObjectNode.class.getName() + ", " + FileSplit.class.getName() + "[], "
                    + ARecordType.class.getName() + ", " + String.class.getName() + ", boolean, "
                    + Map.class.getName() + ")",
            actualResponse, splits, recordType, primaryKey, true, nodeMap);
    // Constructs the expected response.
    ObjectNode expectedResponse = om.createObjectNode();
    expectedResponse.put("temp", true);
    expectedResponse.put("keys", primaryKey);
    expectedResponse.set("type", recordType.toJSON());
    ArrayNode splitsArray = om.createArrayNode();
    ObjectNode element1 = om.createObjectNode();
    element1.put("ip", "127.0.0.1");
    element1.put("path", splits[0].getPath());
    ObjectNode element2 = om.createObjectNode();
    element2.put("ip", "127.0.0.2");
    element2.put("path", splits[1].getPath());
    splitsArray.add(element1);
    splitsArray.add(element2);
    expectedResponse.set("splits", splitsArray);
    // Checks the results.
    Assert.assertEquals(actualResponse.toString(), expectedResponse.toString());
}
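Assuming ManagedFileSplit.getPath() returns the relative path as constructed (an assumption, not confirmed by this snippet), the expected response serializes to roughly {"temp":true,"keys":"a1","type":...,"splits":[{"ip":"127.0.0.1","path":"foo1"},{"ip":"127.0.0.2","path":"foo2"}]}. A small sketch of reading the splits array back out with Jackson, if appended inside the test above:

// Sketch: iterate the "splits" array of the response node and print each host/path pair.
// Requires com.fasterxml.jackson.databind.JsonNode in addition to the imports already used.
ArrayNode splitsNode = (ArrayNode) actualResponse.get("splits");
for (JsonNode split : splitsNode) {
    System.out.println(split.get("ip").asText() + " -> " + split.get("path").asText());
}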