Use of org.apache.hyracks.api.exceptions.HyracksException in project asterixdb by Apache:
class IndexingScheduler, method getLocationConstraints.
/**
 * Set location constraints for a file scan operator with a list of file
 * splits. It tries to assign splits to their local machines fairly.
 * Locality is considered more important than fairness.
 *
 * @param splits
 *            the input splits to schedule; may be {@code null}
 * @return one NC location per split, parallel to {@code splits}
 * @throws HyracksException
 *             if split metadata cannot be read
 */
public String[] getLocationConstraints(InputSplit[] splits) throws HyracksException {
    if (splits == null) {
        /** deal the case when the splits array is null */
        return new String[] {};
    }
    // Java zero-initializes new arrays, so no explicit Arrays.fill is needed.
    int[] workloads = new int[NCs.length];
    String[] locations = new String[splits.length];
    Map<String, IntWritable> locationToNumOfSplits = new HashMap<>();
    /**
     * upper bound is number of splits
     */
    int upperBoundSlots = splits.length;
    try {
        Random random = new Random(System.currentTimeMillis());
        // scheduled[i] == true once split i has been assigned a data-local slot
        boolean[] scheduled = new boolean[splits.length];
        /**
         * scan the splits and build the popularity map;
         * give the machines with less local splits more scheduling priority
         */
        buildPopularityMap(splits, locationToNumOfSplits);
        Map<String, Integer> locationToNumOfAssignement = new HashMap<>();
        for (String location : locationToNumOfSplits.keySet()) {
            locationToNumOfAssignement.put(location, 0);
        }
        /**
         * push data-local upper-bounds slots to each machine
         */
        scheduleLocalSlots(splits, workloads, locations, upperBoundSlots, random, scheduled, locationToNumOfSplits, locationToNumOfAssignement);
        int dataLocalCount = 0;
        for (int i = 0; i < scheduled.length; i++) {
            if (scheduled[i]) {
                dataLocalCount++;
            }
        }
        // Guard against division by zero when there are no splits.
        LOGGER.info("Data local rate: " + (scheduled.length == 0 ? 0.0 : ((float) dataLocalCount / (float) (scheduled.length))));
        /**
         * push non-data-local upper-bounds slots to each machine
         */
        locationToNumOfAssignement.clear();
        for (String nc : NCs) {
            locationToNumOfAssignement.put(nc, 0);
        }
        // Re-count per-NC assignments made by the data-local pass so the
        // non-local pass can balance the remaining splits fairly.
        for (int i = 0; i < scheduled.length; i++) {
            if (scheduled[i]) {
                locationToNumOfAssignement.put(locations[i], locationToNumOfAssignement.get(locations[i]) + 1);
            }
        }
        scheduleNonLocalSlots(splits, workloads, locations, upperBoundSlots, scheduled, locationToNumOfAssignement);
        return locations;
    } catch (IOException e) {
        // Preserve the cause so callers can see the underlying I/O failure.
        throw new HyracksException(e);
    }
}
Use of org.apache.hyracks.api.exceptions.HyracksException in project asterixdb by Apache:
class IndexingScheduler, method loadIPAddressToNCMap.
/**
 * Load the IP-address-to-NC map from the NC-name-to-NC-info map.
 * Populates {@code NCs}, {@code ipToNcMapping}, and {@code ncNameToIndex}
 * as a side effect, replacing any previous contents.
 *
 * @param ncNameToNcInfos
 *            mapping from NC name to its controller info
 * @throws HyracksException
 *             if an address cannot be resolved
 */
private void loadIPAddressToNCMap(Map<String, NodeControllerInfo> ncNameToNcInfos) throws HyracksException {
    try {
        NCs = new String[ncNameToNcInfos.size()];
        ipToNcMapping.clear();
        ncNameToIndex.clear();
        int i = 0;
        /**
         * build the IP address to NC map
         */
        for (Map.Entry<String, NodeControllerInfo> entry : ncNameToNcInfos.entrySet()) {
            String ipAddr = InetAddress.getByAddress(entry.getValue().getNetworkAddress().lookupIpAddress()).getHostAddress();
            // computeIfAbsent replaces the manual get/null-check/put dance.
            ipToNcMapping.computeIfAbsent(ipAddr, k -> new ArrayList<>()).add(entry.getKey());
            NCs[i] = entry.getKey();
            i++;
        }
        /**
         * set up the NC name to index mapping
         */
        for (i = 0; i < NCs.length; i++) {
            ncNameToIndex.put(NCs[i], i);
        }
    } catch (Exception e) {
        // Broad catch kept deliberately: address resolution and lookup can
        // fail in several ways; all are surfaced as HyracksException with cause.
        throw new HyracksException(e);
    }
}
Use of org.apache.hyracks.api.exceptions.HyracksException in project asterixdb by Apache:
class PartitionManager, method registerPartitionRequest.
/**
 * Serve a partition to the requesting channel if it is already available;
 * otherwise record the request so it can be satisfied later.
 */
public synchronized void registerPartitionRequest(PartitionId partitionId, NetworkOutputChannel writer) throws HyracksException {
    try {
        List<IPartition> available = availablePartitionMap.get(partitionId);
        if (available == null || available.isEmpty()) {
            // Unknown partition: instead of failing the request, park it
            // until the partition becomes available.
            partitionRequests.put(partitionId, writer);
            return;
        }
        IPartition partition = available.get(0);
        writer.setFrameSize(partition.getTaskContext().getInitialFrameSize());
        partition.writeTo(writer);
        if (!partition.isReusable()) {
            // One-shot partitions are consumed on first read.
            availablePartitionMap.remove(partitionId);
        }
    } catch (Exception e) {
        throw new HyracksDataException(e);
    }
}
Use of org.apache.hyracks.api.exceptions.HyracksException in project asterixdb by Apache:
class DistributeJobWork, method run.
@Override
public void run() {
    try {
        // Reject a second distribution of the same job, then cache its ACG.
        ncs.checkForDuplicateDistributedJob(jobId);
        final ActivityClusterGraph graph =
                (ActivityClusterGraph) DeploymentUtils.deserialize(acgBytes, null, ncs.getContext());
        ncs.storeActivityClusterGraph(jobId, graph);
    } catch (HyracksException e) {
        // Distribution failed on this NC; report back to the cluster controller.
        try {
            ncs.getClusterController().notifyDistributedJobFailure(jobId, ncs.getId());
        } catch (Exception notifyError) {
            notifyError.printStackTrace();
        }
    }
}
Use of org.apache.hyracks.api.exceptions.HyracksException in project asterixdb by Apache:
class TestUtils, method create.
/**
 * Build a standalone task context for tests, backed by a fresh I/O manager
 * and a synthetic joblet/task identity.
 */
public static IHyracksTaskContext create(int frameSize) {
    try {
        final IOManager ioManager = createIoManager();
        final INCServiceContext serviceCtx = new TestNCServiceContext(ioManager, null);
        final TestJobletContext jobletCtx = new TestJobletContext(frameSize, serviceCtx, new JobId(0));
        final ActivityId activityId = new ActivityId(new OperatorDescriptorId(0), 0);
        final TaskAttemptId attemptId = new TaskAttemptId(new TaskId(activityId, 0), 0);
        return new TestTaskContext(jobletCtx, attemptId);
    } catch (HyracksException e) {
        throw new RuntimeException(e);
    }
}
Aggregations.