Usage of org.apache.hyracks.api.job.JobId in the Apache AsterixDB project: class PredistributedJobsTest, method DistributedTest.
/**
 * Verifies the full lifecycle of pre-distributed jobs: two job specs are distributed
 * to the cluster controller (cc) and both node controllers (nc1, nc2), then run,
 * re-run, and destroyed, with Mockito verifying the expected interaction counts.
 * NOTE(review): cc/nc1/nc2/hcc are Mockito spies/fixtures set up elsewhere in this class.
 * The verify(...) counts are cumulative across the whole test, so their order matters.
 */
@Test
public void DistributedTest() throws Exception {
JobSpecification spec1 = UnionTest.createUnionJobSpec();
JobSpecification spec2 = HeapSortMergeTest.createSortMergeJobSpec();
//distribute both jobs
JobId jobId1 = hcc.distributeJob(spec1);
JobId jobId2 = hcc.distributeJob(spec2);
//make sure it finished
//cc will get the store once to check for duplicate insertion and once to insert per job
// (2 jobs x 2 accesses = 4 total store lookups so far)
verify(cc, Mockito.timeout(5000).times(4)).getPreDistributedJobStore();
// each NC stores one activity cluster graph per distributed job
verify(nc1, Mockito.timeout(5000).times(2)).storeActivityClusterGraph(any(), any());
verify(nc2, Mockito.timeout(5000).times(2)).storeActivityClusterGraph(any(), any());
verify(nc1, Mockito.timeout(5000).times(2)).checkForDuplicateDistributedJob(any());
verify(nc2, Mockito.timeout(5000).times(2)).checkForDuplicateDistributedJob(any());
//confirm that both jobs are distributed
Assert.assertTrue(nc1.getActivityClusterGraph(jobId1) != null && nc2.getActivityClusterGraph(jobId1) != null);
Assert.assertTrue(nc1.getActivityClusterGraph(jobId2) != null && nc2.getActivityClusterGraph(jobId2) != null);
Assert.assertTrue(cc.getPreDistributedJobStore().getDistributedJobDescriptor(jobId1) != null);
Assert.assertTrue(cc.getPreDistributedJobStore().getDistributedJobDescriptor(jobId2) != null);
//run the first job
hcc.startJob(jobId1);
hcc.waitForCompletion(jobId1);
//destroy the first job
hcc.destroyJob(jobId1);
//make sure it finished (cumulative count: 4 from distribution + 4 from start/destroy)
verify(cc, Mockito.timeout(5000).times(8)).getPreDistributedJobStore();
verify(nc1, Mockito.timeout(5000).times(1)).removeActivityClusterGraph(any());
verify(nc2, Mockito.timeout(5000).times(1)).removeActivityClusterGraph(any());
//confirm the first job is destroyed
Assert.assertTrue(nc1.getActivityClusterGraph(jobId1) == null && nc2.getActivityClusterGraph(jobId1) == null);
// presumably throws/fails if the descriptor still exists after destroy — TODO confirm semantics
cc.getPreDistributedJobStore().checkForExistingDistributedJobDescriptor(jobId1);
//run the second job
hcc.startJob(jobId2);
hcc.waitForCompletion(jobId2);
//wait ten seconds to ensure the result sweeper does not break the job
//The result sweeper runs every 5 seconds during the tests
Thread.sleep(10000);
//run the second job again (a pre-distributed job must be re-runnable until destroyed)
hcc.startJob(jobId2);
hcc.waitForCompletion(jobId2);
//destroy the second job
hcc.destroyJob(jobId2);
//make sure it finished (cumulative count: 8 prior + 4 from the two runs/destroy)
verify(cc, Mockito.timeout(5000).times(12)).getPreDistributedJobStore();
verify(nc1, Mockito.timeout(5000).times(2)).removeActivityClusterGraph(any());
verify(nc2, Mockito.timeout(5000).times(2)).removeActivityClusterGraph(any());
//confirm the second job is destroyed
Assert.assertTrue(nc1.getActivityClusterGraph(jobId2) == null && nc2.getActivityClusterGraph(jobId2) == null);
// presumably throws/fails if the descriptor still exists after destroy — TODO confirm semantics
cc.getPreDistributedJobStore().checkForExistingDistributedJobDescriptor(jobId2);
}
Usage of org.apache.hyracks.api.job.JobId in the Apache AsterixDB project: class JobStatusAPIIntegrationTest, method testJobActivityGraph.
/**
 * Starts a job, fetches its activity graph through the REST status API, sanity-checks
 * the JSON payload ("result" wrapper with a "version" field and activity clusters),
 * then stops the job and asserts it reports the TERMINATED status.
 * NOTE(review): startJob/getJobActivityGraph/stopJob/getJobStatus/checkActivityCluster
 * are fixture helpers defined elsewhere in this class.
 */
@Test
public void testJobActivityGraph() throws Exception {
JobId jId = startJob();
ObjectNode res = getJobActivityGraph(jId);
Assert.assertTrue(res.has("result"));
ObjectNode actGraph = (ObjectNode) res.get("result");
Assert.assertTrue(actGraph.has("version"));
checkActivityCluster(actGraph);
stopJob(jId);
Assert.assertEquals("TERMINATED", getJobStatus(jId));
}
Usage of org.apache.hyracks.api.job.JobId in the Apache AsterixDB project: class RemoveDeadNodesWork, method run.
/**
 * Removes nodes considered dead from the node manager, notifies the executor of every
 * job that was affected by those nodes, and finally reports the node failures to the
 * cluster controller's application context. Any {@link HyracksException} is logged
 * at WARNING level rather than propagated.
 */
@Override
public void run() {
    try {
        final INodeManager nodeManager = ccs.getNodeManager();
        final Pair<Collection<String>, Collection<JobId>> removal = nodeManager.removeDeadNodes();
        final Collection<String> deadNodes = removal.getLeft();
        final Collection<JobId> affectedJobs = removal.getRight();
        final int size = affectedJobs.size();
        if (!affectedJobs.isEmpty()) {
            // Guard the string concatenation behind the level check.
            if (LOGGER.isLoggable(Level.INFO)) {
                LOGGER.info("Number of affected jobs: " + size);
            }
            final IJobManager jobManager = ccs.getJobManager();
            for (final JobId affectedJobId : affectedJobs) {
                final JobRun jobRun = jobManager.get(affectedJobId);
                // The job may have already completed/been removed; skip it then.
                if (jobRun != null) {
                    jobRun.getExecutor().notifyNodeFailures(deadNodes);
                }
            }
        }
        if (!deadNodes.isEmpty()) {
            ccs.getContext().notifyNodeFailure(deadNodes);
        }
    } catch (HyracksException e) {
        LOGGER.log(Level.WARNING, "Uncaught exception on notifyNodeFailure", e);
    }
}
Usage of org.apache.hyracks.api.job.JobId in the Apache AsterixDB project: class SecondaryIndexBulkLoadExample, method main.
/**
 * Example entry point: parses command-line options, connects to the Hyracks cluster,
 * runs the secondary-index bulk-load job to completion, and prints the start time,
 * end time, and elapsed wall-clock milliseconds to stderr.
 *
 * @param args command-line arguments parsed into {@code Options} via args4j
 * @throws Exception on argument-parsing, connection, or job-execution failure
 */
public static void main(String[] args) throws Exception {
    final Options options = new Options();
    final CmdLineParser parser = new CmdLineParser(options);
    parser.parseArgument(args);
    final IHyracksClientConnection hcc = new HyracksConnection(options.host, options.port);
    final JobSpecification job = createJob(options);
    final long startMillis = System.currentTimeMillis();
    final JobId jobId = hcc.startJob(job);
    hcc.waitForCompletion(jobId);
    final long endMillis = System.currentTimeMillis();
    System.err.println(startMillis + " " + endMillis + " " + (endMillis - startMillis));
}
Usage of org.apache.hyracks.api.job.JobId in the Apache AsterixDB project: class InsertPipelineExample, method main.
/**
 * Example entry point: parses command-line options, connects to the Hyracks cluster,
 * runs the insert-pipeline job to completion, and prints the start time, end time,
 * and elapsed wall-clock milliseconds to stderr.
 *
 * @param args command-line arguments parsed into {@code Options} via args4j
 * @throws Exception on argument-parsing, connection, or job-execution failure
 */
public static void main(String[] args) throws Exception {
    final Options options = new Options();
    new CmdLineParser(options).parseArgument(args);
    final IHyracksClientConnection connection = new HyracksConnection(options.host, options.port);
    final JobSpecification jobSpec = createJob(options);
    final long begin = System.currentTimeMillis();
    final JobId jobId = connection.startJob(jobSpec);
    connection.waitForCompletion(jobId);
    final long finish = System.currentTimeMillis();
    System.err.println(begin + " " + finish + " " + (finish - begin));
}
Aggregations