Use of java.util.concurrent.ArrayBlockingQueue in project incubator-gobblin by apache.
The class JobBrokerInjectionTest, method testBrokerIsAcquiredAndShared.
@Test
public void testBrokerIsAcquiredAndShared() throws Exception {
  StandardGobblinInstanceLauncher.Builder instanceLauncherBuilder =
      StandardGobblinInstanceLauncher.builder().withInstanceName("testSubmitToJobCatalog");
  instanceLauncherBuilder.driver();
  StandardGobblinInstanceLauncher instanceLauncher = instanceLauncherBuilder.build();
  instanceLauncher.startAsync();
  instanceLauncher.awaitRunning(5, TimeUnit.SECONDS);

  JobSpec js1 = JobSpec.builder()
      .withConfig(ConfigFactory.parseResources("brokerTest/SimpleHelloWorldJob.jobconf"))
      .build();
  final String eventBusId = js1.getConfig().resolve().getString(GobblinTestEventBusWriter.FULL_EVENTBUSID_KEY);
  TestingEventBusAsserter asserter = new TestingEventBusAsserter(eventBusId);

  final StandardGobblinInstanceDriver instance = (StandardGobblinInstanceDriver) instanceLauncher.getDriver();
  final ArrayBlockingQueue<JobExecutionDriver> jobDrivers = new ArrayBlockingQueue<>(1);
  JobLifecycleListener js1Listener = new FilteredJobLifecycleListener(JobSpecFilter.eqJobSpecURI(js1.getUri()),
      new DefaultJobLifecycleListenerImpl(instance.getLog()) {
        @Override
        public void onJobLaunch(JobExecutionDriver jobDriver) {
          super.onJobLaunch(jobDriver);
          try {
            jobDrivers.offer(jobDriver, 5, TimeUnit.SECONDS);
          } catch (InterruptedException e) {
            instance.getLog().error("Offer interrupted.");
          }
        }
      });
  instance.registerWeakJobLifecycleListener(js1Listener);

  instance.getMutableJobCatalog().put(js1);
  JobExecutionDriver jobDriver = jobDrivers.poll(10, TimeUnit.SECONDS);
  Assert.assertNotNull(jobDriver);
  JobExecutionResult jobResult = jobDriver.get(100000, TimeUnit.SECONDS);
  Assert.assertTrue(jobResult.isSuccessful());

  Queue<TestingEventBuses.Event> events = asserter.getEvents();
  Set<Long> seenInstanceObjectIds = Sets.newHashSet();
  Set<Long> seenJobObjectIds = Sets.newHashSet();
  Set<Long> seenTaskObjectIds = Sets.newHashSet();
  for (TestingEventBuses.Event event : events) {
    MyRecord record = (MyRecord) event.getValue();
    seenInstanceObjectIds.add(record.getInstanceSharedObjectId());
    seenJobObjectIds.add(record.getJobSharedObjectId());
    seenTaskObjectIds.add(record.getTaskSharedObjectId());
  }
  // All events should share a single instance-level and a single job-level object id,
  // while each of the 5 tasks gets its own task-level object id.
  Assert.assertEquals(seenInstanceObjectIds.size(), 1);
  Assert.assertEquals(seenJobObjectIds.size(), 1);
  Assert.assertEquals(seenTaskObjectIds.size(), 5);

  asserter.clear();
  instance.getMutableJobCatalog().remove(js1.getUri());
  instance.getMutableJobCatalog().put(js1);
  jobDriver = jobDrivers.poll(10, TimeUnit.SECONDS);
  Assert.assertNotNull(jobDriver);
  jobResult = jobDriver.get(10, TimeUnit.SECONDS);
  Assert.assertTrue(jobResult.isSuccessful());
  events = asserter.getEvents();
  for (TestingEventBuses.Event event : events) {
    MyRecord record = (MyRecord) event.getValue();
    seenInstanceObjectIds.add(record.getInstanceSharedObjectId());
    seenJobObjectIds.add(record.getJobSharedObjectId());
    seenTaskObjectIds.add(record.getTaskSharedObjectId());
  }
  // Re-running the same job spec reuses the instance-level shared object but creates a new
  // job-level object id and 5 new task-level ids.
  Assert.assertEquals(seenInstanceObjectIds.size(), 1);
  Assert.assertEquals(seenJobObjectIds.size(), 2);
  Assert.assertEquals(seenTaskObjectIds.size(), 10);
}
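The capacity-1 ArrayBlockingQueue above is used purely as a handoff point: the listener callback thread offers the JobExecutionDriver with a timeout, and the test thread polls for it with its own timeout. A minimal self-contained sketch of that pattern follows; the class and thread names here are illustrative, not Gobblin types.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class CallbackHandoffSketch {
  public static void main(String[] args) throws InterruptedException {
    // Capacity 1 is enough because only a single launch event is expected.
    BlockingQueue<String> handoff = new ArrayBlockingQueue<>(1);

    // Stand-in for the listener thread that observes the job launch.
    Thread callback = new Thread(() -> {
      try {
        // A timed offer avoids blocking the callback forever if nobody ever polls.
        handoff.offer("jobDriver-1", 5, TimeUnit.SECONDS);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
    callback.start();

    // Stand-in for the test thread: wait up to 10 seconds for the handoff.
    String driver = handoff.poll(10, TimeUnit.SECONDS);
    if (driver == null) {
      throw new AssertionError("No job driver was handed off in time");
    }
    System.out.println("Received: " + driver);
    callback.join();
  }
}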
Use of java.util.concurrent.ArrayBlockingQueue in project incubator-gobblin by apache.
The class TestStandardGobblinInstanceLauncher, method testDirectToScheduler.
/**
 * Test running of a job when submitted directly to the scheduler.
 */
@Test
public void testDirectToScheduler() throws Exception {
  StandardGobblinInstanceLauncher.Builder instanceLauncherBuilder =
      StandardGobblinInstanceLauncher.builder().withInstanceName("testDirectToScheduler");
  instanceLauncherBuilder.driver();
  StandardGobblinInstanceLauncher instanceLauncher = instanceLauncherBuilder.build();
  instanceLauncher.startAsync();
  instanceLauncher.awaitRunning(5, TimeUnit.SECONDS);

  JobSpec js1 = JobSpec.builder()
      .withConfig(ConfigFactory.parseResources("gobblin/runtime/instance/SimpleHelloWorldJob.jobconf"))
      .build();
  final StandardGobblinInstanceDriver instance = (StandardGobblinInstanceDriver) instanceLauncher.getDriver();
  final ArrayBlockingQueue<JobExecutionDriver> jobDrivers = new ArrayBlockingQueue<>(1);
  JobLifecycleListener js1Listener = new FilteredJobLifecycleListener(JobSpecFilter.eqJobSpecURI(js1.getUri()),
      new DefaultJobLifecycleListenerImpl(instance.getLog()) {
        @Override
        public void onJobLaunch(JobExecutionDriver jobDriver) {
          super.onJobLaunch(jobDriver);
          try {
            jobDrivers.offer(jobDriver, 5, TimeUnit.SECONDS);
          } catch (InterruptedException e) {
            instance.getLog().error("Offer interrupted.");
          }
        }
      });
  instance.registerWeakJobLifecycleListener(js1Listener);

  JobSpecRunnable js1Runnable = instance.createJobSpecRunnable(js1);
  instance.getJobScheduler().scheduleOnce(js1, js1Runnable);
  JobExecutionDriver jobDriver = jobDrivers.poll(10, TimeUnit.SECONDS);
  Assert.assertNotNull(jobDriver);
  JobExecutionResult jobResult = jobDriver.get(5, TimeUnit.SECONDS);
  Assert.assertTrue(jobResult.isSuccessful());

  instanceLauncher.stopAsync();
  instanceLauncher.awaitTerminated(5, TimeUnit.SECONDS);
}
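Both tests discard the boolean returned by offer(jobDriver, 5, TimeUnit.SECONDS); if a second launch fired while the capacity-1 queue was still full, the timed offer would eventually return false and that driver would be dropped silently. A hedged sketch of checking that result (illustrative names, not Gobblin code):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class OfferResultSketch {
  public static void main(String[] args) throws InterruptedException {
    BlockingQueue<String> jobDrivers = new ArrayBlockingQueue<>(1);
    jobDrivers.put("first-driver"); // fills the capacity-1 queue

    // On a full queue the timed offer waits up to the timeout and then returns false.
    boolean handedOff = jobDrivers.offer("second-driver", 1, TimeUnit.SECONDS);
    if (!handedOff) {
      System.err.println("Timed out handing off the second driver; queue still full");
    }
  }
}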
Use of java.util.concurrent.ArrayBlockingQueue in project twister2 by DSC-SPIDAL.
The class MPIDataFlowPartition, method init.
/**
* Initialize
*/
public void init(Config cfg, MessageType t, TaskPlan taskPlan, int edge) {
  this.thisSources = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
  LOG.info(String.format("%d setup loadbalance routing %s", taskPlan.getThisExecutor(), thisSources));
  this.thisTasks = taskPlan.getTasksOfThisExecutor();
  this.router = new PartitionRouter(taskPlan, sources, destinations);
  Map<Integer, Set<Integer>> internal = router.getInternalSendTasks(0);
  Map<Integer, Set<Integer>> external = router.getExternalSendTasks(0);
  this.instancePlan = taskPlan;
  this.type = t;

  LOG.log(Level.FINE, String.format("%d adding internal/external routing", taskPlan.getThisExecutor()));
  for (int s : thisSources) {
    Set<Integer> internalSendTasks = internal.get(s);
    if (internalSendTasks != null) {
      this.dests.internal.addAll(internalSendTasks);
    }
    Set<Integer> externalSendTasks = external.get(s);
    if (externalSendTasks != null) {
      this.dests.external.addAll(externalSendTasks);
    }
    LOG.fine(String.format("%d adding internal/external routing %d", taskPlan.getThisExecutor(), s));
    break;
  }
  LOG.log(Level.FINE, String.format("%d done adding internal/external routing", taskPlan.getThisExecutor()));

  // TODO: does this send the correct receiveExpectedTaskIds for partition communication?
  if (this.finalReceiver != null && isLastReceiver()) {
    this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
  }

  Map<Integer, ArrayBlockingQueue<Pair<Object, MPISendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
  Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
  Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();

  Set<Integer> srcs = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
  for (int s : srcs) {
    // later look at how not to allocate pairs for this each time
    ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages =
        new ArrayBlockingQueue<Pair<Object, MPISendMessage>>(MPIContext.sendPendingMax(cfg));
    pendingSendMessagesPerSource.put(s, pendingSendMessages);
    serializerMap.put(s, new MPIMessageSerializer(new KryoSerializer()));
  }

  int maxReceiveBuffers = MPIContext.receiveBufferCount(cfg);
  int receiveExecutorsSize = receivingExecutors().size();
  if (receiveExecutorsSize == 0) {
    receiveExecutorsSize = 1;
  }
  Set<Integer> execs = router.receivingExecutors();
  for (int e : execs) {
    int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
    Queue<Pair<Object, MPIMessage>> pendingReceiveMessages =
        new ArrayBlockingQueue<Pair<Object, MPIMessage>>(capacity);
    pendingReceiveMessagesPerSource.put(e, pendingReceiveMessages);
    pendingReceiveDeSerializations.put(e, new ArrayBlockingQueue<MPIMessage>(capacity));
    deSerializerMap.put(e, new MPIMessageDeSerializer(new KryoSerializer()));
  }

  for (int src : srcs) {
    for (int dest : destinations) {
      sendRoutingParameters(src, dest);
    }
  }

  delegete.setCompletionListener(completionListener);
  delegete.init(cfg, t, taskPlan, edge, router.receivingExecutors(), router.isLastReceiver(), this,
      pendingSendMessagesPerSource, pendingReceiveMessagesPerSource, pendingReceiveDeSerializations,
      serializerMap, deSerializerMap, isKeyed);
  delegete.setKeyType(keyType);
}
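In this init() method ArrayBlockingQueue serves as a bounded per-source buffer for pending send messages, with the capacity read from MPIContext.sendPendingMax(cfg). The point of the bound is that a plain offer() returns false when the queue is full, so the caller can apply back-pressure instead of queuing without limit. A rough sketch of that idea, using hypothetical names and a hard-coded capacity in place of the twister2 types and configuration:

import java.util.HashMap;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class PendingSendSketch {
  // Assumed capacity; in the real code this comes from MPIContext.sendPendingMax(cfg).
  private static final int SEND_PENDING_MAX = 16;

  private final Map<Integer, Queue<String>> pendingSendPerSource = new HashMap<>();

  // Register one bounded queue per local source task.
  public void initSource(int source) {
    pendingSendPerSource.put(source, new ArrayBlockingQueue<>(SEND_PENDING_MAX));
  }

  // offer(...) on a full ArrayBlockingQueue returns false instead of blocking,
  // so the caller can signal back-pressure to the producing task.
  public boolean trySend(int source, String message) {
    Queue<String> pending = pendingSendPerSource.get(source);
    return pending != null && pending.offer(message);
  }

  public static void main(String[] args) {
    PendingSendSketch sketch = new PendingSendSketch();
    sketch.initSource(0);
    for (int i = 0; i < 20; i++) {
      boolean accepted = sketch.trySend(0, "msg-" + i);
      System.out.println("msg-" + i + (accepted ? " queued" : " rejected (queue full)"));
    }
  }
}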
Use of java.util.concurrent.ArrayBlockingQueue in project twister2 by DSC-SPIDAL.
The class MPIDirectDataFlowCommunication, method init.
/**
 * Initialize
 * @param cfg the configuration
 * @param t the message type
 * @param taskPlan the task plan of this executor
 * @param edge the edge identifier for this communication
 */
public void init(Config cfg, MessageType t, TaskPlan taskPlan, int edge) {
  this.router = new DirectRouter(taskPlan, sources, destination);
  if (this.finalReceiver != null && isLastReceiver()) {
    this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
  }

  Map<Integer, ArrayBlockingQueue<Pair<Object, MPISendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
  Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
  Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();

  Set<Integer> srcs = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
  for (int s : srcs) {
    // later look at how not to allocate pairs for this each time
    ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages =
        new ArrayBlockingQueue<Pair<Object, MPISendMessage>>(MPIContext.sendPendingMax(cfg));
    pendingSendMessagesPerSource.put(s, pendingSendMessages);
    pendingReceiveDeSerializations.put(s, new ArrayBlockingQueue<MPIMessage>(MPIContext.sendPendingMax(cfg)));
    serializerMap.put(s, new MPIMessageSerializer(new KryoSerializer()));
  }

  MessageDeSerializer messageDeSerializer = new MPIMessageDeSerializer(new KryoSerializer());
  deSerializerMap.put(destination, messageDeSerializer);

  delegete.init(cfg, t, taskPlan, edge, router.receivingExecutors(), isLastReceiver(), this,
      pendingSendMessagesPerSource, pendingReceiveMessagesPerSource, pendingReceiveDeSerializations,
      serializerMap, deSerializerMap, false);
}
Use of java.util.concurrent.ArrayBlockingQueue in project twister2 by DSC-SPIDAL.
The class MPIDataFlowGather, method init.
/**
 * Initialize
 * @param cfg the configuration
 * @param t the message type
 * @param taskPlan the task plan of this executor
 * @param edge the edge identifier for this communication
 */
public void init(Config cfg, MessageType t, TaskPlan taskPlan, int edge) {
  this.type = t;
  this.instancePlan = taskPlan;
  this.executor = taskPlan.getThisExecutor();
  // we only have one path
  this.router = new InvertedBinaryTreeRouter(cfg, taskPlan, destination, sources, index);

  // initialize the receivers
  if (this.partialReceiver != null && !isLastReceiver()) {
    partialReceiver.init(cfg, this, receiveExpectedTaskIds());
  }
  if (this.finalReceiver != null && isLastReceiver()) {
    this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
  }

  Map<Integer, ArrayBlockingQueue<Pair<Object, MPISendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
  Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
  Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();

  Set<Integer> srcs = router.sendQueueIds();
  for (int s : srcs) {
    // later look at how not to allocate pairs for this each time
    ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages =
        new ArrayBlockingQueue<Pair<Object, MPISendMessage>>(MPIContext.sendPendingMax(cfg));
    pendingSendMessagesPerSource.put(s, pendingSendMessages);
    serializerMap.put(s, new MPIMultiMessageSerializer(new KryoSerializer(), executor));
  }

  int maxReceiveBuffers = MPIContext.receiveBufferCount(cfg);
  int receiveExecutorsSize = receivingExecutors().size();
  if (receiveExecutorsSize == 0) {
    receiveExecutorsSize = 1;
  }
  Set<Integer> execs = router.receivingExecutors();
  for (int e : execs) {
    int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
    Queue<Pair<Object, MPIMessage>> pendingReceiveMessages =
        new ArrayBlockingQueue<Pair<Object, MPIMessage>>(capacity);
    pendingReceiveMessagesPerSource.put(e, pendingReceiveMessages);
    pendingReceiveDeSerializations.put(e, new ArrayBlockingQueue<MPIMessage>(capacity));
    deSerializerMap.put(e, new MPIMultiMessageDeserializer(new KryoSerializer(), executor));
  }

  Set<Integer> sourcesOfThisExec = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
  for (int s : sourcesOfThisExec) {
    sendRoutingParameters(s, pathToUse);
    partialSendRoutingParameters(s, pathToUse);
  }

  delegete.init(cfg, t, taskPlan, edge, router.receivingExecutors(), router.isLastReceiver(), this,
      pendingSendMessagesPerSource, pendingReceiveMessagesPerSource, pendingReceiveDeSerializations,
      serializerMap, deSerializerMap, isKeyed);
  delegete.setKeyType(keyType);
}
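The receive side in both MPIDataFlowPartition.init and MPIDataFlowGather.init sizes each executor's queues as maxReceiveBuffers * 2 * receiveExecutorsSize, with a floor of one executor so the capacity never becomes zero. A small sketch of just that sizing logic, with assumed values standing in for MPIContext.receiveBufferCount(cfg) and the router's executor set:

import java.util.HashMap;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;

public class ReceiveQueueSizingSketch {
  public static void main(String[] args) {
    // Assumed values; the real code reads maxReceiveBuffers from
    // MPIContext.receiveBufferCount(cfg) and the executors from the router.
    int maxReceiveBuffers = 4;
    Set<Integer> receivingExecutors = Set.of(1, 2, 3);

    int receiveExecutorsSize = receivingExecutors.size();
    if (receiveExecutorsSize == 0) {
      receiveExecutorsSize = 1; // avoid a zero-capacity queue when there are no remote senders
    }

    // Same sizing rule as the init() methods above.
    int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;

    Map<Integer, Queue<String>> pendingReceivePerExecutor = new HashMap<>();
    for (int executor : receivingExecutors) {
      pendingReceivePerExecutor.put(executor, new ArrayBlockingQueue<>(capacity));
    }
    System.out.println("Created " + pendingReceivePerExecutor.size()
        + " receive queues with capacity " + capacity);
  }
}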