Use of org.quartz.impl.StdSchedulerFactory in project dq-easy-cloud by dq-open-cloud.
The class RemoteServerExample, method run:
/**
 * Starts a bare scheduler and keeps the JVM alive for ten minutes so that a
 * remote client can connect and schedule jobs against it. No jobs are
 * scheduled locally.
 *
 * @author James House, Bill Kratzer
 * @throws Exception if the scheduler cannot be created or started
 */
public void run() throws Exception {
    Logger log = LoggerFactory.getLogger(RemoteServerExample.class);
    // First we must get a reference to a scheduler
    SchedulerFactory sf = new StdSchedulerFactory();
    Scheduler sched = sf.getScheduler();
    log.info("------- Initialization Complete -----------");
    log.info("------- (Not Scheduling any Jobs - relying on a remote client to schedule jobs --");
    log.info("------- Starting Scheduler ----------------");
    // start the scheduler
    sched.start();
    log.info("------- Started Scheduler -----------------");
    log.info("------- Waiting ten minutes... ------------");
    // wait ten minutes to give remotely scheduled jobs a chance to run
    try {
        Thread.sleep(600L * 1000L);
    } catch (InterruptedException e) {
        // restore the interrupt status rather than silently swallowing it
        Thread.currentThread().interrupt();
    }
    // shut down the scheduler, waiting for currently executing jobs to finish
    log.info("------- Shutting Down ---------------------");
    sched.shutdown(true);
    log.info("------- Shutdown Complete -----------------");
    SchedulerMetaData metaData = sched.getMetaData();
    log.info("Executed {} jobs.", metaData.getNumberOfJobsExecuted());
}
Use of org.quartz.impl.StdSchedulerFactory in project dq-easy-cloud by dq-open-cloud.
The class ClusterExample, method run:
/**
 * Demonstrates clustered scheduling: optionally clears existing job data,
 * optionally schedules five recoverable jobs keyed to this scheduler
 * instance, then runs the scheduler for one hour.
 *
 * @param inClearJobs    whether to delete all existing jobs/triggers first
 * @param inScheduleJobs whether this node should schedule the example jobs
 * @throws Exception if the scheduler cannot be created, cleared, or started
 */
public void run(boolean inClearJobs, boolean inScheduleJobs) throws Exception {
    // First we must get a reference to a scheduler
    SchedulerFactory sf = new StdSchedulerFactory();
    Scheduler sched = sf.getScheduler();
    if (inClearJobs) {
        _log.warn("***** Deleting existing jobs/triggers *****");
        sched.clear();
    }
    _log.info("------- Initialization Complete -----------");
    if (inScheduleJobs) {
        _log.info("------- Scheduling Jobs ------------------");
        // use the scheduler instance id as the group so each cluster node
        // owns its own job/trigger keys
        String schedId = sched.getSchedulerInstanceId();
        int count = 1;
        // job 1: recoverable job, starts in 1 second, 20 repeats every 5 seconds
        JobDetail job = newJob(SimpleRecoveryJob.class).withIdentity("job_" + count, schedId).requestRecovery().build();
        SimpleTrigger trigger = newTrigger().withIdentity("trigger_" + count, schedId).startAt(futureDate(1, IntervalUnit.SECOND)).withSchedule(simpleSchedule().withRepeatCount(20).withIntervalInSeconds(5)).build();
        _log.info(job.getKey() + " will run at: " + trigger.getNextFireTime() + " and repeat: " + trigger.getRepeatCount() + " times, every " + trigger.getRepeatInterval() / 1000 + " seconds");
        sched.scheduleJob(job, trigger);
        count++;
        // job 2: recoverable job, starts in 2 seconds, 20 repeats every 5 seconds
        job = newJob(SimpleRecoveryJob.class).withIdentity("job_" + count, schedId).requestRecovery().build();
        trigger = newTrigger().withIdentity("trigger_" + count, schedId).startAt(futureDate(2, IntervalUnit.SECOND)).withSchedule(simpleSchedule().withRepeatCount(20).withIntervalInSeconds(5)).build();
        _log.info(job.getKey() + " will run at: " + trigger.getNextFireTime() + " and repeat: " + trigger.getRepeatCount() + " times, every " + trigger.getRepeatInterval() / 1000 + " seconds");
        sched.scheduleJob(job, trigger);
        count++;
        // job 3: stateful (non-concurrent) recoverable job, every 3 seconds
        job = newJob(SimpleRecoveryStatefulJob.class).withIdentity("job_" + count, schedId).requestRecovery().build();
        trigger = newTrigger().withIdentity("trigger_" + count, schedId).startAt(futureDate(1, IntervalUnit.SECOND)).withSchedule(simpleSchedule().withRepeatCount(20).withIntervalInSeconds(3)).build();
        _log.info(job.getKey() + " will run at: " + trigger.getNextFireTime() + " and repeat: " + trigger.getRepeatCount() + " times, every " + trigger.getRepeatInterval() / 1000 + " seconds");
        sched.scheduleJob(job, trigger);
        count++;
        // job 4: recoverable job, every 4 seconds
        job = newJob(SimpleRecoveryJob.class).withIdentity("job_" + count, schedId).requestRecovery().build();
        trigger = newTrigger().withIdentity("trigger_" + count, schedId).startAt(futureDate(1, IntervalUnit.SECOND)).withSchedule(simpleSchedule().withRepeatCount(20).withIntervalInSeconds(4)).build();
        _log.info(job.getKey() + " will run at: " + trigger.getNextFireTime() + " & repeat: " + trigger.getRepeatCount() + "/" + trigger.getRepeatInterval());
        sched.scheduleJob(job, trigger);
        count++;
        // job 5: recoverable job, every 4.5 seconds (millisecond interval)
        job = newJob(SimpleRecoveryJob.class).withIdentity("job_" + count, schedId).requestRecovery().build();
        trigger = newTrigger().withIdentity("trigger_" + count, schedId).startAt(futureDate(1, IntervalUnit.SECOND)).withSchedule(simpleSchedule().withRepeatCount(20).withIntervalInMilliseconds(4500L)).build();
        _log.info(job.getKey() + " will run at: " + trigger.getNextFireTime() + " & repeat: " + trigger.getRepeatCount() + "/" + trigger.getRepeatInterval());
        sched.scheduleJob(job, trigger);
    }
    // jobs don't start firing until start() has been called...
    _log.info("------- Starting Scheduler ---------------");
    sched.start();
    _log.info("------- Started Scheduler ----------------");
    _log.info("------- Waiting for one hour... ----------");
    try {
        Thread.sleep(3600L * 1000L);
    } catch (InterruptedException e) {
        // restore the interrupt status rather than silently swallowing it
        Thread.currentThread().interrupt();
    }
    _log.info("------- Shutting Down --------------------");
    sched.shutdown();
    _log.info("------- Shutdown Complete ----------------");
}
Use of org.quartz.impl.StdSchedulerFactory in project dq-easy-cloud by dq-open-cloud.
The class InterruptExample, method run:
/**
 * Demonstrates interrupting a running job: schedules an interruptable job
 * that fires every 5 seconds, then asks the scheduler to interrupt it every
 * 7 seconds for 50 iterations before shutting down.
 *
 * @throws Exception if the scheduler cannot be created or started
 */
public void run() throws Exception {
    final Logger log = LoggerFactory.getLogger(InterruptExample.class);
    log.info("------- Initializing ----------------------");
    // First we must get a reference to a scheduler
    SchedulerFactory sf = new StdSchedulerFactory();
    Scheduler sched = sf.getScheduler();
    log.info("------- Initialization Complete -----------");
    log.info("------- Scheduling Jobs -------------------");
    // get a "nice round" time a few seconds in the future...
    Date startTime = nextGivenSecondDate(null, 15);
    JobDetail job = newJob(DumbInterruptableJob.class).withIdentity("interruptableJob1", "group1").build();
    SimpleTrigger trigger = newTrigger().withIdentity("trigger1", "group1").startAt(startTime).withSchedule(simpleSchedule().withIntervalInSeconds(5).repeatForever()).build();
    Date ft = sched.scheduleJob(job, trigger);
    log.info(job.getKey() + " will run at: " + ft + " and repeat: " + trigger.getRepeatCount() + " times, every " + trigger.getRepeatInterval() / 1000 + " seconds");
    // start up the scheduler (jobs do not start to fire until
    // the scheduler has been started)
    sched.start();
    log.info("------- Started Scheduler -----------------");
    log.info("------- Starting loop to interrupt job every 7 seconds ----------");
    for (int i = 0; i < 50; i++) {
        try {
            Thread.sleep(7000L);
            // tell the scheduler to interrupt our job
            sched.interrupt(job.getKey());
        } catch (InterruptedException e) {
            // this thread was interrupted: restore the flag and stop looping
            Thread.currentThread().interrupt();
            break;
        } catch (SchedulerException e) {
            // surface scheduler failures instead of swallowing them silently
            log.error("Unable to interrupt job " + job.getKey(), e);
        }
    }
    log.info("------- Shutting Down ---------------------");
    sched.shutdown(true);
    log.info("------- Shutdown Complete -----------------");
    SchedulerMetaData metaData = sched.getMetaData();
    log.info("Executed {} jobs.", metaData.getNumberOfJobsExecuted());
}
Use of org.quartz.impl.StdSchedulerFactory in project dq-easy-cloud by dq-open-cloud.
The class ListenerExample, method run:
/**
 * Demonstrates job listeners: schedules a job to run immediately, attaches a
 * {@code Job1Listener} matched to that job's key, runs the scheduler for 30
 * seconds (during which the listener is notified, and may itself schedule
 * follow-up work), then shuts down.
 *
 * @throws Exception if the scheduler cannot be created or started
 */
public void run() throws Exception {
    Logger log = LoggerFactory.getLogger(ListenerExample.class);
    log.info("------- Initializing ----------------------");
    // First we must get a reference to a scheduler
    SchedulerFactory sf = new StdSchedulerFactory();
    Scheduler sched = sf.getScheduler();
    log.info("------- Initialization Complete -----------");
    log.info("------- Scheduling Jobs -------------------");
    // schedule a job to run immediately
    JobDetail job = newJob(SimpleJob1.class).withIdentity("job1").build();
    Trigger trigger = newTrigger().withIdentity("trigger1").startNow().build();
    // Set up the listener, matched only to this job's key
    JobListener listener = new Job1Listener();
    Matcher<JobKey> matcher = KeyMatcher.keyEquals(job.getKey());
    sched.getListenerManager().addJobListener(listener, matcher);
    // schedule the job to run
    sched.scheduleJob(job, trigger);
    // All of the jobs have been added to the scheduler, but none of the jobs
    // will run until the scheduler has been started
    log.info("------- Starting Scheduler ----------------");
    sched.start();
    // give job1 (and anything the listener triggers) time to execute
    log.info("------- Waiting 30 seconds... --------------");
    try {
        Thread.sleep(30L * 1000L);
    } catch (InterruptedException e) {
        // restore the interrupt status rather than silently swallowing it
        Thread.currentThread().interrupt();
    }
    // shut down the scheduler, waiting for running jobs to complete
    log.info("------- Shutting Down ---------------------");
    sched.shutdown(true);
    log.info("------- Shutdown Complete -----------------");
    SchedulerMetaData metaData = sched.getMetaData();
    log.info("Executed {} jobs.", metaData.getNumberOfJobsExecuted());
}
Use of org.quartz.impl.StdSchedulerFactory in project dq-easy-cloud by dq-open-cloud.
The class ClusterExample, method run:
/**
 * Demonstrates clustered scheduling: optionally clears existing job data,
 * optionally schedules five recoverable jobs keyed to this scheduler
 * instance, then runs the scheduler for one hour.
 *
 * @param inClearJobs    whether to delete all existing jobs/triggers first
 * @param inScheduleJobs whether this node should schedule the example jobs
 * @throws Exception if the scheduler cannot be created, cleared, or started
 */
public void run(boolean inClearJobs, boolean inScheduleJobs) throws Exception {
    // First we must get a reference to a scheduler
    SchedulerFactory sf = new StdSchedulerFactory();
    Scheduler sched = sf.getScheduler();
    if (inClearJobs) {
        _log.warn("***** Deleting existing jobs/triggers *****");
        sched.clear();
    }
    _log.info("------- Initialization Complete -----------");
    if (inScheduleJobs) {
        _log.info("------- Scheduling Jobs ------------------");
        // use the scheduler instance id as the group so each cluster node
        // owns its own job/trigger keys
        String schedId = sched.getSchedulerInstanceId();
        int count = 1;
        // job 1: recoverable job, starts in 1 second, 20 repeats every 5 seconds
        JobDetail job = newJob(SimpleRecoveryJob.class).withIdentity("job_" + count, schedId).requestRecovery().build();
        SimpleTrigger trigger = newTrigger().withIdentity("trigger_" + count, schedId).startAt(futureDate(1, IntervalUnit.SECOND)).withSchedule(simpleSchedule().withRepeatCount(20).withIntervalInSeconds(5)).build();
        _log.info(job.getKey() + " will run at: " + trigger.getNextFireTime() + " and repeat: " + trigger.getRepeatCount() + " times, every " + trigger.getRepeatInterval() / 1000 + " seconds");
        sched.scheduleJob(job, trigger);
        count++;
        // job 2: recoverable job, starts in 2 seconds, 20 repeats every 5 seconds
        job = newJob(SimpleRecoveryJob.class).withIdentity("job_" + count, schedId).requestRecovery().build();
        trigger = newTrigger().withIdentity("trigger_" + count, schedId).startAt(futureDate(2, IntervalUnit.SECOND)).withSchedule(simpleSchedule().withRepeatCount(20).withIntervalInSeconds(5)).build();
        _log.info(job.getKey() + " will run at: " + trigger.getNextFireTime() + " and repeat: " + trigger.getRepeatCount() + " times, every " + trigger.getRepeatInterval() / 1000 + " seconds");
        sched.scheduleJob(job, trigger);
        count++;
        // job 3: stateful (non-concurrent) recoverable job, every 3 seconds
        job = newJob(SimpleRecoveryStatefulJob.class).withIdentity("job_" + count, schedId).requestRecovery().build();
        trigger = newTrigger().withIdentity("trigger_" + count, schedId).startAt(futureDate(1, IntervalUnit.SECOND)).withSchedule(simpleSchedule().withRepeatCount(20).withIntervalInSeconds(3)).build();
        _log.info(job.getKey() + " will run at: " + trigger.getNextFireTime() + " and repeat: " + trigger.getRepeatCount() + " times, every " + trigger.getRepeatInterval() / 1000 + " seconds");
        sched.scheduleJob(job, trigger);
        count++;
        // job 4: recoverable job, every 4 seconds
        job = newJob(SimpleRecoveryJob.class).withIdentity("job_" + count, schedId).requestRecovery().build();
        trigger = newTrigger().withIdentity("trigger_" + count, schedId).startAt(futureDate(1, IntervalUnit.SECOND)).withSchedule(simpleSchedule().withRepeatCount(20).withIntervalInSeconds(4)).build();
        _log.info(job.getKey() + " will run at: " + trigger.getNextFireTime() + " & repeat: " + trigger.getRepeatCount() + "/" + trigger.getRepeatInterval());
        sched.scheduleJob(job, trigger);
        count++;
        // job 5: recoverable job, every 4.5 seconds (millisecond interval)
        job = newJob(SimpleRecoveryJob.class).withIdentity("job_" + count, schedId).requestRecovery().build();
        trigger = newTrigger().withIdentity("trigger_" + count, schedId).startAt(futureDate(1, IntervalUnit.SECOND)).withSchedule(simpleSchedule().withRepeatCount(20).withIntervalInMilliseconds(4500L)).build();
        _log.info(job.getKey() + " will run at: " + trigger.getNextFireTime() + " & repeat: " + trigger.getRepeatCount() + "/" + trigger.getRepeatInterval());
        sched.scheduleJob(job, trigger);
    }
    // jobs don't start firing until start() has been called...
    _log.info("------- Starting Scheduler ---------------");
    sched.start();
    _log.info("------- Started Scheduler ----------------");
    _log.info("------- Waiting for one hour... ----------");
    try {
        Thread.sleep(3600L * 1000L);
    } catch (InterruptedException e) {
        // restore the interrupt status rather than silently swallowing it
        Thread.currentThread().interrupt();
    }
    _log.info("------- Shutting Down --------------------");
    sched.shutdown();
    _log.info("------- Shutdown Complete ----------------");
}
Aggregations