Use of edu.cmu.ml.proppr.learn.tools.StoppingCriterion in project ProPPR by TeamCohen.
In the class CachingTrainer, the method trainCached:
public ParamVector<String, ?> trainCached(List<PosNegRWExample> examples, LearningGraphBuilder builder, ParamVector<String, ?> initialParamVec, int numEpochs, TrainingStatistics total) {
    ParamVector<String, ?> paramVec = this.masterLearner.setupParams(initialParamVec);
    NamedThreadFactory trainThreads = new NamedThreadFactory("work-");
    ExecutorService trainPool;
    ExecutorService cleanPool;
    StoppingCriterion stopper = new StoppingCriterion(numEpochs, this.stoppingPercent, this.stoppingEpoch);
    boolean graphSizesStatusLog = true;
    // repeat until ready to stop
    while (!stopper.satisified()) {
        // set up current epoch
        this.epoch++;
        for (SRW learner : this.learners.values()) {
            learner.setEpoch(epoch);
            learner.clearLoss();
        }
        log.info("epoch " + epoch + " ...");
        status.tick();
        // reset counters & file pointers
        this.statistics = new TrainingStatistics();
        trainThreads.reset();
        trainPool = Executors.newFixedThreadPool(this.nthreads, trainThreads);
        cleanPool = Executors.newSingleThreadExecutor();
        // run examples
        int id = 1;
        if (this.shuffle)
            Collections.shuffle(examples);
        for (PosNegRWExample s : examples) {
            Future<ExampleStats> trained = trainPool.submit(new Train(new PretendParse(s), paramVec, id, null));
            cleanPool.submit(new TraceLosses(trained, id));
            id++;
            if (log.isInfoEnabled() && status.due(1))
                log.info("queued: " + id + " trained: " + statistics.exampleSetSize);
        }
        cleanEpoch(trainPool, cleanPool, paramVec, stopper, id, total);
        if (graphSizesStatusLog) {
            log.info("Dataset size stats: " + statistics.totalGraphSize + " total nodes / max " + statistics.maxGraphSize + " / avg " + (statistics.totalGraphSize / id));
            graphSizesStatusLog = false;
        }
    }
    log.info("Reading: " + total.readTime + " Parsing: " + total.parseTime + " Training: " + total.trainTime);
    return paramVec;
}
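Both trainers drive their epoch loop off StoppingCriterion: the constructor takes the epoch cap plus stoppingPercent and stoppingEpoch, and satisified() (the method name is spelled that way in ProPPR) decides when to quit. The sketch below illustrates one plausible reading of those parameters, namely "stop after numEpochs epochs, or once the per-epoch loss improves by less than stoppingPercent after at least stoppingEpoch epochs." The class name, method names, and exact semantics here are assumptions for illustration, not the ProPPR API.

// Hypothetical sketch of a max-epochs / minimum-improvement stopping rule.
// This is NOT edu.cmu.ml.proppr.learn.tools.StoppingCriterion; names and
// semantics are assumptions chosen to illustrate the pattern above.
public class EpochStopper {
    private final int maxEpochs;         // hard cap on epochs (numEpochs)
    private final double minPercentGain; // required % loss improvement (stoppingPercent)
    private final int minEpochs;         // never stop before this many epochs (stoppingEpoch)
    private int epoch = 0;
    private double lastLoss = Double.MAX_VALUE;
    private boolean stalled = false;

    public EpochStopper(int maxEpochs, double minPercentGain, int minEpochs) {
        this.maxEpochs = maxEpochs;
        this.minPercentGain = minPercentGain;
        this.minEpochs = minEpochs;
    }

    /** Call once per epoch with that epoch's total loss. */
    public void recordEpoch(double loss) {
        epoch++;
        double percentGain = 100.0 * (lastLoss - loss) / lastLoss;
        stalled = (epoch >= minEpochs) && (percentGain < minPercentGain);
        lastLoss = loss;
    }

    /** True when the training loop should exit. */
    public boolean satisfied() {
        return epoch >= maxEpochs || stalled;
    }
}

A driver loop shaped like trainCached would then be: while (!stopper.satisfied()) { run an epoch; stopper.recordEpoch(epochLoss); }.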
Use of edu.cmu.ml.proppr.learn.tools.StoppingCriterion in project ProPPR by TeamCohen.
In the class Trainer, the method train:
public ParamVector<String, ?> train(SymbolTable<String> masterFeatures, Iterable<String> examples, LearningGraphBuilder builder, ParamVector<String, ?> initialParamVec, int numEpochs) {
    ParamVector<String, ?> paramVec = this.masterLearner.setupParams(initialParamVec);
    if (masterFeatures.size() > 0)
        LearningGraphBuilder.setFeatures(masterFeatures);
    NamedThreadFactory workingThreads = new NamedThreadFactory("work-");
    NamedThreadFactory cleaningThreads = new NamedThreadFactory("cleanup-");
    ThreadPoolExecutor workingPool;
    ExecutorService cleanPool;
    TrainingStatistics total = new TrainingStatistics();
    StoppingCriterion stopper = new StoppingCriterion(numEpochs, this.stoppingPercent, this.stoppingEpoch);
    boolean graphSizesStatusLog = true;
    StatusLogger stattime = new StatusLogger();
    // repeat until ready to stop
    while (!stopper.satisified()) {
        // set up current epoch
        this.epoch++;
        for (SRW learner : this.learners.values()) {
            learner.setEpoch(epoch);
            learner.clearLoss();
        }
        log.info("epoch " + epoch + " ...");
        status.tick();
        // reset counters & file pointers
        this.statistics = new TrainingStatistics();
        workingThreads.reset();
        cleaningThreads.reset();
        workingPool = new ThreadPoolExecutor(this.nthreads, Integer.MAX_VALUE, 10, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), workingThreads);
        cleanPool = Executors.newSingleThreadExecutor(cleaningThreads);
        // run examples
        int id = 1;
        stattime.start();
        int countdown = -1;
        Trainer notify = null;
        for (String s : examples) {
            if (log.isDebugEnabled())
                log.debug("Queue size " + (workingPool.getTaskCount() - workingPool.getCompletedTaskCount()));
            statistics.updateReadingStatistics(stattime.sinceLast());
            /*
             * Throttling behavior:
             * Once the number of unfinished tasks exceeds 1.5x the number of threads,
             * we add a 'notify' object to the next nthreads training tasks. Then, the
             * master thread gathers 'notify' signals until the number of unfinished tasks
             * is no longer greater than the number of threads. Then we start adding tasks again.
             *
             * This works more or less fine, since the master thread stops pulling examples
             * from disk when there are then a maximum of 2.5x training examples in the queue (that's
             * the original 1.5x, which could represent a maximum of 1.5x training examples,
             * plus the nthreads training tasks with active 'notify' objects. There's an
             * additional nthreads parsing tasks in the queue but those don't take up much
             * memory so we don't care). This lets us read in a good-sized buffer without
             * blowing up the heap.
             *
             * Worst-case: None of the backlog is cleared before the master thread enters
             * the synchronized block. nthreads-1 threads will be training long jobs, and
             * the one free thread works through the 0.5x backlog and all nthreads countdown
             * examples. The notify() sent by the final countdown example will occur when
             * there are nthreads unfinished tasks in the queue, and the master thread will exit
             * the synchronized block and proceed.
             *
             * Best-case: The backlog is already cleared by the time the master thread enters
             * the synchronized block. The while() loop immediately exits, and the notify()
             * signals from the countdown examples have no effect.
             */
            if (countdown > 0) {
                if (log.isDebugEnabled())
                    log.debug("Countdown " + countdown);
                countdown--;
            } else if (countdown == 0) {
                if (log.isDebugEnabled())
                    log.debug("Countdown " + countdown + "; throttling:");
                countdown--;
                notify = null;
                try {
                    synchronized (this) {
                        if (log.isDebugEnabled())
                            log.debug("Clearing training queue...");
                        while (workingPool.getTaskCount() - workingPool.getCompletedTaskCount() > this.nthreads) this.wait();
                        if (log.isDebugEnabled())
                            log.debug("Queue cleared.");
                    }
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            } else if (workingPool.getTaskCount() - workingPool.getCompletedTaskCount() > 1.5 * this.nthreads) {
                if (log.isDebugEnabled())
                    log.debug("Starting countdown");
                countdown = this.nthreads;
                notify = this;
            }
            Future<PosNegRWExample> parsed = workingPool.submit(new Parse(s, builder, id));
            Future<ExampleStats> trained = workingPool.submit(new Train(parsed, paramVec, id, notify));
            cleanPool.submit(new TraceLosses(trained, id));
            id++;
            stattime.tick();
            if (log.isInfoEnabled() && status.due(1))
                log.info("parsed: " + id + " trained: " + statistics.exampleSetSize);
        }
        cleanEpoch(workingPool, cleanPool, paramVec, stopper, id, total);
        if (graphSizesStatusLog) {
            log.info("Dataset size stats: " + statistics.totalGraphSize + " total nodes / max " + statistics.maxGraphSize + " / avg " + (statistics.totalGraphSize / id));
            graphSizesStatusLog = false;
        }
    }
    log.info("Reading statistics: min " + total.minReadTime + " / max " + total.maxReadTime + " / total " + total.readTime);
    log.info("Parsing statistics: min " + total.minParseTime + " / max " + total.maxParseTime + " / total " + total.parseTime);
    log.info("Training statistics: min " + total.minTrainTime + " / max " + total.maxTrainTime + " / total " + total.trainTime);
    return paramVec;
}
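The long comment inside train describes a hand-rolled backpressure scheme: when the backlog of unfinished tasks exceeds 1.5x the worker count, the producer attaches a notify target to the next nthreads tasks, then blocks in a synchronized/wait loop until the backlog drains back to nthreads. Below is a self-contained sketch of that countdown/notify pattern, assuming a placeholder doWork(item) in place of ProPPR's Parse and Train tasks; it mirrors the control flow above but is not the project's code.

import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch of the countdown/notify throttling described in the
// comment inside Trainer.train. doWork(...) stands in for the Parse+Train work.
public class ThrottledProducer {
    private final int nthreads = Runtime.getRuntime().availableProcessors();
    private final ThreadPoolExecutor pool = new ThreadPoolExecutor(
            nthreads, Integer.MAX_VALUE, 10, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());

    public void submitAll(List<String> items) throws InterruptedException {
        int countdown = -1;
        ThrottledProducer notify = null;
        for (String item : items) {
            long backlog = pool.getTaskCount() - pool.getCompletedTaskCount();
            if (countdown > 0) {
                countdown--;                  // still tagging tasks with a notifier
            } else if (countdown == 0) {
                countdown--;
                notify = null;
                synchronized (this) {         // block until the backlog drains
                    while (pool.getTaskCount() - pool.getCompletedTaskCount() > nthreads)
                        this.wait();
                }
            } else if (backlog > 1.5 * nthreads) {
                countdown = nthreads;         // tag the next nthreads tasks
                notify = this;
            }
            final ThrottledProducer target = notify;
            pool.submit(() -> {
                doWork(item);
                if (target != null) {
                    synchronized (target) { target.notify(); }  // wake the waiting producer
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.HOURS);
    }

    private void doWork(String item) {
        // placeholder: parse the example and run one training step
    }
}

As the original comment notes, this only bounds the backlog (roughly 2.5x nthreads queued training tasks in the worst case); it does not eliminate queuing, it just keeps the producer from running arbitrarily far ahead of the workers.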