Use of edu.rit.util.Range in project ffx by mjschnie:
class WorkerLongStrideForLoop, method masterExecuteNonFixed.
/**
 * Execute this worker for loop in the master thread with a non-fixed
 * schedule. The master hands out chunks of the loop index range on demand:
 * each worker receives an initial chunk, and every time a worker reports
 * results the master sends that worker its next chunk, or a null chunk to
 * signal that the schedule is exhausted.
 *
 * @param range Loop index range.
 * @param sch Schedule.
 *
 * @exception IOException Thrown if an I/O error occurred.
 */
void masterExecuteNonFixed(LongRange range, LongSchedule sch) throws IOException {
    int count = myTeam.count;
    sch.start(count, range);
    // Number of workers that still have outstanding work.
    int remaining = count;
    ObjectItemBuf<LongRange> buf = ObjectBuf.buffer();
    // Message tags identify which worker a message belongs to, so the
    // master can receive from any worker within this tag range.
    Range tagRange = new Range(tagFor(0), tagFor(count - 1));
    Comm comm = myTeam.comm;
    // Send initial task to each worker.
    for (int w = 0; w < count; ++w) {
        LongRange chunk = sch.next(w);
        buf.item = chunk;
        // reset() is required after mutating buf.item so the buffer
        // serializes the new contents on the next send.
        buf.reset();
        int r = myTeam.workerRank(w);
        int tag = tagFor(w);
        comm.send(r, tag, buf);
        if (chunk == null) {
            // The schedule had no work at all for this worker.
            --remaining;
        } else {
            sendTaskInput(chunk, comm, r, tag);
        }
    }
    // Receive results from whichever worker finishes, then send the next chunk to
    // that worker.
    while (remaining > 0) {
        CommStatus status = comm.receive(null, tagRange, buf);
        LongRange chunk = buf.item;
        int r = status.fromRank;
        int tag = status.tag;
        // Recover the worker index from the message tag.
        int w = workerFor(tag);
        receiveTaskOutput(chunk, comm, r, tag);
        chunk = sch.next(w);
        buf.item = chunk;
        buf.reset();
        comm.send(r, tag, buf);
        if (chunk == null) {
            // Schedule exhausted; this worker is done.
            --remaining;
        } else {
            sendTaskInput(chunk, comm, r, tag);
        }
    }
}
Use of edu.rit.util.Range in project ffx by mjschnie:
class GuidedIntegerSchedule, method next.
/**
 * {@inheritDoc}
 *
 * Returns the next chunk of iterations for the given thread index, or null
 * once the loop range is exhausted. The chunk size follows a guided
 * schedule: the larger of N2 and a 1/two_K share of the remaining
 * iterations. The returned range object's stride is the same as that given
 * to the <TT>start()</TT> method, and its lower and upper bounds are
 * contained within the range given to the <TT>start()</TT> method.
 * <P>
 * The <TT>next()</TT> method is called by multiple parallel team threads in
 * the Parallel Java middleware; the shared progress counter N1 is advanced
 * with a lock-free compare-and-set retry loop, making this method multiple
 * thread safe.
 */
public Range next(int theThreadIndex) {
    while (true) {
        // Snapshot how many iterations have been claimed so far.
        int claimed = N1.get();
        // Guided chunk size: at least N2, otherwise a 1/two_K share of
        // whatever remains.
        int size = Math.max(N2, (myLoopRangeLength - claimed) / two_K);
        Range chunk = myLoopRange.chunk(claimed, size);
        int length = chunk.length();
        if (length == 0) {
            // Nothing left to hand out.
            return null;
        }
        // Publish the claim; if another thread raced ahead, retry with
        // the updated counter.
        if (N1.compareAndSet(claimed, claimed + length)) {
            return chunk;
        }
    }
}
Use of edu.rit.util.Range in project ffx by mjschnie:
class ParticleMeshEwaldCart, method ligandElec.
/**
 * 3.) Aperiodic ligand electrostatics.
 *
 * A.) Real space with an Ewald coefficient of 0.0 (no reciprocal space).
 *
 * B.) Polarization scaled as in Step 2 by (1 - lambda).
 *
 * The method saves the condensed-phase PME state (Ewald parameters,
 * parallel schedules, crystal, neighbor lists, GK flag, SCF solver), swaps
 * in the vapor equivalents, computes the energy, and restores every saved
 * value before returning.
 */
private double ligandElec() {
    // Flag the ligand atoms (those with lambda applied) as the only atoms in use.
    for (int i = 0; i < nAtoms; i++) {
        use[i] = atoms[i].applyLambda();
    }
    /**
     * Scale the permanent vacuum electrostatics. The softcore alpha is not
     * necessary (nothing in vacuum to collide with), so the lAlpha terms
     * are saved and zeroed for this evaluation.
     */
    doPermanentRealSpace = true;
    permanentScale = 1.0 - lPowPerm;
    dEdLSign = -1.0;
    double lAlphaBack = lAlpha;
    double dlAlphaBack = dlAlpha;
    double d2lAlphaBack = d2lAlpha;
    lAlpha = 0.0;
    dlAlpha = 0.0;
    d2lAlpha = 0.0;
    /**
     * Polarization is evaluated only while lambda is within the
     * polarization window (lambda <= polLambdaEnd); past the end of the
     * window only the condensed phase contribution is necessary, so the
     * vacuum polarization is skipped.
     */
    if (lambda <= polLambdaEnd) {
        doPolarization = true;
        polarizationScale = 1.0 - lPowPol;
    } else {
        doPolarization = false;
        polarizationScale = 0.0;
    }
    /**
     * Save the current real space PME parameters, then disable the cutoff
     * and reciprocal space (aewald = 0) for the aperiodic evaluation.
     */
    double offBack = off;
    double aewaldBack = aewald;
    off = Double.MAX_VALUE;
    aewald = 0.0;
    setEwaldParameters(off, aewald);
    /**
     * Save the current parallelization schedule and switch to the vapor
     * schedules/ranges.
     */
    IntegerSchedule permanentScheduleBack = permanentSchedule;
    IntegerSchedule ewaldScheduleBack = realSpaceSchedule;
    Range[] rangesBack = realSpaceRanges;
    permanentSchedule = vaporPermanentSchedule;
    realSpaceSchedule = vaporEwaldSchedule;
    realSpaceRanges = vacuumRanges;
    /**
     * Use vacuum crystal / vacuum neighborLists (aperiodic, no symmetry
     * mates).
     */
    Crystal crystalBack = crystal;
    int nSymmBack = nSymm;
    int[][][] listsBack = neighborLists;
    neighborLists = vaporLists;
    crystal = vaporCrystal;
    nSymm = 1;
    /**
     * Turn off GK if in use, unless the ligand GK decoupling path is
     * active (see below).
     */
    boolean gkBack = generalizedKirkwoodTerm;
    /**
     * Turn off Pre-conditioned conjugate gradient SCF solver; plain SOR is
     * used for the vacuum system.
     */
    SCFAlgorithm scfBack = scfAlgorithm;
    scfAlgorithm = SCFAlgorithm.SOR;
    if (doLigandGKElec) {
        // Configure GK for the vapor system before the energy evaluation.
        generalizedKirkwoodTerm = true;
        generalizedKirkwood.setNeighborList(vaporLists);
        generalizedKirkwood.setLambda(lambda);
        generalizedKirkwood.setCutoff(off);
        generalizedKirkwood.setCrystal(vaporCrystal);
        generalizedKirkwood.setLambdaFunction(polarizationScale, dEdLSign * dlPowPol, dEdLSign * d2lPowPol);
    } else {
        generalizedKirkwoodTerm = false;
    }
    double energy = computeEnergy(false);
    /**
     * Revert to the saved parameters (mirror image of the swaps above).
     */
    off = offBack;
    aewald = aewaldBack;
    setEwaldParameters(off, aewald);
    neighborLists = listsBack;
    crystal = crystalBack;
    nSymm = nSymmBack;
    permanentSchedule = permanentScheduleBack;
    realSpaceSchedule = ewaldScheduleBack;
    realSpaceRanges = rangesBack;
    lAlpha = lAlphaBack;
    dlAlpha = dlAlphaBack;
    d2lAlpha = d2lAlphaBack;
    generalizedKirkwoodTerm = gkBack;
    scfAlgorithm = scfBack;
    // Re-enable all atoms for subsequent evaluations.
    fill(use, true);
    return energy;
}
Use of edu.rit.util.Range in project ffx by mjschnie:
class ParticleMeshEwaldQI, method ligandElec.
/**
 * 3.) Ligand in vapor A.) Real space with an Ewald coefficient of 0.0 (no
 * reciprocal space). B.) Polarization scaled as in Step 2 by (1 - lambda).
 *
 * The method saves the condensed-phase PME state (Ewald parameters,
 * parallel schedules, crystal, neighbor lists, GK flag), swaps in the
 * vapor equivalents, computes the energy, and restores every saved value
 * before returning.
 */
private double ligandElec() {
    // Flag the ligand atoms (those with lambda applied) as the only atoms in use.
    for (int i = 0; i < nAtoms; i++) {
        use[i] = atoms[i].applyLambda();
    }
    /**
     * Scale the permanent vacuum electrostatics. The softcore alpha is not
     * necessary (nothing in vacuum to collide with), so the lAlpha terms
     * are saved and zeroed for this evaluation.
     */
    doPermanentRealSpace = true;
    permanentScale = 1.0 - lPowPerm;
    dEdLSign = -1.0;
    double lAlphaBack = lAlpha;
    double dlAlphaBack = dlAlpha;
    double d2lAlphaBack = d2lAlpha;
    lAlpha = 0.0;
    dlAlpha = 0.0;
    d2lAlpha = 0.0;
    /**
     * Polarization is evaluated only while lambda is within the
     * polarization window (lambda <= polLambdaEnd); past the end of the
     * window only the condensed phase contribution is necessary, so the
     * vacuum polarization is skipped.
     */
    if (lambda <= polLambdaEnd) {
        doPolarization = true;
        polarizationScale = 1.0 - lPowPol;
    } else {
        doPolarization = false;
        polarizationScale = 0.0;
    }
    /**
     * Save the current real space PME parameters, then disable the cutoff
     * and reciprocal space (aewald = 0) for the vapor evaluation.
     */
    double offBack = off;
    double aewaldBack = aewald;
    off = Double.MAX_VALUE;
    aewald = 0.0;
    setEwaldParameters(off, aewald);
    /**
     * Save the current parallelization schedule and switch to the vapor
     * schedules/ranges.
     */
    IntegerSchedule permanentScheduleBack = permanentSchedule;
    IntegerSchedule ewaldScheduleBack = realSpaceSchedule;
    Range[] rangesBack = realSpaceRanges;
    permanentSchedule = vaporPermanentSchedule;
    realSpaceSchedule = vaporEwaldSchedule;
    realSpaceRanges = vacuumRanges;
    /**
     * Use vacuum crystal / vacuum neighborLists (aperiodic, no symmetry
     * mates).
     */
    Crystal crystalBack = crystal;
    int nSymmBack = nSymm;
    int[][][] listsBack = neighborLists;
    neighborLists = vaporLists;
    crystal = vaporCrystal;
    nSymm = 1;
    // Refresh the per-thread lambda factors for the new (vapor) state.
    for (LambdaFactors lf : lambdaFactors) {
        lf.setFactors();
    }
    /**
     * Turn off GK if it is in use, unless it's being used as the decoupling
     * target. If so, set its parameters and lambda (derivative) factors.
     */
    boolean gkBack = generalizedKirkwoodTerm;
    if (doLigandGKElec) {
        generalizedKirkwoodTerm = true;
        generalizedKirkwood.setNeighborList(vaporLists);
        generalizedKirkwood.setLambda(lambda);
        generalizedKirkwood.setCutoff(off);
        generalizedKirkwood.setCrystal(vaporCrystal);
        // TODO Decide whether to send LambdaFactors into generalizedKirkwood.
        generalizedKirkwood.setLambdaFunction(polarizationScale, dEdLSign * dlPowPol, dEdLSign * d2lPowPol);
    } else {
        generalizedKirkwoodTerm = false;
    }
    double energy = computeEnergy(false);
    /**
     * Revert to the saved parameters (mirror image of the swaps above).
     */
    off = offBack;
    aewald = aewaldBack;
    setEwaldParameters(off, aewald);
    neighborLists = listsBack;
    crystal = crystalBack;
    nSymm = nSymmBack;
    permanentSchedule = permanentScheduleBack;
    realSpaceSchedule = ewaldScheduleBack;
    realSpaceRanges = rangesBack;
    lAlpha = lAlphaBack;
    dlAlpha = dlAlphaBack;
    d2lAlpha = d2lAlphaBack;
    generalizedKirkwoodTerm = gkBack;
    // Recompute the lambda factors now that the condensed state is restored.
    for (LambdaFactors lf : lambdaFactors) {
        lf.setFactors();
    }
    // Re-enable all atoms for subsequent evaluations.
    fill(use, true);
    return energy;
}
Use of edu.rit.util.Range in project ffx by mjschnie:
class SpatialDensitySchedule, method defineRanges.
/**
 * Partition the chunk indices [chunkRange.lb(), chunkRange.ub()] into
 * per-thread Ranges so that each thread is assigned roughly the same
 * number of atoms, based on the atomsPerChunk counts. Threads left with no
 * work have their entry in ranges set to null.
 *
 * Existing Range objects are reused when a thread's assignment is
 * unchanged from the previous call, avoiding churn between invocations.
 */
private void defineRanges() {
    int lb = chunkRange.lb();
    int ub = chunkRange.ub();
    int thread = 0;
    int start = 0;
    int total = 0;
    // Calculate the total number of atoms that will be placed on the grid.
    for (int i = 0; i < atomsPerChunk.length; i++) {
        total += atomsPerChunk[i];
    }
    // Per-thread atom target, scaled by the load balance factor.
    int goal = (int) ((total * loadBalancePercentage) / nThreads);
    total = 0;
    for (int i = lb; i <= ub; i++) {
        int chunksLeft = ub - i + 1;
        int threadsLeft = nThreads - thread;
        // Count the number of atoms in each work chunk.
        total += atomsPerChunk[i];
        // Close out this thread's range once the load goal is reached, or
        // when only just enough chunks remain for the remaining threads.
        if (total > goal || chunksLeft <= threadsLeft) {
            assignRange(thread++, start, i);
            // Initialization for the next thread.
            start = i + 1;
            total = 0;
            // The last thread gets the rest of the work chunks.
            if (thread == nThreads - 1) {
                assignRange(thread++, start, ub);
                break;
            }
        } else if (i == ub) {
            // The final range may not meet the goal.
            assignRange(thread++, start, i);
        }
    }
    // No work for the remaining threads. Cleanup starts at `thread` (the
    // first unassigned slot): the previous version started at thread + 1,
    // which could leave one stale Range from an earlier call when the loop
    // ended exactly at the last chunk (and left all slots stale when the
    // chunk range was empty).
    for (int i = thread; i < nThreads; i++) {
        ranges[i] = null;
    }
}

/**
 * Assign the inclusive chunk range [start, stop] to the given thread,
 * reusing the existing Range object when its bounds are unchanged.
 */
private void assignRange(int thread, int start, int stop) {
    Range current = ranges[thread];
    if (current == null || current.lb() != start || current.ub() != stop) {
        ranges[thread] = new Range(start, stop);
        // logger.info(String.format("Range for thread %d %s.", thread, ranges[thread]));
    }
}
End of aggregated usages.