Use of org.apache.hadoop.fs.slive.Constants.OperationType in project hadoop by apache.
Class WeightSelector, method select.
/**
 * Selects an operation from the known operation set, or returns null if none
 * are available, by applying the weighting algorithms and then handing the
 * weighted operations off to the selection object.
 *
 * @param elapsed
 *          the currently elapsed time (in milliseconds) of the running program
 * @param duration
 *          the maximum duration (in milliseconds) of the running program
 *
 * @return the selected operation, or null if none are left
 */
Operation select(int elapsed, int duration) {
  List<OperationWeight> validOps =
      new ArrayList<OperationWeight>(operations.size());
  for (OperationType type : operations.keySet()) {
    OperationInfo opinfo = operations.get(type);
    if (opinfo == null || opinfo.amountLeft <= 0) {
      continue;
    }
    Weightable weighter = weights.get(opinfo.distribution);
    if (weighter != null) {
      OperationWeight weightOp =
          new OperationWeight(opinfo.operation, weighter.weight(elapsed, duration));
      validOps.add(weightOp);
    } else {
      throw new RuntimeException("Unable to get weight for distribution "
          + opinfo.distribution);
    }
  }
  if (validOps.isEmpty()) {
    return null;
  }
  return getSelector().select(validOps);
}
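The selection object returned by getSelector() then picks among these weighted candidates. Below is a minimal sketch of a roulette-style weighted pick, assuming same-package access to the SLive types and assuming OperationWeight exposes getWeight() and getOperation() accessors (those accessor names are assumptions for illustration, not confirmed API):

import java.util.List;
import java.util.Random;

// A minimal roulette-wheel sketch: picks an operation with probability
// proportional to its weight. Illustrative only; the actual SLive selector
// may differ in detail.
class WeightedPicker {
  private final Random rnd;

  WeightedPicker(Random rnd) {
    this.rnd = rnd;
  }

  Operation pick(List<OperationWeight> ops) {
    double total = 0;
    for (OperationWeight ow : ops) {
      total += ow.getWeight(); // assumed accessor
    }
    if (total <= 0) {
      return null; // nothing has positive weight
    }
    double point = rnd.nextDouble() * total;
    double seen = 0;
    for (OperationWeight ow : ops) {
      seen += ow.getWeight();
      if (point <= seen) {
        return ow.getOperation(); // assumed accessor
      }
    }
    // guard against floating-point rounding at the upper edge
    return ops.get(ops.size() - 1).getOperation();
  }
}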
Use of org.apache.hadoop.fs.slive.Constants.OperationType in project hadoop by apache.
Class WeightSelector, method configureOperations.
/**
 * Sets up the operations using the given configuration by determining the
 * number of operations to perform (and how many are left) and creating the
 * operation objects to be used throughout selection.
 *
 * @param cfg
 *          the ConfigExtractor to read the operation settings from
 */
private void configureOperations(ConfigExtractor cfg) {
  operations = new TreeMap<OperationType, OperationInfo>();
  Map<OperationType, OperationData> opinfo = cfg.getOperations();
  int totalAm = cfg.getOpCount();
  int opsLeft = totalAm;
  NumberFormat formatter = Formatter.getPercentFormatter();
  for (final OperationType type : opinfo.keySet()) {
    OperationData opData = opinfo.get(type);
    OperationInfo info = new OperationInfo();
    info.distribution = opData.getDistribution();
    int amLeft = determineHowMany(totalAm, opData, type);
    opsLeft -= amLeft;
    LOG.info(type.name() + " has " + amLeft + " initial operations out of "
        + totalAm + " for its ratio " + formatter.format(opData.getPercent()));
    info.amountLeft = amLeft;
    Operation op = factory.getOperation(type);
    // wrap the operation so that each completion decrements its remaining count
    if (op != null) {
      Observer fn = new Observer() {
        public void notifyFinished(Operation op) {
          OperationInfo opInfo = operations.get(type);
          if (opInfo != null) {
            --opInfo.amountLeft;
          }
        }

        public void notifyStarting(Operation op) {
        }
      };
      info.operation = new ObserveableOp(op, fn);
      operations.put(type, info);
    }
  }
  if (opsLeft > 0) {
    LOG.info(opsLeft + " leftover operations found (due to the inability to"
        + " support partial operations)");
  }
}
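The ObserveableOp used above is a decorator that fires the observer's callbacks around the wrapped operation. Below is a self-contained sketch of that wrap-and-notify pattern, with simplified stand-ins for the SLive Operation and Observer types (the names Op, OpObserver, and ObservedOp are illustrative):

// Simplified stand-ins for the SLive types, shown only to illustrate the
// wrap-and-notify pattern used above.
interface Op {
  void run();
}

interface OpObserver {
  void notifyStarting(Op op);
  void notifyFinished(Op op);
}

// Decorator: runs the wrapped operation and notifies the observer before and
// after, mirroring what ObserveableOp does for SLive (where the finished
// callback decrements the remaining operation count).
class ObservedOp implements Op {
  private final Op wrapped;
  private final OpObserver observer;

  ObservedOp(Op wrapped, OpObserver observer) {
    this.wrapped = wrapped;
    this.observer = observer;
  }

  @Override
  public void run() {
    observer.notifyStarting(wrapped);
    try {
      wrapped.run();
    } finally {
      observer.notifyFinished(wrapped);
    }
  }
}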
Use of org.apache.hadoop.fs.slive.Constants.OperationType in project hadoop by apache.
Class ArgumentParser, method getOptions.
/**
 * @return the option set to be used in command-line parsing
 */
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.TRUNCATE_WAIT);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    // register one option per operation type, all sharing a description that
    // lists the available distributions
    String[] distStrs = new String[Distribution.values().length];
    Distribution[] distValues = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR,
        StringUtils.arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.TRUNCATE_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}
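This option set is built with Apache Commons CLI. Below is a minimal sketch of how such an option set is typically consumed; the GnuParser choice and the "ops" option name are assumptions for illustration, not necessarily what ArgumentParser does:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

class ParseSketch {
  static void parseArgs(Options cliopt, String[] args) throws ParseException {
    CommandLineParser parser = new GnuParser();
    CommandLine line = parser.parse(cliopt, args);
    // query a parsed value; "ops" is an illustrative option name
    if (line.hasOption("ops")) {
      int opCount = Integer.parseInt(line.getOptionValue("ops"));
      System.out.println("operation count = " + opCount);
    }
  }
}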
Use of org.apache.hadoop.fs.slive.Constants.OperationType in project hadoop by apache.
Class ConfigExtractor, method dumpOptions.
/**
 * Dumps out the options of the given config extractor
 *
 * @param cfg
 *          the config to write to the log
 */
static void dumpOptions(ConfigExtractor cfg) {
  if (cfg == null) {
    return;
  }
  LOG.info("Base directory = " + cfg.getBaseDirectory());
  LOG.info("Data directory = " + cfg.getDataPath());
  LOG.info("Output directory = " + cfg.getOutputPath());
  LOG.info("Result file = " + cfg.getResultFile());
  LOG.info("Grid queue = " + cfg.getQueueName());
  LOG.info("Should exit on first error = " + cfg.shouldExitOnFirstError());
  {
    String duration = "Duration = ";
    if (cfg.getDurationMilliseconds() == Integer.MAX_VALUE) {
      duration += "unlimited";
    } else {
      duration += cfg.getDurationMilliseconds() + " milliseconds";
    }
    LOG.info(duration);
  }
  LOG.info("Map amount = " + cfg.getMapAmount());
  LOG.info("Reducer amount = " + cfg.getReducerAmount());
  LOG.info("Operation amount = " + cfg.getOpCount());
  LOG.info("Total file limit = " + cfg.getTotalFiles());
  LOG.info("Total dir file limit = " + cfg.getDirSize());
  {
    String read = "Read size = ";
    if (cfg.shouldReadFullFile()) {
      read += "entire file";
    } else {
      read += cfg.getReadSize() + " bytes";
    }
    LOG.info(read);
  }
  {
    String write = "Write size = ";
    if (cfg.shouldWriteUseBlockSize()) {
      write += "blocksize";
    } else {
      write += cfg.getWriteSize() + " bytes";
    }
    LOG.info(write);
  }
  {
    String append = "Append size = ";
    if (cfg.shouldAppendUseBlockSize()) {
      append += "blocksize";
    } else {
      append += cfg.getAppendSize() + " bytes";
    }
    LOG.info(append);
  }
  {
    String bsize = "Block size = ";
    bsize += cfg.getBlockSize() + " bytes";
    LOG.info(bsize);
  }
  if (cfg.getRandomSeed() != null) {
    LOG.info("Random seed = " + cfg.getRandomSeed());
  }
  if (cfg.getSleepRange() != null) {
    LOG.info("Sleep range = " + cfg.getSleepRange() + " milliseconds");
  }
  LOG.info("Replication amount = " + cfg.getReplication());
  LOG.info("Operations are:");
  NumberFormat percFormatter = Formatter.getPercentFormatter();
  Map<OperationType, OperationData> operations = cfg.getOperations();
  for (OperationType type : operations.keySet()) {
    String name = type.name();
    LOG.info(name);
    OperationData opInfo = operations.get(type);
    LOG.info(" " + opInfo.getDistribution().name());
    if (opInfo.getPercent() != null) {
      LOG.info(" " + percFormatter.format(opInfo.getPercent()));
    } else {
      LOG.info(" ???");
    }
  }
}
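The percent formatter presumably wraps java.text.NumberFormat; the small sketch below shows the kind of output that would produce for the operation ratios (the wrapping is an assumption, not confirmed from the SLive Formatter source):

import java.text.NumberFormat;

class PercentSketch {
  public static void main(String[] args) {
    // Assumption: Formatter.getPercentFormatter wraps a NumberFormat along
    // these lines; shown only to illustrate the logged ratio style.
    NumberFormat perc = NumberFormat.getPercentInstance();
    perc.setMaximumFractionDigits(2);
    System.out.println(perc.format(0.35)); // "35%" in an English locale
  }
}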
Use of org.apache.hadoop.fs.slive.Constants.OperationType in project hadoop by apache.
Class ConfigMerger, method getBaseOperations.
/**
 * Gets the base set of operations to use
 *
 * @return a map from each operation type to its default operation data
 */
private Map<OperationType, OperationData> getBaseOperations() {
  Map<OperationType, OperationData> base =
      new HashMap<OperationType, OperationData>();
  // add in all the operations, since they will all be applied unless changed
  OperationType[] types = OperationType.values();
  for (OperationType type : types) {
    base.put(type, new OperationData(Distribution.UNIFORM, null));
  }
  return base;
}
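Each operation type therefore defaults to a uniform distribution with no explicit percentage, and later merging can replace individual entries. A hedged sketch of such an override follows; the helper method and the 30% value are hypothetical, not the actual ConfigMerger logic:

// Hypothetical helper, illustrative only: start from the uniform defaults
// and override a single entry the way a config merge might.
private Map<OperationType, OperationData> withReadOverride() {
  Map<OperationType, OperationData> ops = getBaseOperations();
  // Give READ an explicit 30% share, stored as a fraction to match the
  // percent-formatter usage seen above; the real ConfigMerger derives such
  // values from the parsed configuration instead.
  ops.put(OperationType.READ, new OperationData(Distribution.UNIFORM, 0.3));
  return ops;
}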