Usage of org.pentaho.di.core.row.value.ValueMetaNumber in project pentaho-kettle (by Pentaho).
Class RandomValueMeta, method getFields().
@Override
public void getFields(RowMetaInterface row, String name, RowMetaInterface[] info, StepMeta nextStep, VariableSpace space, Repository repository, IMetaStore metaStore) throws KettleStepException {
    // Append one output value metadata per configured random field.
    for (int i = 0; i < fieldName.length; i++) {
        final ValueMetaInterface valueMeta;
        switch (fieldType[i]) {
            case TYPE_RANDOM_NUMBER:
                valueMeta = new ValueMetaNumber(fieldName[i], 10, 5);
                break;
            case TYPE_RANDOM_INTEGER:
                valueMeta = new ValueMetaInteger(fieldName[i], 10, 0);
                break;
            case TYPE_RANDOM_STRING:
                valueMeta = new ValueMetaString(fieldName[i], 13, 0);
                break;
            case TYPE_RANDOM_UUID:
            case TYPE_RANDOM_UUID4:
                // Both UUID variants are rendered as 36-character strings.
                valueMeta = new ValueMetaString(fieldName[i], 36, 0);
                break;
            case TYPE_RANDOM_MAC_HMACMD5:
            case TYPE_RANDOM_MAC_HMACSHA1:
                // Both HMAC variants use the same 100-character string length.
                valueMeta = new ValueMetaString(fieldName[i], 100, 0);
                break;
            default:
                // Unknown type: emit a typeless value metadata.
                valueMeta = new ValueMetaNone(fieldName[i]);
                break;
        }
        valueMeta.setOrigin(name);
        row.addValueMeta(valueMeta);
    }
}
Usage of org.pentaho.di.core.row.value.ValueMetaNumber in project pentaho-kettle (by Pentaho).
Class BaseStep, method buildLog().
/**
 * Builds a single log row (metadata plus data) describing this step's execution statistics.
 *
 * @param sname the step name
 * @param copynr the copy number of the step
 * @param lines_read the number of lines read
 * @param lines_written the number of lines written
 * @param lines_updated the number of lines updated
 * @param lines_skipped the number of lines skipped
 * @param errors the number of errors
 * @param start_date the start date of the step execution
 * @param end_date the end date of the step execution
 * @return the row metadata and data, with values in the same order as the metadata columns
 */
public RowMetaAndData buildLog(String sname, int copynr, long lines_read, long lines_written, long lines_updated, long lines_skipped, long errors, Date start_date, Date end_date) {
    RowMetaInterface r = new RowMeta();
    Object[] data = new Object[9];
    int nr = 0;
    r.addValueMeta(new ValueMetaString(BaseMessages.getString(PKG, "BaseStep.ColumnName.Stepname")));
    data[nr] = sname;
    nr++;
    // Counters are stored as Double because the columns are declared ValueMetaNumber.
    // Use primitive widening + autoboxing instead of the deprecated new Double(...) constructor.
    r.addValueMeta(new ValueMetaNumber(BaseMessages.getString(PKG, "BaseStep.ColumnName.Copy")));
    data[nr] = (double) copynr;
    nr++;
    r.addValueMeta(new ValueMetaNumber(BaseMessages.getString(PKG, "BaseStep.ColumnName.LinesReaded")));
    data[nr] = (double) lines_read;
    nr++;
    r.addValueMeta(new ValueMetaNumber(BaseMessages.getString(PKG, "BaseStep.ColumnName.LinesWritten")));
    data[nr] = (double) lines_written;
    nr++;
    r.addValueMeta(new ValueMetaNumber(BaseMessages.getString(PKG, "BaseStep.ColumnName.LinesUpdated")));
    data[nr] = (double) lines_updated;
    nr++;
    r.addValueMeta(new ValueMetaNumber(BaseMessages.getString(PKG, "BaseStep.ColumnName.LinesSkipped")));
    data[nr] = (double) lines_skipped;
    nr++;
    r.addValueMeta(new ValueMetaNumber(BaseMessages.getString(PKG, "BaseStep.ColumnName.Errors")));
    data[nr] = (double) errors;
    nr++;
    // Date column names are not localized in the original implementation; kept as-is.
    r.addValueMeta(new ValueMetaDate("start_date"));
    data[nr] = start_date;
    nr++;
    r.addValueMeta(new ValueMetaDate("end_date"));
    data[nr] = end_date;
    nr++;
    return new RowMetaAndData(r, data);
}
Usage of org.pentaho.di.core.row.value.ValueMetaNumber in project pentaho-kettle (by Pentaho).
Class MemoryGroupBy, method processRow().
@Override
public boolean processRow(StepMetaInterface smi, StepDataInterface sdi) throws KettleException {
    // Consumes one input row per call, accumulating it into in-memory aggregates.
    // Output rows are only emitted once the input is exhausted (r == null).
    meta = (MemoryGroupByMeta) smi;
    data = (MemoryGroupByData) sdi;
    // get row!
    Object[] r = getRow();
    if (first) {
        // First call: resolve configuration, field indexes and output metadata.
        if ((r == null) && (!meta.isAlwaysGivingBackOneRow())) {
            // No input at all and no "always return one row" option: finish immediately.
            setOutputDone();
            return false;
        }
        // Behavior toggles read from kettle variables (all default to "N" / false).
        String val = getVariable(Const.KETTLE_AGGREGATION_ALL_NULLS_ARE_ZERO, "N");
        allNullsAreZero = ValueMetaBase.convertStringToBoolean(val);
        val = getVariable(Const.KETTLE_AGGREGATION_MIN_NULL_IS_VALUED, "N");
        minNullIsValued = ValueMetaBase.convertStringToBoolean(val);
        compatibilityMode = ValueMetaBase.convertStringToBoolean(getVariable(Const.KETTLE_COMPATIBILITY_MEMORY_GROUP_BY_SUM_AVERAGE_RETURN_NUMBER_TYPE, "N"));
        // What is the output looking like?
        //
        data.inputRowMeta = getInputRowMeta();
        //
        if (data.inputRowMeta == null) {
            // No row received yet: fall back to the metadata of the previous step.
            data.inputRowMeta = getTransMeta().getPrevStepFields(getStepMeta());
        }
        data.outputRowMeta = data.inputRowMeta.clone();
        meta.getFields(data.outputRowMeta, getStepname(), null, null, this, repository, metaStore);
        // Do all the work we can beforehand
        // Calculate indexes, loop up fields, etc.
        //
        data.subjectnrs = new int[meta.getSubjectField().length];
        data.groupnrs = new int[meta.getGroupField().length];
        // If the step does not receive any rows, we can not lookup field position indexes
        if (r != null) {
            for (int i = 0; i < meta.getSubjectField().length; i++) {
                if (meta.getAggregateType()[i] == MemoryGroupByMeta.TYPE_GROUP_COUNT_ANY) {
                    // COUNT_ANY does not need a subject field; index 0 is a placeholder.
                    data.subjectnrs[i] = 0;
                } else {
                    data.subjectnrs[i] = data.inputRowMeta.indexOfValue(meta.getSubjectField()[i]);
                }
                if (data.subjectnrs[i] < 0) {
                    // Configured subject field is missing from the input: abort the transformation.
                    logError(BaseMessages.getString(PKG, "MemoryGroupBy.Log.AggregateSubjectFieldCouldNotFound", meta.getSubjectField()[i]));
                    setErrors(1);
                    stopAll();
                    return false;
                }
            }
            for (int i = 0; i < meta.getGroupField().length; i++) {
                data.groupnrs[i] = data.inputRowMeta.indexOfValue(meta.getGroupField()[i]);
                if (data.groupnrs[i] < 0) {
                    // Configured group field is missing from the input: abort the transformation.
                    logError(BaseMessages.getString(PKG, "MemoryGroupBy.Log.GroupFieldCouldNotFound", meta.getGroupField()[i]));
                    setErrors(1);
                    stopAll();
                    return false;
                }
            }
        }
        // Create a metadata value for the counter Integers
        //
        data.valueMetaInteger = new ValueMetaInteger("count");
        data.valueMetaNumber = new ValueMetaNumber("sum");
        // Initialize the group metadata
        //
        initGroupMeta(data.inputRowMeta);
    }
    if (first) {
        // Only calculate data.aggMeta here, not for every new aggregate.
        //
        newAggregate(r, null);
        // for speed: groupMeta+aggMeta
        //
        data.groupAggMeta = new RowMeta();
        data.groupAggMeta.addRowMeta(data.groupMeta);
        data.groupAggMeta.addRowMeta(data.aggMeta);
    }
    //
    if (r == null) {
        // no more input to be expected... (or none received in the first place)
        handleLastOfGroup();
        setOutputDone();
        return false;
    }
    if (first || data.newBatch) {
        first = false;
        data.newBatch = false;
    }
    // Fold the current row into its group's running aggregates.
    addToAggregate(r);
    if (checkFeedback(getLinesRead())) {
        if (log.isBasic()) {
            logBasic(BaseMessages.getString(PKG, "MemoryGroupBy.LineNumber") + getLinesRead());
        }
    }
    // true: more rows may follow.
    return true;
}
Usage of org.pentaho.di.core.row.value.ValueMetaNumber in project pentaho-kettle (by Pentaho).
Class MemoryGroupBy, method newAggregate().
/**
 * Initializes aggregate state: when {@code aggregate} is null, builds the aggregate
 * row metadata ({@code data.aggMeta}); otherwise resets the given aggregate's
 * counters and seeds its per-field starting values.
 * <p>
 * Used for junits in MemoryGroupByNewAggregateTest
 *
 * @param r the current input row, may be null
 * @param aggregate the aggregate to (re)initialize, or null to build metadata only
 * @throws KettleException if an aggregation type is unknown
 */
void newAggregate(Object[] r, Aggregate aggregate) throws KettleException {
    if (aggregate == null) {
        data.aggMeta = new RowMeta();
    } else {
        // Reset counters and working storage for a fresh group.
        aggregate.counts = new long[data.subjectnrs.length];
        // Put all the counters at 0
        for (int idx = 0; idx < aggregate.counts.length; idx++) {
            aggregate.counts[idx] = 0;
        }
        aggregate.distinctObjs = null;
        aggregate.agg = new Object[data.subjectnrs.length];
        // sets all doubles to 0.0
        aggregate.mean = new double[data.subjectnrs.length];
    }
    for (int idx = 0; idx < data.subjectnrs.length; idx++) {
        final int aggType = meta.getAggregateType()[idx];
        final ValueMetaInterface subjectMeta = data.inputRowMeta.getValueMeta(data.subjectnrs[idx]);
        Object initialValue = null;
        ValueMetaInterface aggValueMeta = null;
        switch (aggType) {
            case MemoryGroupByMeta.TYPE_GROUP_MEDIAN:
            case MemoryGroupByMeta.TYPE_GROUP_PERCENTILE:
                // These collect all values, so start with an empty list.
                aggValueMeta = new ValueMetaNumber(meta.getAggregateField()[idx]);
                initialValue = new ArrayList<Double>();
                break;
            case MemoryGroupByMeta.TYPE_GROUP_STANDARD_DEVIATION:
                aggValueMeta = new ValueMetaNumber(meta.getAggregateField()[idx]);
                break;
            case MemoryGroupByMeta.TYPE_GROUP_COUNT_DISTINCT:
            case MemoryGroupByMeta.TYPE_GROUP_COUNT_ANY:
            case MemoryGroupByMeta.TYPE_GROUP_COUNT_ALL:
                aggValueMeta = new ValueMetaInteger(meta.getAggregateField()[idx]);
                break;
            case MemoryGroupByMeta.TYPE_GROUP_SUM:
            case MemoryGroupByMeta.TYPE_GROUP_AVERAGE:
                // Outside compatibility mode a numeric subject keeps its own type;
                // otherwise the result is forced to Number.
                aggValueMeta = !compatibilityMode && subjectMeta.isNumeric() ? subjectMeta.clone() : new ValueMetaNumber();
                aggValueMeta.setName(meta.getAggregateField()[idx]);
                break;
            case MemoryGroupByMeta.TYPE_GROUP_FIRST:
            case MemoryGroupByMeta.TYPE_GROUP_LAST:
            case MemoryGroupByMeta.TYPE_GROUP_FIRST_INCL_NULL:
            case MemoryGroupByMeta.TYPE_GROUP_LAST_INCL_NULL:
            case MemoryGroupByMeta.TYPE_GROUP_MIN:
            case MemoryGroupByMeta.TYPE_GROUP_MAX:
                // These keep the subject's own type and seed from the current row.
                aggValueMeta = subjectMeta.clone();
                aggValueMeta.setName(meta.getAggregateField()[idx]);
                initialValue = r == null ? null : r[data.subjectnrs[idx]];
                break;
            case MemoryGroupByMeta.TYPE_GROUP_CONCAT_COMMA:
            case MemoryGroupByMeta.TYPE_GROUP_CONCAT_STRING:
                // Both concat variants accumulate into a string builder.
                aggValueMeta = new ValueMetaString(meta.getAggregateField()[idx]);
                initialValue = new StringBuilder();
                break;
            default:
                throw new KettleException("Unknown data type for aggregation : " + meta.getAggregateField()[idx]);
        }
        final boolean isCountAggregate = aggType == MemoryGroupByMeta.TYPE_GROUP_COUNT_ALL
                || aggType == MemoryGroupByMeta.TYPE_GROUP_COUNT_DISTINCT
                || aggType == MemoryGroupByMeta.TYPE_GROUP_COUNT_ANY;
        if (!isCountAggregate) {
            // Non-count aggregates inherit length/precision from the subject field.
            aggValueMeta.setLength(subjectMeta.getLength(), subjectMeta.getPrecision());
        }
        if (aggregate == null) {
            data.aggMeta.addValueMeta(aggValueMeta);
        } else {
            aggregate.agg[idx] = initialValue;
        }
    }
}
Usage of org.pentaho.di.core.row.value.ValueMetaNumber in project pentaho-kettle (by Pentaho).
Class MySQLBulkLoader, method init().
@Override
public boolean init(StepMetaInterface smi, StepDataInterface sdi) {
    // Prepares the bulk-load configuration: quoting/separator bytes, per-type
    // conversion metadata, batch size and the fully qualified target table name.
    meta = (MySQLBulkLoaderMeta) smi;
    data = (MySQLBulkLoaderData) sdi;
    if (super.init(smi, sdi)) {
        // Enclosure/delimiter are resolved against environment variables first.
        // NOTE(review): getBytes() uses the platform default charset here, not
        // the configured encoding — confirm whether that is intentional.
        if (Utils.isEmpty(meta.getEnclosure())) {
            data.quote = new byte[] {};
        } else {
            data.quote = environmentSubstitute(meta.getEnclosure()).getBytes();
        }
        if (Utils.isEmpty(meta.getDelimiter())) {
            // Default field separator is a tab.
            data.separator = "\t".getBytes();
        } else {
            data.separator = environmentSubstitute(meta.getDelimiter()).getBytes();
        }
        data.newline = Const.CR.getBytes();
        String realEncoding = environmentSubstitute(meta.getEncoding());
        // Conversion metadata used to render timestamps, dates and numbers
        // into the textual bulk-load format expected by MySQL.
        data.bulkTimestampMeta = new ValueMetaDate("timestampMeta");
        data.bulkTimestampMeta.setConversionMask("yyyy-MM-dd HH:mm:ss");
        data.bulkTimestampMeta.setStringEncoding(realEncoding);
        data.bulkDateMeta = new ValueMetaDate("dateMeta");
        data.bulkDateMeta.setConversionMask("yyyy-MM-dd");
        data.bulkDateMeta.setStringEncoding(realEncoding);
        data.bulkNumberMeta = new ValueMetaNumber("numberMeta");
        data.bulkNumberMeta.setConversionMask("#.#");
        data.bulkNumberMeta.setGroupingSymbol(",");
        data.bulkNumberMeta.setDecimalSymbol(".");
        data.bulkNumberMeta.setStringEncoding(realEncoding);
        // -1 means "no batching" if the configured bulk size cannot be parsed.
        data.bulkSize = Const.toLong(environmentSubstitute(meta.getBulkSize()), -1L);
        // Schema-table combination...
        data.schemaTable = meta.getDatabaseMeta().getQuotedSchemaTableCombination(environmentSubstitute(meta.getSchemaName()), environmentSubstitute(meta.getTableName()));
        return true;
    }
    return false;
}
Aggregations