use of org.pentaho.di.core.QueueRowSet in project pentaho-kettle by pentaho.
the class Trans, method prepareExecution.
/**
* Prepares the transformation for execution. This includes setting the arguments and parameters as well as preparing
* and tracking the steps and hops in the transformation.
*
* @param arguments
* the arguments to use for this transformation
* @throws KettleException
* in case the transformation could not be prepared (initialized)
*/
public void prepareExecution(String[] arguments) throws KettleException {
setPreparing(true);
startDate = null;
setRunning(false);
log.snap(Metrics.METRIC_TRANSFORMATION_EXECUTION_START);
log.snap(Metrics.METRIC_TRANSFORMATION_INIT_START);
ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationPrepareExecution.id, this);
transMeta.disposeEmbeddedMetastoreProvider();
if (transMeta.getMetastoreLocatorOsgi() != null) {
transMeta.setEmbeddedMetastoreProviderKey(transMeta.getMetastoreLocatorOsgi().setEmbeddedMetastore(transMeta.getEmbeddedMetaStore()));
}
checkCompatibility();
// Set the arguments on the transformation...
if (arguments != null) {
setArguments(arguments);
}
activateParameters();
transMeta.activateParameters();
if (transMeta.getName() == null) {
if (transMeta.getFilename() != null) {
log.logBasic(BaseMessages.getString(PKG, "Trans.Log.DispacthingStartedForFilename", transMeta.getFilename()));
}
} else {
log.logBasic(BaseMessages.getString(PKG, "Trans.Log.DispacthingStartedForTransformation", transMeta.getName()));
}
if (getArguments() != null) {
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.NumberOfArgumentsDetected", String.valueOf(getArguments().length)));
}
}
if (isSafeModeEnabled()) {
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.SafeModeIsEnabled", transMeta.getName()));
}
}
if (getReplayDate() != null) {
SimpleDateFormat df = new SimpleDateFormat(REPLAY_DATE_FORMAT);
log.logBasic(BaseMessages.getString(PKG, "Trans.Log.ThisIsAReplayTransformation") + df.format(getReplayDate()));
} else {
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.ThisIsNotAReplayTransformation"));
}
}
// Set up the servlet print writer (with the configured encoding) if none was given.
if (servletPrintWriter == null) {
String encoding = System.getProperty("KETTLE_DEFAULT_SERVLET_ENCODING", null);
if (encoding == null) {
servletPrintWriter = new PrintWriter(new OutputStreamWriter(System.out));
} else {
try {
servletPrintWriter = new PrintWriter(new OutputStreamWriter(System.out, encoding));
} catch (UnsupportedEncodingException ex) {
servletPrintWriter = new PrintWriter(new OutputStreamWriter(System.out));
}
}
}
// Keep track of all the row sets and allocated steps
//
steps = new ArrayList<>();
rowsets = new ArrayList<>();
List<StepMeta> hopsteps = transMeta.getTransHopSteps(false);
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.FoundDefferentSteps", String.valueOf(hopsteps.size())));
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.AllocatingRowsets"));
}
// Allocate the row sets for all the hops between the steps...
for (int i = 0; i < hopsteps.size(); i++) {
StepMeta thisStep = hopsteps.get(i);
if (thisStep.isMapping()) {
// handled and allocated by the mapping step itself.
continue;
}
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.AllocateingRowsetsForStep", String.valueOf(i), thisStep.getName()));
}
List<StepMeta> nextSteps = transMeta.findNextSteps(thisStep);
int nrTargets = nextSteps.size();
for (int n = 0; n < nrTargets; n++) {
// What's the next step?
StepMeta nextStep = nextSteps.get(n);
if (nextStep.isMapping()) {
// handled and allocated by the mapping step itself.
continue;
}
// How many times do we start the source step?
int thisCopies = thisStep.getCopies();
if (thisCopies < 0) {
// The number of copies (possibly set through a variable) did not resolve to a positive value.
throw new KettleException(BaseMessages.getString(PKG, "Trans.Log.StepCopiesNotCorrectlyDefined", thisStep.getName()));
}
// How many times do we start the target step?
int nextCopies = nextStep.getCopies();
// Are we re-partitioning?
boolean repartitioning;
if (thisStep.isPartitioned()) {
repartitioning = !thisStep.getStepPartitioningMeta().equals(nextStep.getStepPartitioningMeta());
} else {
repartitioning = nextStep.isPartitioned();
}
int nrCopies;
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.copiesInfo", String.valueOf(thisCopies), String.valueOf(nextCopies)));
}
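// Determine how rows travel between copies of the source and target steps:
// 1-to-1, 1-to-N, N-to-1, N-to-N (equal copy counts, no re-partitioning) or N-to-M.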
int dispatchType;
if (thisCopies == 1 && nextCopies == 1) {
dispatchType = TYPE_DISP_1_1;
nrCopies = 1;
} else if (thisCopies == 1 && nextCopies > 1) {
dispatchType = TYPE_DISP_1_N;
nrCopies = nextCopies;
} else if (thisCopies > 1 && nextCopies == 1) {
dispatchType = TYPE_DISP_N_1;
nrCopies = thisCopies;
} else if (thisCopies == nextCopies && !repartitioning) {
dispatchType = TYPE_DISP_N_N;
nrCopies = nextCopies;
} else {
// Both sides run multiple copies, with differing counts or re-partitioning.
dispatchType = TYPE_DISP_N_M;
nrCopies = nextCopies;
}
// Allocate one row set per copy, except for the N-to-M case handled below.
if (dispatchType != TYPE_DISP_N_M) {
for (int c = 0; c < nrCopies; c++) {
RowSet rowSet;
switch(transMeta.getTransformationType()) {
case Normal:
// This is a temporary patch until the batching rowset has proven
// to be working in all situations.
// Currently there are stalling problems when dealing with small
// amounts of rows.
//
Boolean batchingRowSet = ValueMetaString.convertStringToBoolean(System.getProperty(Const.KETTLE_BATCHING_ROWSET));
if (batchingRowSet != null && batchingRowSet.booleanValue()) {
rowSet = new BlockingBatchingRowSet(transMeta.getSizeRowset());
} else {
rowSet = new BlockingRowSet(transMeta.getSizeRowset());
}
break;
case SerialSingleThreaded:
rowSet = new SingleRowRowSet();
break;
case SingleThreaded:
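// Single-threaded execution drives all the steps from one thread, so a
// non-blocking FIFO is used here: a blocking row set would deadlock.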
rowSet = new QueueRowSet();
break;
default:
throw new KettleException("Unhandled transformation type: " + transMeta.getTransformationType());
}
switch(dispatchType) {
case TYPE_DISP_1_1:
rowSet.setThreadNameFromToCopy(thisStep.getName(), 0, nextStep.getName(), 0);
break;
case TYPE_DISP_1_N:
rowSet.setThreadNameFromToCopy(thisStep.getName(), 0, nextStep.getName(), c);
break;
case TYPE_DISP_N_1:
rowSet.setThreadNameFromToCopy(thisStep.getName(), c, nextStep.getName(), 0);
break;
case TYPE_DISP_N_N:
rowSet.setThreadNameFromToCopy(thisStep.getName(), c, nextStep.getName(), c);
break;
default:
break;
}
rowsets.add(rowSet);
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.TransformationAllocatedNewRowset", rowSet.toString()));
}
}
} else {
// N-to-M distribution: every source copy gets a row set to every target copy...
for (int s = 0; s < thisCopies; s++) {
for (int t = 0; t < nextCopies; t++) {
BlockingRowSet rowSet = new BlockingRowSet(transMeta.getSizeRowset());
rowSet.setThreadNameFromToCopy(thisStep.getName(), s, nextStep.getName(), t);
rowsets.add(rowSet);
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.TransformationAllocatedNewRowset", rowSet.toString()));
}
}
}
}
}
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.AllocatedRowsets", String.valueOf(rowsets.size()), String.valueOf(i), thisStep.getName()) + " ");
}
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.AllocatingStepsAndStepData"));
}
// Allocate the steps & the step data...
for (int i = 0; i < hopsteps.size(); i++) {
StepMeta stepMeta = hopsteps.get(i);
String stepid = stepMeta.getStepID();
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.TransformationIsToAllocateStep", stepMeta.getName(), stepid));
}
// How many copies are launched of this step?
int nrCopies = stepMeta.getCopies();
if (log.isDebug()) {
log.logDebug(BaseMessages.getString(PKG, "Trans.Log.StepHasNumberRowCopies", String.valueOf(nrCopies)));
}
// At least run once...
for (int c = 0; c < nrCopies; c++) {
// Make sure we haven't started it yet!
if (!hasStepStarted(stepMeta.getName(), c)) {
StepMetaDataCombi combi = new StepMetaDataCombi();
combi.stepname = stepMeta.getName();
combi.copy = c;
// The meta-data
combi.stepMeta = stepMeta;
combi.meta = stepMeta.getStepMetaInterface();
// Allocate the step data
StepDataInterface data = combi.meta.getStepData();
combi.data = data;
// Allocate the step
StepInterface step = combi.meta.getStep(stepMeta, data, c, transMeta, this);
// Copy the variables of the transformation to the step...
// don't share. Each copy of the step has its own variables.
//
step.initializeVariablesFrom(this);
step.setUsingThreadPriorityManagment(transMeta.isUsingThreadPriorityManagment());
// Pass the connected repository & metaStore to the step's runtime
//
step.setRepository(repository);
step.setMetaStore(metaStore);
// If the step is partitioned, pass the partition ID and some other things as well...
if (stepMeta.isPartitioned()) {
List<String> partitionIDs = stepMeta.getStepPartitioningMeta().getPartitionSchema().getPartitionIDs();
if (partitionIDs != null && partitionIDs.size() > 0) {
// Pass the partition ID to the step
step.setPartitionID(partitionIDs.get(c));
}
}
// Save the step too
combi.step = step;
// Pass the log level and metrics gathering down to the step's log channel.
if (combi.step instanceof LoggingObjectInterface) {
LogChannelInterface logChannel = combi.step.getLogChannel();
logChannel.setLogLevel(logLevel);
logChannel.setGatheringMetrics(log.isGatheringMetrics());
}
// Add to the bunch...
steps.add(combi);
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.TransformationHasAllocatedANewStep", stepMeta.getName(), String.valueOf(c)));
}
}
}
}
// Let steps that do error handling identify their error output row sets.
for (int s = 0; s < steps.size(); s++) {
StepMetaDataCombi combi = steps.get(s);
if (combi.stepMeta.isDoingErrorHandling()) {
combi.step.identifyErrorOutput();
}
}
// Now (optionally) write start log record!
// Make sure we synchronize appropriately to avoid duplicate batch IDs.
//
Object syncObject = this;
if (parentJob != null) {
// parallel execution in a job
syncObject = parentJob;
}
if (parentTrans != null) {
// multiple sub-transformations
syncObject = parentTrans;
}
synchronized (syncObject) {
calculateBatchIdAndDateRange();
beginProcessing();
}
// Set the (re-)partitioning information on every step, based on the source/target relations below.
for (int i = 0; i < steps.size(); i++) {
StepMetaDataCombi sid = steps.get(i);
StepMeta stepMeta = sid.stepMeta;
StepInterface baseStep = sid.step;
baseStep.setPartitioned(stepMeta.isPartitioned());
// Now let's take a look at the source and target relation
//
// If this source step is not partitioned, and the target step is: it
// means we need to re-partition the incoming data.
// If both steps are partitioned on the same method and schema, we don't
// need to re-partition
// If both steps are partitioned on a different method or schema, we need
// to re-partition as well.
// If both steps are not partitioned, we don't need to re-partition
//
boolean isThisPartitioned = stepMeta.isPartitioned();
PartitionSchema thisPartitionSchema = null;
if (isThisPartitioned) {
thisPartitionSchema = stepMeta.getStepPartitioningMeta().getPartitionSchema();
}
boolean isNextPartitioned = false;
StepPartitioningMeta nextStepPartitioningMeta = null;
PartitionSchema nextPartitionSchema = null;
List<StepMeta> nextSteps = transMeta.findNextSteps(stepMeta);
int nrNext = nextSteps.size();
for (int p = 0; p < nrNext; p++) {
StepMeta nextStep = nextSteps.get(p);
if (nextStep.isPartitioned()) {
isNextPartitioned = true;
nextStepPartitioningMeta = nextStep.getStepPartitioningMeta();
nextPartitionSchema = nextStepPartitioningMeta.getPartitionSchema();
}
}
baseStep.setRepartitioning(StepPartitioningMeta.PARTITIONING_METHOD_NONE);
// Re-partition when the target step is partitioned differently from this step.
if ((!isThisPartitioned && isNextPartitioned) || (isThisPartitioned && isNextPartitioned && !thisPartitionSchema.equals(nextPartitionSchema))) {
baseStep.setRepartitioning(nextStepPartitioningMeta.getMethodType());
}
// For partitioning to a set of remote steps (re-partitioning from a master
// to a set of remote output steps)
//
StepPartitioningMeta targetStepPartitioningMeta = baseStep.getStepMeta().getTargetStepPartitioningMeta();
if (targetStepPartitioningMeta != null) {
baseStep.setRepartitioning(targetStepPartitioningMeta.getMethodType());
}
}
setPreparing(false);
setInitializing(true);
// Only topology-sort monitored transformations with fewer than 150 steps; sorting gets expensive beyond that.
if (isMonitored() && steps.size() < 150) {
doTopologySortOfSteps();
}
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.InitialisingSteps", String.valueOf(steps.size())));
}
StepInitThread[] initThreads = new StepInitThread[steps.size()];
Thread[] threads = new Thread[steps.size()];
// Initialize all the steps, each in its own thread...
for (int i = 0; i < steps.size(); i++) {
final StepMetaDataCombi sid = steps.get(i);
// Do the init code in the background!
// Init all steps at once, but ALL steps need to finish before we can
// continue properly!
//
initThreads[i] = new StepInitThread(sid, log);
// Put it in a separate thread!
//
threads[i] = new Thread(initThreads[i]);
threads[i].setName("init of " + sid.stepname + "." + sid.copy + " (" + threads[i].getName() + ")");
ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.StepBeforeInitialize.id, initThreads[i]);
threads[i].start();
}
for (int i = 0; i < threads.length; i++) {
try {
threads[i].join();
ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.StepAfterInitialize.id, initThreads[i]);
} catch (Exception ex) {
log.logError("Error with init thread: " + ex.getMessage(), ex.getMessage());
log.logError(Const.getStackTracker(ex));
}
}
setInitializing(false);
boolean ok = true;
// Check whether every step initialized correctly...
for (int i = 0; i < initThreads.length; i++) {
StepMetaDataCombi combi = initThreads[i].getCombi();
if (!initThreads[i].isOk()) {
log.logError(BaseMessages.getString(PKG, "Trans.Log.StepFailedToInit", combi.stepname + "." + combi.copy));
combi.data.setStatus(StepExecutionStatus.STATUS_STOPPED);
ok = false;
} else {
combi.data.setStatus(StepExecutionStatus.STATUS_IDLE);
if (log.isDetailed()) {
log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.StepInitialized", combi.stepname + "." + combi.copy));
}
}
}
if (!ok) {
// At least one step failed to initialize: dispose of all the steps and flag their status.
for (int i = 0; i < initThreads.length; i++) {
StepMetaDataCombi combi = initThreads[i].getCombi();
// Dispose will overwrite the status, but we set it back right after
// this.
combi.step.dispose(combi.meta, combi.data);
if (initThreads[i].isOk()) {
combi.data.setStatus(StepExecutionStatus.STATUS_HALTED);
} else {
combi.data.setStatus(StepExecutionStatus.STATUS_STOPPED);
}
}
// Just for safety, fire the trans finished listeners...
try {
fireTransFinishedListeners();
} catch (KettleException e) {
// one of the listeners produced an error
log.logError(BaseMessages.getString(PKG, "Trans.FinishListeners.Exception"));
// we will not pass this exception up to the prepareExecution() entry point.
} finally {
// Flag the transformation as finished even if exception was thrown
setFinished(true);
}
// In preview mode, add the log text to the exception to give feedback to the user.
if (preview) {
String logText = KettleLogStore.getAppender().getBuffer(getLogChannelId(), true).toString();
throw new KettleException(BaseMessages.getString(PKG, "Trans.Log.FailToInitializeAtLeastOneStep") + Const.CR + logText);
} else {
throw new KettleException(BaseMessages.getString(PKG, "Trans.Log.FailToInitializeAtLeastOneStep") + Const.CR);
}
}
log.snap(Metrics.METRIC_TRANSFORMATION_INIT_STOP);
KettleEnvironment.setExecutionInformation(this, repository);
setReadyToStart(true);
}
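A minimal usage sketch for the method above, assuming a local .ktr file (the path and the absence of command-line arguments are illustrative, not part of the source):
KettleEnvironment.init();
TransMeta transMeta = new TransMeta("/path/to/transformation.ktr"); // hypothetical file
Trans trans = new Trans(transMeta);
trans.prepareExecution(null); // prepare row sets, steps and init threads as shown above
trans.startThreads(); // start the prepared step threads
trans.waitUntilFinished(); // block until every step has finished
if (trans.getErrors() > 0) {
  throw new KettleException("Transformation finished with errors");
}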
use of org.pentaho.di.core.QueueRowSet in project pentaho-kettle by pentaho.
the class Trans, method addRowProducer.
/**
* This adds a row producer to the transformation that just got set up. It is preferable to call this BEFORE execute(),
* but after prepareExecution().
*
* @param stepname
* The step to produce rows for
* @param copynr
* The copynr of the step to produce rows for (normally 0 unless you have multiple copies running)
* @return the row producer
* @throws KettleException
* in case the thread/step to produce rows for could not be found.
* @see Trans#execute(String[])
* @see Trans#prepareExecution(String[])
*/
public RowProducer addRowProducer(String stepname, int copynr) throws KettleException {
StepInterface stepInterface = getStepInterface(stepname, copynr);
if (stepInterface == null) {
throw new KettleException("Unable to find thread with name " + stepname + " and copy number " + copynr);
}
// We are going to add an extra RowSet to this stepInterface.
RowSet rowSet;
switch(transMeta.getTransformationType()) {
case Normal:
rowSet = new BlockingRowSet(transMeta.getSizeRowset());
break;
case SerialSingleThreaded:
rowSet = new SingleRowRowSet();
break;
case SingleThreaded:
rowSet = new QueueRowSet();
break;
default:
throw new KettleException("Unhandled transformation type: " + transMeta.getTransformationType());
}
// Add this rowset to the list of active rowsets for the selected step
stepInterface.addRowSetToInputRowSets(rowSet);
return new RowProducer(stepInterface, rowSet);
}
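A hedged usage sketch: after prepareExecution(), the RowProducer returned here lets calling code inject rows into the running transformation. The step name "Injector" and the one-field row layout are assumptions for illustration:
trans.prepareExecution(null);
RowProducer producer = trans.addRowProducer("Injector", 0); // "Injector" is a hypothetical step name
trans.startThreads();
RowMetaInterface rowMeta = new RowMeta();
rowMeta.addValueMeta(new ValueMetaString("name"));
producer.putRow(rowMeta, new Object[] { "hello" }); // queued on the extra input row set added above
producer.finished(); // signal end of input so the step can finish
trans.waitUntilFinished();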
use of org.pentaho.di.core.QueueRowSet in project pentaho-kettle by pentaho.
the class JsonInput, method prepareToRowProcessing.
@Override
protected void prepareToRowProcessing() throws KettleException, KettleStepException, KettleValueException {
if (!meta.isInFields()) {
data.outputRowMeta = new RowMeta();
if (!meta.isDoNotFailIfNoFile() && data.files.nrOfFiles() == 0) {
String errMsg = BaseMessages.getString(PKG, "JsonInput.Log.NoFiles");
logError(errMsg);
inputError(errMsg);
}
} else {
data.readrow = getRow();
data.inputRowMeta = getInputRowMeta();
if (data.inputRowMeta == null) {
data.hasFirstRow = false;
return;
}
data.hasFirstRow = true;
data.outputRowMeta = data.inputRowMeta.clone();
// Check if source field is provided
if (Utils.isEmpty(meta.getFieldValue())) {
logError(BaseMessages.getString(PKG, "JsonInput.Log.NoField"));
throw new KettleException(BaseMessages.getString(PKG, "JsonInput.Log.NoField"));
}
// cache the position of the field
if (data.indexSourceField < 0) {
data.indexSourceField = getInputRowMeta().indexOfValue(meta.getFieldValue());
if (data.indexSourceField < 0) {
logError(BaseMessages.getString(PKG, "JsonInput.Log.ErrorFindingField", meta.getFieldValue()));
throw new KettleException(BaseMessages.getString(PKG, "JsonInput.Exception.CouldnotFindField", meta.getFieldValue()));
}
}
// if RemoveSourceField option is set, we remove the source field from the output meta
if (meta.isRemoveSourceField()) {
data.outputRowMeta.removeValueMeta(data.indexSourceField);
// Get total previous fields minus one since we remove source field
data.totalpreviousfields = data.inputRowMeta.size() - 1;
} else {
// Get total previous fields
data.totalpreviousfields = data.inputRowMeta.size();
}
}
meta.getFields(data.outputRowMeta, getStepname(), null, null, this, repository, metaStore);
// Create convert meta-data objects that will contain Date & Number formatters
data.convertRowMeta = data.outputRowMeta.cloneToType(ValueMetaInterface.TYPE_STRING);
data.inputs = new InputsReader(this, meta, data, new InputErrorHandler()).iterator();
// data.recordnr = 0;
data.readerRowSet = new QueueRowSet();
data.readerRowSet.setDone();
this.rowOutputConverter = new RowOutputConverter(getLogChannel());
// provide reader input fields with real path [PDI-15942]
JsonInputField[] inputFields = new JsonInputField[data.nrInputFields];
for (int i = 0; i < data.nrInputFields; i++) {
JsonInputField field = meta.getInputFields()[i].clone();
field.setPath(environmentSubstitute(field.getPath()));
inputFields[i] = field;
}
data.reader.setFields(inputFields);
}
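The QueueRowSet assigned to data.readerRowSet above is a simple non-blocking FIFO. A standalone sketch of the putRow/getRow/setDone contract this code relies on (the single-field row layout is illustrative):
RowSet rowSet = new QueueRowSet();
RowMetaInterface rowMeta = new RowMeta();
rowMeta.addValueMeta(new ValueMetaString("json"));
rowSet.putRow(rowMeta, new Object[] { "{ \"a\": 1 }" });
Object[] row = rowSet.getRow(); // returns the queued row immediately, no blocking
rowSet.setDone(); // mark end-of-data, exactly what prepareToRowProcessing() does
Object[] none = rowSet.getRow(); // null: the queue is empty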
use of org.pentaho.di.core.QueueRowSet in project pentaho-kettle by pentaho.
the class SwitchCaseTest, method testProcessRow.
/**
* PDI-6900. Test that processRow() works correctly. Simulates the step workload when input and output row sets are
* already created and mapped to the specified case values.
*
* @throws KettleException
*/
@Test
public void testProcessRow() throws KettleException {
SwitchCaseCustom krasavez = new SwitchCaseCustom(mockHelper);
krasavez.first = false;
// create two output row sets
RowSet rowSetOne = new QueueRowSet();
RowSet rowSetTwo = new QueueRowSet();
// these two row sets should contain only '3'.
krasavez.data.outputMap.put(3, rowSetOne);
krasavez.data.outputMap.put(3, rowSetTwo);
// these row sets contain nulls only
RowSet rowSetNullOne = new QueueRowSet();
RowSet rowSetNullTwo = new QueueRowSet();
krasavez.data.nullRowSetSet.add(rowSetNullOne);
krasavez.data.nullRowSetSet.add(rowSetNullTwo);
// this row set receives everything except null and '3'
RowSet def = new QueueRowSet();
krasavez.data.defaultRowSetSet.add(def);
// generate some data (see method implementation)
// expected: 5 nulls plus one trailing empty string -> 6 rows per null row set
// expected: 1*2 = 2 times '3' in each '3' row set
// expected: 5*2 + 5 + 1 = 16 rows generated in total
// expected: 16 - 6 - 2 = 8 distinct rows go to default
// System.out.println( krasavez.getInputDataOverview() );
// 1, 1, null, 2, 2, null, 3, 3, null, 4, 4, null, 5, 5, null,""
krasavez.generateData(1, 5, 2);
// call method under test
krasavez.processRow();
assertEquals("First row set collects 2 rows", 2, rowSetOne.size());
assertEquals("Second row set collects 2 rows", 2, rowSetTwo.size());
assertEquals("First null row set collects 5 rows", 6, rowSetNullOne.size());
assertEquals("Second null row set collects 5 rows", 6, rowSetNullTwo.size());
assertEquals("Default row set collects the rest of rows", 8, def.size());
// now - check the data is correct in every row set:
assertEquals("First row set contains only 3: ", true, isRowSetContainsValue(rowSetOne, new Object[] { 3 }, new Object[] {}));
assertEquals("Second row set contains only 3: ", true, isRowSetContainsValue(rowSetTwo, new Object[] { 3 }, new Object[] {}));
assertEquals("First null row set contains only null: ", true, isRowSetContainsValue(rowSetNullOne, new Object[] { null }, new Object[] {}));
assertEquals("Second null row set contains only null: ", true, isRowSetContainsValue(rowSetNullTwo, new Object[] { null }, new Object[] {}));
assertEquals("Default row set do not contains null or 3, but other", true, isRowSetContainsValue(def, new Object[] { 1, 2, 4, 5 }, new Object[] { 3, null }));
}
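The isRowSetContainsValue(...) helper is not shown on this page. A plausible reconstruction of what it checks: it drains the row set and verifies the first field of every row against the allowed and forbidden values (hypothetical, not the original helper):
private boolean isRowSetContainsValue(RowSet rowSet, Object[] contains, Object[] notContains) {
  // Hypothetical reconstruction: true when every row's first value is allowed and never forbidden.
  Set<Object> allowed = new HashSet<>(Arrays.asList(contains));
  Set<Object> forbidden = new HashSet<>(Arrays.asList(notContains));
  for (Object[] row = rowSet.getRow(); row != null; row = rowSet.getRow()) {
    if (forbidden.contains(row[0]) || !allowed.contains(row[0])) {
      return false;
    }
  }
  return true;
}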
use of org.pentaho.di.core.QueueRowSet in project pentaho-kettle by pentaho.
the class SwitchCaseTest, method testCreateOutputValueMapping.
/**
* PDI-6900. Check that the SwitchCase step correctly maps input values to output row sets.
*
* @throws KettleException
* @throws URISyntaxException
* @throws ParserConfigurationException
* @throws SAXException
* @throws IOException
*/
@Test
public void testCreateOutputValueMapping() throws KettleException, URISyntaxException, ParserConfigurationException, SAXException, IOException {
SwitchCaseCustom krasavez = new SwitchCaseCustom(mockHelper);
// load step info value-case mapping from xml.
List<DatabaseMeta> emptyList = new ArrayList<DatabaseMeta>();
krasavez.meta.loadXML(loadStepXmlMetadata("SwitchCaseTest.xml"), emptyList, mock(IMetaStore.class));
KeyToRowSetMap expectedNN = new KeyToRowSetMap();
Set<RowSet> nulls = new HashSet<RowSet>();
// create real steps for all targets
List<SwitchCaseTarget> list = krasavez.meta.getCaseTargets();
for (SwitchCaseTarget item : list) {
StepMetaInterface smInt = new DummyTransMeta();
StepMeta stepMeta = new StepMeta(item.caseTargetStepname, smInt);
item.caseTargetStep = stepMeta;
// create and register a row set for this target
RowSet rw = new QueueRowSet();
krasavez.map.put(item.caseTargetStepname, rw);
// null case values go to the null row set
if (item.caseValue != null) {
expectedNN.put(item.caseValue, rw);
} else {
nulls.add(rw);
}
}
// create default step
StepMetaInterface smInt = new DummyTransMeta();
StepMeta stepMeta = new StepMeta(krasavez.meta.getDefaultTargetStepname(), smInt);
krasavez.meta.setDefaultTargetStep(stepMeta);
RowSet rw = new QueueRowSet();
krasavez.map.put(krasavez.meta.getDefaultTargetStepname(), rw);
krasavez.createOutputValueMapping();
// inspect step output data:
Set<RowSet> ones = krasavez.data.outputMap.get("1");
assertEquals("Output map for 1 values contains 2 row sets", 2, ones.size());
Set<RowSet> twos = krasavez.data.outputMap.get("2");
assertEquals("Output map for 2 values contains 1 row sets", 1, twos.size());
assertEquals("Null row set contains 2 items: ", 2, krasavez.data.nullRowSetSet.size());
assertEquals("We have at least one default rowset", 1, krasavez.data.defaultRowSetSet.size());
// check that rowsets data is correct:
Set<RowSet> rowsets = expectedNN.get("1");
for (RowSet rowset : rowsets) {
assertTrue("Output map for 1 values contains expected row set", ones.contains(rowset));
}
rowsets = expectedNN.get("2");
for (RowSet rowset : rowsets) {
assertTrue("Output map for 2 values contains expected row set", twos.contains(rowset));
}
for (RowSet rowset : krasavez.data.nullRowSetSet) {
assertTrue("Output map for null values contains expected row set", nulls.contains(rowset));
}
// we have already checked that there is only one item.
for (RowSet rowset : krasavez.data.defaultRowSetSet) {
assertTrue("Output map for default case contains expected row set", rowset.equals(rw));
}
}
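The outputMap inspected above behaves as a multimap from case value to a set of row sets, which is why two put() calls with key "1" yield a two-element set. A plain-Java sketch of that contract (not the Kettle KeyToRowSetMap class itself):
Map<Object, Set<RowSet>> outputMap = new HashMap<>();
outputMap.computeIfAbsent("1", k -> new HashSet<>()).add(new QueueRowSet());
outputMap.computeIfAbsent("1", k -> new HashSet<>()).add(new QueueRowSet());
// outputMap.get("1").size() == 2, mirroring the "Output map for 1 values" assertion above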