Use of org.pentaho.di.core.TimedRow in the pentaho-kettle project (by Pentaho).
The storeRowInCache method of the DefaultCache class.
@Override
public void storeRowInCache(DatabaseLookupMeta meta, RowMetaInterface lookupMeta, Object[] lookupRow, Object[] add) {
  // Cache the looked-up values under a key built from the lookup row.
  RowMetaAndData cacheKey = new RowMetaAndData(lookupMeta, lookupRow);
  // NOTE (DEinspanjer 2009-02-01, kept from the original): duplicate keys are
  // deliberately overwritten here. A guard of the form
  //   if (!data.look.containsKey(cacheKey)) { data.look.put(cacheKey, new TimedRow(add)); }
  // was proposed — re-inserting serves no clear purpose, could make the step return
  // different values over the life of the transformation (if the source DB rows
  // change), and with load-all-data would reverse the configured order — but it was
  // never enabled pending a test case proving the point.
  map.put(cacheKey, new TimedRow(add));

  // For a bounded cache that has overflowed, evict a slice of the oldest entries.
  // (Historically this threw out entries when the configured cache size was too small.)
  boolean bounded = !meta.isLoadingAllDataInCache() && meta.getCacheSize() > 0;
  if (bounded && map.size() > meta.getCacheSize()) {
    List<RowMetaAndData> snapshot = new ArrayList<>(map.keySet());

    // Sample roughly 10% of the keys (at least one) to estimate entry ages
    // without touching every entry.
    int step = Math.max(1, snapshot.size() / 10);
    List<Date> sampledDates = new ArrayList<>();
    for (int i = 0; i < snapshot.size(); i += step) {
      TimedRow sampled = map.get(snapshot.get(i));
      sampledDates.add(sampled.getLogDate());
    }
    Collections.sort(sampledDates);

    if (sampledDates.size() > 1) {
      // Cut off at the second-oldest sampled date: everything strictly older
      // goes away, which guarantees at least the oldest sampled entry is evicted.
      Date cutoff = sampledDates.get(1);
      for (RowMetaAndData key : snapshot) {
        TimedRow candidate = map.get(key);
        if (candidate.getLogDate().compareTo(cutoff) < 0) {
          map.remove(key);
        }
      }
    }
  }
}
Aggregations