Use of de.lmu.ifi.dbs.elki.logging.Logging in project elki by elki-project.
Class AbstractRStarTree, method logStatistics:
@Override
public void logStatistics() {
  Logging log = getLogger();
  if (log.isStatistics()) {
    super.logStatistics();
    // Report the height of the tree under a class-qualified statistics key.
    log.statistics(new LongStatistic(this.getClass().getName() + ".height", height));
    // Delegate to the tree's statistics aggregator (e.g. page access counters).
    statistics.logStatistics();
  }
}
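The same statistics idiom can be used from any ELKI component: obtain a per-class Logging instance, guard on isStatistics(), and emit key-value statistics. Below is a minimal sketch; MyAlgorithm and the ".distance-computations" key are hypothetical stand-ins, while Logging and LongStatistic are the real ELKI classes shown above.

import de.lmu.ifi.dbs.elki.logging.Logging;
import de.lmu.ifi.dbs.elki.logging.statistics.LongStatistic;

public class MyAlgorithm {
  // One static logger per class, following the ELKI convention.
  private static final Logging LOG = Logging.getLogger(MyAlgorithm.class);

  public void run() {
    long distanceComputations = 0;
    // ... algorithm work that increments distanceComputations ...
    // Guard with isStatistics() so statistic objects are only built
    // when statistics logging is actually enabled.
    if (LOG.isStatistics()) {
      LOG.statistics(new LongStatistic(MyAlgorithm.class.getName() + ".distance-computations", distanceComputations));
    }
  }
}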
Use of de.lmu.ifi.dbs.elki.logging.Logging in project elki by elki-project.
Class AbstractRStarTree, method initializeCapacities:
@Override
protected void initializeCapacities(E exampleLeaf) {
  /* Simulate the creation of a leaf page to get the page capacity */
  try {
    int cap = 0;
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    ObjectOutputStream oos = new ObjectOutputStream(baos);
    SpatialPointLeafEntry sl = new SpatialPointLeafEntry(DBIDUtil.importInteger(0), new double[exampleLeaf.getDimensionality()]);
    while (baos.size() <= getPageSize()) {
      sl.writeExternal(oos);
      oos.flush();
      cap++;
    }
    // The last write caused the page to overflow, so it does not count.
    leafCapacity = cap - 1;
  } catch (IOException e) {
    throw new AbortException("Error determining page sizes.", e);
  }
  /* Simulate the creation of a directory page to get the capacity */
  try {
    int cap = 0;
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    ObjectOutputStream oos = new ObjectOutputStream(baos);
    ModifiableHyperBoundingBox hb = new ModifiableHyperBoundingBox(new double[exampleLeaf.getDimensionality()], new double[exampleLeaf.getDimensionality()]);
    SpatialDirectoryEntry sl = new SpatialDirectoryEntry(0, hb);
    while (baos.size() <= getPageSize()) {
      sl.writeExternal(oos);
      oos.flush();
      cap++;
    }
    // Again, the last write overflowed the page.
    dirCapacity = cap - 1;
  } catch (IOException e) {
    throw new AbortException("Error determining page sizes.", e);
  }
  if (dirCapacity <= 2) {
    throw new IllegalArgumentException("Node size of " + getPageSize() + " bytes is chosen too small!");
  }
  final Logging log = getLogger();
  if (dirCapacity < 10) {
    log.warning("Page size is chosen very small! Maximum number of entries in a directory node = " + dirCapacity);
  }
  // Minimum number of entries per directory node.
  dirMinimum = (int) Math.floor(dirCapacity * settings.relativeMinFill);
  if (dirMinimum < 1) {
    dirMinimum = 1;
  }
  if (leafCapacity <= 2) {
    throw new IllegalArgumentException("Node size of " + getPageSize() + " bytes is chosen too small!");
  }
  if (leafCapacity < 10) {
    log.warning("Page size is chosen very small! Maximum number of entries in a leaf node = " + leafCapacity);
  }
  // Minimum number of entries per leaf node.
  leafMinimum = (int) Math.floor(leafCapacity * settings.relativeMinFill);
  if (leafMinimum < 1) {
    leafMinimum = 1;
  }
}
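The estimation trick above is generic: serialize representative entries into an in-memory buffer until it exceeds the page size, then keep one less than the count. The following self-contained sketch reproduces the technique without any ELKI dependencies; PAGE_SIZE and DemoEntry are illustrative stand-ins, not ELKI classes.

import java.io.ByteArrayOutputStream;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;

public class CapacityEstimator {
  static final int PAGE_SIZE = 4096; // hypothetical page size in bytes

  // Stand-in for a spatial leaf entry: an integer id plus a coordinate vector.
  static class DemoEntry implements Externalizable {
    int id;
    double[] coords;

    public DemoEntry() {
      // Public no-arg constructor required by Externalizable.
    }

    DemoEntry(int id, double[] coords) {
      this.id = id;
      this.coords = coords;
    }

    @Override
    public void writeExternal(ObjectOutput out) throws IOException {
      out.writeInt(id);
      for (double d : coords) {
        out.writeDouble(d);
      }
    }

    @Override
    public void readExternal(ObjectInput in) throws IOException {
      // Not needed for the size probe.
    }
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    ObjectOutputStream oos = new ObjectOutputStream(baos);
    DemoEntry entry = new DemoEntry(0, new double[3]);
    int cap = 0;
    // Write entries until the in-memory buffer exceeds one page.
    while (baos.size() <= PAGE_SIZE) {
      entry.writeExternal(oos); // write raw fields each time, as the ELKI code does
      oos.flush();
      cap++;
    }
    // The last write caused the overflow, so it does not fit on the page.
    System.out.println("Estimated page capacity: " + (cap - 1));
  }
}

Calling writeExternal directly (rather than ObjectOutputStream.writeObject) matters here: writeObject would replace repeated writes of the same object with small back-references and grossly overestimate the capacity.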
Use of de.lmu.ifi.dbs.elki.logging.Logging in project elki by elki-project.
Class AbstractConversionFilter, method filter:
/**
 * A standard implementation of the filter process. First, all suitable
 * representations are found. Then (if {@link #prepareStart} returns true),
 * the data is processed read-only in a first pass.
 *
 * In the main pass, each object is then filtered using
 * {@link #filterSingleObject}.
 *
 * @param objects Objects to filter
 * @return Filtered bundle
 */
@Override
public MultipleObjectsBundle filter(MultipleObjectsBundle objects) {
  if (objects.dataLength() == 0) {
    return objects;
  }
  MultipleObjectsBundle bundle = new MultipleObjectsBundle();
  final Logging logger = getLogger();
  for (int r = 0; r < objects.metaLength(); r++) {
    @SuppressWarnings("unchecked")
    SimpleTypeInformation<Object> type = (SimpleTypeInformation<Object>) objects.meta(r);
    @SuppressWarnings("unchecked")
    final List<Object> column = (List<Object>) objects.getColumn(r);
    // Columns of an unsupported type are passed through unchanged.
    if (!getInputTypeRestriction().isAssignableFromType(type)) {
      bundle.appendColumn(type, column);
      continue;
    }
    // Get the replacement type information
    @SuppressWarnings("unchecked")
    final SimpleTypeInformation<I> castType = (SimpleTypeInformation<I>) type;
    // When necessary, perform an initialization scan
    if (prepareStart(castType)) {
      FiniteProgress pprog = logger.isVerbose() ? new FiniteProgress("Preparing normalization", objects.dataLength(), logger) : null;
      for (Object o : column) {
        @SuppressWarnings("unchecked")
        final I obj = (I) o;
        prepareProcessInstance(obj);
        logger.incrementProcessed(pprog);
      }
      logger.ensureCompleted(pprog);
      prepareComplete();
    }
    @SuppressWarnings("unchecked")
    final List<O> castColumn = (List<O>) column;
    bundle.appendColumn(convertedType(castType), castColumn);
    // Normalization scan
    FiniteProgress nprog = logger.isVerbose() ? new FiniteProgress("Data normalization", objects.dataLength(), logger) : null;
    for (int i = 0; i < objects.dataLength(); i++) {
      @SuppressWarnings("unchecked")
      final I obj = (I) column.get(i);
      // Replace each object in place with its filtered version.
      final O normalizedObj = filterSingleObject(obj);
      castColumn.set(i, normalizedObj);
      logger.incrementProcessed(nprog);
    }
    logger.ensureCompleted(nprog);
  }
  return bundle;
}
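The progress-reporting idiom used twice above is worth isolating: a FiniteProgress is allocated only when verbose logging is enabled (null otherwise), and the Logging methods incrementProcessed and ensureCompleted tolerate the null. Below is a minimal sketch of the same pattern; ProgressDemo and its work loop are hypothetical, while FiniteProgress and the Logging methods are the real ELKI APIs used in the filter above.

import de.lmu.ifi.dbs.elki.logging.Logging;
import de.lmu.ifi.dbs.elki.logging.progress.FiniteProgress;

public class ProgressDemo {
  private static final Logging LOG = Logging.getLogger(ProgressDemo.class);

  public void process(int totalItems) {
    // Only allocate the progress object when verbose output is enabled;
    // the Logging methods below accept null and then do nothing.
    FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("Processing items", totalItems, LOG) : null;
    for (int i = 0; i < totalItems; i++) {
      // ... per-item work would go here ...
      LOG.incrementProcessed(prog);
    }
    // Marks the progress as finished, warning if the count never reached the total.
    LOG.ensureCompleted(prog);
  }
}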