use of org.apache.jena.tdb.base.file.Location in project jena by apache.
the class TestTDBFactory method testTDBFresh22.
@Test
public void testTDBFresh22() {
    // A fresh in-memory location must never be registered as in-use:
    // neither before nor after a dataset is created over it.
    Location loc = Location.mem();
    boolean b = TDBFactory.inUseLocation(loc);
    // Bug fix: this result was previously computed and silently overwritten
    // without being checked.
    assertFalse("Expected false before any dataset is created", b);
    TDBFactory.createDataset(loc);
    b = TDBFactory.inUseLocation(loc);
    assertFalse("Expected false for a unique memory location", b);
}
use of org.apache.jena.tdb.base.file.Location in project jena by apache.
the class JournalControl method findJournal.
/**
 * Locate the transaction journal for a dataset's location.
 *
 * @param dsg the TDB dataset graph whose location is inspected
 * @return an open {@link Journal} if a non-empty journal file exists
 *         at the location, otherwise {@code null}
 */
private static Journal findJournal(DatasetGraphTDB dsg) {
    Location loc = dsg.getLocation();
    File journalFile = new File(loc.absolute(Names.journalFile));
    // Only a non-empty regular file counts as a live journal.
    boolean present = journalFile.exists() && journalFile.isFile() && journalFile.length() > 0;
    return present ? Journal.create(loc) : null;
}
use of org.apache.jena.tdb.base.file.Location in project jena by apache.
the class DebugTDB method dumpNodeIndex.
// public static RangeIndex makeRangeIndex(Location location, String indexName,
// int dftKeyLength, int dftValueLength,
// int readCacheSize,int writeCacheSize)
/**
 * Print every record of the node-to-NodeId index stored under the
 * given directory, one record per line, to stdout.
 *
 * @param dir filesystem directory holding the TDB index files
 */
public static void dumpNodeIndex(String dir) {
    Location loc = Location.create(dir);
    Index index = SetupTDB.makeIndex(loc, Names.indexNode2Id,
                                     SystemTDB.BlockSize,
                                     SystemTDB.LenNodeHash,
                                     SystemTDB.SizeOfNodeId,
                                     -1, -1);
    for (Record record : index) {
        System.out.println(record);
    }
}
use of org.apache.jena.tdb.base.file.Location in project jena by apache.
the class ProcIndexCopy method exec.
// Ideas:
// Copy to buffer, sort, write in sequential clumps.
// Profile code for hotspots
// Maybe be worth opening the data file (the leaves) as a regular,
// non-memory mapped file as we read it through once, in natural order,
// and it may be laid out in increasing block order on-disk, e.g. repacked
// and in increasing order with occasional oddities if SPO from the bulk loader.
/**
 * Copy one tuple index into another, re-ordering columns as dictated
 * by the destination index name.
 *
 * @param locationStr1 directory of the source index
 * @param indexName1   name (and column order) of the source index, e.g. "SPO"
 * @param locationStr2 directory of the destination index
 * @param indexName2   name (and column order) of the destination index
 */
public static void exec(String locationStr1, String indexName1, String locationStr2, String indexName2) {
    // Argument processing
    Location srcLoc = Location.create(locationStr1);
    Location dstLoc = Location.create(locationStr2);
    // One NodeId slot per column of the index name; values are unused.
    int keyLength = indexName1.length() * SystemTDB.SizeOfNodeId;
    int valueLength = 0;
    // The index name encodes the column order; "SPO" is the primary order.
    String primary = "SPO";
    String label = indexName1 + " => " + indexName2;
    TupleIndex srcIndex = Build.openTupleIndex(srcLoc, indexName1, primary, indexName1, 10, 10, keyLength, valueLength);
    TupleIndex dstIndex = Build.openTupleIndex(dstLoc, indexName2, primary, indexName2, 10, 10, keyLength, valueLength);
    tupleIndexCopy(srcIndex, dstIndex, label);
    srcIndex.close();
    dstIndex.close();
}
use of org.apache.jena.tdb.base.file.Location in project jena by apache.
the class ProcIndexBuild method exec.
/**
 * Bulk-build a single B+Tree tuple index from a data file of tuples.
 * The index name (e.g. "POS", "GSPO") both names the index and gives its
 * column order; its length (3 or 4) selects triple vs quad record sizes.
 *
 * @param locationStr directory where the index files are written
 * @param indexName   index name / column order; must be 3 or 4 chars long
 * @param dataFile    input file of tuples, read via {@code RecordsFromInput}
 * @throws AtlasException if the index name is not 3 or 4 characters
 */
public static void exec(String locationStr, String indexName, String dataFile) {
// Argument processing
Location location = Location.create(locationStr);
//InputStream input = System.in ;
InputStream input = IO.openFile(dataFile);
// NOTE(review): keyLength/valueLength are computed here but never used below;
// the RecordFactory is built from dftKeyLength/dftValueLength instead.
int keyLength = SystemTDB.SizeOfNodeId * indexName.length();
int valueLength = 0;
// The name is the order.
String primary = indexName;
// Scope for optimization:
// Null column map => no churn.
// Do record -> record copy, not Tuple, Tuple copy.
String primaryOrder;
int dftKeyLength;
int dftValueLength;
// Tuple width (3 = triples, 4 = quads) picks the canonical primary order
// and the fixed record length for that index family.
int tupleLength = indexName.length();
if (tupleLength == 3) {
primaryOrder = Names.primaryIndexTriples;
dftKeyLength = SystemTDB.LenIndexTripleRecord;
dftValueLength = 0;
} else if (tupleLength == 4) {
primaryOrder = Names.primaryIndexQuads;
dftKeyLength = SystemTDB.LenIndexQuadRecord;
dftValueLength = 0;
} else {
throw new AtlasException("Index name: " + indexName);
}
// Maps tuple columns from the primary order into this index's order.
ColumnMap colMap = new ColumnMap(primaryOrder, indexName);
// -1? Write only.
// Also flush cache every so often => block writes (but not sequential so boring).
int readCacheSize = 10;
int writeCacheSize = 100;
int blockSize = SystemTDB.BlockSize;
RecordFactory recordFactory = new RecordFactory(dftKeyLength, dftValueLength);
// B+Tree branching factor derived from how many records fit in a block.
int order = BPlusTreeParams.calcOrder(blockSize, recordFactory);
BPlusTreeParams bptParams = new BPlusTreeParams(order, recordFactory);
int blockSizeNodes = blockSize;
int blockSizeRecords = blockSize;
// Separate block managers for the tree's internal nodes and its leaf records.
FileSet destination = new FileSet(location, indexName);
BlockMgr blkMgrNodes = BlockMgrFactory.create(destination, Names.bptExtTree, blockSizeNodes, readCacheSize, writeCacheSize);
BlockMgr blkMgrRecords = BlockMgrFactory.create(destination, Names.bptExtRecords, blockSizeRecords, readCacheSize, writeCacheSize);
// Records are read from the input in batches of rowBlock rows.
// NOTE(review): packIntoBPlusTree presumably requires the input sorted in
// index order — confirm against the rewriter's contract before reuse.
int rowBlock = 1000;
Iterator<Record> iter = new RecordsFromInput(input, tupleLength, colMap, rowBlock);
BPlusTree bpt2 = BPlusTreeRewriter.packIntoBPlusTree(iter, bptParams, recordFactory, blkMgrNodes, blkMgrRecords);
bpt2.close();
}
Aggregations