Use of org.h2.engine.Database in project h2database by h2database.
The class FullTextLucene, method search:
/**
 * Do the search.
 *
 * @param conn the database connection
 * @param text the query
 * @param limit the limit
 * @param offset the offset
 * @param data whether the raw data should be returned
 * @return the result set
 */
protected static ResultSet search(Connection conn, String text,
        int limit, int offset, boolean data) throws SQLException {
    SimpleResultSet result = createResultSet(data);
    if (conn.getMetaData().getURL().startsWith("jdbc:columnlist:")) {
        // this is just to query the result set columns
        return result;
    }
    if (text == null || text.trim().length() == 0) {
        return result;
    }
    try {
        IndexAccess access = getIndexAccess(conn);
        // take a reference as the searcher may change
        IndexSearcher searcher = access.getSearcher();
        try {
            // reuse the same analyzer; it's thread-safe;
            // also allows subclasses to control the analyzer used.
            Analyzer analyzer = access.writer.getAnalyzer();
            QueryParser parser = new QueryParser(Version.LUCENE_30,
                    LUCENE_FIELD_DATA, analyzer);
            Query query = parser.parse(text);
            // Lucene 3 insists on a hard limit and will not provide
            // a total hits value. Take at least 100 which is
            // an optimal limit for Lucene as any more
            // will trigger writing results to disk.
            int maxResults = (limit == 0 ? 100 : limit) + offset;
            TopDocs docs = searcher.search(query, maxResults);
            if (limit == 0) {
                limit = docs.totalHits;
            }
            for (int i = 0, len = docs.scoreDocs.length;
                    i < limit && i + offset < docs.totalHits && i + offset < len;
                    i++) {
                ScoreDoc sd = docs.scoreDocs[i + offset];
                Document doc = searcher.doc(sd.doc);
                float score = sd.score;
                String q = doc.get(LUCENE_FIELD_QUERY);
                if (data) {
                    int idx = q.indexOf(" WHERE ");
                    JdbcConnection c = (JdbcConnection) conn;
                    Session session = (Session) c.getSession();
                    Parser p = new Parser(session);
                    String tab = q.substring(0, idx);
                    ExpressionColumn expr = (ExpressionColumn) p.parseExpression(tab);
                    String schemaName = expr.getOriginalTableAliasName();
                    String tableName = expr.getColumnName();
                    q = q.substring(idx + " WHERE ".length());
                    Object[][] columnData = parseKey(conn, q);
                    result.addRow(schemaName, tableName, columnData[0],
                            columnData[1], score);
                } else {
                    result.addRow(q, score);
                }
            }
        } finally {
            access.returnSearcher(searcher);
        }
    } catch (Exception e) {
        throw convertException(e);
    }
    return result;
}
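This protected method is what the public FullTextLucene search helpers and the FTL_SEARCH / FTL_SEARCH_DATA SQL functions delegate to. A minimal usage sketch, assuming the Lucene jar is on the classpath and using an in-memory database and a table named TEST chosen only for illustration:

    import java.sql.*;

    public class FullTextLuceneExample {
        public static void main(String[] args) throws Exception {
            // illustrative in-memory database URL
            try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
                 Statement stat = conn.createStatement()) {
                stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)");
                stat.execute("INSERT INTO TEST VALUES(1, 'Hello World')");
                // register and initialize the Lucene full text extension
                org.h2.fulltext.FullTextLucene.init(conn);
                org.h2.fulltext.FullTextLucene.createIndex(conn, "PUBLIC", "TEST", null);
                // FTL_SEARCH returns the generated query string and the score,
                // which corresponds to the data == false branch above
                try (ResultSet rs = stat.executeQuery(
                        "SELECT * FROM FTL_SEARCH('Hello', 0, 0)")) {
                    while (rs.next()) {
                        System.out.println(rs.getString(1) + " score=" + rs.getFloat(2));
                    }
                }
            }
        }
    }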
Use of org.h2.engine.Database in project h2database by h2database.
The class Session, method unlockReadLocks:
/**
 * Unlock all read locks. This is done if the transaction isolation mode is
 * READ_COMMITTED.
 */
public void unlockReadLocks() {
    if (database.isMultiVersion()) {
        // MVCC: keep shared locks (insert / update / delete)
        return;
    }
    // locks is modified in the loop
    for (int i = 0; i < locks.size(); i++) {
        Table t = locks.get(i);
        if (!t.isLockedExclusively()) {
            synchronized (database) {
                t.unlock(this);
                locks.remove(i);
            }
            i--;
        }
    }
}
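This method is invoked at the end of each statement when the isolation level is READ_COMMITTED, so shared (read) locks do not survive past the statement and writers in other sessions are not blocked until the reader commits. A small illustrative sketch of that behaviour from the JDBC side (URLs and table names are made up for the example):

    import java.sql.*;

    public class ReadCommittedExample {
        public static void main(String[] args) throws Exception {
            // two connections to the same named in-memory database (illustrative URL)
            String url = "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1";
            try (Connection reader = DriverManager.getConnection(url);
                 Connection writer = DriverManager.getConnection(url)) {
                reader.createStatement().execute("CREATE TABLE T(ID INT)");
                // READ COMMITTED is H2's default; set it explicitly for clarity
                reader.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
                reader.setAutoCommit(false);
                // the SELECT takes a shared lock, but it is released when the
                // statement finishes (see unlockReadLocks above), so the writer
                // does not have to wait for the reader's commit
                try (ResultSet rs = reader.createStatement().executeQuery("SELECT * FROM T")) {
                    while (rs.next()) { /* consume */ }
                }
                writer.createStatement().execute("INSERT INTO T VALUES(1)");
                reader.commit();
            }
        }
    }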
Use of org.h2.engine.Database in project h2database by h2database.
The class Session, method commit:
/**
 * Commit the current transaction. If the statement was not a data
 * definition statement, and if there are temporary tables that should be
 * dropped or truncated at commit, this is done as well.
 *
 * @param ddl if the statement was a data definition statement
 */
public void commit(boolean ddl) {
    checkCommitRollback();
    currentTransactionName = null;
    transactionStart = 0;
    if (transaction != null) {
        // TODO should not rely on locking
        if (!locks.isEmpty()) {
            for (Table t : locks) {
                if (t instanceof MVTable) {
                    ((MVTable) t).commit();
                }
            }
        }
        transaction.commit();
        transaction = null;
    }
    if (containsUncommitted()) {
        // need to commit even if rollback is not possible
        // (create/drop table and so on)
        database.commit(this);
    }
    removeTemporaryLobs(true);
    if (undoLog.size() > 0) {
        // commit the rows when using MVCC
        if (database.isMultiVersion()) {
            ArrayList<Row> rows = New.arrayList();
            synchronized (database) {
                while (undoLog.size() > 0) {
                    UndoLogRecord entry = undoLog.getLast();
                    entry.commit();
                    rows.add(entry.getRow());
                    undoLog.removeLast(false);
                }
                for (Row r : rows) {
                    r.commit();
                }
            }
        }
        undoLog.clear();
    }
    if (!ddl) {
        // do not clean the temp tables if the last command was a
        // create/drop
        cleanTempTables(false);
        if (autoCommitAtTransactionEnd) {
            autoCommit = true;
            autoCommitAtTransactionEnd = false;
        }
    }
    int rows = getDatabase().getSettings().analyzeSample / 10;
    if (tablesToAnalyze != null) {
        for (Table table : tablesToAnalyze) {
            Analyze.analyzeTable(this, table, rows, false);
        }
        // analyze can lock the meta
        database.unlockMeta(this);
    }
    tablesToAnalyze = null;
    endTransaction();
}
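From the client side, Session.commit(false) is reached through Connection.commit() (and implicitly after each statement when auto-commit is on). A minimal JDBC sketch, with table and column names invented for the example:

    import java.sql.*;

    public class CommitExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test")) {
                conn.setAutoCommit(false);
                try (Statement stat = conn.createStatement()) {
                    stat.execute("CREATE TABLE ACCOUNT(ID INT PRIMARY KEY, BALANCE INT)");
                    stat.execute("INSERT INTO ACCOUNT VALUES(1, 100)");
                    stat.execute("UPDATE ACCOUNT SET BALANCE = BALANCE - 10 WHERE ID = 1");
                    // Connection.commit() ends up in Session.commit(false):
                    // the undo log is drained and temporary state is cleaned up
                    conn.commit();
                } catch (SQLException e) {
                    conn.rollback();
                    throw e;
                }
            }
        }
    }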
Use of org.h2.engine.Database in project h2database by h2database.
The class UndoLog, method add:
/**
 * Append an undo log entry to the log.
 *
 * @param entry the entry
 */
void add(UndoLogRecord entry) {
    records.add(entry);
    if (largeTransactions) {
        memoryUndo++;
        if (memoryUndo > database.getMaxMemoryUndo() &&
                database.isPersistent() &&
                !database.isMultiVersion()) {
            if (file == null) {
                String fileName = database.createTempFile();
                file = database.openFile(fileName, "rw", false);
                file.setCheckedWriting(false);
                file.setLength(FileStore.HEADER_LENGTH);
            }
            // spill the whole in-memory backlog to the temp file in blocks,
            // remembering the file position of each block
            Data buff = Data.create(database, Constants.DEFAULT_PAGE_SIZE);
            for (int i = 0; i < records.size(); i++) {
                UndoLogRecord r = records.get(i);
                buff.checkCapacity(Constants.DEFAULT_PAGE_SIZE);
                r.append(buff, this);
                if (i == records.size() - 1 ||
                        buff.length() > Constants.UNDO_BLOCK_SIZE) {
                    storedEntriesPos.add(file.getFilePointer());
                    file.write(buff.getBytes(), 0, buff.length());
                    buff.reset();
                }
            }
            storedEntries += records.size();
            memoryUndo = 0;
            records.clear();
            file.autoDelete();
        }
    } else {
        if (!entry.isStored()) {
            memoryUndo++;
        }
        if (memoryUndo > database.getMaxMemoryUndo() &&
                database.isPersistent() &&
                !database.isMultiVersion()) {
            // classic mode: save records individually once the memory limit is exceeded
            if (file == null) {
                String fileName = database.createTempFile();
                file = database.openFile(fileName, "rw", false);
                file.setCheckedWriting(false);
                file.seek(FileStore.HEADER_LENGTH);
                rowBuff = Data.create(database, Constants.DEFAULT_PAGE_SIZE);
                Data buff = rowBuff;
                for (UndoLogRecord r : records) {
                    saveIfPossible(r, buff);
                }
            } else {
                saveIfPossible(entry, rowBuff);
            }
            file.autoDelete();
        }
    }
}
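The method implements a spill-to-disk strategy: undo records stay in memory until getMaxMemoryUndo() is exceeded, then the backlog is serialized to a temporary file and only file positions are retained. A simplified, self-contained sketch of the same idea using plain JDK classes (not H2's Data/FileStore types):

    import java.io.IOException;
    import java.nio.file.*;
    import java.util.*;

    // Simplified illustration of the spill-to-disk pattern used by UndoLog.add:
    // entries are kept in memory until a threshold, then appended to a temp file.
    public class SpillingLog {
        private final int maxInMemory;
        private final List<String> records = new ArrayList<>();
        private Path file;

        SpillingLog(int maxInMemory) {
            this.maxInMemory = maxInMemory;
        }

        void add(String entry) throws IOException {
            records.add(entry);
            if (records.size() > maxInMemory) {
                if (file == null) {
                    file = Files.createTempFile("undolog", ".tmp");
                    file.toFile().deleteOnExit(); // roughly file.autoDelete() above
                }
                // append the whole in-memory backlog, then keep only the file
                Files.write(file, records, StandardOpenOption.APPEND);
                records.clear();
            }
        }
    }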
Use of org.h2.engine.Database in project h2database by h2database.
The class UndoLog, method getLast:
/**
 * Get the last record and remove it from the list of operations.
 *
 * @return the last record
 */
public UndoLogRecord getLast() {
    int i = records.size() - 1;
    if (largeTransactions) {
        if (i < 0 && storedEntries > 0) {
            // the in-memory records are used up:
            // re-read the most recently spilled block from the temp file
            int last = storedEntriesPos.size() - 1;
            long pos = storedEntriesPos.remove(last);
            long end = file.length();
            int bufferLength = (int) (end - pos);
            Data buff = Data.create(database, bufferLength);
            file.seek(pos);
            file.readFully(buff.getBytes(), 0, bufferLength);
            while (buff.length() < bufferLength) {
                UndoLogRecord e = UndoLogRecord.loadFromBuffer(buff, this);
                records.add(e);
                memoryUndo++;
            }
            storedEntries -= records.size();
            // truncate the file so the block is not read twice
            file.setLength(pos);
            file.seek(pos);
        }
        i = records.size() - 1;
    }
    UndoLogRecord entry = records.get(i);
    if (entry.isStored()) {
        // the entry was written to disk: load a window of stored
        // entries around it back into memory
        int start = Math.max(0, i - database.getMaxMemoryUndo() / 2);
        UndoLogRecord first = null;
        for (int j = start; j <= i; j++) {
            UndoLogRecord e = records.get(j);
            if (e.isStored()) {
                e.load(rowBuff, file, this);
                memoryUndo++;
                if (first == null) {
                    first = e;
                }
            }
        }
        for (int k = 0; k < i; k++) {
            UndoLogRecord e = records.get(k);
            e.invalidatePos();
        }
        seek(first.getFilePos());
    }
    return entry;
}
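Callers drain the log newest-first, as in Session.commit above; when largeTransactions is set, getLast transparently reloads the most recently spilled block from the temporary file. A condensed sketch of that drain loop, mirroring the commit code shown earlier (a fragment, not a standalone program):

    // drain the undo log, newest entry first, as Session.commit does under MVCC
    while (undoLog.size() > 0) {
        UndoLogRecord entry = undoLog.getLast(); // may reload spilled records from the temp file
        entry.commit();
        undoLog.removeLast(false);
    }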