Use of org.h2.mvstore.db.TransactionStore.Transaction in project h2database by h2database.
In the class WebApp, method getResult:
private String getResult(Connection conn, int id, String sql,
        boolean allowEdit, boolean forceEdit) {
    try {
        sql = sql.trim();
        StringBuilder buff = new StringBuilder();
        String sqlUpper = StringUtils.toUpperEnglish(sql);
        if (sqlUpper.contains("CREATE") || sqlUpper.contains("DROP") ||
                sqlUpper.contains("ALTER") || sqlUpper.contains("RUNSCRIPT")) {
            String sessionId = attributes.getProperty("jsessionid");
            buff.append("<script type=\"text/javascript\">parent['h2menu'].location='tables.do?jsessionid=")
                    .append(sessionId).append("';</script>");
        }
        Statement stat;
        DbContents contents = session.getContents();
        if (forceEdit || (allowEdit && contents.isH2())) {
            stat = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                    ResultSet.CONCUR_UPDATABLE);
        } else {
            stat = conn.createStatement();
        }
        ResultSet rs;
        long time = System.currentTimeMillis();
        boolean metadata = false;
        int generatedKeys = Statement.NO_GENERATED_KEYS;
        boolean edit = false;
        boolean list = false;
        if (isBuiltIn(sql, "@autocommit_true")) {
            conn.setAutoCommit(true);
            return "${text.result.autoCommitOn}";
        } else if (isBuiltIn(sql, "@autocommit_false")) {
            conn.setAutoCommit(false);
            return "${text.result.autoCommitOff}";
        } else if (isBuiltIn(sql, "@cancel")) {
            stat = session.executingStatement;
            if (stat != null) {
                stat.cancel();
                buff.append("${text.result.statementWasCanceled}");
            } else {
                buff.append("${text.result.noRunningStatement}");
            }
            return buff.toString();
        } else if (isBuiltIn(sql, "@edit")) {
            edit = true;
            sql = sql.substring("@edit".length()).trim();
            session.put("resultSetSQL", sql);
        }
        if (isBuiltIn(sql, "@list")) {
            list = true;
            sql = sql.substring("@list".length()).trim();
        }
        if (isBuiltIn(sql, "@meta")) {
            metadata = true;
            sql = sql.substring("@meta".length()).trim();
        }
        if (isBuiltIn(sql, "@generated")) {
            generatedKeys = Statement.RETURN_GENERATED_KEYS;
            sql = sql.substring("@generated".length()).trim();
        } else if (isBuiltIn(sql, "@history")) {
            buff.append(getCommandHistoryString());
            return buff.toString();
        } else if (isBuiltIn(sql, "@loop")) {
            sql = sql.substring("@loop".length()).trim();
            int idx = sql.indexOf(' ');
            int count = Integer.decode(sql.substring(0, idx));
            sql = sql.substring(idx).trim();
            return executeLoop(conn, count, sql);
        } else if (isBuiltIn(sql, "@maxrows")) {
            int maxrows = (int) Double.parseDouble(sql.substring("@maxrows".length()).trim());
            session.put("maxrows", "" + maxrows);
            return "${text.result.maxrowsSet}";
        } else if (isBuiltIn(sql, "@parameter_meta")) {
            sql = sql.substring("@parameter_meta".length()).trim();
            PreparedStatement prep = conn.prepareStatement(sql);
            buff.append(getParameterResultSet(prep.getParameterMetaData()));
            return buff.toString();
        } else if (isBuiltIn(sql, "@password_hash")) {
            sql = sql.substring("@password_hash".length()).trim();
            String[] p = split(sql);
            return StringUtils.convertBytesToHex(
                    SHA256.getKeyPasswordHash(p[0], p[1].toCharArray()));
        } else if (isBuiltIn(sql, "@prof_start")) {
            if (profiler != null) {
                profiler.stopCollecting();
            }
            profiler = new Profiler();
            profiler.startCollecting();
            return "Ok";
        } else if (isBuiltIn(sql, "@sleep")) {
            String s = sql.substring("@sleep".length()).trim();
            int sleep = 1;
            if (s.length() > 0) {
                sleep = Integer.parseInt(s);
            }
            Thread.sleep(sleep * 1000);
            return "Ok";
        } else if (isBuiltIn(sql, "@transaction_isolation")) {
            String s = sql.substring("@transaction_isolation".length()).trim();
            if (s.length() > 0) {
                int level = Integer.parseInt(s);
                conn.setTransactionIsolation(level);
            }
            buff.append("Transaction Isolation: ")
                    .append(conn.getTransactionIsolation()).append("<br />");
            buff.append(Connection.TRANSACTION_READ_UNCOMMITTED).append(": read_uncommitted<br />");
            buff.append(Connection.TRANSACTION_READ_COMMITTED).append(": read_committed<br />");
            buff.append(Connection.TRANSACTION_REPEATABLE_READ).append(": repeatable_read<br />");
            buff.append(Connection.TRANSACTION_SERIALIZABLE).append(": serializable");
        }
        if (sql.startsWith("@")) {
            rs = getMetaResultSet(conn, sql);
            if (rs == null) {
                buff.append("?: ").append(sql);
                return buff.toString();
            }
        } else {
            int maxrows = getMaxrows();
            stat.setMaxRows(maxrows);
            session.executingStatement = stat;
            boolean isResultSet = stat.execute(sql, generatedKeys);
            session.addCommand(sql);
            if (generatedKeys == Statement.RETURN_GENERATED_KEYS) {
                rs = stat.getGeneratedKeys();
            } else {
                if (!isResultSet) {
                    buff.append("${text.result.updateCount}: ").append(stat.getUpdateCount());
                    time = System.currentTimeMillis() - time;
                    buff.append("<br />(").append(time).append(" ms)");
                    stat.close();
                    return buff.toString();
                }
                rs = stat.getResultSet();
            }
        }
        time = System.currentTimeMillis() - time;
        buff.append(getResultSet(sql, rs, metadata, list, edit, time, allowEdit));
        if (!edit) {
            stat.close();
        }
        return buff.toString();
    } catch (Throwable e) {
        // Throwable: also catches OutOfMemoryError and similar errors
        return getStackTrace(id, e, session.getContents().isH2());
    } finally {
        session.executingStatement = null;
    }
}
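For context, most of the @ built-ins handled above are thin wrappers over standard JDBC calls. The following self-contained sketch (class name, JDBC URL, and table are illustrative and not part of the original code; it assumes a 1.4.x-era H2 driver on the classpath) shows the plain JDBC equivalents of a few of them:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;

public class ConsoleBuiltInsSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo", "sa", "")) {
            // @autocommit_false / @autocommit_true map to plain auto-commit switches.
            conn.setAutoCommit(false);
            conn.setAutoCommit(true);

            // @transaction_isolation <level> maps to Connection.setTransactionIsolation.
            conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);

            // @generated maps to Statement.RETURN_GENERATED_KEYS.
            try (Statement stat = conn.createStatement()) {
                stat.execute("CREATE TABLE TEST(ID IDENTITY, NAME VARCHAR)");
                stat.execute("INSERT INTO TEST(NAME) VALUES('a')", Statement.RETURN_GENERATED_KEYS);
                try (ResultSet keys = stat.getGeneratedKeys()) {
                    while (keys.next()) {
                        System.out.println("generated key: " + keys.getLong(1));
                    }
                }
            }

            // @parameter_meta <sql> maps to PreparedStatement.getParameterMetaData.
            try (PreparedStatement prep = conn.prepareStatement("SELECT * FROM TEST WHERE ID = ?")) {
                System.out.println("parameters: " + prep.getParameterMetaData().getParameterCount());
            }
        }
    }
}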
Use of org.h2.mvstore.db.TransactionStore.Transaction in project h2database by h2database.
In the class Recover, method dumpPageLogStream:
private void dumpPageLogStream(PrintWriter writer, int logKey,
        int logFirstTrunkPage, int logFirstDataPage, long pageCount) throws IOException {
    Data s = Data.create(this, pageSize);
    DataReader in = new DataReader(new PageInputStream(writer, this, store,
            logKey, logFirstTrunkPage, logFirstDataPage, pageSize));
    writer.println("---- Transaction log ----");
    CompressLZF compress = new CompressLZF();
    while (true) {
        int x = in.readByte();
        if (x < 0) {
            break;
        }
        if (x == PageLog.NOOP) {
            // ignore
        } else if (x == PageLog.UNDO) {
            int pageId = in.readVarInt();
            int size = in.readVarInt();
            byte[] data = new byte[pageSize];
            if (size == 0) {
                in.readFully(data, pageSize);
            } else if (size == 1) {
                // empty
            } else {
                byte[] compressBuffer = new byte[size];
                in.readFully(compressBuffer, size);
                try {
                    compress.expand(compressBuffer, 0, size, data, 0, pageSize);
                } catch (ArrayIndexOutOfBoundsException e) {
                    throw DbException.convertToIOException(e);
                }
            }
            String typeName = "";
            int type = data[0];
            boolean last = (type & Page.FLAG_LAST) != 0;
            type &= ~Page.FLAG_LAST;
            switch (type) {
            case Page.TYPE_EMPTY:
                typeName = "empty";
                break;
            case Page.TYPE_DATA_LEAF:
                typeName = "data leaf " + (last ? "(last)" : "");
                break;
            case Page.TYPE_DATA_NODE:
                typeName = "data node " + (last ? "(last)" : "");
                break;
            case Page.TYPE_DATA_OVERFLOW:
                typeName = "data overflow " + (last ? "(last)" : "");
                break;
            case Page.TYPE_BTREE_LEAF:
                typeName = "b-tree leaf " + (last ? "(last)" : "");
                break;
            case Page.TYPE_BTREE_NODE:
                typeName = "b-tree node " + (last ? "(last)" : "");
                break;
            case Page.TYPE_FREE_LIST:
                typeName = "free list " + (last ? "(last)" : "");
                break;
            case Page.TYPE_STREAM_TRUNK:
                typeName = "log trunk";
                break;
            case Page.TYPE_STREAM_DATA:
                typeName = "log data";
                break;
            default:
                typeName = "ERROR: unknown type " + type;
                break;
            }
            writer.println("-- undo page " + pageId + " " + typeName);
            if (trace) {
                Data d = Data.create(null, data);
                dumpPage(writer, d, pageId, pageCount);
            }
        } else if (x == PageLog.ADD) {
            int sessionId = in.readVarInt();
            setStorage(in.readVarInt());
            Row row = PageLog.readRow(RowFactory.DEFAULT, in, s);
            writer.println("-- session " + sessionId + " table " + storageId + " + " + row.toString());
            if (transactionLog) {
                if (storageId == 0 && row.getColumnCount() >= 4) {
                    int tableId = (int) row.getKey();
                    String sql = row.getValue(3).getString();
                    String name = extractTableOrViewName(sql);
                    if (row.getValue(2).getInt() == DbObject.TABLE_OR_VIEW) {
                        tableMap.put(tableId, name);
                    }
                    writer.println(sql + ";");
                } else {
                    String tableName = tableMap.get(storageId);
                    if (tableName != null) {
                        StatementBuilder buff = new StatementBuilder();
                        buff.append("INSERT INTO ").append(tableName).append(" VALUES(");
                        for (int i = 0; i < row.getColumnCount(); i++) {
                            buff.appendExceptFirst(", ");
                            buff.append(row.getValue(i).getSQL());
                        }
                        buff.append(");");
                        writer.println(buff.toString());
                    }
                }
            }
        } else if (x == PageLog.REMOVE) {
            int sessionId = in.readVarInt();
            setStorage(in.readVarInt());
            long key = in.readVarLong();
            writer.println("-- session " + sessionId + " table " + storageId + " - " + key);
            if (transactionLog) {
                if (storageId == 0) {
                    int tableId = (int) key;
                    String tableName = tableMap.get(tableId);
                    if (tableName != null) {
                        writer.println("DROP TABLE IF EXISTS " + tableName + ";");
                    }
                } else {
                    String tableName = tableMap.get(storageId);
                    if (tableName != null) {
                        String sql = "DELETE FROM " + tableName + " WHERE _ROWID_ = " + key + ";";
                        writer.println(sql);
                    }
                }
            }
        } else if (x == PageLog.TRUNCATE) {
            int sessionId = in.readVarInt();
            setStorage(in.readVarInt());
            writer.println("-- session " + sessionId + " table " + storageId + " truncate");
            if (transactionLog) {
                writer.println("TRUNCATE TABLE " + storageId);
            }
        } else if (x == PageLog.COMMIT) {
            int sessionId = in.readVarInt();
            writer.println("-- commit " + sessionId);
        } else if (x == PageLog.ROLLBACK) {
            int sessionId = in.readVarInt();
            writer.println("-- rollback " + sessionId);
        } else if (x == PageLog.PREPARE_COMMIT) {
            int sessionId = in.readVarInt();
            String transaction = in.readString();
            writer.println("-- prepare commit " + sessionId + " " + transaction);
        } else if (x == PageLog.NOOP) {
            // nothing to do
        } else if (x == PageLog.CHECKPOINT) {
            writer.println("-- checkpoint");
        } else if (x == PageLog.FREE_LOG) {
            int size = in.readVarInt();
            StringBuilder buff = new StringBuilder("-- free");
            for (int i = 0; i < size; i++) {
                buff.append(' ').append(in.readVarInt());
            }
            writer.println(buff);
        } else {
            writer.println("-- ERROR: unknown operation " + x);
            break;
        }
    }
}
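The dump above is written by the Recover tool when it walks the PageStore transaction log. A rough usage sketch follows; the directory, database name, and the exact output file name are illustrative and may differ between H2 versions, and the programmatic execute(dir, db) helper is assumed from the 1.4.x tool API:

import org.h2.tools.Recover;

public class RecoverToolExample {
    public static void main(String[] args) throws Exception {
        // Command-line style: scan ./data for database files named test.* and
        // write a recovery script (typically test.h2.sql) next to them.
        Recover.main("-dir", "./data", "-db", "test");

        // Programmatic equivalent of the call above.
        Recover.execute("./data", "test");
    }
}

When the transaction log is dumped, the generated script contains the "-- session ..." comments and the reconstructed INSERT/DELETE statements produced by dumpPageLogStream.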
Use of org.h2.mvstore.db.TransactionStore.Transaction in project h2database by h2database.
In the class PageLog, method recover:
/**
 * Run one recovery stage. There are three recovery stages:
 * 0: only the undo steps are run (restoring the state before the last checkpoint);
 * 1: the pages that are used by the transaction log are allocated;
 * 2: the committed operations are re-applied.
 *
 * @param stage the recovery stage
 * @return whether the transaction log was empty
 */
boolean recover(int stage) {
    if (trace.isDebugEnabled()) {
        trace.debug("log recover stage: " + stage);
    }
    if (stage == RECOVERY_STAGE_ALLOCATE) {
        PageInputStream in = new PageInputStream(store, logKey, firstTrunkPage, firstDataPage);
        usedLogPages = in.allocateAllPages();
        in.close();
        return true;
    }
    PageInputStream pageIn = new PageInputStream(store, logKey, firstTrunkPage, firstDataPage);
    DataReader in = new DataReader(pageIn);
    int logId = 0;
    Data data = store.createData();
    boolean isEmpty = true;
    try {
        int pos = 0;
        while (true) {
            int x = in.readByte();
            if (x < 0) {
                break;
            }
            pos++;
            isEmpty = false;
            if (x == UNDO) {
                int pageId = in.readVarInt();
                int size = in.readVarInt();
                if (size == 0) {
                    in.readFully(data.getBytes(), store.getPageSize());
                } else if (size == 1) {
                    // empty
                    Arrays.fill(data.getBytes(), 0, store.getPageSize(), (byte) 0);
                } else {
                    in.readFully(compressBuffer, size);
                    try {
                        compress.expand(compressBuffer, 0, size, data.getBytes(), 0, store.getPageSize());
                    } catch (ArrayIndexOutOfBoundsException e) {
                        throw DbException.convertToIOException(e);
                    }
                }
                if (stage == RECOVERY_STAGE_UNDO) {
                    if (!undo.get(pageId)) {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log undo {0}", pageId);
                        }
                        store.writePage(pageId, data);
                        undo.set(pageId);
                        undoAll.set(pageId);
                    } else {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log undo skip {0}", pageId);
                        }
                    }
                }
            } else if (x == ADD) {
                int sessionId = in.readVarInt();
                int tableId = in.readVarInt();
                Row row = readRow(store.getDatabase().getRowFactory(), in, data);
                if (stage == RECOVERY_STAGE_UNDO) {
                    store.allocateIfIndexRoot(pos, tableId, row);
                } else if (stage == RECOVERY_STAGE_REDO) {
                    if (isSessionCommitted(sessionId, logId, pos)) {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log redo + table: " + tableId + " s: " + sessionId + " " + row);
                        }
                        store.redo(tableId, row, true);
                    } else {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log ignore s: " + sessionId + " + table: " + tableId + " " + row);
                        }
                    }
                }
            } else if (x == REMOVE) {
                int sessionId = in.readVarInt();
                int tableId = in.readVarInt();
                long key = in.readVarLong();
                if (stage == RECOVERY_STAGE_REDO) {
                    if (isSessionCommitted(sessionId, logId, pos)) {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log redo - table: " + tableId + " s:" + sessionId + " key: " + key);
                        }
                        store.redoDelete(tableId, key);
                    } else {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log ignore s: " + sessionId + " - table: " + tableId + " " + key);
                        }
                    }
                }
            } else if (x == TRUNCATE) {
                int sessionId = in.readVarInt();
                int tableId = in.readVarInt();
                if (stage == RECOVERY_STAGE_REDO) {
                    if (isSessionCommitted(sessionId, logId, pos)) {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log redo truncate table: " + tableId);
                        }
                        store.redoTruncate(tableId);
                    } else {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log ignore s: " + sessionId + " truncate table: " + tableId);
                        }
                    }
                }
            } else if (x == PREPARE_COMMIT) {
                int sessionId = in.readVarInt();
                String transaction = in.readString();
                if (trace.isDebugEnabled()) {
                    trace.debug("log prepare commit " + sessionId + " " + transaction + " pos: " + pos);
                }
                if (stage == RECOVERY_STAGE_UNDO) {
                    int page = pageIn.getDataPage();
                    setPrepareCommit(sessionId, page, transaction);
                }
            } else if (x == ROLLBACK) {
                int sessionId = in.readVarInt();
                if (trace.isDebugEnabled()) {
                    trace.debug("log rollback " + sessionId + " pos: " + pos);
                }
                // ignore - this entry is just informational
            } else if (x == COMMIT) {
                int sessionId = in.readVarInt();
                if (trace.isDebugEnabled()) {
                    trace.debug("log commit " + sessionId + " pos: " + pos);
                }
                if (stage == RECOVERY_STAGE_UNDO) {
                    setLastCommitForSession(sessionId, logId, pos);
                }
            } else if (x == NOOP) {
                // nothing to do
            } else if (x == CHECKPOINT) {
                logId++;
            } else if (x == FREE_LOG) {
                int count = in.readVarInt();
                for (int i = 0; i < count; i++) {
                    int pageId = in.readVarInt();
                    if (stage == RECOVERY_STAGE_REDO) {
                        if (!usedLogPages.get(pageId)) {
                            store.free(pageId, false);
                        }
                    }
                }
            } else {
                if (trace.isDebugEnabled()) {
                    trace.debug("log end");
                    break;
                }
            }
        }
    } catch (DbException e) {
        if (e.getErrorCode() == ErrorCode.FILE_CORRUPTED_1) {
            trace.debug("log recovery stopped");
        } else {
            throw e;
        }
    } catch (IOException e) {
        trace.debug("log recovery completed");
    }
    undo = new BitSet();
    if (stage == RECOVERY_STAGE_REDO) {
        usedLogPages = null;
    }
    return isEmpty;
}
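To make the staged control flow easier to follow, here is a small self-contained toy (not H2 code, and much simplified: it tracks committed sessions as a set rather than per-session log positions) that mirrors the undo-then-redo pattern and the isSessionCommitted gating used above:

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class MiniRecovery {

    // A simplified log record: an UNDO page image, a data change (ADD), or a COMMIT marker.
    static final class LogRecord {
        final String type;
        final int sessionId;
        final int pageId;
        final String value;
        LogRecord(String type, int sessionId, int pageId, String value) {
            this.type = type;
            this.sessionId = sessionId;
            this.pageId = pageId;
            this.value = value;
        }
    }

    public static void main(String[] args) {
        List<LogRecord> log = Arrays.asList(
                new LogRecord("UNDO", 0, 1, "old-image-1"),
                new LogRecord("ADD", 7, 1, "row-A"),
                new LogRecord("ADD", 8, 2, "row-B"),
                new LogRecord("COMMIT", 7, -1, null));

        Map<Integer, String> pages = new HashMap<>();
        Set<Integer> committedSessions = new HashSet<>();

        // Stage 0 (undo): restore old page images and note which sessions committed.
        for (LogRecord r : log) {
            if (r.type.equals("UNDO")) {
                pages.put(r.pageId, r.value);
            } else if (r.type.equals("COMMIT")) {
                committedSessions.add(r.sessionId);
            }
        }

        // Stage 2 (redo): re-apply changes, but only for sessions that committed.
        for (LogRecord r : log) {
            if (r.type.equals("ADD") && committedSessions.contains(r.sessionId)) {
                pages.put(r.pageId, r.value);
            }
        }

        // Session 7 committed, so page 1 now holds row-A; session 8 did not, so page 2 is untouched.
        System.out.println(pages); // {1=row-A}
    }
}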
Use of org.h2.mvstore.db.TransactionStore.Transaction in project h2database by h2database.
In the class PageStore, method recover:
/**
* Run recovery.
*
* @return whether the transaction log was empty
*/
private boolean recover() {
    trace.debug("log recover");
    recoveryRunning = true;
    boolean isEmpty = true;
    isEmpty &= log.recover(PageLog.RECOVERY_STAGE_UNDO);
    if (reservedPages != null) {
        for (int r : reservedPages.keySet()) {
            if (trace.isDebugEnabled()) {
                trace.debug("reserve " + r);
            }
            allocatePage(r);
        }
    }
    isEmpty &= log.recover(PageLog.RECOVERY_STAGE_ALLOCATE);
    openMetaIndex();
    readMetaData();
    isEmpty &= log.recover(PageLog.RECOVERY_STAGE_REDO);
    boolean setReadOnly = false;
    if (!database.isReadOnly()) {
        if (log.getInDoubtTransactions().isEmpty()) {
            log.recoverEnd();
            int firstUncommittedSection = getFirstUncommittedSection();
            log.removeUntil(firstUncommittedSection);
        } else {
            setReadOnly = true;
        }
    }
    PageDataIndex systemTable = (PageDataIndex) metaObjects.get(0);
    isNew = systemTable == null;
    for (PageIndex index : metaObjects.values()) {
        if (index.getTable().isTemporary()) {
            // temporary indexes are removed after opening
            if (tempObjects == null) {
                tempObjects = new HashMap<>();
            }
            tempObjects.put(index.getId(), index);
        } else {
            index.close(pageStoreSession);
        }
    }
    allocatePage(PAGE_ID_META_ROOT);
    writeIndexRowCounts();
    recoveryRunning = false;
    reservedPages = null;
    writeBack();
    // clear the cache because it contains pages with closed indexes
    cache.clear();
    freeLists.clear();
    metaObjects.clear();
    metaObjects.put(-1, metaIndex);
    if (setReadOnly) {
        database.setReadOnly(true);
    }
    trace.debug("log recover done");
    return isEmpty;
}
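Applications never call this method directly; it runs while the database file is being opened after an unclean shutdown. A minimal sketch, assuming the 1.4.x PageStore backend (selected here with MV_STORE=FALSE; the URL and path are illustrative):

import java.sql.Connection;
import java.sql.DriverManager;

public class OpenPageStoreDatabase {
    public static void main(String[] args) throws Exception {
        // Opening the database runs the undo/allocate/redo stages implicitly
        // if the file was not closed cleanly.
        String url = "jdbc:h2:./data/test;MV_STORE=FALSE";
        try (Connection conn = DriverManager.getConnection(url, "sa", "")) {
            System.out.println("recovered and opened: " + conn.getMetaData().getURL());
        }
    }
}

Note the read-only fallback above: if any prepared (in-doubt) transactions remain after redo, the database is opened read-only until they are resolved.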
Use of org.h2.mvstore.db.TransactionStore.Transaction in project h2database by h2database.
In the class Parser, method parseBegin:
private TransactionCommand parseBegin() {
    TransactionCommand command;
    if (!readIf("WORK")) {
        readIf("TRANSACTION");
    }
    command = new TransactionCommand(session, CommandInterface.BEGIN);
    return command;
}
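The optional keyword after BEGIN (WORK or TRANSACTION) is consumed and ignored, so all three statement forms parse to the same command. A minimal sketch of exercising them over JDBC (the in-memory URL is illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class BeginStatementExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo", "sa", "");
                Statement stat = conn.createStatement()) {
            // All three forms are accepted by parseBegin().
            stat.execute("BEGIN");
            stat.execute("COMMIT");
            stat.execute("BEGIN WORK");
            stat.execute("COMMIT");
            stat.execute("BEGIN TRANSACTION");
            stat.execute("COMMIT");
        }
    }
}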