Use of org.h2.command.dml.Update in project h2database by h2database.
The class MVRTreeMap, method remove:
@Override
protected synchronized Object remove(Page p, long writeVersion, Object key) {
    Object result = null;
    if (p.isLeaf()) {
        for (int i = 0; i < p.getKeyCount(); i++) {
            if (keyType.equals(p.getKey(i), key)) {
                result = p.getValue(i);
                p.remove(i);
                break;
            }
        }
        return result;
    }
    for (int i = 0; i < p.getKeyCount(); i++) {
        if (contains(p, i, key)) {
            Page cOld = p.getChildPage(i);
            // this will mark the old page as deleted
            // so we need to update the parent in any case
            // (otherwise the old page might be deleted again)
            Page c = cOld.copy(writeVersion);
            long oldSize = c.getTotalCount();
            result = remove(c, writeVersion, key);
            p.setChild(i, c);
            if (oldSize == c.getTotalCount()) {
                continue;
            }
            if (c.getTotalCount() == 0) {
                // this child was deleted
                p.remove(i);
                if (p.getKeyCount() == 0) {
                    c.removePage();
                }
                break;
            }
            Object oldBounds = p.getKey(i);
            if (!keyType.isInside(key, oldBounds)) {
                p.setKey(i, getBounds(c));
            }
            break;
        }
    }
    return result;
}
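For orientation, here is a minimal usage sketch, based on the documented MVStore R-tree API; the map name, key coordinates, and value are illustrative. It shows the put/remove calls that eventually reach the copy-on-write removal above:

import org.h2.mvstore.MVStore;
import org.h2.mvstore.rtree.MVRTreeMap;
import org.h2.mvstore.rtree.SpatialKey;

public class RTreeRemoveSketch {
    public static void main(String[] args) {
        // open an in-memory store (a null file name keeps everything in memory)
        MVStore store = MVStore.open(null);
        MVRTreeMap<String> map = store.openMap("spatial",
                new MVRTreeMap.Builder<String>());
        // a spatial key: an id, then min/max for each dimension
        SpatialKey key = new SpatialKey(0, 1f, 2f, 3f, 4f);
        map.put(key, "value");
        // remove() descends the tree, copying each page for the current
        // write version before modifying it, as in the method above
        map.remove(key);
        store.close();
    }
}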
Use of org.h2.command.dml.Update in project h2database by h2database.
The class Page, method write:
/**
* Store the page and update the position.
*
* @param chunk the chunk
* @param buff the target buffer
* @return the position of the buffer just after the type
*/
private int write(Chunk chunk, WriteBuffer buff) {
    int start = buff.position();
    int len = keys.length;
    int type = children != null ? DataUtils.PAGE_TYPE_NODE : DataUtils.PAGE_TYPE_LEAF;
    buff.putInt(0).putShort((byte) 0).putVarInt(map.getId()).putVarInt(len);
    int typePos = buff.position();
    buff.put((byte) type);
    if (type == DataUtils.PAGE_TYPE_NODE) {
        writeChildren(buff);
        for (int i = 0; i <= len; i++) {
            buff.putVarLong(children[i].count);
        }
    }
    int compressStart = buff.position();
    map.getKeyType().write(buff, keys, len, true);
    if (type == DataUtils.PAGE_TYPE_LEAF) {
        map.getValueType().write(buff, values, len, false);
    }
    MVStore store = map.getStore();
    int expLen = buff.position() - compressStart;
    if (expLen > 16) {
        int compressionLevel = store.getCompressionLevel();
        if (compressionLevel > 0) {
            Compressor compressor;
            int compressType;
            if (compressionLevel == 1) {
                compressor = map.getStore().getCompressorFast();
                compressType = DataUtils.PAGE_COMPRESSED;
            } else {
                compressor = map.getStore().getCompressorHigh();
                compressType = DataUtils.PAGE_COMPRESSED_HIGH;
            }
            byte[] exp = new byte[expLen];
            buff.position(compressStart).get(exp);
            byte[] comp = new byte[expLen * 2];
            int compLen = compressor.compress(exp, expLen, comp, 0);
            int plus = DataUtils.getVarIntLen(compLen - expLen);
            if (compLen + plus < expLen) {
                buff.position(typePos).put((byte) (type + compressType));
                buff.position(compressStart).putVarInt(expLen - compLen)
                        .put(comp, 0, compLen);
            }
        }
    }
    int pageLength = buff.position() - start;
    int chunkId = chunk.id;
    int check = DataUtils.getCheckValue(chunkId)
            ^ DataUtils.getCheckValue(start)
            ^ DataUtils.getCheckValue(pageLength);
    buff.putInt(start, pageLength).putShort(start + 4, (short) check);
    if (pos != 0) {
        throw DataUtils.newIllegalStateException(
                DataUtils.ERROR_INTERNAL, "Page already stored");
    }
    pos = DataUtils.getPagePos(chunkId, start, pageLength, type);
    store.cachePage(pos, this, getMemory());
    if (type == DataUtils.PAGE_TYPE_NODE) {
        // cache again - this will make sure nodes stay in the cache
        // for a longer time
        store.cachePage(pos, this, getMemory());
    }
    long max = DataUtils.getPageMaxLength(pos);
    chunk.maxLen += max;
    chunk.maxLenLive += max;
    chunk.pageCount++;
    chunk.pageCountLive++;
    if (removedInMemory) {
        // if the page was removed _before_ the position was assigned, we
        // need to mark it removed here, so the fields are updated
        // when the next chunk is stored
        map.removePage(pos, memory);
    }
    return typePos + 1;
}
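One detail worth isolating: the compressed form is only kept when it is a net win after including the var-int header that records the size difference the page stores before the compressed body. Below is a standalone sketch of that decision, assuming a varIntLen helper analogous to DataUtils.getVarIntLen and computed over the positive difference that putVarInt actually writes; the byte counts are made up:

public class CompressionPayoffSketch {
    // length in bytes of a var-int, analogous to DataUtils.getVarIntLen
    static int varIntLen(int x) {
        int len = 1;
        while ((x & ~0x7f) != 0) {
            x >>>= 7;
            len++;
        }
        return len;
    }

    public static void main(String[] args) {
        int expLen = 200;   // uncompressed payload length (illustrative)
        int compLen = 180;  // compressed length (illustrative)
        // header bytes for the size difference stored before the body
        int plus = varIntLen(expLen - compLen);
        // keep the compressed payload only if it wins including the header
        if (compLen + plus < expLen) {
            System.out.println("store compressed: " + (compLen + plus) + " < " + expLen);
        } else {
            System.out.println("keep uncompressed payload");
        }
    }
}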
Use of org.h2.command.dml.Update in project h2database by h2database.
The class TestReopen, method logDb:
private synchronized void logDb(String fileName) {
    writeCount++;
    if ((writeCount & (testEvery - 1)) != 0) {
        return;
    }
    if (FileUtils.size(fileName) > maxFileSize) {
        // System.out.println(fileName + " " + IOUtils.length(fileName));
        return;
    }
    System.out.println("+ write #" + writeCount + " verify #" + verifyCount);
    try {
        if (fileName.endsWith(Constants.SUFFIX_PAGE_FILE)) {
            IOUtils.copyFiles(fileName, testDatabase + Constants.SUFFIX_PAGE_FILE);
        } else {
            IOUtils.copyFiles(fileName, testDatabase + Constants.SUFFIX_MV_FILE);
        }
        verifyCount++;
        // avoid using the Engine class to avoid deadlocks
        Properties p = new Properties();
        String userName = getUser();
        p.setProperty("user", userName);
        p.setProperty("password", getPassword());
        String url = "jdbc:h2:" + testDatabase + ";FILE_LOCK=NO;TRACE_LEVEL_FILE=0";
        ConnectionInfo ci = new ConnectionInfo(url, p);
        Database database = new Database(ci, null);
        // close the database
        Session session = database.getSystemSession();
        session.prepare("script to '" + testDatabase + ".sql'").query(0);
        session.prepare("shutdown immediately").update();
        database.removeSession(null);
        // everything OK - return
        return;
    } catch (DbException e) {
        SQLException e2 = DbException.toSQLException(e);
        int errorCode = e2.getErrorCode();
        if (errorCode == ErrorCode.WRONG_USER_OR_PASSWORD) {
            return;
        } else if (errorCode == ErrorCode.FILE_ENCRYPTION_ERROR_1) {
            return;
        }
        e.printStackTrace(System.out);
        throw e;
    } catch (Exception e) {
        // failed
        int errorCode = 0;
        if (e instanceof SQLException) {
            errorCode = ((SQLException) e).getErrorCode();
        }
        if (errorCode == ErrorCode.WRONG_USER_OR_PASSWORD) {
            return;
        } else if (errorCode == ErrorCode.FILE_ENCRYPTION_ERROR_1) {
            return;
        }
        e.printStackTrace(System.out);
    }
    System.out.println("begin ------------------------------ " + writeCount);
    try {
        Recover.execute(fileName.substring(0, fileName.lastIndexOf('/')), null);
    } catch (SQLException e) {
        // ignore
    }
    testDatabase += "X";
    try {
        if (fileName.endsWith(Constants.SUFFIX_PAGE_FILE)) {
            IOUtils.copyFiles(fileName, testDatabase + Constants.SUFFIX_PAGE_FILE);
        } else {
            IOUtils.copyFiles(fileName, testDatabase + Constants.SUFFIX_MV_FILE);
        }
        // avoid using the Engine class to avoid deadlocks
        Properties p = new Properties();
        String url = "jdbc:h2:" + testDatabase + ";FILE_LOCK=NO";
        ConnectionInfo ci = new ConnectionInfo(url, p);
        Database database = new Database(ci, null);
        // close the database
        database.removeSession(null);
    } catch (Exception e) {
        int errorCode = 0;
        if (e instanceof DbException) {
            e = ((DbException) e).getSQLException();
            errorCode = ((SQLException) e).getErrorCode();
        }
        if (errorCode == ErrorCode.WRONG_USER_OR_PASSWORD) {
            return;
        } else if (errorCode == ErrorCode.FILE_ENCRYPTION_ERROR_1) {
            return;
        }
        StringBuilder buff = new StringBuilder();
        StackTraceElement[] list = e.getStackTrace();
        for (int i = 0; i < 10 && i < list.length; i++) {
            buff.append(list[i].toString()).append('\n');
        }
        String s = buff.toString();
        if (!knownErrors.contains(s)) {
            System.out.println(writeCount + " code: " + errorCode + " " + e.toString());
            e.printStackTrace(System.out);
            knownErrors.add(s);
        } else {
            System.out.println(writeCount + " code: " + errorCode);
        }
    }
}
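The sampling check at the top of logDb relies on testEvery being a power of two: x & (n - 1) equals x % n only in that case. A minimal sketch of the masking trick (the counter range and interval are illustrative, not the test's actual configuration):

public class SamplingMaskSketch {
    public static void main(String[] args) {
        int testEvery = 256; // must be a power of two for the mask to work
        for (int writeCount = 1; writeCount <= 1024; writeCount++) {
            if ((writeCount & (testEvery - 1)) != 0) {
                continue; // not a multiple of testEvery, skip the verify
            }
            System.out.println("verify at write #" + writeCount);
        }
    }
}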
Use of org.h2.command.dml.Update in project h2database by h2database.
The class TestFileLockSerialized, method testCheckpointInUpdateRaceCondition:
/**
 * If a checkpoint occurs between beforeWriting and checkWritingAllowed, then
 * the result of checkWritingAllowed is READ_ONLY, which is wrong.
 *
 * Also, if a checkpoint started before beforeWriting, and ends between
 * beforeWriting and checkWritingAllowed, the same error occurs.
 */
private void testCheckpointInUpdateRaceCondition() throws Exception {
    boolean longRun = false;
    deleteDb("fileLockSerialized");
    String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE";
    Connection conn = getConnection(url);
    conn.createStatement().execute("create table test(id int)");
    conn.createStatement().execute("insert into test values(1)");
    for (int i = 0; i < (longRun ? 10000 : 5); i++) {
        Thread.sleep(402);
        conn.createStatement().execute("update test set id = " + i);
    }
    conn.close();
    deleteDb("fileLockSerialized");
}
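To make the race in the comment concrete, here is a simplified model; the names beforeWriting, checkWritingAllowed, and the checkpoint flag are illustrative stand-ins, not H2's actual internals. If a checkpoint flips the state between the writer's two calls, the writer is wrongly told the database is read-only:

public class CheckpointRaceSketch {
    enum Access { READ_WRITE, READ_ONLY }

    private volatile boolean checkpointRunning;

    void beforeWriting() {
        // the writer announces its intent; no checkpoint is running yet
    }

    Access checkWritingAllowed() {
        // if a checkpoint started in the meantime, the writer is refused
        return checkpointRunning ? Access.READ_ONLY : Access.READ_WRITE;
    }

    public static void main(String[] args) throws InterruptedException {
        CheckpointRaceSketch db = new CheckpointRaceSketch();
        db.beforeWriting();
        // a checkpoint begins between the writer's two calls
        Thread checkpoint = new Thread(() -> db.checkpointRunning = true);
        checkpoint.start();
        checkpoint.join();
        // wrong: the writer already called beforeWriting, yet sees READ_ONLY
        System.out.println(db.checkWritingAllowed());
    }
}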
Use of org.h2.command.dml.Update in project h2database by h2database.
The class TestFileLockSerialized, method testCache:
/**
 * Caches must be cleared. Session.reconnect only closes the DiskFile (which
 * is associated with the cache) if there is only one session.
 */
private void testCache() throws Exception {
    deleteDb("fileLockSerialized");
    String urlShared = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized;FILE_LOCK=SERIALIZED";
    Connection connShared1 = getConnection(urlShared);
    Statement statement1 = connShared1.createStatement();
    Connection connShared2 = getConnection(urlShared);
    Statement statement2 = connShared2.createStatement();
    statement1.execute("create table test1(id int)");
    statement1.execute("insert into test1 values(1)");
    ResultSet rs = statement1.executeQuery("select id from test1");
    rs.close();
    rs = statement2.executeQuery("select id from test1");
    rs.close();
    statement1.execute("update test1 set id=2");
    Thread.sleep(500);
    rs = statement2.executeQuery("select id from test1");
    assertTrue(rs.next());
    assertEquals(2, rs.getInt(1));
    rs.close();
    connShared1.close();
    connShared2.close();
    deleteDb("fileLockSerialized");
}