Use of herddb.core.AbstractTableManager in project herddb by diennea.
The class SQLPlanner, method buildUpdateStatement.
private ExecutionPlan buildUpdateStatement(String defaultTableSpace, Update s, boolean returnValues) throws StatementExecutionException {
    if (s.getTables().size() != 1) {
        throw new StatementExecutionException("unsupported multi-table update " + s);
    }
    net.sf.jsqlparser.schema.Table fromTable = s.getTables().get(0);
    String tableSpace = fromTable.getSchemaName();
    String tableName = fromTable.getName();
    if (tableSpace == null) {
        tableSpace = defaultTableSpace;
    }
    TableSpaceManager tableSpaceManager = manager.getTableSpaceManager(tableSpace);
    if (tableSpaceManager == null) {
        throw new StatementExecutionException("no such tablespace " + tableSpace + " here at " + manager.getNodeId());
    }
    AbstractTableManager tableManager = tableSpaceManager.getTableManager(tableName);
    if (tableManager == null) {
        throw new StatementExecutionException("no such table " + tableName + " in tablespace " + tableSpace);
    }
    Table table = tableManager.getTable();
    // validate the SET list: every column must exist and must not belong to the primary key
    for (net.sf.jsqlparser.schema.Column c : s.getColumns()) {
        Column column = table.getColumn(c.getColumnName());
        if (column == null) {
            throw new StatementExecutionException("no such column " + c.getColumnName() + " in table " + tableName + " in tablespace " + tableSpace);
        }
        if (table.isPrimaryKeyColumn(c.getColumnName())) {
            throw new StatementExecutionException("updates of fields on the PK (" + Arrays.toString(table.primaryKey) + ") are not supported. Please perform a DELETE and then an INSERT");
        }
    }
    List<CompiledSQLExpression> compiledSQLExpressions = new ArrayList<>();
    for (Expression e : s.getExpressions()) {
        compiledSQLExpressions.add(SQLExpressionCompiler.compileExpression(null, e));
    }
    RecordFunction function = new SQLRecordFunction(table, s.getColumns(), compiledSQLExpressions);
    // Perform a scan and then update each row
    SQLRecordPredicate where = s.getWhere() != null ? new SQLRecordPredicate(table, table.name, s.getWhere()) : null;
    if (where != null) {
        Expression expressionWhere = s.getWhere();
        discoverIndexOperations(expressionWhere, table, table.name, where, tableSpaceManager);
    }
    DMLStatement st = new UpdateStatement(tableSpace, tableName, null, function, where).setReturnValues(returnValues);
    return ExecutionPlan.simple(st);
}
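The validation pattern above (fall back to the default table space when no schema is given, then reject SET columns that are missing or part of the primary key) can be sketched in isolation. The following is a minimal, hypothetical illustration; TableMeta and its fields are invented for the example and are not HerdDB classes.

import java.util.List;
import java.util.Set;

// Hypothetical, simplified stand-in for the checks done in buildUpdateStatement
public class UpdateValidationSketch {

    // minimal table metadata: name, columns and primary key (not a HerdDB class)
    record TableMeta(String name, Set<String> columns, Set<String> primaryKey) {}

    static String resolveTableSpace(String schemaName, String defaultTableSpace) {
        // an explicit schema wins, otherwise fall back to the default table space
        return schemaName != null ? schemaName : defaultTableSpace;
    }

    static void validateUpdatedColumns(TableMeta table, List<String> updatedColumns) {
        for (String col : updatedColumns) {
            if (!table.columns().contains(col)) {
                throw new IllegalArgumentException("no such column " + col + " in table " + table.name());
            }
            if (table.primaryKey().contains(col)) {
                throw new IllegalArgumentException("updates of fields on the PK " + table.primaryKey()
                        + " are not supported. Please perform a DELETE and then an INSERT");
            }
        }
    }

    public static void main(String[] args) {
        TableMeta t1 = new TableMeta("t1", Set.of("c", "n"), Set.of("c"));
        System.out.println(resolveTableSpace(null, "herd"));  // prints "herd"
        validateUpdatedColumns(t1, List.of("n"));             // accepted: non-PK column
        try {
            validateUpdatedColumns(t1, List.of("c"));         // rejected: PK column
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}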
Use of herddb.core.AbstractTableManager in project herddb by diennea.
The class SQLPlanner, method buildDeleteStatement.
private ExecutionPlan buildDeleteStatement(String defaultTableSpace, Delete s, boolean returnValues) throws StatementExecutionException {
    net.sf.jsqlparser.schema.Table fromTable = s.getTable();
    String tableSpace = fromTable.getSchemaName();
    String tableName = fromTable.getName();
    if (tableSpace == null) {
        tableSpace = defaultTableSpace;
    }
    TableSpaceManager tableSpaceManager = manager.getTableSpaceManager(tableSpace);
    if (tableSpaceManager == null) {
        throw new StatementExecutionException("no such tablespace " + tableSpace + " here at " + manager.getNodeId());
    }
    AbstractTableManager tableManager = tableSpaceManager.getTableManager(tableName);
    if (tableManager == null) {
        throw new StatementExecutionException("no such table " + tableName + " in tablespace " + tableSpace);
    }
    Table table = tableManager.getTable();
    // Perform a scan and then delete each row
    SQLRecordPredicate where = s.getWhere() != null ? new SQLRecordPredicate(table, table.name, s.getWhere()) : null;
    if (where != null) {
        Expression expressionWhere = s.getWhere();
        discoverIndexOperations(expressionWhere, table, table.name, where, tableSpaceManager);
    }
    DMLStatement st = new DeleteStatement(tableSpace, tableName, null, where).setReturnValues(returnValues);
    return ExecutionPlan.simple(st);
}
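Like buildUpdateStatement, buildDeleteStatement compiles the WHERE clause into a predicate only when one is present; a null predicate means the statement applies to the whole table. A small hypothetical sketch of that convention follows (the helper and types are illustrative, not HerdDB code):

import java.util.List;
import java.util.function.Predicate;

// Illustrative only: a null predicate is interpreted as "match every row"
public class OptionalPredicateSketch {

    static <T> List<T> selectForDeletion(List<T> rows, Predicate<T> where) {
        // where == null plays the same role as a missing WHERE clause
        return rows.stream()
                .filter(r -> where == null || where.test(r))
                .toList();
    }

    public static void main(String[] args) {
        List<Integer> rows = List.of(1, 2, 3, 4);
        System.out.println(selectForDeletion(rows, null));        // [1, 2, 3, 4] - whole table
        System.out.println(selectForDeletion(rows, r -> r > 2));  // [3, 4]
    }
}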
Use of herddb.core.AbstractTableManager in project herddb by diennea.
The class SystablestatsTableManager, method buildVirtualRecordList.
@Override
protected Iterable<Record> buildVirtualRecordList(Transaction transaction) {
    List<Table> tables = tableSpaceManager.getAllVisibleTables(transaction);
    List<Record> result = new ArrayList<>();
    for (Table r : tables) {
        AbstractTableManager tableManager = tableSpaceManager.getTableManager(r.name);
        if (tableManager != null && !tableManager.isSystemTable()) {
            TableManagerStats stats = tableManager.getStats();
            result.add(RecordSerializer.makeRecord(table,
                    "tablespace", r.tablespace,
                    "table_name", r.name,
                    "systemtable", r.name.startsWith("sys") ? "true" : "false",
                    "tablesize", stats.getTablesize(),
                    "loadedpages", stats.getLoadedpages(),
                    "loadedpagescount", stats.getLoadedPagesCount(),
                    "unloadedpagescount", stats.getUnloadedPagesCount(),
                    "dirtypages", stats.getDirtypages(),
                    "dirtyrecords", stats.getDirtyrecords(),
                    "maxlogicalpagesize", stats.getMaxLogicalPageSize(),
                    "keysmemory", stats.getKeysUsedMemory(),
                    "buffersmemory", stats.getBuffersUsedMemory()));
        }
    }
    return result;
}
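The system table manager builds its rows on the fly from the live TableManagerStats of each user table. Below is a generic, hypothetical sketch of the same "virtual record list" idea, flattening a stats object into column/value pairs; TableStats and its fields are invented for the example.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch: materialize a "virtual table" from in-memory stats objects
public class VirtualStatsTableSketch {

    record TableStats(String tablespace, String tableName, long tableSize, int loadedPages, long dirtyRecords) {}

    static List<Map<String, Object>> buildVirtualRecordList(List<TableStats> stats) {
        List<Map<String, Object>> result = new ArrayList<>();
        for (TableStats s : stats) {
            // skip system tables; the real code uses tableManager.isSystemTable(),
            // the name prefix is used here only to keep the sketch self-contained
            if (s.tableName().startsWith("sys")) {
                continue;
            }
            Map<String, Object> row = new LinkedHashMap<>();
            row.put("tablespace", s.tablespace());
            row.put("table_name", s.tableName());
            row.put("tablesize", s.tableSize());
            row.put("loadedpages", s.loadedPages());
            row.put("dirtyrecords", s.dirtyRecords());
            result.add(row);
        }
        return result;
    }

    public static void main(String[] args) {
        List<TableStats> stats = List.of(
                new TableStats("herd", "t1", 42L, 3, 0L),
                new TableStats("herd", "systables", 10L, 1, 0L));
        buildVirtualRecordList(stats).forEach(System.out::println);  // only t1 is emitted
    }
}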
Use of herddb.core.AbstractTableManager in project herddb by diennea.
The class TableDataChecksum, method createChecksum.
public static TableChecksum createChecksum(DBManager manager, TranslatedQuery query, TableSpaceManager tableSpaceManager, String tableSpace, String tableName) throws DataScannerException {
    AbstractTableManager tablemanager = tableSpaceManager.getTableManager(tableName);
    String nodeID = tableSpaceManager.getDbmanager().getNodeId();
    TranslatedQuery translated = query;
    final Table table = manager.getTableSpaceManager(tableSpace).getTableManager(tableName).getTable();
    // Number of records
    long nrecords = 0;
    // For example, on the leader node we may not know the query
    if (translated == null) {
        String columns = formatColumns(table);
        /*
         * scan = true
         * allowCache = false
         * returnValues = false
         * maxRows = -1
         */
        translated = manager.getPlanner().translate(tableSpace, "SELECT " + columns + " FROM " + tableName + " order by " + formatPrimaryKeys(table), Collections.emptyList(), true, false, false, -1);
    }
    ScanStatement statement = translated.plan.mainStatement.unwrap(ScanStatement.class);
    statement.setAllowExecutionFromFollower(true);
    LOGGER.log(Level.INFO, "creating checksum for table {0}.{1} on node {2}", new Object[]{tableSpace, tableName, nodeID});
    try (DataScanner scan = manager.scan(statement, translated.context, TransactionContext.NO_TRANSACTION)) {
        StreamingXXHash64 hash64 = FACTORY.newStreamingHash64(SEED);
        long _start = System.currentTimeMillis();
        while (scan.hasNext()) {
            nrecords++;
            DataAccessor data = scan.next();
            data.forEach((String key, Object value) -> {
                int type = table.getColumn(key).type;
                byte[] serialize = RecordSerializer.serialize(value, type);
                /*
                 * update() needs three parameters:
                 *   update(byte[] buff, int off, int len)
                 *   buff is the input data
                 *   off is the start offset in buff
                 *   len is the number of bytes to hash
                 */
                if (serialize != null) {
                    hash64.update(serialize, 0, serialize.length);
                }
            });
        }
        LOGGER.log(Level.INFO, "Number of processed records for table {0}.{1} on node {2} = {3}", new Object[]{tableSpace, tableName, nodeID, nrecords});
        long _stop = System.currentTimeMillis();
        long nextAutoIncrementValue = tablemanager.getNextPrimaryKeyValue();
        long scanduration = (_stop - _start);
        LOGGER.log(Level.INFO, "Creating checksum for table {0}.{1} on node {2} finished in {3} ms", new Object[]{tableSpace, tableName, nodeID, scanduration});
        SystemInstrumentation.instrumentationPoint("createChecksum", tableSpace, tableName);
        return new TableChecksum(tableSpace, tableName, hash64.getValue(), HASH_TYPE, nrecords, nextAutoIncrementValue, translated.context.query, scanduration);
    } catch (DataScannerException ex) {
        LOGGER.log(Level.SEVERE, "Scan failed", ex);
        throw new DataScannerException(ex);
    }
}
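The checksum itself is a streaming xxhash64 over the serialized column values, scanned in primary-key order so that leader and follower hash rows in the same sequence. A minimal standalone sketch of just the hashing part is shown below; it assumes the lz4-java library (net.jpountz.xxhash, the package that provides StreamingXXHash64) is on the classpath, and the seed value and row contents are invented for the example.

import java.nio.charset.StandardCharsets;
import java.util.List;
import net.jpountz.xxhash.StreamingXXHash64;
import net.jpountz.xxhash.XXHashFactory;

// Sketch: accumulate a single 64-bit digest over an ordered stream of serialized values
public class StreamingChecksumSketch {

    private static final XXHashFactory FACTORY = XXHashFactory.fastestInstance();
    private static final int SEED = 0x9747b28c; // arbitrary seed, invented for the example

    static long checksum(List<byte[]> serializedValuesInPkOrder) {
        StreamingXXHash64 hash64 = FACTORY.newStreamingHash64(SEED);
        for (byte[] value : serializedValuesInPkOrder) {
            if (value != null) {
                // update(buff, off, len): feed the whole serialized value into the digest
                hash64.update(value, 0, value.length);
            }
        }
        return hash64.getValue();
    }

    public static void main(String[] args) {
        List<byte[]> rows = List.of(
                "1|alice".getBytes(StandardCharsets.UTF_8),
                "2|bob".getBytes(StandardCharsets.UTF_8));
        System.out.printf("table checksum: %016x%n", checksum(rows));
    }
}

Both nodes must serialize values the same way and iterate in the same primary-key order, otherwise identical tables would produce different digests; that is why the generated query above sorts by the primary key.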
Use of herddb.core.AbstractTableManager in project herddb by diennea.
The class EdgeCasesFollowerTest, method followerCatchupAfterRestartAfterLongtimeNoDataPresent.
@Test
public void followerCatchupAfterRestartAfterLongtimeNoDataPresent() throws Exception {
    ServerConfiguration serverconfig_1 = newServerConfigurationWithAutoPort(folder.newFolder().toPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_NODEID, "server1");
    serverconfig_1.set(ServerConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_CLUSTER);
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ENFORCE_LEADERSHIP, false);
    serverconfig_1.set(ServerConfiguration.PROPERTY_BOOKKEEPER_LEDGERS_RETENTION_PERIOD, 1);
    serverconfig_1.set(ServerConfiguration.PROPERTY_CHECKPOINT_PERIOD, 0);
    // disabled
    serverconfig_1.set(ServerConfiguration.PROPERTY_BOOKKEEPER_MAX_IDLE_TIME, 0);
    ServerConfiguration serverconfig_2 = serverconfig_1.copy()
            .set(ServerConfiguration.PROPERTY_NODEID, "server2")
            .set(ServerConfiguration.PROPERTY_BASEDIR, folder.newFolder().toPath().toAbsolutePath());
    Table table = Table.builder().name("t1").column("c", ColumnTypes.INTEGER).primaryKey("c").build();
    try (Server server_1 = new Server(serverconfig_1)) {
        server_1.start();
        server_1.waitForStandaloneBoot();
        server_1.getManager().executeStatement(new CreateTableStatement(table), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table, "c", 1)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table, "c", 2)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table, "c", 3)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table, "c", 4)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        server_1.getManager().executeStatement(new AlterTableSpaceStatement(TableSpace.DEFAULT, new HashSet<>(Arrays.asList("server1", "server2")), "server1", 1, 0), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
    }
    String tableSpaceUUID;
    try (Server server_1 = new Server(serverconfig_1)) {
        server_1.start();
        server_1.waitForStandaloneBoot();
        {
            ZookeeperMetadataStorageManager man = (ZookeeperMetadataStorageManager) server_1.getMetadataStorageManager();
            tableSpaceUUID = man.describeTableSpace(TableSpace.DEFAULT).uuid;
            LedgersInfo ledgersList = ZookeeperMetadataStorageManager.readActualLedgersListFromZookeeper(man.getZooKeeper(), testEnv.getPath() + "/ledgers", tableSpaceUUID);
            assertEquals(2, ledgersList.getActiveLedgers().size());
        }
        server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table, "c", 5)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        server_1.getManager().checkpoint();
    }
    try (Server server_1 = new Server(serverconfig_1)) {
        server_1.start();
        server_1.waitForStandaloneBoot();
        {
            ZookeeperMetadataStorageManager man = (ZookeeperMetadataStorageManager) server_1.getMetadataStorageManager();
            LedgersInfo ledgersList = ZookeeperMetadataStorageManager.readActualLedgersListFromZookeeper(man.getZooKeeper(), testEnv.getPath() + "/ledgers", tableSpaceUUID);
            assertEquals(2, ledgersList.getActiveLedgers().size());
        }
        server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table, "c", 6)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        {
            ZookeeperMetadataStorageManager man = (ZookeeperMetadataStorageManager) server_1.getMetadataStorageManager();
            LedgersInfo ledgersList = ZookeeperMetadataStorageManager.readActualLedgersListFromZookeeper(man.getZooKeeper(), testEnv.getPath() + "/ledgers", tableSpaceUUID);
            assertEquals(2, ledgersList.getActiveLedgers().size());
        }
        server_1.getManager().checkpoint();
    }
    try (Server server_1 = new Server(serverconfig_1)) {
        server_1.start();
        server_1.waitForStandaloneBoot();
        {
            ZookeeperMetadataStorageManager man = (ZookeeperMetadataStorageManager) server_1.getMetadataStorageManager();
            LedgersInfo ledgersList = ZookeeperMetadataStorageManager.readActualLedgersListFromZookeeper(man.getZooKeeper(), testEnv.getPath() + "/ledgers", tableSpaceUUID);
            System.out.println("current ledgersList: " + ledgersList);
            assertEquals(2, ledgersList.getActiveLedgers().size());
            assertTrue(!ledgersList.getActiveLedgers().contains(ledgersList.getFirstLedger()));
        }
        // data will be downloaded from the other server
        try (Server server_2 = new Server(serverconfig_2)) {
            server_2.start();
            assertTrue(server_2.getManager().waitForTablespace(TableSpace.DEFAULT, 60000, false));
            // wait for data to arrive on server_2
            for (int i = 0; i < 100; i++) {
                GetResult found = server_2.getManager().get(new GetStatement(TableSpace.DEFAULT, "t1", Bytes.from_int(1), null, false), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
                if (found.found()) {
                    break;
                }
                Thread.sleep(100);
            }
            assertTrue(server_2.getManager().get(new GetStatement(TableSpace.DEFAULT, "t1", Bytes.from_int(1), null, false), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).found());
        }
        // write more data while server_2 is down, it will need to catch up
        server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table, "c", 8)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table, "c", 9)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table, "c", 10)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        // truncate the Bookkeeper logs: we want the follower to download the dump again
        BookkeeperCommitLog bklog = (BookkeeperCommitLog) server_1.getManager().getTableSpaceManager(TableSpace.DEFAULT).getLog();
        bklog.rollNewLedger();
        Thread.sleep(1000);
        server_1.getManager().checkpoint();
        {
            ZookeeperMetadataStorageManager man = (ZookeeperMetadataStorageManager) server_1.getMetadataStorageManager();
            LedgersInfo ledgersList = ZookeeperMetadataStorageManager.readActualLedgersListFromZookeeper(man.getZooKeeper(), testEnv.getPath() + "/ledgers", tableSpaceUUID);
            System.out.println("current ledgersList: " + ledgersList);
            assertEquals(1, ledgersList.getActiveLedgers().size());
            assertTrue(!ledgersList.getActiveLedgers().contains(ledgersList.getFirstLedger()));
        }
        Table table1 = server_1.getManager().getTableSpaceManager(TableSpace.DEFAULT).getTableManager("t1").getTable();
        try (DataScanner scan = server_1.getManager().scan(new ScanStatement(TableSpace.DEFAULT, table1, null), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION)) {
            List<DataAccessor> all = scan.consume();
            all.forEach(d -> {
                System.out.println("RECORD ON SERVER 1: " + d.toMap());
            });
        }
        // data will be downloaded again, but it has to be merged with stale data
        try (Server server_2 = new Server(serverconfig_2)) {
            server_2.start();
            assertTrue(server_2.getManager().waitForTablespace(TableSpace.DEFAULT, 60000, false));
            AbstractTableManager tableManager = server_2.getManager().getTableSpaceManager(TableSpace.DEFAULT).getTableManager("t1");
            Table table2 = tableManager.getTable();
            // wait for data to arrive on server_2
            for (int i = 0; i < 100; i++) {
                GetResult found = server_2.getManager().get(new GetStatement(TableSpace.DEFAULT, "t1", Bytes.from_int(10), null, false), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
                if (found.found()) {
                    break;
                }
                try (DataScanner scan = server_2.getManager().scan(new ScanStatement(TableSpace.DEFAULT, table2, null), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION)) {
                    List<DataAccessor> all = scan.consume();
                    System.out.println("WAIT #" + i);
                    all.forEach(d -> {
                        System.out.println("RECORD ON SERVER 2: " + d.toMap());
                    });
                }
                Thread.sleep(1000);
            }
            assertTrue(server_2.getManager().get(new GetStatement(TableSpace.DEFAULT, "t1", Bytes.from_int(10), null, false), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).found());
        }
    }
}
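The test waits for the follower to catch up by polling in a bounded loop with a sleep until the record becomes visible. That wait pattern can be factored into a small helper; the sketch below is generic and not part of HerdDB.

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

// Generic "wait until a condition holds or give up" helper, as used ad hoc in the test above
public final class Awaits {

    public static boolean await(BooleanSupplier condition, long timeoutMillis, long pollMillis)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (System.currentTimeMillis() < deadline) {
            if (condition.getAsBoolean()) {
                return true;
            }
            TimeUnit.MILLISECONDS.sleep(pollMillis);
        }
        // one last check at the deadline
        return condition.getAsBoolean();
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        // hypothetical condition: becomes true after about half a second
        boolean ok = await(() -> System.currentTimeMillis() - start > 500, 10_000, 100);
        System.out.println("condition met: " + ok);
    }
}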