Use of herddb.sql.SQLRecordKeyFunction in project herddb by diennea.
Class InsertOp, method execute.
@Override
public StatementExecutionResult execute(TableSpaceManager tableSpaceManager, TransactionContext transactionContext, StatementEvaluationContext context, boolean lockRequired, boolean forWrite) {
    StatementExecutionResult input = this.input.execute(tableSpaceManager, transactionContext, context, true, true);
    ScanResult downstreamScanResult = (ScanResult) input;
    final Table table = tableSpaceManager.getTableManager(tableName).getTable();
    long transactionId = transactionContext.transactionId;
    int updateCount = 0;
    Bytes key = null;
    Bytes newValue = null;
    try (DataScanner inputScanner = downstreamScanResult.dataScanner) {
        while (inputScanner.hasNext()) {
            DataAccessor row = inputScanner.next();
            long transactionIdFromScanner = inputScanner.getTransactionId();
            if (transactionIdFromScanner > 0 && transactionIdFromScanner != transactionId) {
                transactionId = transactionIdFromScanner;
                transactionContext = new TransactionContext(transactionId);
            }
            int index = 0;
            List<CompiledSQLExpression> keyValueExpression = new ArrayList<>();
            List<String> keyExpressionToColumn = new ArrayList<>();
            List<CompiledSQLExpression> valuesExpressions = new ArrayList<>();
            List<String> valuesColumns = new ArrayList<>();
            for (Column column : table.getColumns()) {
                Object value = row.get(index++);
                if (value != null) {
                    ConstantExpression exp = new ConstantExpression(value);
                    if (table.isPrimaryKeyColumn(column.name)) {
                        keyExpressionToColumn.add(column.name);
                        keyValueExpression.add(exp);
                    }
                    valuesColumns.add(column.name);
                    valuesExpressions.add(exp);
                }
            }
            RecordFunction keyfunction;
            if (keyValueExpression.isEmpty() && table.auto_increment) {
                keyfunction = new AutoIncrementPrimaryKeyRecordFunction();
            } else {
                if (keyValueExpression.size() != table.primaryKey.length) {
                    throw new StatementExecutionException("you must set a value for the primary key (expressions=" + keyValueExpression.size() + ")");
                }
                keyfunction = new SQLRecordKeyFunction(keyExpressionToColumn, keyValueExpression, table);
            }
            RecordFunction valuesfunction = new SQLRecordFunction(valuesColumns, table, valuesExpressions);
            DMLStatement insertStatement = new InsertStatement(tableSpace, tableName, keyfunction, valuesfunction).setReturnValues(returnValues);
            DMLStatementExecutionResult _result = (DMLStatementExecutionResult) tableSpaceManager.executeStatement(insertStatement, context, transactionContext);
            updateCount += _result.getUpdateCount();
            if (_result.transactionId > 0 && _result.transactionId != transactionId) {
                transactionId = _result.transactionId;
                transactionContext = new TransactionContext(transactionId);
            }
            key = _result.getKey();
            newValue = _result.getNewvalue();
        }
        if (updateCount > 1 && returnValues) {
            if (transactionId > 0) {
                // usually the first record will be rolled back with the transaction failure
                throw new StatementExecutionException("cannot 'return values' on multi-values insert");
            } else {
                throw new StatementExecutionException("cannot 'return values' on multi-values insert, at least one record could have been written because autocommit=true");
            }
        }
        return new DMLStatementExecutionResult(transactionId, updateCount, key, newValue);
    } catch (DataScannerException err) {
        throw new StatementExecutionException(err);
    }
}
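The heart of the loop above is the choice between an auto-generated key and a SQLRecordKeyFunction built from the collected primary-key expressions. A minimal sketch of that decision in isolation (the helper itself is hypothetical, not HerdDB API; import paths are assumed from the HerdDB source layout and may need adjusting):

import herddb.model.AutoIncrementPrimaryKeyRecordFunction;
import herddb.model.RecordFunction;
import herddb.model.StatementExecutionException;
import herddb.model.Table;
import herddb.sql.SQLRecordKeyFunction;
import herddb.sql.expressions.CompiledSQLExpression;
import java.util.List;

// Hypothetical helper mirroring the key-function selection in InsertOp.execute:
// no key expressions on an auto-increment table means the key is generated,
// otherwise every primary-key column must have a value.
static RecordFunction buildKeyFunction(Table table, List<String> keyColumns, List<CompiledSQLExpression> keyExpressions) {
    if (keyExpressions.isEmpty() && table.auto_increment) {
        return new AutoIncrementPrimaryKeyRecordFunction();
    }
    if (keyExpressions.size() != table.primaryKey.length) {
        throw new StatementExecutionException("you must set a value for the primary key (expressions=" + keyExpressions.size() + ")");
    }
    return new SQLRecordKeyFunction(keyColumns, keyExpressions, table);
}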
Use of herddb.sql.SQLRecordKeyFunction in project herddb by diennea.
Class ConcurrentMapKeyToPageIndex, method scanner.
@Override
public Stream<Map.Entry<Bytes, Long>> scanner(IndexOperation operation, StatementEvaluationContext context, TableContext tableContext, herddb.core.AbstractIndexManager index) throws DataStorageManagerException {
    if (operation instanceof PrimaryIndexSeek) {
        PrimaryIndexSeek seek = (PrimaryIndexSeek) operation;
        byte[] seekValue = seek.value.computeNewValue(null, context, tableContext);
        if (seekValue == null) {
            return Stream.empty();
        }
        Bytes key = Bytes.from_array(seekValue);
        Long pageId = map.get(key);
        if (pageId == null) {
            return Stream.empty();
        }
        return Stream.of(new AbstractMap.SimpleImmutableEntry<>(key, pageId));
    }
    // every predicate (WHEREs...) will always be evaluated anyway on every record, in order to guarantee correctness
    if (index != null) {
        return index.recordSetScanner(operation, context, tableContext, this);
    }
    if (operation == null) {
        Stream<Map.Entry<Bytes, Long>> baseStream = map.entrySet().stream();
        return baseStream;
    } else if (operation instanceof PrimaryIndexPrefixScan) {
        PrimaryIndexPrefixScan scan = (PrimaryIndexPrefixScan) operation;
        byte[] prefix;
        try {
            prefix = scan.value.computeNewValue(null, context, tableContext);
        } catch (StatementExecutionException err) {
            throw new RuntimeException(err);
        }
        Predicate<Map.Entry<Bytes, Long>> predicate = (Map.Entry<Bytes, Long> t) -> {
            byte[] fullrecordKey = t.getKey().data;
            return Bytes.startsWith(fullrecordKey, prefix.length, prefix);
        };
        Stream<Map.Entry<Bytes, Long>> baseStream = map.entrySet().stream();
        return baseStream.filter(predicate);
    } else if (operation instanceof PrimaryIndexRangeScan) {
        byte[] refminvalue;
        PrimaryIndexRangeScan sis = (PrimaryIndexRangeScan) operation;
        SQLRecordKeyFunction minKey = sis.minValue;
        if (minKey != null) {
            refminvalue = minKey.computeNewValue(null, context, tableContext);
        } else {
            refminvalue = null;
        }
        byte[] refmaxvalue;
        SQLRecordKeyFunction maxKey = sis.maxValue;
        if (maxKey != null) {
            refmaxvalue = maxKey.computeNewValue(null, context, tableContext);
        } else {
            refmaxvalue = null;
        }
        Predicate<Map.Entry<Bytes, Long>> predicate;
        if (refminvalue != null && refmaxvalue == null) {
            predicate = (Map.Entry<Bytes, Long> entry) -> {
                byte[] datum = entry.getKey().data;
                return Bytes.compare(datum, refminvalue) >= 0;
            };
        } else if (refminvalue == null && refmaxvalue != null) {
            predicate = (Map.Entry<Bytes, Long> entry) -> {
                byte[] datum = entry.getKey().data;
                return Bytes.compare(datum, refmaxvalue) <= 0;
            };
        } else if (refminvalue != null && refmaxvalue != null) {
            predicate = (Map.Entry<Bytes, Long> entry) -> {
                byte[] datum = entry.getKey().data;
                return Bytes.compare(datum, refmaxvalue) <= 0 && Bytes.compare(datum, refminvalue) >= 0;
            };
        } else {
            predicate = (Map.Entry<Bytes, Long> entry) -> {
                return true;
            };
        }
        Stream<Map.Entry<Bytes, Long>> baseStream = map.entrySet().stream();
        return baseStream.filter(predicate);
    } else {
        throw new DataStorageManagerException("operation " + operation + " not implemented on " + this.getClass());
    }
}
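The four min/max branches in the range-scan case all build the same bounded-range check; they can be read as a single predicate in which a null bound leaves that side of the range open. A minimal sketch of the equivalent predicate (hypothetical helper, not HerdDB API; Bytes.compare and entry.getKey().data are used exactly as above):

import herddb.utils.Bytes;
import java.util.Map;
import java.util.function.Predicate;

// Hypothetical single-predicate version of the four branches above:
// a null bound simply does not constrain that side of the range.
static Predicate<Map.Entry<Bytes, Long>> rangePredicate(byte[] refminvalue, byte[] refmaxvalue) {
    return entry -> {
        byte[] datum = entry.getKey().data;
        return (refminvalue == null || Bytes.compare(datum, refminvalue) >= 0)
                && (refmaxvalue == null || Bytes.compare(datum, refmaxvalue) <= 0);
    };
}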
Use of herddb.sql.SQLRecordKeyFunction in project herddb by diennea.
Class MemoryHashIndexManager, method scanner.
@Override
public Stream<Bytes> scanner(IndexOperation operation, StatementEvaluationContext context, TableContext tableContext) throws StatementExecutionException {
    if (operation instanceof SecondaryIndexSeek) {
        SecondaryIndexSeek sis = (SecondaryIndexSeek) operation;
        SQLRecordKeyFunction value = sis.value;
        byte[] refvalue = value.computeNewValue(null, context, tableContext);
        List<Bytes> result = data.get(Bytes.from_array(refvalue));
        if (result != null) {
            return result.stream();
        } else {
            return Stream.empty();
        }
    } else if (operation instanceof SecondaryIndexPrefixScan) {
        SecondaryIndexPrefixScan sis = (SecondaryIndexPrefixScan) operation;
        SQLRecordKeyFunction value = sis.value;
        byte[] refvalue = value.computeNewValue(null, context, tableContext);
        Predicate<Map.Entry<Bytes, List<Bytes>>> predicate = (Map.Entry<Bytes, List<Bytes>> entry) -> {
            byte[] recordValue = entry.getKey().data;
            return Bytes.startsWith(recordValue, refvalue.length, refvalue);
        };
        return data.entrySet().stream().filter(predicate).map(entry -> entry.getValue()).flatMap(l -> l.stream());
    } else if (operation instanceof SecondaryIndexRangeScan) {
        byte[] refminvalue;
        SecondaryIndexRangeScan sis = (SecondaryIndexRangeScan) operation;
        SQLRecordKeyFunction minKey = sis.minValue;
        if (minKey != null) {
            refminvalue = minKey.computeNewValue(null, context, tableContext);
        } else {
            refminvalue = null;
        }
        byte[] refmaxvalue;
        SQLRecordKeyFunction maxKey = sis.maxValue;
        if (maxKey != null) {
            refmaxvalue = maxKey.computeNewValue(null, context, tableContext);
        } else {
            refmaxvalue = null;
        }
        Predicate<Map.Entry<Bytes, List<Bytes>>> predicate;
        if (refminvalue != null && refmaxvalue == null) {
            predicate = (Map.Entry<Bytes, List<Bytes>> entry) -> {
                byte[] datum = entry.getKey().data;
                return Bytes.compare(datum, refminvalue) >= 0;
            };
        } else if (refminvalue == null && refmaxvalue != null) {
            predicate = (Map.Entry<Bytes, List<Bytes>> entry) -> {
                byte[] datum = entry.getKey().data;
                return Bytes.compare(datum, refmaxvalue) <= 0;
            };
        } else if (refminvalue != null && refmaxvalue != null) {
            predicate = (Map.Entry<Bytes, List<Bytes>> entry) -> {
                byte[] datum = entry.getKey().data;
                return Bytes.compare(datum, refmaxvalue) <= 0 && Bytes.compare(datum, refminvalue) >= 0;
            };
        } else {
            predicate = (Map.Entry<Bytes, List<Bytes>> entry) -> {
                return true;
            };
        }
        return data.entrySet().stream().filter(predicate).map(entry -> entry.getValue()).flatMap(l -> l.stream());
    } else {
        throw new UnsupportedOperationException("unsupported index access type " + operation);
    }
}
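The seek branch is the hot path here: SQLRecordKeyFunction.computeNewValue produces the indexed value once, and that value is used directly as the hash-map key. A minimal stand-alone sketch of that branch (hypothetical helper; data stands for the manager's Map<Bytes, List<Bytes>> as above, import paths assumed from the HerdDB source layout):

import herddb.model.StatementEvaluationContext;
import herddb.model.TableContext;
import herddb.sql.SQLRecordKeyFunction;
import herddb.utils.Bytes;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;

// Hypothetical stand-alone version of the SecondaryIndexSeek branch above:
// one value computation, one map lookup, empty stream on a miss.
static Stream<Bytes> seek(Map<Bytes, List<Bytes>> data, SQLRecordKeyFunction value,
        StatementEvaluationContext context, TableContext tableContext) {
    byte[] refvalue = value.computeNewValue(null, context, tableContext);
    List<Bytes> result = data.get(Bytes.from_array(refvalue));
    return result != null ? result.stream() : Stream.empty();
}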
Use of herddb.sql.SQLRecordKeyFunction in project herddb by diennea.
Class BLinkKeyToPageIndex, method scanner.
@Override
public Stream<Entry<Bytes, Long>> scanner(IndexOperation operation, StatementEvaluationContext context, TableContext tableContext, AbstractIndexManager index) throws DataStorageManagerException, StatementExecutionException {
    if (operation instanceof PrimaryIndexSeek) {
        PrimaryIndexSeek seek = (PrimaryIndexSeek) operation;
        byte[] seekValue = seek.value.computeNewValue(null, context, tableContext);
        if (seekValue == null) {
            return Stream.empty();
        }
        Bytes key = Bytes.from_array(seekValue);
        Long pageId = getTree().search(key);
        if (pageId == null) {
            return Stream.empty();
        }
        return Stream.of(new AbstractMap.SimpleImmutableEntry<>(key, pageId));
    }
    if (operation instanceof PrimaryIndexPrefixScan) {
        PrimaryIndexPrefixScan scan = (PrimaryIndexPrefixScan) operation;
        byte[] refvalue = scan.value.computeNewValue(null, context, tableContext);
        Bytes firstKey = Bytes.from_array(refvalue);
        Bytes lastKey = firstKey.next();
        return getTree().scan(firstKey, lastKey);
    }
    // every predicate (WHEREs...) will always be evaluated anyway on every record, in order to guarantee correctness
    if (index != null) {
        return index.recordSetScanner(operation, context, tableContext, this);
    }
    if (operation == null) {
        Stream<Map.Entry<Bytes, Long>> baseStream = getTree().scan(null, null);
        return baseStream;
    } else if (operation instanceof PrimaryIndexRangeScan) {
        Bytes refminvalue;
        PrimaryIndexRangeScan sis = (PrimaryIndexRangeScan) operation;
        SQLRecordKeyFunction minKey = sis.minValue;
        if (minKey != null) {
            refminvalue = Bytes.from_array(minKey.computeNewValue(null, context, tableContext));
        } else {
            refminvalue = null;
        }
        Bytes refmaxvalue;
        SQLRecordKeyFunction maxKey = sis.maxValue;
        if (maxKey != null) {
            refmaxvalue = Bytes.from_array(maxKey.computeNewValue(null, context, tableContext));
        } else {
            refmaxvalue = null;
        }
        return getTree().scan(refminvalue, refmaxvalue, refmaxvalue != null);
    }
    throw new DataStorageManagerException("operation " + operation + " not implemented on " + this.getClass());
}
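The prefix-scan branch converts a prefix match into a tree range scan: the lower bound is the prefix itself and the upper bound is Bytes.next() of it, so every key starting with the prefix falls inside the half-open interval. A minimal sketch of that bound computation (hypothetical helper; Bytes.from_array and next() used exactly as above):

import herddb.utils.Bytes;

// Hypothetical helper isolating the prefix-to-range conversion used above:
// keys matching the prefix sort between the prefix and its successor,
// so lastKey acts as an exclusive upper bound for the scan.
static Bytes[] prefixScanBounds(byte[] prefix) {
    Bytes firstKey = Bytes.from_array(prefix);
    Bytes lastKey = firstKey.next();
    return new Bytes[] { firstKey, lastKey };
}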
Use of herddb.sql.SQLRecordKeyFunction in project herddb by diennea.
Class BRINIndexManager, method scanner.
@Override
protected Stream<Bytes> scanner(IndexOperation operation, StatementEvaluationContext context, TableContext tableContext) throws StatementExecutionException {
    if (operation instanceof SecondaryIndexSeek) {
        SecondaryIndexSeek sis = (SecondaryIndexSeek) operation;
        SQLRecordKeyFunction value = sis.value;
        byte[] refvalue = value.computeNewValue(null, context, tableContext);
        List<Bytes> result = data.search(Bytes.from_array(refvalue));
        if (result != null) {
            return result.stream();
        } else {
            return Stream.empty();
        }
    } else if (operation instanceof SecondaryIndexPrefixScan) {
        SecondaryIndexPrefixScan sis = (SecondaryIndexPrefixScan) operation;
        SQLRecordKeyFunction value = sis.value;
        byte[] refvalue = value.computeNewValue(null, context, tableContext);
        Bytes firstKey = Bytes.from_array(refvalue);
        Bytes lastKey = firstKey.next();
        return data.query(firstKey, lastKey);
    } else if (operation instanceof SecondaryIndexRangeScan) {
        Bytes firstKey = null;
        Bytes lastKey = null;
        SecondaryIndexRangeScan sis = (SecondaryIndexRangeScan) operation;
        SQLRecordKeyFunction minKey = sis.minValue;
        if (minKey != null) {
            byte[] refminvalue = minKey.computeNewValue(null, context, tableContext);
            firstKey = Bytes.from_array(refminvalue);
        }
        SQLRecordKeyFunction maxKey = sis.maxValue;
        if (maxKey != null) {
            byte[] refmaxvalue = maxKey.computeNewValue(null, context, tableContext);
            lastKey = Bytes.from_array(refmaxvalue);
        }
        LOGGER.log(Level.FINE, "range scan on {0}.{1}, from {2} to {3}", new Object[] { index.table, index.name, firstKey, lastKey });
        return data.query(firstKey, lastKey);
    } else {
        throw new UnsupportedOperationException("unsupported index access type " + operation);
    }
}
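Both bound computations in the range branch follow the same shape: a null SQLRecordKeyFunction yields a null bound, which data.query presumably treats as an open end of the range, as the code passes nulls straight through. A minimal sketch of that shared step (hypothetical helper; computeNewValue signature as used above, import paths assumed from the HerdDB source layout):

import herddb.model.StatementEvaluationContext;
import herddb.model.TableContext;
import herddb.sql.SQLRecordKeyFunction;
import herddb.utils.Bytes;

// Hypothetical helper mirroring the min/max handling in the range branch above:
// no key function means no bound on that side of the scan.
static Bytes rangeBound(SQLRecordKeyFunction key, StatementEvaluationContext context, TableContext tableContext) {
    return key == null ? null : Bytes.from_array(key.computeNewValue(null, context, tableContext));
}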