Use of com.scalar.db.io.DataType in project scalardb by scalar-labs.
The class TableSchema, method buildTableMetadata.
protected TableMetadata buildTableMetadata(JsonObject tableDefinition)
    throws SchemaLoaderException {
  TableMetadata.Builder tableBuilder = TableMetadata.newBuilder();

  // Add partition keys
  if (!tableDefinition.keySet().contains(PARTITION_KEY)) {
    throw new SchemaLoaderException(
        "Parsing the schema JSON failed. Table must contains partition key");
  }
  JsonArray partitionKeys = tableDefinition.get(PARTITION_KEY).getAsJsonArray();
  traveledKeys.add(PARTITION_KEY);
  for (JsonElement partitionKey : partitionKeys) {
    tableBuilder.addPartitionKey(partitionKey.getAsString());
  }

  // Add clustering keys
  if (tableDefinition.keySet().contains(CLUSTERING_KEY)) {
    JsonArray clusteringKeys = tableDefinition.get(CLUSTERING_KEY).getAsJsonArray();
    traveledKeys.add(CLUSTERING_KEY);
    for (JsonElement clusteringKeyRaw : clusteringKeys) {
      String clusteringKey;
      String order;
      String[] clusteringKeyFull = clusteringKeyRaw.getAsString().split(" ", -1);
      if (clusteringKeyFull.length < 2) {
        clusteringKey = clusteringKeyFull[0];
        tableBuilder.addClusteringKey(clusteringKey);
      } else if (clusteringKeyFull.length == 2
          && (clusteringKeyFull[1].equalsIgnoreCase("ASC")
              || clusteringKeyFull[1].equalsIgnoreCase("DESC"))) {
        clusteringKey = clusteringKeyFull[0];
        order = clusteringKeyFull[1];
        tableBuilder.addClusteringKey(clusteringKey, ORDER_MAP.get(order.toUpperCase()));
      } else {
        throw new SchemaLoaderException(
            "Parsing the schema JSON failed. Invalid clustering keys");
      }
    }
  }

  boolean transaction = false;
  if (tableDefinition.keySet().contains(TRANSACTION)) {
    transaction = tableDefinition.get(TRANSACTION).getAsBoolean();
    traveledKeys.add(TRANSACTION);
  }
  if (transaction) {
    isTransactionalTable = true;
  }

  // Add columns
  if (!tableDefinition.keySet().contains(COLUMNS)) {
    throw new SchemaLoaderException(
        "Parsing the schema JSON failed. Table must contains columns");
  }
  JsonObject columns = tableDefinition.get(COLUMNS).getAsJsonObject();
  traveledKeys.add(COLUMNS);
  for (Entry<String, JsonElement> column : columns.entrySet()) {
    String columnName = column.getKey();
    DataType columnDataType = DATA_MAP_TYPE.get(column.getValue().getAsString().toUpperCase());
    if (columnDataType == null) {
      throw new SchemaLoaderException(
          "Parsing the schema JSON failed. Invalid column type for column " + columnName);
    }
    tableBuilder.addColumn(columnName, columnDataType);
  }

  // Add secondary indexes
  if (tableDefinition.keySet().contains(SECONDARY_INDEX)) {
    JsonArray secondaryIndexes = tableDefinition.get(SECONDARY_INDEX).getAsJsonArray();
    traveledKeys.add(SECONDARY_INDEX);
    for (JsonElement sIdx : secondaryIndexes) {
      tableBuilder.addSecondaryIndex(sIdx.getAsString());
    }
  }
  return tableBuilder.build();
}
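The method resolves column type strings through DATA_MAP_TYPE and clustering-order suffixes through ORDER_MAP, neither of which appears in the snippet. A minimal sketch of what those lookup maps could look like follows; the concrete contents shown here are an assumption for illustration, not confirmed by the snippet.

import com.google.common.collect.ImmutableMap;
import com.scalar.db.api.Scan.Ordering.Order;
import com.scalar.db.io.DataType;

// Assumed mapping from column type strings in the schema JSON to DataType values.
private static final ImmutableMap<String, DataType> DATA_MAP_TYPE =
    ImmutableMap.<String, DataType>builder()
        .put("BOOLEAN", DataType.BOOLEAN)
        .put("INT", DataType.INT)
        .put("BIGINT", DataType.BIGINT)
        .put("FLOAT", DataType.FLOAT)
        .put("DOUBLE", DataType.DOUBLE)
        .put("TEXT", DataType.TEXT)
        .put("BLOB", DataType.BLOB)
        .build();

// Assumed mapping from the "ASC"/"DESC" suffix on a clustering key entry to the
// Scan.Ordering.Order passed to TableMetadata.Builder#addClusteringKey.
private static final ImmutableMap<String, Order> ORDER_MAP =
    ImmutableMap.of("ASC", Order.ASC, "DESC", Order.DESC);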
Use of com.scalar.db.io.DataType in project scalardb by scalar-labs.
The class StorageSingleClusteringKeyScanIntegrationTestBase, method scan_WithClusteringKeyEndRange_ShouldReturnProperResult.
@Test
public void scan_WithClusteringKeyEndRange_ShouldReturnProperResult()
    throws ExecutionException, IOException {
  // Exercise every combination of clustering key type, clustering order,
  // end-range inclusiveness, ordering type, and limit
  for (DataType clusteringKeyType : clusteringKeyTypes) {
    for (Order clusteringOrder : Order.values()) {
      truncateTable(clusteringKeyType, clusteringOrder);
      List<Value<?>> clusteringKeyValues = prepareRecords(clusteringKeyType, clusteringOrder);
      for (boolean endInclusive : Arrays.asList(true, false)) {
        for (OrderingType orderingType : OrderingType.values()) {
          for (boolean withLimit : Arrays.asList(false, true)) {
            scan_WithClusteringKeyEndRange_ShouldReturnProperResult(
                clusteringKeyValues,
                clusteringKeyType,
                clusteringOrder,
                endInclusive,
                orderingType,
                withLimit);
          }
        }
      }
    }
  }
}
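The overload invoked in the innermost loop is not shown in this snippet. A hedged sketch of the kind of end-range scan it would issue, using the pre-builder Scan API, is below; the namespace, table, and column names and the literal values are assumptions for illustration only.

import com.scalar.db.api.Scan;
import com.scalar.db.api.Scan.Ordering;
import com.scalar.db.io.IntValue;
import com.scalar.db.io.Key;

// Hypothetical scan with a clustering-key end range; all names are assumptions.
Scan scan =
    new Scan(new Key(new IntValue("pkey", 1)))
        .withEnd(new Key(new IntValue("ckey", 10)), /* endInclusive= */ true)
        .withOrdering(new Ordering("ckey", Scan.Ordering.Order.ASC))
        .withLimit(5)
        .forNamespace("integration_testing")
        .forTable("test_table");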
Use of com.scalar.db.io.DataType in project scalardb by scalar-labs.
The class StorageSinglePartitionKeyIntegrationTestBase, method getAndDelete_ShouldBehaveCorrectly.
@Test
public void getAndDelete_ShouldBehaveCorrectly() throws ExecutionException {
  for (DataType partitionKeyType : partitionKeyTypes) {
    truncateTable(partitionKeyType);
    List<Value<?>> partitionKeyValues = prepareRecords(partitionKeyType);
    String description = description(partitionKeyType);

    // for get
    for (Value<?> partitionKeyValue : partitionKeyValues) {
      // Arrange
      Get get = prepareGet(partitionKeyType, partitionKeyValue);

      // Act
      Optional<Result> result = storage.get(get);

      // Assert
      Assertions.assertThat(result).describedAs(description).isPresent();
      Assertions.assertThat(result.get().getValue(PARTITION_KEY).isPresent())
          .describedAs(description)
          .isTrue();
      Assertions.assertThat(result.get().getValue(PARTITION_KEY).get())
          .describedAs(description)
          .isEqualTo(partitionKeyValue);
      Assertions.assertThat(result.get().getValue(COL_NAME).isPresent())
          .describedAs(description)
          .isTrue();
      Assertions.assertThat(result.get().getValue(COL_NAME).get().getAsInt())
          .describedAs(description)
          .isEqualTo(1);
    }

    // for delete
    for (Value<?> partitionKeyValue : partitionKeyValues) {
      // Arrange
      Delete delete = prepareDelete(partitionKeyType, partitionKeyValue);

      // Act
      storage.delete(delete);

      // Assert
      Optional<Result> result = storage.get(prepareGet(partitionKeyType, partitionKeyValue));
      Assertions.assertThat(result).describedAs(description).isNotPresent();
    }
  }
}
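The prepareGet and prepareDelete helpers are defined elsewhere in the test base. A minimal sketch of what they might look like is below; the namespace field, the getTableName helper, and the consistency choice are assumptions, not the project's actual implementation.

// Hypothetical helpers; namespace/table naming and consistency are assumptions.
private Get prepareGet(DataType partitionKeyType, Value<?> partitionKeyValue) {
  return new Get(new Key(partitionKeyValue))
      .withConsistency(Consistency.LINEARIZABLE)
      .forNamespace(namespace)
      .forTable(getTableName(partitionKeyType));
}

private Delete prepareDelete(DataType partitionKeyType, Value<?> partitionKeyValue) {
  return new Delete(new Key(partitionKeyValue))
      .withConsistency(Consistency.LINEARIZABLE)
      .forNamespace(namespace)
      .forTable(getTableName(partitionKeyType));
}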
Use of com.scalar.db.io.DataType in project scalardb by scalar-labs.
The class StorageMultipleClusteringKeyScanIntegrationTestBase, method execute.
private void execute(TestForFirstClusteringKeyScan test)
    throws java.util.concurrent.ExecutionException, InterruptedException {
  List<Callable<Void>> testCallables = new ArrayList<>();
  for (DataType firstClusteringKeyType : clusteringKeyTypes.keySet()) {
    for (Order firstClusteringOrder : Order.values()) {
      testCallables.add(
          () -> {
            truncateTable(firstClusteringKeyType, firstClusteringOrder, DataType.INT, Order.ASC);
            List<ClusteringKey> clusteringKeys =
                prepareRecords(firstClusteringKeyType, firstClusteringOrder, DataType.INT, Order.ASC);
            test.execute(clusteringKeys, firstClusteringKeyType, firstClusteringOrder);
            return null;
          });
    }
  }
  execute(testCallables);
}
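TestForFirstClusteringKeyScan is a callback type declared elsewhere in the test base. A sketch of how such an interface could be declared, with the signature inferred from the call above (the exact declaration is an assumption):

// Assumed shape of the callback consumed by execute(TestForFirstClusteringKeyScan).
@FunctionalInterface
interface TestForFirstClusteringKeyScan {
  void execute(
      List<ClusteringKey> clusteringKeys,
      DataType firstClusteringKeyType,
      Order firstClusteringOrder)
      throws Exception;
}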
Use of com.scalar.db.io.DataType in project scalardb by scalar-labs.
The class StorageMultipleClusteringKeyScanIntegrationTestBase, method deleteTables.
private static void deleteTables()
    throws java.util.concurrent.ExecutionException, InterruptedException {
  List<Callable<Void>> testCallables = new ArrayList<>();
  for (DataType firstClusteringKeyType : clusteringKeyTypes.keySet()) {
    Callable<Void> testCallable =
        () -> {
          for (DataType secondClusteringKeyType :
              clusteringKeyTypes.get(firstClusteringKeyType)) {
            for (Order firstClusteringOrder : Order.values()) {
              for (Order secondClusteringOrder : Order.values()) {
                admin.dropTable(
                    getNamespaceName(firstClusteringKeyType),
                    getTableName(
                        firstClusteringKeyType,
                        firstClusteringOrder,
                        secondClusteringKeyType,
                        secondClusteringOrder));
              }
            }
          }
          admin.dropNamespace(getNamespaceName(firstClusteringKeyType));
          return null;
        };
    testCallables.add(testCallable);
  }
  // Execute all the callables except the last one first, then execute the last one on its own.
  // The last table deletion also deletes the metadata table, and that step cannot be handled by
  // multiple threads/processes at the same time.
  execute(testCallables.subList(0, testCallables.size() - 1));
  execute(testCallables.subList(testCallables.size() - 1, testCallables.size()));
}
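Both methods above delegate to an execute(List<Callable<Void>>) overload that this page does not show. A plausible sketch is below: it creates a fixed thread pool per invocation, runs the callables in parallel, and rethrows the first failure. The pool sizing and the decision to build the executor locally are assumptions; the actual test base may reuse a shared executor.

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Hypothetical parallel-execution helper; not the project's actual implementation.
private static void execute(List<Callable<Void>> callables)
    throws java.util.concurrent.ExecutionException, InterruptedException {
  ExecutorService executorService = Executors.newFixedThreadPool(callables.size());
  try {
    List<Future<Void>> futures = executorService.invokeAll(callables);
    for (Future<Void> future : futures) {
      future.get(); // rethrows any exception raised inside a callable
    }
  } finally {
    executorService.shutdown();
  }
}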