Use of io.crate.testing.SQLExecutor in the project crate by crate.
The class CreateViewAnalyzerTest, method testCreateViewCreatesViewInDefaultSchema: verifies that an unqualified view name is resolved against the current schema of the session's search path.
@Test
public void testCreateViewCreatesViewInDefaultSchema() {
SQLExecutor sqlExecutor = SQLExecutor.builder(clusterService).setSearchPath("firstSchema", "secondSchema").build();
CreateViewStmt createView = sqlExecutor.analyze("create view v1 as select * from sys.nodes");
assertThat(createView.name(), is(new RelationName(sqlExecutor.getSessionContext().searchPath().currentSchema(), "v1")));
}
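For contrast, a minimal sketch (not taken from the project) of the same check without an explicit search path; it assumes the surrounding test class supplies clusterService, as the snippets on this page do, and that the analyzer then falls back to the session's default schema.
@Test
public void testCreateViewUsesDefaultSchemaWhenNoSearchPathIsSet() {
    // Hypothetical variant of the test above: no setSearchPath(), so currentSchema()
    // is the session's default schema rather than "firstSchema".
    SQLExecutor sqlExecutor = SQLExecutor.builder(clusterService).build();
    CreateViewStmt createView = sqlExecutor.analyze("create view v1 as select * from sys.nodes");
    assertThat(createView.name(), is(new RelationName(sqlExecutor.getSessionContext().searchPath().currentSchema(), "v1")));
}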
The class CreateTableAsAnalyzerTest, method testSimpleCompareaAgainstAnalyzedCreateTable: checks that CREATE TABLE ... AS SELECT produces the same analyzed table definition as the equivalent plain CREATE TABLE.
@Test
public void testSimpleCompareaAgainstAnalyzedCreateTable() throws IOException {
SQLExecutor e = SQLExecutor.builder(clusterService)
    .addTable("create table tbl (" +
        " col_default_object object as (" +
        " col_nested_integer integer," +
        " col_nested_object object as (" +
        " col_nested_timestamp_with_time_zone timestamp with time zone" +
        " )" +
        " )" +
        ")")
    .build();
var expected = (AnalyzedCreateTable) e.analyze("create table cpy (" +
    " col_default_object object as (" +
    " col_nested_integer integer," +
    " col_nested_object object as (" +
    " col_nested_timestamp_with_time_zone timestamp with time zone" +
    " )" +
    " )" +
    ")");
AnalyzedCreateTableAs analyzedCreateTableAs = e.analyze("create table cpy as select * from tbl");
var actual = analyzedCreateTableAs.analyzedCreateTable();
assertEquals(expected.relationName(), actual.relationName());
// used toString() to avoid testing nested elements. handled by SymbolToColumnDefinitionConverterTest
assertEquals(expected.createTable().toString(), actual.createTable().toString());
}
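As a follow-up, the target relation of the analyzed CREATE TABLE AS could also be compared against an explicit name; a minimal sketch, assuming 'doc' is the session's default schema (this assertion is illustrative and not part of the original test).
// Hypothetical extra assertion inside the test above; "doc" is assumed to be the default schema.
assertEquals(new RelationName("doc", "cpy"), actual.relationName());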
The class MetadataToASTNodeResolverTest, method test_bit_string_length_is_shown_in_show_create_table_output: asserts that the declared BIT length appears in the formatted CREATE TABLE output.
@Test
public void test_bit_string_length_is_shown_in_show_create_table_output() throws Exception {
SQLExecutor e = SQLExecutor.builder(clusterService).addTable("create table tbl (xs bit(8))").build();
DocTableInfo table = e.resolveTableInfo("tbl");
CreateTable<?> node = MetadataToASTNodeResolver.resolveCreateTable(table);
assertThat(SqlFormatter.formatSql(node), Matchers.containsString("\"xs\" BIT(8)"));
}
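The same resolve-and-format pattern works for spot-checking any single column definition; a minimal sketch following the snippet above (the table name tbl2 and its column are made up for illustration).
@Test
public void test_text_column_is_shown_in_show_create_table_output() throws Exception {
    // Hypothetical example: "tbl2" and its "name" column are illustrative only.
    SQLExecutor e = SQLExecutor.builder(clusterService).addTable("create table tbl2 (name text)").build();
    DocTableInfo table = e.resolveTableInfo("tbl2");
    CreateTable<?> node = MetadataToASTNodeResolver.resolveCreateTable(table);
    assertThat(SqlFormatter.formatSql(node), Matchers.containsString("\"name\" TEXT"));
}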
The class MetadataToASTNodeResolverTest, method testBuildCreateTableColumns: resolves a table with a wide range of column types back into an AST node and compares the fully formatted CREATE TABLE statement.
@Test
public void testBuildCreateTableColumns() throws Exception {
SQLExecutor e = SQLExecutor.builder(clusterService)
    .addTable("create table doc.test (" +
        " bools boolean," +
        " bytes byte," +
        " strings string," +
        " shorts short," +
        " floats float," +
        " doubles double," +
        " ints integer," +
        " longs long," +
        " timestamp timestamp with time zone," +
        " ip_addr ip," +
        " arr_simple array(string)," +
        " arr_geo_point array(geo_point)," +
        " arr_obj array(object(strict) as (" +
        " col_1 long," +
        " col_2 string" +
        " ))," +
        " obj object as (" +
        " col_1 long," +
        " col_2 string" +
        " )" +
        ") " +
        "clustered into 5 shards " +
        "with (" +
        " number_of_replicas = '0-all'," +
        " \"merge.scheduler.max_thread_count\" = 1" +
        ")")
    .build();
DocTableInfo tableInfo = e.resolveTableInfo("doc.test");
CreateTable node = MetadataToASTNodeResolver.resolveCreateTable(tableInfo);
assertEquals("CREATE TABLE IF NOT EXISTS \"doc\".\"test\" (\n" + " \"bools\" BOOLEAN,\n" + " \"bytes\" CHAR,\n" + " \"strings\" TEXT,\n" + " \"shorts\" SMALLINT,\n" + " \"floats\" REAL,\n" + " \"doubles\" DOUBLE PRECISION,\n" + " \"ints\" INTEGER,\n" + " \"longs\" BIGINT,\n" + " \"timestamp\" TIMESTAMP WITH TIME ZONE,\n" + " \"ip_addr\" IP,\n" + " \"arr_simple\" ARRAY(TEXT),\n" + " \"arr_geo_point\" ARRAY(GEO_POINT),\n" + " \"arr_obj\" ARRAY(OBJECT(STRICT) AS (\n" + " \"col_1\" BIGINT,\n" + " \"col_2\" TEXT\n" + " )),\n" + " \"obj\" OBJECT(DYNAMIC) AS (\n" + " \"col_1\" BIGINT,\n" + " \"col_2\" TEXT\n" + " )\n" + ")\n" + "CLUSTERED INTO 5 SHARDS\n" + "WITH (\n" + " \"allocation.max_retries\" = 5,\n" + " \"blocks.metadata\" = false,\n" + " \"blocks.read\" = false,\n" + " \"blocks.read_only\" = false,\n" + " \"blocks.read_only_allow_delete\" = false,\n" + " \"blocks.write\" = false,\n" + " codec = 'default',\n" + " column_policy = 'strict',\n" + " \"mapping.total_fields.limit\" = 1000,\n" + " max_ngram_diff = 1,\n" + " max_shingle_diff = 3,\n" + " \"merge.scheduler.max_thread_count\" = 1,\n" + " number_of_replicas = '0-all',\n" + " \"routing.allocation.enable\" = 'all',\n" + " \"routing.allocation.total_shards_per_node\" = -1,\n" + " \"store.type\" = 'fs',\n" + " \"translog.durability\" = 'REQUEST',\n" + " \"translog.flush_threshold_size\" = 536870912,\n" + " \"translog.sync_interval\" = 5000,\n" + " \"unassigned.node_left.delayed_timeout\" = 60000,\n" + " \"write.wait_for_active_shards\" = '1'\n" + ")", SqlFormatter.formatSql(node));
}
The class MetadataToASTNodeResolverTest, method testBuildCreateTableClusteredByPartitionedBy: verifies that CLUSTERED BY and PARTITIONED BY clauses are reproduced in the generated CREATE TABLE statement.
@Test
public void testBuildCreateTableClusteredByPartitionedBy() throws Exception {
SQLExecutor e = SQLExecutor.builder(clusterService)
    .addPartitionedTable("create table myschema.test (" +
        " id long," +
        " partition_column string," +
        " cluster_column string" +
        ") " +
        "partitioned by (partition_column) " +
        "clustered by (cluster_column) into 5 shards " +
        "with (" +
        " number_of_replicas = '0-all'," +
        " \"merge.scheduler.max_thread_count\" = 1" +
        ")")
    .build();
DocTableInfo tableInfo = e.resolveTableInfo("myschema.test");
CreateTable node = MetadataToASTNodeResolver.resolveCreateTable(tableInfo);
assertEquals("CREATE TABLE IF NOT EXISTS \"myschema\".\"test\" (\n" + " \"id\" BIGINT,\n" + " \"partition_column\" TEXT,\n" + " \"cluster_column\" TEXT\n" + ")\n" + "CLUSTERED BY (\"cluster_column\") INTO 5 SHARDS\n" + "PARTITIONED BY (\"partition_column\")\n" + "WITH (\n" + " \"allocation.max_retries\" = 5,\n" + " \"blocks.metadata\" = false,\n" + " \"blocks.read\" = false,\n" + " \"blocks.read_only\" = false,\n" + " \"blocks.read_only_allow_delete\" = false,\n" + " \"blocks.write\" = false,\n" + " codec = 'default',\n" + " column_policy = 'strict',\n" + " \"mapping.total_fields.limit\" = 1000,\n" + " max_ngram_diff = 1,\n" + " max_shingle_diff = 3,\n" + " \"merge.scheduler.max_thread_count\" = 1,\n" + " number_of_replicas = '0-all',\n" + " \"routing.allocation.enable\" = 'all',\n" + " \"routing.allocation.total_shards_per_node\" = -1,\n" + " \"store.type\" = 'fs',\n" + " \"translog.durability\" = 'REQUEST',\n" + " \"translog.flush_threshold_size\" = 536870912,\n" + " \"translog.sync_interval\" = 5000,\n" + " \"unassigned.node_left.delayed_timeout\" = 60000,\n" + " \"write.wait_for_active_shards\" = '1'\n" + ")", SqlFormatter.formatSql(node));
}
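When the full WITH-clause listing is not of interest, the same partitioned-table setup can be checked more narrowly; a minimal sketch along the lines of the snippets above (the narrower containsString assertion, and omitting the with clause from the table definition, are illustrative choices, not from the project).
@Test
public void testPartitionedByClauseIsRendered() throws Exception {
    // Hypothetical narrower check: only the PARTITIONED BY clause of the generated statement is asserted.
    SQLExecutor e = SQLExecutor.builder(clusterService)
        .addPartitionedTable("create table myschema.test (" +
            " id long," +
            " partition_column string," +
            " cluster_column string" +
            ") " +
            "partitioned by (partition_column) " +
            "clustered by (cluster_column) into 5 shards")
        .build();
    DocTableInfo tableInfo = e.resolveTableInfo("myschema.test");
    CreateTable node = MetadataToASTNodeResolver.resolveCreateTable(tableInfo);
    assertThat(SqlFormatter.formatSql(node), Matchers.containsString("PARTITIONED BY (\"partition_column\")"));
}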