
Example 66 with DocTableInfo

use of io.crate.metadata.doc.DocTableInfo in project crate by crate.

the class MetadataToASTNodeResolverTest method testBuildCreateTableColumns.

@Test
public void testBuildCreateTableColumns() throws Exception {
    SQLExecutor e = SQLExecutor.builder(clusterService)
        .addTable("create table doc.test (" +
                  " bools boolean," +
                  " bytes byte," +
                  " strings string," +
                  " shorts short," +
                  " floats float," +
                  " doubles double," +
                  " ints integer," +
                  " longs long," +
                  " timestamp timestamp with time zone," +
                  " ip_addr ip," +
                  " arr_simple array(string)," +
                  " arr_geo_point array(geo_point)," +
                  " arr_obj array(object(strict) as (" +
                  "  col_1 long," +
                  "  col_2 string" +
                  " ))," +
                  " obj object as (" +
                  "  col_1 long," +
                  "  col_2 string" +
                  " )" +
                  ") " +
                  "clustered into 5 shards " +
                  "with (" +
                  " number_of_replicas = '0-all'," +
                  " \"merge.scheduler.max_thread_count\" = 1" +
                  ")")
        .build();
    DocTableInfo tableInfo = e.resolveTableInfo("doc.test");
    CreateTable node = MetadataToASTNodeResolver.resolveCreateTable(tableInfo);
    assertEquals("CREATE TABLE IF NOT EXISTS \"doc\".\"test\" (\n" + "   \"bools\" BOOLEAN,\n" + "   \"bytes\" CHAR,\n" + "   \"strings\" TEXT,\n" + "   \"shorts\" SMALLINT,\n" + "   \"floats\" REAL,\n" + "   \"doubles\" DOUBLE PRECISION,\n" + "   \"ints\" INTEGER,\n" + "   \"longs\" BIGINT,\n" + "   \"timestamp\" TIMESTAMP WITH TIME ZONE,\n" + "   \"ip_addr\" IP,\n" + "   \"arr_simple\" ARRAY(TEXT),\n" + "   \"arr_geo_point\" ARRAY(GEO_POINT),\n" + "   \"arr_obj\" ARRAY(OBJECT(STRICT) AS (\n" + "      \"col_1\" BIGINT,\n" + "      \"col_2\" TEXT\n" + "   )),\n" + "   \"obj\" OBJECT(DYNAMIC) AS (\n" + "      \"col_1\" BIGINT,\n" + "      \"col_2\" TEXT\n" + "   )\n" + ")\n" + "CLUSTERED INTO 5 SHARDS\n" + "WITH (\n" + "   \"allocation.max_retries\" = 5,\n" + "   \"blocks.metadata\" = false,\n" + "   \"blocks.read\" = false,\n" + "   \"blocks.read_only\" = false,\n" + "   \"blocks.read_only_allow_delete\" = false,\n" + "   \"blocks.write\" = false,\n" + "   codec = 'default',\n" + "   column_policy = 'strict',\n" + "   \"mapping.total_fields.limit\" = 1000,\n" + "   max_ngram_diff = 1,\n" + "   max_shingle_diff = 3,\n" + "   \"merge.scheduler.max_thread_count\" = 1,\n" + "   number_of_replicas = '0-all',\n" + "   \"routing.allocation.enable\" = 'all',\n" + "   \"routing.allocation.total_shards_per_node\" = -1,\n" + "   \"store.type\" = 'fs',\n" + "   \"translog.durability\" = 'REQUEST',\n" + "   \"translog.flush_threshold_size\" = 536870912,\n" + "   \"translog.sync_interval\" = 5000,\n" + "   \"unassigned.node_left.delayed_timeout\" = 60000,\n" + "   \"write.wait_for_active_shards\" = '1'\n" + ")", SqlFormatter.formatSql(node));
}
Also used : DocTableInfo(io.crate.metadata.doc.DocTableInfo) SQLExecutor(io.crate.testing.SQLExecutor) CreateTable(io.crate.sql.tree.CreateTable) Test(org.junit.Test) CrateDummyClusterServiceUnitTest(io.crate.test.integration.CrateDummyClusterServiceUnitTest)
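
Stripped of the long assertion, the round trip this example exercises is small; a minimal sketch reusing only the calls shown above (the table name doc.t1 and its single column are illustrative placeholders):

// Build a cluster state that contains the table, then resolve its metadata.
SQLExecutor executor = SQLExecutor.builder(clusterService)
    .addTable("create table doc.t1 (x integer)")
    .build();
DocTableInfo table = executor.resolveTableInfo("doc.t1");
// Convert the metadata back into a CREATE TABLE AST node ...
CreateTable statement = MetadataToASTNodeResolver.resolveCreateTable(table);
// ... and render it as SQL text, which is what the assertion above compares against.
String sql = SqlFormatter.formatSql(statement);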

Example 67 with DocTableInfo

use of io.crate.metadata.doc.DocTableInfo in project crate by crate.

the class MetadataToASTNodeResolverTest method testBuildCreateTableClusteredByPartitionedBy.

@Test
public void testBuildCreateTableClusteredByPartitionedBy() throws Exception {
    SQLExecutor e = SQLExecutor.builder(clusterService)
        .addPartitionedTable("create table myschema.test (" +
                             " id long," +
                             " partition_column string," +
                             " cluster_column string" +
                             ") " +
                             "partitioned by (partition_column) " +
                             "clustered by (cluster_column) into 5 shards " +
                             "with (" +
                             " number_of_replicas = '0-all'," +
                             " \"merge.scheduler.max_thread_count\" = 1" +
                             ")")
        .build();
    DocTableInfo tableInfo = e.resolveTableInfo("myschema.test");
    CreateTable node = MetadataToASTNodeResolver.resolveCreateTable(tableInfo);
    assertEquals("CREATE TABLE IF NOT EXISTS \"myschema\".\"test\" (\n" + "   \"id\" BIGINT,\n" + "   \"partition_column\" TEXT,\n" + "   \"cluster_column\" TEXT\n" + ")\n" + "CLUSTERED BY (\"cluster_column\") INTO 5 SHARDS\n" + "PARTITIONED BY (\"partition_column\")\n" + "WITH (\n" + "   \"allocation.max_retries\" = 5,\n" + "   \"blocks.metadata\" = false,\n" + "   \"blocks.read\" = false,\n" + "   \"blocks.read_only\" = false,\n" + "   \"blocks.read_only_allow_delete\" = false,\n" + "   \"blocks.write\" = false,\n" + "   codec = 'default',\n" + "   column_policy = 'strict',\n" + "   \"mapping.total_fields.limit\" = 1000,\n" + "   max_ngram_diff = 1,\n" + "   max_shingle_diff = 3,\n" + "   \"merge.scheduler.max_thread_count\" = 1,\n" + "   number_of_replicas = '0-all',\n" + "   \"routing.allocation.enable\" = 'all',\n" + "   \"routing.allocation.total_shards_per_node\" = -1,\n" + "   \"store.type\" = 'fs',\n" + "   \"translog.durability\" = 'REQUEST',\n" + "   \"translog.flush_threshold_size\" = 536870912,\n" + "   \"translog.sync_interval\" = 5000,\n" + "   \"unassigned.node_left.delayed_timeout\" = 60000,\n" + "   \"write.wait_for_active_shards\" = '1'\n" + ")", SqlFormatter.formatSql(node));
}
Also used : DocTableInfo(io.crate.metadata.doc.DocTableInfo) SQLExecutor(io.crate.testing.SQLExecutor) CreateTable(io.crate.sql.tree.CreateTable) Test(org.junit.Test) CrateDummyClusterServiceUnitTest(io.crate.test.integration.CrateDummyClusterServiceUnitTest)

Example 68 with DocTableInfo

use of io.crate.metadata.doc.DocTableInfo in project crate by crate.

the class DocLevelExpressionsTest method prepare.

@Before
public void prepare() throws Exception {
    SQLExecutor e = SQLExecutor.builder(clusterService).addTable(createTableStatement).build();
    indexEnv = new IndexEnv(
        THREAD_POOL,
        (DocTableInfo) StreamSupport.stream(e.schemas().spliterator(), false)
            .filter(x -> x instanceof DocSchemaInfo)
            .map(x -> (DocSchemaInfo) x)
            .findFirst()
            .orElseThrow(() -> new IllegalStateException("No doc schema found"))
            .getTables().iterator().next(),
        clusterService.state(),
        Version.CURRENT,
        createTempDir());
    IndexWriter writer = indexEnv.writer();
    insertValues(writer);
    DirectoryReader directoryReader = DirectoryReader.open(writer, true, true);
    readerContext = directoryReader.leaves().get(0);
    ctx = new CollectorContext();
}
Also used : DocTableInfo(io.crate.metadata.doc.DocTableInfo) CollectorContext(io.crate.expression.reference.doc.lucene.CollectorContext) IndexEnv(io.crate.testing.IndexEnv) DirectoryReader(org.apache.lucene.index.DirectoryReader) CrateDummyClusterServiceUnitTest(io.crate.test.integration.CrateDummyClusterServiceUnitTest) IndexWriter(org.apache.lucene.index.IndexWriter) Version(org.elasticsearch.Version) DocSchemaInfo(io.crate.metadata.doc.DocSchemaInfo) After(org.junit.After) StreamSupport(java.util.stream.StreamSupport) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) SQLExecutor(io.crate.testing.SQLExecutor) Before(org.junit.Before)
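
When the table created by createTableStatement has a known name, the schema-streaming lookup above can be written more directly with the resolveTableInfo helper used in the other examples; a minimal sketch, assuming a hypothetical table name doc.t:

// Direct lookup instead of streaming over all schemas; "doc.t" is a hypothetical
// placeholder for the table that createTableStatement defines.
DocTableInfo tableInfo = e.resolveTableInfo("doc.t");
indexEnv = new IndexEnv(THREAD_POOL, tableInfo, clusterService.state(), Version.CURRENT, createTempDir());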

Example 69 with DocTableInfo

use of io.crate.metadata.doc.DocTableInfo in project crate by crate.

the class CommonQueryBuilderTest method testWhereRefEqNullWithDifferentTypes.

@Test
public void testWhereRefEqNullWithDifferentTypes() throws Exception {
    for (DataType<?> type : DataTypes.PRIMITIVE_TYPES) {
        if (type.storageSupport() == null) {
            continue;
        }
        // ensure the test is operating on a fresh, empty cluster state (no existing tables)
        resetClusterService();
        DocTableInfo tableInfo = SQLExecutor.tableInfo(
            new RelationName(DocSchemaInfo.NAME, "test_primitive"),
            "create table doc.test_primitive (" +
            "  x " + type.getName() +
            ")",
            clusterService);
        TableRelation tableRelation = new TableRelation(tableInfo);
        Map<RelationName, AnalyzedRelation> tableSources = Map.of(tableInfo.ident(), tableRelation);
        SqlExpressions sqlExpressions = new SqlExpressions(tableSources, tableRelation, User.CRATE_USER);
        Query query = convert(sqlExpressions.normalize(sqlExpressions.asSymbol("x = null")));
        // must always become a MatchNoDocsQuery
        // string: term query with null would cause NPE
        // int/numeric: rangeQuery from null to null would match all
        // bool:  term would match false too because of the condition in the eq query builder
        assertThat(query, instanceOf(MatchNoDocsQuery.class));
    }
}
Also used : DocTableInfo(io.crate.metadata.doc.DocTableInfo) CrateRegexQuery(io.crate.lucene.match.CrateRegexQuery) Query(org.apache.lucene.search.Query) MatchNoDocsQuery(org.apache.lucene.search.MatchNoDocsQuery) RegexpQuery(org.apache.lucene.search.RegexpQuery) PointInSetQuery(org.apache.lucene.search.PointInSetQuery) ConstantScoreQuery(org.apache.lucene.search.ConstantScoreQuery) IntersectsPrefixTreeQuery(org.apache.lucene.spatial.prefix.IntersectsPrefixTreeQuery) PointRangeQuery(org.apache.lucene.search.PointRangeQuery) TermInSetQuery(org.apache.lucene.search.TermInSetQuery) MatchAllDocsQuery(org.apache.lucene.search.MatchAllDocsQuery) DocValuesFieldExistsQuery(org.apache.lucene.search.DocValuesFieldExistsQuery) TermQuery(org.apache.lucene.search.TermQuery) BooleanQuery(org.apache.lucene.search.BooleanQuery) TermRangeQuery(org.apache.lucene.search.TermRangeQuery) RelationName(io.crate.metadata.RelationName) AnalyzedRelation(io.crate.analyze.relations.AnalyzedRelation) SqlExpressions(io.crate.testing.SqlExpressions) TableRelation(io.crate.analyze.relations.TableRelation) Test(org.junit.Test)
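
For a single concrete type the loop body above reduces to a handful of calls; a sketch for the integer type, using the same helpers the test already relies on (SQLExecutor.tableInfo, SqlExpressions and this test class's convert helper):

// One iteration of the loop, spelled out for the integer type.
DocTableInfo tableInfo = SQLExecutor.tableInfo(
    new RelationName(DocSchemaInfo.NAME, "test_primitive"),
    "create table doc.test_primitive (x integer)",
    clusterService);
TableRelation tableRelation = new TableRelation(tableInfo);
SqlExpressions expressions = new SqlExpressions(
    Map.of(tableInfo.ident(), tableRelation), tableRelation, User.CRATE_USER);
// "x = null" can never match a row, so the query builder must emit a MatchNoDocsQuery.
Query query = convert(expressions.normalize(expressions.asSymbol("x = null")));
assertThat(query, instanceOf(MatchNoDocsQuery.class));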

Example 70 with DocTableInfo

use of io.crate.metadata.doc.DocTableInfo in project crate by crate.

the class CopyAnalyzer method convertCopyFrom.

CopyFromAnalyzedStatement convertCopyFrom(CopyFrom node, Analysis analysis) {
    DocTableInfo tableInfo = schemas.getWritableTable(TableIdent.of(node.table(), analysis.sessionContext().defaultSchema()));
    DocTableRelation tableRelation = new DocTableRelation(tableInfo);
    Operation.blockedRaiseException(tableInfo, Operation.INSERT);
    String partitionIdent = null;
    if (!node.table().partitionProperties().isEmpty()) {
        partitionIdent = PartitionPropertiesAnalyzer.toPartitionIdent(tableInfo, node.table().partitionProperties(), analysis.parameterContext().parameters());
    }
    EvaluatingNormalizer normalizer = new EvaluatingNormalizer(functions, RowGranularity.CLUSTER, ReplaceMode.MUTATE, null, tableRelation);
    ExpressionAnalyzer expressionAnalyzer = createExpressionAnalyzer(analysis, tableRelation);
    expressionAnalyzer.setResolveFieldsOperation(Operation.INSERT);
    ExpressionAnalysisContext expressionAnalysisContext = new ExpressionAnalysisContext();
    Predicate<DiscoveryNode> nodeFilters = Predicates.alwaysTrue();
    Settings settings = Settings.EMPTY;
    if (node.genericProperties().isPresent()) {
        // copy map as items are removed. The GenericProperties map is cached in the query cache and removing
        // items would cause subsequent queries that hit the cache to have different genericProperties
        Map<String, Expression> properties = new HashMap<>(node.genericProperties().get().properties());
        nodeFilters = discoveryNodePredicate(analysis.parameterContext().parameters(), properties.remove(NodeFilters.NAME));
        settings = settingsFromProperties(properties, expressionAnalyzer, expressionAnalysisContext);
    }
    Symbol uri = expressionAnalyzer.convert(node.path(), expressionAnalysisContext);
    uri = normalizer.normalize(uri, analysis.transactionContext());
    if (!(uri.valueType() == DataTypes.STRING || uri.valueType() instanceof CollectionType && ((CollectionType) uri.valueType()).innerType() == DataTypes.STRING)) {
        throw CopyFromAnalyzedStatement.raiseInvalidType(uri.valueType());
    }
    return new CopyFromAnalyzedStatement(tableInfo, settings, uri, partitionIdent, nodeFilters);
}
Also used : DocTableInfo(io.crate.metadata.doc.DocTableInfo) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) ExpressionAnalysisContext(io.crate.analyze.expressions.ExpressionAnalysisContext) Symbol(io.crate.analyze.symbol.Symbol) ExpressionAnalyzer(io.crate.analyze.expressions.ExpressionAnalyzer) CollectionType(io.crate.types.CollectionType) DocTableRelation(io.crate.analyze.relations.DocTableRelation) Settings(org.elasticsearch.common.settings.Settings)

Aggregations

DocTableInfo (io.crate.metadata.doc.DocTableInfo) 127
Test (org.junit.Test) 56
CrateDummyClusterServiceUnitTest (io.crate.test.integration.CrateDummyClusterServiceUnitTest) 40
Symbol (io.crate.expression.symbol.Symbol) 27
Reference (io.crate.metadata.Reference) 27
SQLExecutor (io.crate.testing.SQLExecutor) 25
RelationName (io.crate.metadata.RelationName) 24
DocTableRelation (io.crate.analyze.relations.DocTableRelation) 20
ColumnIdent (io.crate.metadata.ColumnIdent) 20
TableInfo (io.crate.metadata.table.TableInfo) 18
Assignments (io.crate.expression.symbol.Assignments) 16
Row (io.crate.data.Row) 14
PlannerContext (io.crate.planner.PlannerContext) 13
Before (org.junit.Before) 13
RowConsumer (io.crate.data.RowConsumer) 12
CoordinatorTxnCtx (io.crate.metadata.CoordinatorTxnCtx) 12
PartitionName (io.crate.metadata.PartitionName) 12
DependencyCarrier (io.crate.planner.DependencyCarrier) 12
ArrayList (java.util.ArrayList) 12
Map (java.util.Map) 12