
Example 1 with MetaData

Use of org.elasticsearch.cluster.metadata.MetaData in project crate by crate.

From class SQLTransportIntegrationTest, method getIndexMapping:

/**
 * Get all mappings of an index as a JSON string.
 *
 * @param index the name of the index
 * @return the index mapping as a JSON string
 * @throws IOException if building the JSON output fails
 */
protected String getIndexMapping(String index) throws IOException {
    // Request only the metadata part of the cluster state, restricted to the given index.
    ClusterStateRequest request = Requests.clusterStateRequest().routingTable(false).nodes(false).metaData(true).indices(index);
    ClusterStateResponse response = client().admin().cluster().state(request).actionGet();
    MetaData metaData = response.getState().metaData();
    XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
    // Only one index was requested, so the first entry is the index we are interested in.
    IndexMetaData indexMetaData = metaData.iterator().next();
    for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
        // One field per mapping type, containing the mapping source as a map.
        builder.field(cursor.value.type());
        builder.map(cursor.value.sourceAsMap());
    }
    builder.endObject();
    return builder.string();
}
Also used : ClusterStateResponse(org.elasticsearch.action.admin.cluster.state.ClusterStateResponse) MetaData(org.elasticsearch.cluster.metadata.MetaData) MappingMetaData(org.elasticsearch.cluster.metadata.MappingMetaData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) ClusterStateRequest(org.elasticsearch.action.admin.cluster.state.ClusterStateRequest) XContentBuilder(org.elasticsearch.common.xcontent.XContentBuilder)
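
A quick usage sketch: inside a SQLTransportIntegrationTest subclass, a test could call this helper and assert on the returned JSON. The table and column names below are made up for illustration; containsString is the Hamcrest matcher listed under Aggregations.

// Hypothetical usage of getIndexMapping(); table and column names are illustrative.
execute("create table my_table (id int, name string)");
ensureYellow();
String mapping = getIndexMapping("my_table");
assertThat(mapping, containsString("\"name\""));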

Example 2 with MetaData

Use of org.elasticsearch.cluster.metadata.MetaData in project crate by crate.

From class PartitionedTableConcurrentIntegrationTest, method deletePartitionWhileInsertingData:

private void deletePartitionWhileInsertingData(final boolean useBulk) throws Exception {
    execute("create table parted (id int, name string) " + "partitioned by (id) " + "with (number_of_replicas = 0)");
    ensureYellow();
    int numberOfDocs = 1000;
    final Object[][] bulkArgs = new Object[numberOfDocs][];
    for (int i = 0; i < numberOfDocs; i++) {
        bulkArgs[i] = new Object[] { i % 2, randomAsciiOfLength(10) };
    }
    // partition to delete
    final int idToDelete = 1;
    final AtomicReference<Exception> exceptionRef = new AtomicReference<>();
    final CountDownLatch insertLatch = new CountDownLatch(1);
    final String insertStmt = "insert into parted (id, name) values (?, ?)";
    Thread insertThread = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                if (useBulk) {
                    execute(insertStmt, bulkArgs);
                } else {
                    for (Object[] args : bulkArgs) {
                        execute(insertStmt, args);
                    }
                }
            } catch (Exception t) {
                exceptionRef.set(t);
            } finally {
                insertLatch.countDown();
            }
        }
    });
    final CountDownLatch deleteLatch = new CountDownLatch(1);
    final String partitionName = new PartitionName("parted", Collections.singletonList(new BytesRef(String.valueOf(idToDelete)))).asIndexName();
    final Object[] deleteArgs = new Object[] { idToDelete };
    Thread deleteThread = new Thread(new Runnable() {

        @Override
        public void run() {
            boolean deleted = false;
            while (!deleted) {
                try {
                    MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData();
                    if (metaData.indices().get(partitionName) != null) {
                        execute("delete from parted where id = ?", deleteArgs);
                        deleted = true;
                    }
                } catch (Throwable t) {
                    // ignore (usually the partition index does not exist yet)
                }
            }
            deleteLatch.countDown();
        }
    });
    insertThread.start();
    deleteThread.start();
    deleteLatch.await(SQLTransportExecutor.REQUEST_TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS);
    insertLatch.await(SQLTransportExecutor.REQUEST_TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS);
    Exception exception = exceptionRef.get();
    if (exception != null) {
        throw exception;
    }
    insertThread.join();
    deleteThread.join();
}
Also used : AtomicReference(java.util.concurrent.atomic.AtomicReference) CountDownLatch(java.util.concurrent.CountDownLatch) PartitionName(io.crate.metadata.PartitionName) MetaData(org.elasticsearch.cluster.metadata.MetaData) BytesRef(org.apache.lucene.util.BytesRef)
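
The coordination idiom above, where worker threads report failures through an AtomicReference and signal completion via a CountDownLatch, can be shown in isolation. The class below is a minimal, self-contained sketch with illustrative values; it is not Crate code.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public final class WorkerErrorPropagation {

    public static void main(String[] args) throws Exception {
        final AtomicReference<Exception> failure = new AtomicReference<>();
        final CountDownLatch done = new CountDownLatch(1);
        Thread worker = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    Integer.parseInt("42"); // stand-in for the real work (the inserts/deletes above)
                } catch (Exception e) {
                    failure.set(e);         // remember the failure for the waiting thread
                } finally {
                    done.countDown();       // always release the waiter, even on failure
                }
            }
        });
        worker.start();
        done.await(30, TimeUnit.SECONDS);   // bounded wait, as in the test above
        worker.join();
        if (failure.get() != null) {
            throw failure.get();            // re-throw on the calling thread
        }
    }
}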

Example 3 with MetaData

Use of org.elasticsearch.cluster.metadata.MetaData in project crate by crate.

From class PartitionedTableIntegrationTest, method testInsertPartitionedTable:

@Test
public void testInsertPartitionedTable() throws Exception {
    execute("create table parted (id integer, name string, date timestamp)" + "partitioned by (date)");
    ensureYellow();
    String templateName = PartitionName.templateName(null, "parted");
    GetIndexTemplatesResponse templatesResponse = client().admin().indices().prepareGetTemplates(templateName).execute().actionGet();
    assertThat(templatesResponse.getIndexTemplates().get(0).template(), is(templateName + "*"));
    assertThat(templatesResponse.getIndexTemplates().get(0).name(), is(templateName));
    assertTrue(templatesResponse.getIndexTemplates().get(0).getAliases().get("parted") != null);
    execute("insert into parted (id, name, date) values (?, ?, ?)", new Object[] { 1, "Ford", 13959981214861L });
    assertThat(response.rowCount(), is(1L));
    ensureYellow();
    refresh();
    assertTrue(clusterService().state().metaData().hasAlias("parted"));
    // Each partition value is stored in its own internal index, which carries the table name as an alias.
    String partitionName = new PartitionName("parted", Collections.singletonList(new BytesRef(String.valueOf(13959981214861L)))).asIndexName();
    MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData();
    assertNotNull(metaData.indices().get(partitionName).getAliases().get("parted"));
    assertThat(client().prepareSearch(partitionName).setTypes(Constants.DEFAULT_MAPPING_TYPE).setSize(0).setQuery(new MatchAllQueryBuilder()).execute().actionGet().getHits().totalHits(), is(1L));
    execute("select id, name, date from parted");
    assertThat(response.rowCount(), is(1L));
    assertThat((Integer) response.rows()[0][0], is(1));
    assertThat((String) response.rows()[0][1], is("Ford"));
    assertThat((Long) response.rows()[0][2], is(13959981214861L));
}
Also used : PartitionName(io.crate.metadata.PartitionName) GetIndexTemplatesResponse(org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse) MetaData(org.elasticsearch.cluster.metadata.MetaData) MappingMetaData(org.elasticsearch.cluster.metadata.MappingMetaData) DocIndexMetaData(io.crate.metadata.doc.DocIndexMetaData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) IndexTemplateMetaData(org.elasticsearch.cluster.metadata.IndexTemplateMetaData) BytesRef(org.apache.lucene.util.BytesRef) MatchAllQueryBuilder(org.elasticsearch.index.query.MatchAllQueryBuilder) Test(org.junit.Test)
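
For orientation, the two PartitionName helpers used in Examples 2 and 3 relate as sketched below. This only restates the calls shown above; the exact encoding of the internal index name is a Crate implementation detail, so the printed value is for illustration only.

import java.util.Collections;
import org.apache.lucene.util.BytesRef;
import io.crate.metadata.PartitionName;

final class PartitionNamingSketch {

    static void show() {
        // Template prefix covering all partitions of "parted"; the test above expects template() to be this + "*".
        String template = PartitionName.templateName(null, "parted");
        // Concrete internal index name for the partition holding date = 13959981214861.
        String partitionIndex = new PartitionName("parted",
                Collections.singletonList(new BytesRef(String.valueOf(13959981214861L)))).asIndexName();
        System.out.println(template + "* covers " + partitionIndex);
    }
}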

Example 4 with MetaData

Use of org.elasticsearch.cluster.metadata.MetaData in project crate by crate.

From class BulkRetryCoordinatorPoolTest, method prepare:

@Before
public void prepare() {
    // Build a cluster state for a single test index with 3 primary shards and no replicas,
    // on a one-node cluster whose local node is NODE_IDS[0].
    MetaData metaData = MetaData.builder().put(IndexMetaData.builder(TEST_INDEX).settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(0)).build();
    RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index(TEST_INDEX)).build();
    ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
    state = ClusterState.builder(state).nodes(DiscoveryNodes.builder().put(newNode(NODE_IDS[0])).localNodeId(NODE_IDS[0])).build();
    // Let the allocation service assign the new shards before handing the state to the pool.
    AllocationService allocationService = createAllocationService();
    routingTable = allocationService.reroute(state, "test").routingTable();
    state = ClusterState.builder(state).routingTable(routingTable).build();
    ClusterService clusterService = new NoopClusterService(state);
    this.state = state;
    pool = new BulkRetryCoordinatorPool(Settings.EMPTY, clusterService, mock(ThreadPool.class));
    pool.start();
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) NoopClusterService(org.elasticsearch.test.cluster.NoopClusterService) ClusterService(org.elasticsearch.cluster.ClusterService) RoutingTable(org.elasticsearch.cluster.routing.RoutingTable) MetaData(org.elasticsearch.cluster.metadata.MetaData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService) ESAllocationTestCase.createAllocationService(org.elasticsearch.test.ESAllocationTestCase.createAllocationService) Before(org.junit.Before)
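
Note that newNode(...) above comes from the Elasticsearch allocation test scaffolding (ESAllocationTestCase, whose createAllocationService appears in the imports). If a similar helper were needed outside that base class, a rough stand-in, modeled on the DiscoveryNode construction in Example 5, might look like this; the exact signature is an assumption.

// Hypothetical stand-in for the newNode(...) test helper, based on Example 5's DiscoveryNode usage.
private DiscoveryNode newNode(String nodeId) {
    return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, org.elasticsearch.Version.CURRENT);
}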

Example 5 with MetaData

Use of org.elasticsearch.cluster.metadata.MetaData in project crate by crate.

From class CreateAlterTableStatementAnalyzerTest, method init:

@Before
public void init() throws Exception {
    // A custom fulltext analyzer definition is stored, encoded, under a persistent cluster setting.
    String analyzerSettings = FulltextAnalyzerResolver.encodeSettings(Settings.builder().put("search", "foobar").build()).toUtf8();
    MetaData metaData = MetaData.builder().persistentSettings(Settings.builder().put("crate.analysis.custom.analyzer.ft_search", analyzerSettings).build()).build();
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT).nodes(DiscoveryNodes.builder().put(new DiscoveryNode("n1", DummyTransportAddress.INSTANCE, org.elasticsearch.Version.CURRENT)).put(new DiscoveryNode("n2", DummyTransportAddress.INSTANCE, org.elasticsearch.Version.CURRENT)).put(new DiscoveryNode("n3", DummyTransportAddress.INSTANCE, org.elasticsearch.Version.CURRENT)).localNodeId("n1")).metaData(metaData).build();
    e = SQLExecutor.builder(new NoopClusterService(state)).enableDefaultTables().build();
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) MetaData(org.elasticsearch.cluster.metadata.MetaData) DocIndexMetaData(io.crate.metadata.doc.DocIndexMetaData) TestingHelpers.mapToSortedString(io.crate.testing.TestingHelpers.mapToSortedString) NoopClusterService(org.elasticsearch.test.cluster.NoopClusterService) Before(org.junit.Before)

Aggregations

MetaData (org.elasticsearch.cluster.metadata.MetaData): 244
IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData): 223
ClusterState (org.elasticsearch.cluster.ClusterState): 179
RoutingTable (org.elasticsearch.cluster.routing.RoutingTable): 138
RoutingNodes (org.elasticsearch.cluster.routing.RoutingNodes): 52
ShardRouting (org.elasticsearch.cluster.routing.ShardRouting): 48
Settings (org.elasticsearch.common.settings.Settings): 43
AllocationService (org.elasticsearch.cluster.routing.allocation.AllocationService): 32
IndexShardRoutingTable (org.elasticsearch.cluster.routing.IndexShardRoutingTable): 30
DiscoveryNodes (org.elasticsearch.cluster.node.DiscoveryNodes): 27
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 26
ClusterSettings (org.elasticsearch.common.settings.ClusterSettings): 26
Index (org.elasticsearch.index.Index): 25
Matchers.containsString (org.hamcrest.Matchers.containsString): 23
IndexRoutingTable (org.elasticsearch.cluster.routing.IndexRoutingTable): 21
HashMap (java.util.HashMap): 19
HashSet (java.util.HashSet): 18
ShardId (org.elasticsearch.index.shard.ShardId): 17
TestGatewayAllocator (org.elasticsearch.test.gateway.TestGatewayAllocator): 17
ArrayList (java.util.ArrayList): 15