Use of org.elasticsearch.common.settings.Settings in project crate by crate.
From the class NodeSettingsTest, method createConfigPath:
private String createConfigPath() throws IOException {
    File home = tmp.newFolder("crate");
    File config = tmp.newFolder("crate", "config");
    Settings pathSettings = Settings.builder()
        .put("path.work", tmp.newFolder("crate", "work").getPath())
        .put("path.data", tmp.newFolder("crate", "data").getPath())
        .put("path.logs", tmp.newFolder("crate", "logs").getPath())
        .build();
    try (Writer writer = new FileWriter(Paths.get(config.getPath(), "crate.yml").toFile())) {
        Yaml yaml = new Yaml();
        yaml.dump(pathSettings.getAsMap(), writer);
    }
    return home.getPath();
}
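Settings.getAsMap() returns the settings as a flat map of dotted keys, so the crate.yml written here contains flat entries such as path.data: ... rather than nested sections. A minimal self-contained sketch of the same pattern (the paths are hypothetical):

import java.io.StringWriter;
import org.elasticsearch.common.settings.Settings;
import org.yaml.snakeyaml.Yaml;

public class SettingsYamlSketch {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
            .put("path.data", "/tmp/crate/data")   // hypothetical path
            .put("path.logs", "/tmp/crate/logs")   // hypothetical path
            .build();
        StringWriter writer = new StringWriter();
        // dump the flat key -> value map as YAML, just like createConfigPath() does
        new Yaml().dump(settings.getAsMap(), writer);
        // prints flat dotted keys, e.g. "path.data: /tmp/crate/data"
        System.out.println(writer);
    }
}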
Use of org.elasticsearch.common.settings.Settings in project crate by crate.
From the class BlobPathITest, method testDataIsStoredInGlobalBlobPath:
@Test
public void testDataIsStoredInGlobalBlobPath() throws Exception {
    launchNodeAndInitClient(configureGlobalBlobPath());
    Settings indexSettings = oneShardAndZeroReplicas();
    blobAdminClient.createBlobTable("test", indexSettings).get();
    client.put("test", "abcdefg");
    String digest = "2fb5e13419fc89246865e7a324f476ec624e8740";
    try (Stream<Path> files = Files.walk(globalBlobPath)) {
        assertThat(files.anyMatch(i -> digest.equals(i.getFileName().toString())), is(true));
    }
}
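The configureGlobalBlobPath() and oneShardAndZeroReplicas() helpers are not part of this excerpt. Judging by the statically imported IndexMetaData constants used in the next test, a plausible (hypothetical) reconstruction of the latter is:

// Hypothetical reconstruction; the actual helper lives in BlobPathITest.
private Settings oneShardAndZeroReplicas() {
    return Settings.builder()
        .put(SETTING_NUMBER_OF_SHARDS, 1)
        .put(SETTING_NUMBER_OF_REPLICAS, 0)
        .build();
}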
Use of org.elasticsearch.common.settings.Settings in project crate by crate.
From the class BlobPathITest, method testDataStorageWithMultipleDataPaths:
@Test
public void testDataStorageWithMultipleDataPaths() throws Exception {
    Path data1 = createTempDir("data1");
    Path data2 = createTempDir("data2");
    Settings settings = Settings.builder()
        .put(nodeSettings(0))
        .put("path.data", data1.toString() + "," + data2.toString())
        .build();
    launchNodeAndInitClient(settings);
    Settings indexSettings = Settings.builder()
        .put(SETTING_NUMBER_OF_REPLICAS, 0)
        .put(SETTING_NUMBER_OF_SHARDS, 2)
        .build();
    blobAdminClient.createBlobTable("test", indexSettings).get();
    for (int i = 0; i < 10; i++) {
        client.put("test", "body" + i);
    }
    List<String> data1Files = gatherDigests(data1);
    List<String> data2Files = gatherDigests(data2);
    assertThat(data1Files.size(), Matchers.allOf(lessThan(10), greaterThan(0)));
    assertThat(data2Files.size(), Matchers.allOf(lessThan(10), greaterThan(0)));
    assertThat(data1Files.size() + data2Files.size(), is(10));
}
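The gatherDigests(Path) helper is also not included in this excerpt. Based on the Files.walk pattern used in the previous test, it presumably collects the blob file names (digests) below a data path; a hypothetical sketch, assuming the usual java.nio.file and java.util.stream imports:

// Hypothetical sketch: collect the file names (blob digests) below a data path.
private List<String> gatherDigests(Path dataPath) throws IOException {
    try (Stream<Path> files = Files.walk(dataPath)) {
        return files
            .filter(Files::isRegularFile)
            .map(p -> p.getFileName().toString())
            .collect(Collectors.toList());
    }
}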
Use of org.elasticsearch.common.settings.Settings in project crate by crate.
From the class RecoveryTests, method testPrimaryRelocationWhileIndexing:
@Test
public void testPrimaryRelocationWhileIndexing() throws Exception {
    final int numberOfRelocations = 1;
    final int numberOfWriters = 2;
    final String node1 = internalCluster().startNode();
    BlobAdminClient blobAdminClient = internalCluster().getInstance(BlobAdminClient.class, node1);
    logger.trace("--> creating test index ...");
    Settings indexSettings = Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .build();
    blobAdminClient.createBlobTable("test", indexSettings).get();
    logger.trace("--> starting [node2] ...");
    final String node2 = internalCluster().startNode();
    ensureGreen();
    final AtomicLong idGenerator = new AtomicLong();
    final AtomicLong indexCounter = new AtomicLong();
    final AtomicBoolean stop = new AtomicBoolean(false);
    Thread[] writers = new Thread[numberOfWriters];
    final CountDownLatch stopLatch = new CountDownLatch(writers.length);
    logger.trace("--> starting {} blob upload threads", writers.length);
    final List<String> uploadedDigests = Collections.synchronizedList(new ArrayList<String>(writers.length));
    for (int i = 0; i < writers.length; i++) {
        final int indexerId = i;
        writers[i] = new Thread() {
            @Override
            public void run() {
                try {
                    logger.trace("**** starting blob upload thread {}", indexerId);
                    while (!stop.get()) {
                        long id = idGenerator.incrementAndGet();
                        String digest = uploadFile(internalCluster().client(node1), genFile(id));
                        uploadedDigests.add(digest);
                        indexCounter.incrementAndGet();
                    }
                    logger.trace("**** done indexing thread {}", indexerId);
                } catch (Exception e) {
                    logger.warn("**** failed indexing thread {}", e, indexerId);
                } finally {
                    stopLatch.countDown();
                }
            }
        };
        writers[i].setName("blob-uploader-thread");
        // dispatch threads from parent, ignoring possible leaking threads
        writers[i].setDaemon(true);
        writers[i].start();
    }
    logger.trace("--> waiting for 2 blobs to be uploaded ...");
    while (uploadedDigests.size() < 2) {
        Thread.sleep(10);
    }
    logger.trace("--> 2 blobs uploaded");
    // increase time between chunks in order to make sure that the upload is taking place while relocating
    timeBetweenChunks.set(10);
    logger.trace("--> starting relocations...");
    for (int i = 0; i < numberOfRelocations; i++) {
        String fromNode = (i % 2 == 0) ? node1 : node2;
        String toNode = node1.equals(fromNode) ? node2 : node1;
        logger.trace("--> START relocate the shard from {} to {}", fromNode, toNode);
        internalCluster().client(node1).admin().cluster()
            .prepareReroute()
            .add(new MoveAllocationCommand(new ShardId(BlobIndex.fullIndexName("test"), 0), fromNode, toNode))
            .execute().actionGet();
        ClusterHealthResponse clusterHealthResponse = internalCluster().client(node1).admin().cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForRelocatingShards(0)
            .setTimeout(ACCEPTABLE_RELOCATION_TIME)
            .execute().actionGet();
        assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
        clusterHealthResponse = internalCluster().client(node2).admin().cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForRelocatingShards(0)
            .setTimeout(ACCEPTABLE_RELOCATION_TIME)
            .execute().actionGet();
        assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
        logger.trace("--> DONE relocate the shard from {} to {}", fromNode, toNode);
    }
    logger.trace("--> done relocations");
    logger.trace("--> marking and waiting for upload threads to stop ...");
    timeBetweenChunks.set(0);
    stop.set(true);
    assertThat(stopLatch.await(60, TimeUnit.SECONDS), is(true));
    logger.trace("--> uploading threads stopped");
    logger.trace("--> expected {} got {}", indexCounter.get(), uploadedDigests.size());
    assertEquals(indexCounter.get(), uploadedDigests.size());
    BlobIndicesService blobIndicesService = internalCluster().getInstance(BlobIndicesService.class, node2);
    for (String digest : uploadedDigests) {
        BlobShard blobShard = blobIndicesService.localBlobShard(BlobIndex.fullIndexName("test"), digest);
        long length = blobShard.blobContainer().getFile(digest).length();
        assertThat(length, greaterThanOrEqualTo(1L));
    }
    for (Thread writer : writers) {
        writer.join(6000);
    }
}
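The uploadFile(...) and genFile(...) helpers of RecoveryTests are not part of this excerpt. The digests used throughout these blob tests are SHA-1 hex digests of the uploaded content; the hard-coded digest asserted in testDataIsStoredInGlobalBlobPath above appears to be the SHA-1 of "abcdefg". A self-contained sketch of computing such a digest with plain JDK classes:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public class Sha1Sketch {
    // compute the lowercase SHA-1 hex digest of a string, the scheme blobs are addressed by
    static String sha1Hex(String content) throws Exception {
        byte[] hash = MessageDigest.getInstance("SHA-1")
            .digest(content.getBytes(StandardCharsets.UTF_8));
        StringBuilder sb = new StringBuilder();
        for (byte b : hash) {
            sb.append(String.format("%02x", b));
        }
        return sb.toString();
    }

    public static void main(String[] args) throws Exception {
        System.out.println(sha1Hex("abcdefg"));
    }
}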
Use of org.elasticsearch.common.settings.Settings in project crate by crate.
From the class LuceneQueryBuilderTest, method prepare:
@Before
public void prepare() throws Exception {
    DocTableInfo users = TestingTableInfo.builder(new TableIdent(null, "users"), null)
        .add("name", DataTypes.STRING)
        .add("x", DataTypes.INTEGER)
        .add("d", DataTypes.DOUBLE)
        .add("d_array", new ArrayType(DataTypes.DOUBLE))
        .add("y_array", new ArrayType(DataTypes.LONG))
        .add("shape", DataTypes.GEO_SHAPE)
        .add("point", DataTypes.GEO_POINT)
        .build();
    TableRelation usersTr = new TableRelation(users);
    sources = ImmutableMap.of(new QualifiedName("users"), usersTr);
    expressions = new SqlExpressions(sources, usersTr);
    builder = new LuceneQueryBuilder(expressions.getInstance(Functions.class));
    indexCache = mock(IndexCache.class, Answers.RETURNS_MOCKS.get());
    Path tempDir = createTempDir();
    Settings indexSettings = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put("path.home", tempDir)
        .build();
    Index index = new Index(users.ident().indexName());
    when(indexCache.indexSettings()).thenReturn(indexSettings);
    AnalysisService analysisService = createAnalysisService(indexSettings, index);
    mapperService = createMapperService(index, indexSettings, analysisService);
    // @formatter:off
    XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
        .startObject()
            .startObject("default")
                .startObject("properties")
                    .startObject("name").field("type", "string").endObject()
                    .startObject("x").field("type", "integer").endObject()
                    .startObject("d").field("type", "double").endObject()
                    .startObject("point").field("type", "geo_point").endObject()
                    .startObject("shape").field("type", "geo_shape").endObject()
                    .startObject("d_array")
                        .field("type", "array")
                        .startObject("inner").field("type", "double").endObject()
                    .endObject()
                    .startObject("y_array")
                        .field("type", "array")
                        .startObject("inner").field("type", "integer").endObject()
                    .endObject()
                .endObject()
            .endObject()
        .endObject();
    // @formatter:on
    mapperService.merge("default", new CompressedXContent(xContentBuilder.bytes()), MapperService.MergeReason.MAPPING_UPDATE, true);
    indexFieldDataService = mock(IndexFieldDataService.class);
    IndexFieldData geoFieldData = mock(IndexGeoPointFieldData.class);
    when(geoFieldData.getFieldNames()).thenReturn(new MappedFieldType.Names("point"));
    when(indexFieldDataService.getForField(mapperService.smartNameFieldType("point"))).thenReturn(geoFieldData);
}
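For readability, the mapping assembled by the XContentBuilder chain above corresponds to this JSON document:

{
  "default": {
    "properties": {
      "name":    {"type": "string"},
      "x":       {"type": "integer"},
      "d":       {"type": "double"},
      "point":   {"type": "geo_point"},
      "shape":   {"type": "geo_shape"},
      "d_array": {"type": "array", "inner": {"type": "double"}},
      "y_array": {"type": "array", "inner": {"type": "integer"}}
    }
  }
}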