Use of org.elasticsearch.common.Nullable in project elasticsearch by elastic.
The class CorruptedFileIT, method testCorruptFileAndRecover:
/**
 * Tests that we can actually recover from a corruption on the primary given that we have replica shards around.
 */
public void testCorruptFileAndRecover() throws ExecutionException, InterruptedException, IOException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    // have enough space for 3 copies
    internalCluster().ensureAtLeastNumDataNodes(3);
    if (cluster().numDataNodes() == 3) {
        logger.info("--> cluster has [3] data nodes, corrupted primary will be overwritten");
    }
    assertThat(cluster().numDataNodes(), greaterThanOrEqualTo(3));
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
        .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
        // no checkindex - we corrupt shards on purpose
        .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
        // no translog based flush - it might change the .liv / segments.N files
        .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))));
    ensureGreen();
    disableAllocation("test");
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
    // we have to flush at least once here since we don't corrupt the translog
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    final int numShards = numShards("test");
    ShardRouting corruptedShardRouting = corruptRandomPrimaryFile();
    logger.info("--> {} corrupted", corruptedShardRouting);
    enableAllocation("test");
    /*
     * we corrupted the primary shard - now let's make sure we never recover from it successfully
     */
    Settings build = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "2").build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    ClusterHealthResponse health = client().admin().cluster().health(Requests.clusterHealthRequest("test")
        .waitForGreenStatus()
        // sometimes, due to cluster rebalancing and random settings, the default timeout is just not enough
        .timeout("5m")
        .waitForNoRelocatingShards(true)).actionGet();
    if (health.isTimedOut()) {
        logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get());
        assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false));
    }
    assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    final int numIterations = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIterations; i++) {
        SearchResponse response = client().prepareSearch().setSize(numDocs).get();
        assertHitCount(response, numDocs);
    }
    /*
     * now hook into the IndicesService and register a close listener to
     * run the checkindex. if the corruption is still there we will catch it.
     */
    // primary + 2 replicas
    final CountDownLatch latch = new CountDownLatch(numShards * 3);
    final CopyOnWriteArrayList<Exception> exception = new CopyOnWriteArrayList<>();
    final IndexEventListener listener = new IndexEventListener() {
        @Override
        public void afterIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard, Settings indexSettings) {
            if (indexShard != null) {
                Store store = indexShard.store();
                store.incRef();
                try {
                    if (!Lucene.indexExists(store.directory()) && indexShard.state() == IndexShardState.STARTED) {
                        return;
                    }
                    try (CheckIndex checkIndex = new CheckIndex(store.directory())) {
                        BytesStreamOutput os = new BytesStreamOutput();
                        PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
                        checkIndex.setInfoStream(out);
                        out.flush();
                        CheckIndex.Status status = checkIndex.checkIndex();
                        if (!status.clean) {
                            logger.warn("check index [failure]\n{}", os.bytes().utf8ToString());
                            throw new IOException("index check failure");
                        }
                    }
                } catch (Exception e) {
                    exception.add(e);
                } finally {
                    store.decRef();
                    latch.countDown();
                }
            }
        }
    };
    for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
        eventListener.setNewDelegate(listener);
    }
    try {
        client().admin().indices().prepareDelete("test").get();
        latch.await();
        assertThat(exception, empty());
    } finally {
        for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
            eventListener.setNewDelegate(null);
        }
    }
}
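The verification hook above is ordinary Lucene CheckIndex usage, just wired into a shard-close listener. For reference, a minimal standalone sketch of the same check against an arbitrary index directory; the class name and the /tmp path are illustrative, not part of the test:

import java.io.IOException;
import java.nio.file.Paths;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class CheckIndexSketch {
    public static void main(String[] args) throws IOException {
        // hypothetical index location, for illustration only
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/my-index"));
             CheckIndex checkIndex = new CheckIndex(dir)) {
            CheckIndex.Status status = checkIndex.checkIndex();
            if (!status.clean) {
                // a corrupted segment was found; the test above turns this into a test failure
                throw new IOException("index check failure");
            }
        }
    }
}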
Use of org.elasticsearch.common.Nullable in project crate by crate.
The class PluginLoader, method findImplementations:
@Nullable
private Collection<Class<? extends Plugin>> findImplementations(Collection<URL> pluginUrls) {
    URL[] urls = pluginUrls.toArray(new URL[pluginUrls.size()]);
    ClassLoader loader = URLClassLoader.newInstance(urls, getClass().getClassLoader());
    ResourceFinder finder = new ResourceFinder(RESOURCE_PATH, loader, urls);
    try {
        return finder.findAllImplementations(Plugin.class);
    } catch (ClassCastException e) {
        logger.error("plugin does not implement io.crate.Plugin interface", e);
    } catch (ClassNotFoundException e) {
        logger.error("error while loading plugin, misconfigured plugin", e);
    } catch (Throwable t) {
        logger.error("error while loading plugins", t);
    }
    return null;
}
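Because the method is annotated @Nullable and returns null on any loading failure, callers have to check for null explicitly. A minimal sketch of how such a caller-side guard could look; this wrapper is hypothetical and not part of crate:

// hypothetical convenience wrapper, for illustration only
private Collection<Class<? extends Plugin>> findImplementationsOrEmpty(Collection<URL> pluginUrls) {
    Collection<Class<? extends Plugin>> implementations = findImplementations(pluginUrls);
    if (implementations == null) {
        // the failure was already logged by findImplementations; continue without plugins
        return java.util.Collections.emptyList();
    }
    return implementations;
}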
Use of org.elasticsearch.common.Nullable in project crate by crate.
The class PartitionName, method encodeIdent:
@Nullable
public static String encodeIdent(Collection<? extends BytesRef> values) {
    if (values.size() == 0) {
        return null;
    }
    BytesStreamOutput streamOutput = new BytesStreamOutput(estimateSize(values));
    try {
        streamOutput.writeVInt(values.size());
        for (BytesRef value : values) {
            StringType.INSTANCE.streamer().writeValueTo(streamOutput, value);
        }
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
    String identBase32 = BASE32.encodeAsString(streamOutput.bytes().toBytes()).toLowerCase(Locale.ROOT);
    // decode doesn't need padding, remove it
    int idx = identBase32.indexOf('=');
    if (idx > -1) {
        return identBase32.substring(0, idx);
    }
    return identBase32;
}
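The lowercasing and padding removal still round-trip: a standard Base32 decoder accepts the ident without restored '=' padding as long as it is upper-cased again first. A minimal sketch of that round trip, assuming BASE32 behaves like a commons-codec Base32 encoder; the class name and sample payload are illustrative, not crate code:

import java.nio.charset.StandardCharsets;
import java.util.Locale;
import org.apache.commons.codec.binary.Base32;

public class Base32IdentSketch {
    public static void main(String[] args) {
        Base32 base32 = new Base32();
        byte[] payload = "p1".getBytes(StandardCharsets.UTF_8);
        // encode, lower-case, and strip the '=' padding, as encodeIdent does
        String ident = base32.encodeAsString(payload).toLowerCase(Locale.ROOT);
        int idx = ident.indexOf('=');
        if (idx > -1) {
            ident = ident.substring(0, idx);
        }
        // decoding works without padding; upper-case first since the decoder expects the standard alphabet
        byte[] roundTripped = base32.decode(ident.toUpperCase(Locale.ROOT));
        System.out.println(ident + " -> " + new String(roundTripped, StandardCharsets.UTF_8));
    }
}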
Use of org.elasticsearch.common.Nullable in project crate by crate.
The class NodeStatsCollectSource, method nodeIds:
@Nullable
static Collection<DiscoveryNode> nodeIds(WhereClause whereClause, Collection<DiscoveryNode> nodes, Functions functions) {
    if (!whereClause.hasQuery()) {
        return nodes;
    }
    LocalSysColReferenceResolver localSysColReferenceResolver = new LocalSysColReferenceResolver(
        ImmutableList.of(SysNodesTableInfo.Columns.NAME, SysNodesTableInfo.Columns.ID));
    EvaluatingNormalizer normalizer = new EvaluatingNormalizer(functions, RowGranularity.DOC, ReplaceMode.COPY, localSysColReferenceResolver, null);
    List<DiscoveryNode> newNodes = new ArrayList<>();
    for (DiscoveryNode node : nodes) {
        String nodeId = node.getId();
        for (RowCollectExpression<NodeStatsContext, ?> expression : localSysColReferenceResolver.expressions()) {
            expression.setNextRow(new NodeStatsContext(nodeId, node.name()));
        }
        Symbol normalized = normalizer.normalize(whereClause.query(), null);
        if (normalized.equals(whereClause.query())) {
            // No local available sys nodes columns in where clause
            return nodes;
        }
        if (WhereClause.canMatch(normalized)) {
            newNodes.add(node);
        }
    }
    return newNodes;
}
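The subtle part is the early return: if normalizing the query against a node's sys columns changes nothing, the query references no per-node column at all, so pruning is impossible and every node is kept. A generic sketch of just that control flow, with crate's types replaced by hypothetical type parameters and functional interfaces:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.function.BiFunction;
import java.util.function.Predicate;

public class NodeFilterSketch {
    // mirrors nodeIds above: normalize the query per node, fall back to all nodes when
    // normalization changes nothing, otherwise keep only nodes where the query can still match
    static <N, Q> Collection<N> filterNodes(Collection<N> nodes, Q query,
                                            BiFunction<Q, N, Q> normalizePerNode,
                                            Predicate<Q> canMatch) {
        List<N> matching = new ArrayList<>();
        for (N node : nodes) {
            Q normalized = normalizePerNode.apply(query, node);
            if (normalized.equals(query)) {
                // query references no per-node column, so it cannot prune anything
                return nodes;
            }
            if (canMatch.test(normalized)) {
                matching.add(node);
            }
        }
        return matching;
    }
}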
Use of org.elasticsearch.common.Nullable in project elasticsearch-skywalker by jprante.
The class Skywalker, method loadIndex:
@Nullable
private static IndexMetaData loadIndex(List<File> files, String index, NodeEnvironment nodeEnv) {
    long highestVersion = -1;
    IndexMetaData indexMetaData = null;
    for (File indexLocation : nodeEnv.indexLocations(new Index(index))) {
        File stateDir = new File(indexLocation, "_state");
        if (!stateDir.exists() || !stateDir.isDirectory()) {
            continue;
        }
        // now, iterate over the current versions, and find latest one
        File[] stateFiles = stateDir.listFiles();
        if (stateFiles == null) {
            continue;
        }
        for (File stateFile : stateFiles) {
            if (!stateFile.getName().startsWith("state-")) {
                continue;
            }
            files.add(stateFile);
            try {
                long version = Long.parseLong(stateFile.getName().substring("state-".length()));
                if (version > highestVersion) {
                    byte[] data = Streams.copyToByteArray(new FileInputStream(stateFile));
                    if (data.length == 0) {
                        continue;
                    }
                    XContentParser parser = null;
                    try {
                        parser = XContentHelper.createParser(data, 0, data.length);
                        // move to START_OBJECT
                        parser.nextToken();
                        indexMetaData = IndexMetaData.Builder.fromXContent(parser);
                        highestVersion = version;
                    } finally {
                        if (parser != null) {
                            parser.close();
                        }
                    }
                }
            } catch (Exception e) {
                continue;
            }
        }
    }
    return indexMetaData;
}
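The version handling is worth isolating: candidate files are named state-<version>, and only the highest version is parsed into metadata. A minimal sketch of just that selection step over a plain directory; the class and method names are illustrative, not part of the project:

import java.io.File;

public class LatestStateFileSketch {
    // picks the state-<version> file with the highest numeric version, mirroring loadIndex above
    static File latestStateFile(File stateDir) {
        File[] stateFiles = stateDir.listFiles();
        if (stateFiles == null) {
            return null;
        }
        long highestVersion = -1;
        File latest = null;
        for (File stateFile : stateFiles) {
            String name = stateFile.getName();
            if (!name.startsWith("state-")) {
                continue;
            }
            try {
                long version = Long.parseLong(name.substring("state-".length()));
                if (version > highestVersion) {
                    highestVersion = version;
                    latest = stateFile;
                }
            } catch (NumberFormatException e) {
                // ignore files with a non-numeric suffix
            }
        }
        return latest;
    }
}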