Use of java.util.function.Function in project elasticsearch by elastic.
Class RecoverySourceHandler, method phase1.
/**
 * Perform phase1 of the recovery operations. Once this {@link IndexCommit}
 * snapshot has been performed, no commit operations (files being fsync'd)
 * are effectively allowed on this index until all recovery phases are done.
 * <p>
 * Phase1 examines the segment files on the target node and copies over the
 * segments that are missing. Only segments that have the same size and
 * checksum can be reused.
 */
public void phase1(final IndexCommit snapshot, final Translog.View translogView) {
    cancellableThreads.checkForCancel();
    // Total size of segment files that are recovered
    long totalSize = 0;
    // Total size of segment files that were able to be re-used
    long existingTotalSize = 0;
    final Store store = shard.store();
    store.incRef();
    try {
        StopWatch stopWatch = new StopWatch().start();
        final Store.MetadataSnapshot recoverySourceMetadata;
        try {
            recoverySourceMetadata = store.getMetadata(snapshot);
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            shard.failShard("recovery", ex);
            throw ex;
        }
        for (String name : snapshot.getFileNames()) {
            final StoreFileMetaData md = recoverySourceMetadata.get(name);
            if (md == null) {
                logger.info("Snapshot differs from actual index for file: {} meta: {}", name, recoverySourceMetadata.asMap());
                throw new CorruptIndexException("Snapshot differs from actual index - maybe the index was removed? metadata has "
                    + recoverySourceMetadata.asMap().size() + " files", name);
            }
        }
        // Generate a "diff" of all the identical, different, and missing
        // segment files on the target node, using the existing files on
        // the source node
        String recoverySourceSyncId = recoverySourceMetadata.getSyncId();
        String recoveryTargetSyncId = request.metadataSnapshot().getSyncId();
        final boolean recoverWithSyncId = recoverySourceSyncId != null && recoverySourceSyncId.equals(recoveryTargetSyncId);
        if (recoverWithSyncId) {
            final long numDocsTarget = request.metadataSnapshot().getNumDocs();
            final long numDocsSource = recoverySourceMetadata.getNumDocs();
            if (numDocsTarget != numDocsSource) {
                throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number "
                    + "of docs differ: " + numDocsSource + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsTarget
                    + " (" + request.targetNode().getName() + ")");
            }
            // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target.
            // so we don't return here
            logger.trace("skipping [phase1] - identical sync id [{}] found on both source and target", recoverySourceSyncId);
        } else {
            final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(request.metadataSnapshot());
            for (StoreFileMetaData md : diff.identical) {
                response.phase1ExistingFileNames.add(md.name());
                response.phase1ExistingFileSizes.add(md.length());
                existingTotalSize += md.length();
                if (logger.isTraceEnabled()) {
                    logger.trace("recovery [phase1]: not recovering [{}], exists in local store and has checksum [{}], size [{}]",
                        md.name(), md.checksum(), md.length());
                }
                totalSize += md.length();
            }
            List<StoreFileMetaData> phase1Files = new ArrayList<>(diff.different.size() + diff.missing.size());
            phase1Files.addAll(diff.different);
            phase1Files.addAll(diff.missing);
            for (StoreFileMetaData md : phase1Files) {
                if (request.metadataSnapshot().asMap().containsKey(md.name())) {
                    logger.trace("recovery [phase1]: recovering [{}], exists in local store, but is different: remote [{}], local [{}]",
                        md.name(), request.metadataSnapshot().asMap().get(md.name()), md);
                } else {
                    logger.trace("recovery [phase1]: recovering [{}], does not exist in remote", md.name());
                }
                response.phase1FileNames.add(md.name());
                response.phase1FileSizes.add(md.length());
                totalSize += md.length();
            }
            response.phase1TotalSize = totalSize;
            response.phase1ExistingTotalSize = existingTotalSize;
            logger.trace("recovery [phase1]: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]",
                response.phase1FileNames.size(), new ByteSizeValue(totalSize),
                response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));
            cancellableThreads.execute(() -> recoveryTarget.receiveFileInfo(response.phase1FileNames, response.phase1FileSizes,
                response.phase1ExistingFileNames, response.phase1ExistingFileSizes, translogView.totalOperations()));
            // Factory that wraps each outgoing file in a buffered stream which replicates its chunks to the target
            final Function<StoreFileMetaData, OutputStream> outputStreamFactories =
                md -> new BufferedOutputStream(new RecoveryOutputStream(md, translogView), chunkSizeInBytes);
            sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories);
            // Tell the target to clean up: temporary recovery files are moved into place and files
            // that are not referenced by the snapshot are deleted
            try {
                cancellableThreads.executeIO(() -> recoveryTarget.cleanFiles(translogView.totalOperations(), recoverySourceMetadata));
            } catch (RemoteTransportException | IOException targetException) {
                final IOException corruptIndexException;
                // The target reported corruption; verify the source files first - the corruption
                // may stem from old segments without checksums or with length-only checks
                if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(targetException)) != null) {
                    try {
                        final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot);
                        StoreFileMetaData[] metadata =
                            StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(StoreFileMetaData[]::new);
                        ArrayUtil.timSort(metadata, (o1, o2) -> {
                            // check small files first
                            return Long.compare(o1.length(), o2.length());
                        });
                        for (StoreFileMetaData md : metadata) {
                            cancellableThreads.checkForCancel();
                            logger.debug("checking integrity for file {} after remote corruption exception", md);
                            if (store.checkIntegrityNoException(md) == false) {
                                // we are corrupted on the primary -- fail!
                                shard.failShard("recovery", corruptIndexException);
                                logger.warn("Corrupted file detected {}: checksum mismatch", md);
                                throw corruptIndexException;
                            }
                        }
                    } catch (IOException ex) {
                        targetException.addSuppressed(ex);
                        throw targetException;
                    }
                    // the source files are intact, so corruption has happened on the way to the replica
                    RemoteTransportException exception =
                        new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null);
                    exception.addSuppressed(targetException);
                    logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
                        "{} Remote file corruption during finalization of recovery on node {}. local checksum OK",
                        shard.shardId(), request.targetNode()), corruptIndexException);
                    throw exception;
                } else {
                    throw targetException;
                }
            }
        }
        logger.trace("recovery [phase1]: took [{}]", stopWatch.totalTime());
        response.phase1Time = stopWatch.totalTime().millis();
    } catch (Exception e) {
        throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
    } finally {
        store.decRef();
    }
}
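The Function usage to note here is outputStreamFactories: phase1 hands sendFiles a factory that knows how to open (and buffer) a destination stream for each file, rather than a pre-opened stream. Below is a minimal, self-contained sketch of that shape; the names (FileMetadata, SendFilesSketch) are hypothetical, and the real RecoveryOutputStream, rate limiting, and checksum handling are omitted.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.List;
import java.util.function.Function;

public class SendFilesSketch {
    record FileMetadata(String name, byte[] content) {}

    // The factory decides how to open and wrap the destination for each file;
    // the copy loop stays generic, mirroring sendFiles(store, files, outputStreamFactories).
    static void sendFiles(List<FileMetadata> files,
                          Function<FileMetadata, OutputStream> outputStreamFactory) throws IOException {
        for (FileMetadata md : files) {
            try (InputStream in = new ByteArrayInputStream(md.content());
                 OutputStream out = outputStreamFactory.apply(md)) {
                in.transferTo(out);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        sendFiles(List.of(new FileMetadata("_0.si", "segment".getBytes())), md -> sink);
        System.out.println(sink.size() + " bytes sent"); // 7 bytes sent
    }
}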
Use of java.util.function.Function in project elasticsearch by elastic.
Class ShardSearchRequest, method parseAliasFilter.
/**
* Returns the filter associated with listed filtering aliases.
* <p>
* The list of filtering aliases should be obtained by calling MetaData.filteringAliases.
* Returns <tt>null</tt> if no filtering is required.</p>
*/
static QueryBuilder parseAliasFilter(CheckedFunction<byte[], QueryBuilder, IOException> filterParser,
                                     IndexMetaData metaData, String... aliasNames) {
    if (aliasNames == null || aliasNames.length == 0) {
        return null;
    }
    Index index = metaData.getIndex();
    ImmutableOpenMap<String, AliasMetaData> aliases = metaData.getAliases();
    Function<AliasMetaData, QueryBuilder> parserFunction = (alias) -> {
        if (alias.filter() == null) {
            return null;
        }
        try {
            return filterParser.apply(alias.filter().uncompressed());
        } catch (IOException ex) {
            throw new AliasFilterParsingException(index, alias.getAlias(), "Invalid alias filter", ex);
        }
    };
    if (aliasNames.length == 1) {
        AliasMetaData alias = aliases.get(aliasNames[0]);
        if (alias == null) {
            // This shouldn't happen unless the alias disappeared after filteringAliases was called.
            throw new InvalidAliasNameException(index, aliasNames[0], "Unknown alias name was passed to alias filter");
        }
        return parserFunction.apply(alias);
    } else {
        // we should benchmark this to see whether it makes sense to use an OrFilter here
        BoolQueryBuilder combined = new BoolQueryBuilder();
        for (String aliasName : aliasNames) {
            AliasMetaData alias = aliases.get(aliasName);
            if (alias == null) {
                // This shouldn't happen unless the alias disappeared after filteringAliases was called.
                throw new InvalidAliasNameException(index, aliasName, "Unknown alias name was passed to alias filter");
            }
            QueryBuilder parsedFilter = parserFunction.apply(alias);
            if (parsedFilter != null) {
                combined.should(parsedFilter);
            } else {
                // The filter can be null only if it was removed after filteringAliases was called
                return null;
            }
        }
        return combined;
    }
}
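parserFunction shows the standard trick for using java.util.function.Function with code that throws checked exceptions: the lambda catches the IOException from the CheckedFunction and rethrows it as a domain runtime exception (AliasFilterParsingException above). A stripped-down sketch of the same adapter, with a locally defined CheckedFunction standing in for Elasticsearch's and UncheckedIOException assumed as the wrapper:

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.function.Function;

public class CheckedAdapterSketch {
    // Stand-in for org.elasticsearch.common.CheckedFunction
    @FunctionalInterface
    interface CheckedFunction<T, R, E extends Exception> {
        R apply(T t) throws E;
    }

    // Adapts a throwing function into a plain Function by wrapping the checked exception
    static <T, R> Function<T, R> unchecked(CheckedFunction<T, R, IOException> f) {
        return t -> {
            try {
                return f.apply(t);
            } catch (IOException ex) {
                throw new UncheckedIOException(ex); // parseAliasFilter throws AliasFilterParsingException instead
            }
        };
    }

    public static void main(String[] args) {
        Function<String, Integer> parse = unchecked(s -> {
            if (s.isEmpty()) throw new IOException("empty filter");
            return s.length();
        });
        System.out.println(parse.apply("match_all")); // 9
    }
}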
Use of java.util.function.Function in project elasticsearch by elastic.
Class QueryShardContext, method getLazySearchScript.
/**
* Returns a lazily created {@link SearchScript} that is compiled immediately but can be pulled later once all
* parameters are available.
*/
public final Function<Map<String, Object>, SearchScript> getLazySearchScript(Script script, ScriptContext context) {
    failIfFrozen();
    CompiledScript compile = scriptService.compile(script, context);
    return (p) -> scriptService.search(lookup(), compile, p);
}
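The returned lambda closes over the already-compiled script, so the expensive compile step runs once and eagerly while parameter binding is deferred to each Function.apply call. A small sketch of that compile-once, bind-later shape, with hypothetical Compiled and BoundScript types standing in for CompiledScript and SearchScript:

import java.util.Map;
import java.util.function.Function;

public class LazyScriptSketch {
    record Compiled(String source) {}
    record BoundScript(Compiled compiled, Map<String, Object> params) {}

    static Function<Map<String, Object>, BoundScript> compileLazily(String source) {
        Compiled compiled = new Compiled(source);            // compile immediately (once)
        return params -> new BoundScript(compiled, params);  // bind later (per use)
    }

    public static void main(String[] args) {
        var factory = compileLazily("doc['price'].value * factor");
        BoundScript a = factory.apply(Map.of("factor", 2));
        BoundScript b = factory.apply(Map.of("factor", 3));
        System.out.println(a.compiled() == b.compiled()); // true: compiled exactly once
    }
}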
Use of java.util.function.Function in project vert.x by eclipse.
Class MetricsContextTest, method testHttpClientWebsocket.
private void testHttpClientWebsocket(Function<Vertx, Context> contextFactory, BiConsumer<Thread, Context> checker) throws Exception {
    AtomicReference<Thread> expectedThread = new AtomicReference<>();
    AtomicReference<Context> expectedContext = new AtomicReference<>();
    AtomicBoolean websocketConnected = new AtomicBoolean();
    AtomicBoolean websocketDisconnected = new AtomicBoolean();
    AtomicBoolean socketConnectedCalled = new AtomicBoolean();
    AtomicBoolean socketDisconnectedCalled = new AtomicBoolean();
    AtomicBoolean bytesReadCalled = new AtomicBoolean();
    AtomicBoolean bytesWrittenCalled = new AtomicBoolean();
    AtomicBoolean closeCalled = new AtomicBoolean();
    VertxMetricsFactory factory = (vertx, options) -> new DummyVertxMetrics() {
        @Override
        public HttpClientMetrics createMetrics(HttpClient client, HttpClientOptions options) {
            return new DummyHttpClientMetrics() {
                @Override
                public Void connected(Void endpointMetric, Void socketMetric, WebSocket webSocket) {
                    websocketConnected.set(true);
                    checker.accept(expectedThread.get(), expectedContext.get());
                    return null;
                }
                @Override
                public void disconnected(Void webSocketMetric) {
                    websocketDisconnected.set(true);
                }
                @Override
                public Void connected(SocketAddress remoteAddress, String remoteName) {
                    socketConnectedCalled.set(true);
                    checker.accept(expectedThread.get(), expectedContext.get());
                    return null;
                }
                @Override
                public void disconnected(Void socketMetric, SocketAddress remoteAddress) {
                    socketDisconnectedCalled.set(true);
                    checker.accept(expectedThread.get(), expectedContext.get());
                }
                @Override
                public void bytesRead(Void socketMetric, SocketAddress remoteAddress, long numberOfBytes) {
                    bytesReadCalled.set(true);
                    checker.accept(expectedThread.get(), expectedContext.get());
                }
                @Override
                public void bytesWritten(Void socketMetric, SocketAddress remoteAddress, long numberOfBytes) {
                    bytesWrittenCalled.set(true);
                    checker.accept(expectedThread.get(), expectedContext.get());
                }
                @Override
                public void close() {
                    closeCalled.set(true);
                }
                @Override
                public boolean isEnabled() {
                    return true;
                }
            };
        }
    };
    Vertx vertx = vertx(new VertxOptions().setMetricsOptions(new MetricsOptions().setEnabled(true).setFactory(factory)));
    HttpServer server = vertx.createHttpServer();
    server.websocketHandler(ws -> {
        ws.handler(buf -> {
            ws.write(Buffer.buffer("bye"));
        });
    });
    CountDownLatch latch = new CountDownLatch(1);
    server.listen(8080, "localhost", onSuccess(s -> {
        latch.countDown();
    }));
    awaitLatch(latch);
    Context ctx = contextFactory.apply(vertx);
    ctx.runOnContext(v1 -> {
        expectedThread.set(Thread.currentThread());
        expectedContext.set(Vertx.currentContext());
        HttpClient client = vertx.createHttpClient();
        checker.accept(expectedThread.get(), expectedContext.get());
        client.websocket(8080, "localhost", "/", ws -> {
            ws.handler(buf -> {
                ws.closeHandler(v2 -> {
                    executeInVanillaThread(() -> {
                        client.close();
                        vertx.close(v3 -> {
                            assertTrue(websocketConnected.get());
                            assertTrue(websocketDisconnected.get());
                            assertTrue(socketConnectedCalled.get());
                            assertTrue(socketDisconnectedCalled.get());
                            assertTrue(bytesReadCalled.get());
                            assertTrue(bytesWrittenCalled.get());
                            assertTrue(closeCalled.get());
                            testComplete();
                        });
                    });
                });
                ws.close();
            });
            ws.write(Buffer.buffer("hello"));
        });
    });
    await();
}
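The test body never creates its own Context; it is parameterized by a Function<Vertx, Context> so the same assertions can run under different context types (event-loop, worker, and so on). A minimal illustration of supplying such a factory, using only public Vert.x API (Vertx::getOrCreateContext); the matching checker argument is sketched after the next method.

import io.vertx.core.Context;
import io.vertx.core.Vertx;
import java.util.function.Function;

public class ContextFactorySketch {
    public static void main(String[] args) {
        // One plausible contextFactory argument: reuse or create an event-loop context
        Function<Vertx, Context> eventLoopContextFactory = Vertx::getOrCreateContext;
        Vertx vertx = Vertx.vertx();
        Context ctx = eventLoopContextFactory.apply(vertx);
        ctx.runOnContext(v -> {
            System.out.println("on event loop: " + Context.isOnEventLoopThread()); // true
            vertx.close();
        });
    }
}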
Use of java.util.function.Function in project vert.x by eclipse.
Class MetricsContextTest, method testHttpServerWebsocket.
private void testHttpServerWebsocket(Function<Vertx, Context> contextFactory, BiConsumer<Thread, Context> checker) throws Exception {
    AtomicReference<Thread> expectedThread = new AtomicReference<>();
    AtomicReference<Context> expectedContext = new AtomicReference<>();
    AtomicBoolean websocketConnected = new AtomicBoolean();
    AtomicBoolean websocketDisconnected = new AtomicBoolean();
    AtomicBoolean socketConnectedCalled = new AtomicBoolean();
    AtomicBoolean socketDisconnectedCalled = new AtomicBoolean();
    AtomicBoolean bytesReadCalled = new AtomicBoolean();
    AtomicBoolean bytesWrittenCalled = new AtomicBoolean();
    AtomicBoolean closeCalled = new AtomicBoolean();
    VertxMetricsFactory factory = (vertx, options) -> new DummyVertxMetrics() {
        @Override
        public HttpServerMetrics createMetrics(HttpServer server, SocketAddress localAddress, HttpServerOptions options) {
            return new DummyHttpServerMetrics() {
                @Override
                public Void connected(Void socketMetric, ServerWebSocket serverWebSocket) {
                    websocketConnected.set(true);
                    checker.accept(expectedThread.get(), expectedContext.get());
                    return null;
                }
                @Override
                public void disconnected(Void serverWebSocketMetric) {
                    websocketDisconnected.set(true);
                    checker.accept(expectedThread.get(), expectedContext.get());
                }
                @Override
                public Void connected(SocketAddress remoteAddress, String remoteName) {
                    socketConnectedCalled.set(true);
                    checker.accept(expectedThread.get(), expectedContext.get());
                    return null;
                }
                @Override
                public void disconnected(Void socketMetric, SocketAddress remoteAddress) {
                    socketDisconnectedCalled.set(true);
                    checker.accept(expectedThread.get(), expectedContext.get());
                }
                @Override
                public void bytesRead(Void socketMetric, SocketAddress remoteAddress, long numberOfBytes) {
                    bytesReadCalled.set(true);
                    checker.accept(expectedThread.get(), expectedContext.get());
                }
                @Override
                public void bytesWritten(Void socketMetric, SocketAddress remoteAddress, long numberOfBytes) {
                    bytesWrittenCalled.set(true);
                    checker.accept(expectedThread.get(), expectedContext.get());
                }
                @Override
                public boolean isEnabled() {
                    return true;
                }
                @Override
                public void close() {
                    closeCalled.set(true);
                }
            };
        }
    };
    CountDownLatch latch = new CountDownLatch(1);
    Vertx vertx = vertx(new VertxOptions().setMetricsOptions(new MetricsOptions().setEnabled(true).setFactory(factory)));
    Context ctx = contextFactory.apply(vertx);
    ctx.runOnContext(v1 -> {
        HttpServer server = vertx.createHttpServer().websocketHandler(ws -> {
            ws.handler(buf -> {
                ws.write(Buffer.buffer("bye"));
            });
        });
        server.listen(8080, "localhost", onSuccess(s -> {
            expectedThread.set(Thread.currentThread());
            expectedContext.set(Vertx.currentContext());
            latch.countDown();
        }));
    });
    awaitLatch(latch);
    HttpClient client = vertx.createHttpClient();
    client.websocket(8080, "localhost", "/", ws -> {
        ws.handler(buf -> {
            ws.closeHandler(v -> {
                vertx.close(v4 -> {
                    assertTrue(websocketConnected.get());
                    assertTrue(websocketDisconnected.get());
                    assertTrue(bytesReadCalled.get());
                    assertTrue(bytesWrittenCalled.get());
                    assertTrue(socketConnectedCalled.get());
                    assertTrue(socketDisconnectedCalled.get());
                    assertTrue(closeCalled.get());
                    testComplete();
                });
            });
            ws.close();
        });
        ws.write(Buffer.buffer("hello"));
    });
    await();
}
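The BiConsumer<Thread, Context> checker is the other half of the parameterization: every metrics callback invokes it with the thread and context captured when the test started, so each variant can assert its own expectation. A sketch of one plausible checker; the real test's checkers are not shown in this excerpt.

import io.vertx.core.Context;
import io.vertx.core.Vertx;
import java.util.function.BiConsumer;

public class CheckerSketch {
    // Asserts that the metrics callback runs on the captured thread and context
    static final BiConsumer<Thread, Context> sameThreadChecker = (expectedThread, expectedContext) -> {
        if (expectedThread != Thread.currentThread()) {
            throw new AssertionError("expected " + expectedThread + " but was " + Thread.currentThread());
        }
        if (expectedContext != Vertx.currentContext()) {
            throw new AssertionError("callback ran on an unexpected context");
        }
    };
}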