Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class IndicesService, method processPendingDeletes.
/**
 * Processes all pending deletes for the given index. This method acquires all locks for the index and
 * processes all pending deletes for it. Pending deletes can occur when the OS doesn't allow deletion of files
 * because they are still in use by a different process, i.e. on Windows where files might still be open by a
 * virus scanner. On a shared filesystem a replica might not have been closed when the primary is deleted,
 * causing problems on delete calls, so we schedule their deletes to run later.
 * @param index the index to process the pending deletes for
 * @param indexSettings the settings of the index to process the pending deletes for
 * @param timeout the timeout used for processing pending deletes
 */
@Override
public void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeout)
        throws IOException, InterruptedException, ShardLockObtainFailedException {
    logger.debug("{} processing pending deletes", index);
    final long startTimeNS = System.nanoTime();
    final List<ShardLock> shardLocks = nodeEnv.lockAllForIndex(index, indexSettings, timeout.millis());
    int numRemoved = 0;
    try {
        Map<ShardId, ShardLock> locks = new HashMap<>();
        for (ShardLock lock : shardLocks) {
            locks.put(lock.getShardId(), lock);
        }
        final List<PendingDelete> remove;
        synchronized (pendingDeletes) {
            remove = pendingDeletes.remove(index);
        }
        if (remove != null && remove.isEmpty() == false) {
            numRemoved = remove.size();
            // make sure we delete indices first
            CollectionUtil.timSort(remove);
            // cap the retry sleep at 10 sec
            final long maxSleepTimeMs = 10 * 1000;
            long sleepTime = 10;
            do {
                if (remove.isEmpty()) {
                    break;
                }
                Iterator<PendingDelete> iterator = remove.iterator();
                while (iterator.hasNext()) {
                    PendingDelete delete = iterator.next();
                    if (delete.deleteIndex) {
                        assert delete.shardId == -1;
                        logger.debug("{} deleting index store reason [{}]", index, "pending delete");
                        try {
                            nodeEnv.deleteIndexDirectoryUnderLock(index, indexSettings);
                            iterator.remove();
                        } catch (IOException ex) {
                            logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} retry pending delete", index), ex);
                        }
                    } else {
                        assert delete.shardId != -1;
                        ShardLock shardLock = locks.get(new ShardId(delete.index, delete.shardId));
                        if (shardLock != null) {
                            try {
                                deleteShardStore("pending delete", shardLock, delete.settings);
                                iterator.remove();
                            } catch (IOException ex) {
                                logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} retry pending delete", shardLock.getShardId()), ex);
                            }
                        } else {
                            logger.warn("{} no shard lock for pending delete", delete.shardId);
                            iterator.remove();
                        }
                    }
                }
                if (remove.isEmpty() == false) {
                    logger.warn("{} still pending deletes present for shards {} - retrying", index, remove.toString());
                    Thread.sleep(sleepTime);
                    // increase the sleep time gradually, capped at maxSleepTimeMs
                    sleepTime = Math.min(maxSleepTimeMs, sleepTime * 2);
                    logger.debug("{} schedule pending delete retry after {} ms", index, sleepTime);
                }
            } while ((System.nanoTime() - startTimeNS) < timeout.nanos());
        }
    } finally {
        IOUtils.close(shardLocks);
        if (numRemoved > 0) {
            int remainingUncompletedDeletes = numUncompletedDeletes.addAndGet(-numRemoved);
            assert remainingUncompletedDeletes >= 0;
        }
    }
}
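The catch blocks above use the Supplier idiom to defer message construction: the lambda, and therefore the ParameterizedMessage and its argument formatting, is only evaluated when the DEBUG level is actually enabled. A minimal sketch of that idiom in isolation (the class name and the logRetry helper are hypothetical, not from the Elasticsearch source):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    void logRetry(Object index, Exception ex) {
        // the cast to Supplier<?> selects the debug(Supplier<?>, Throwable)
        // overload; the message is never built if DEBUG is disabled
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} retry pending delete", index), ex);
    }
}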
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class Translog, method recoverFromFiles.
/** recover all translog files found on disk */
private ArrayList<TranslogReader> recoverFromFiles(TranslogGeneration translogGeneration, Checkpoint checkpoint) throws IOException {
    boolean success = false;
    ArrayList<TranslogReader> foundTranslogs = new ArrayList<>();
    // a temp file to copy the checkpoint to - note it must be on the same FS, otherwise the atomic move won't work
    final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX);
    boolean tempFileRenamed = false;
    try (ReleasableLock lock = writeLock.acquire()) {
        logger.debug("open uncommitted translog checkpoint {}", checkpoint);
        final String checkpointTranslogFile = getFilename(checkpoint.generation);
        for (long i = translogGeneration.translogFileGeneration; i < checkpoint.generation; i++) {
            Path committedTranslogFile = location.resolve(getFilename(i));
            if (Files.exists(committedTranslogFile) == false) {
                throw new IllegalStateException("translog file doesn't exist with generation: " + i
                        + " lastCommitted: " + lastCommittedTranslogFileGeneration
                        + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive");
            }
            final TranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
            foundTranslogs.add(reader);
            logger.debug("recovered local translog from checkpoint {}", checkpoint);
        }
        foundTranslogs.add(openReader(location.resolve(checkpointTranslogFile), checkpoint));
        Path commitCheckpoint = location.resolve(getCommitCheckpointFileName(checkpoint.generation));
        if (Files.exists(commitCheckpoint)) {
            Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint);
            if (checkpoint.equals(checkpointFromDisk) == false) {
                throw new IllegalStateException("Checkpoint file " + commitCheckpoint.getFileName()
                        + " already exists but has corrupted content: expected " + checkpoint
                        + " but got " + checkpointFromDisk);
            }
        } else {
            // we first copy this into the temp file, fsync it, and then atomically move it into the target file -
            // that way, if we hit a disk-full condition here, we are still in a consistent state.
            Files.copy(location.resolve(CHECKPOINT_FILE_NAME), tempFile, StandardCopyOption.REPLACE_EXISTING);
            IOUtils.fsync(tempFile, false);
            Files.move(tempFile, commitCheckpoint, StandardCopyOption.ATOMIC_MOVE);
            tempFileRenamed = true;
            // we only fsync the directory here; the tempFile was already fsynced above
            IOUtils.fsync(commitCheckpoint.getParent(), true);
        }
        success = true;
    } finally {
        if (success == false) {
            IOUtils.closeWhileHandlingException(foundTranslogs);
        }
        if (tempFileRenamed == false) {
            try {
                Files.delete(tempFile);
            } catch (IOException ex) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex);
            }
        }
    }
    return foundTranslogs;
}
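The else branch above follows a common durability pattern: copy to a temp file on the same filesystem, fsync the file, atomically rename it into place, then fsync the parent directory so the rename itself survives a crash. A standalone sketch of the same pattern using plain NIO (AtomicCopy is a hypothetical helper; the directory fsync shown works on POSIX filesystems, which is the platform detail Lucene's IOUtils.fsync papers over for Elasticsearch):

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

public final class AtomicCopy {
    // copy source to target so that a crash at any point leaves either the
    // old target or the complete new one, never a torn file
    static void atomicCopy(Path source, Path target) throws IOException {
        // the temp file must live in the target directory so the final move
        // stays on one filesystem and ATOMIC_MOVE can succeed
        Path temp = Files.createTempFile(target.getParent(), "tmp-", ".copy");
        boolean moved = false;
        try {
            Files.copy(source, temp, StandardCopyOption.REPLACE_EXISTING);
            // fsync the temp file before the rename so the target can never
            // point at unwritten data
            try (FileChannel ch = FileChannel.open(temp, StandardOpenOption.WRITE)) {
                ch.force(false);
            }
            Files.move(temp, target, StandardCopyOption.ATOMIC_MOVE);
            moved = true;
            // fsync the directory to make the rename itself durable (POSIX only)
            try (FileChannel dir = FileChannel.open(target.getParent(), StandardOpenOption.READ)) {
                dir.force(true);
            }
        } finally {
            if (moved == false) {
                Files.deleteIfExists(temp);
            }
        }
    }
}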
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class ClusterServiceIT, method testAckedUpdateTaskSameClusterState.
public void testAckedUpdateTaskSameClusterState() throws Exception {
    internalCluster().startNode();
    ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
    final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
    final AtomicBoolean ackTimeout = new AtomicBoolean(false);
    final AtomicBoolean onFailure = new AtomicBoolean(false);
    final AtomicBoolean executed = new AtomicBoolean(false);
    final CountDownLatch latch = new CountDownLatch(1);
    final CountDownLatch processedLatch = new CountDownLatch(1);
    clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask<Void>(null, null) {

        @Override
        protected Void newResponse(boolean acknowledged) {
            return null;
        }

        @Override
        public void onAllNodesAcked(@Nullable Exception e) {
            allNodesAcked.set(true);
            latch.countDown();
        }

        @Override
        public void onAckTimeout() {
            ackTimeout.set(true);
            latch.countDown();
        }

        @Override
        public TimeValue ackTimeout() {
            return TimeValue.timeValueSeconds(10);
        }

        @Override
        public TimeValue timeout() {
            return TimeValue.timeValueSeconds(10);
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            processedLatch.countDown();
        }

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            executed.set(true);
            return currentState;
        }

        @Override
        public void onFailure(String source, Exception e) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e);
            onFailure.set(true);
            latch.countDown();
        }
    });
    ensureGreen();
    assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
    assertThat(allNodesAcked.get(), equalTo(true));
    assertThat(ackTimeout.get(), equalTo(false));
    assertThat(executed.get(), equalTo(true));
    assertThat(onFailure.get(), equalTo(false));
    assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
}
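The onFailure callback uses the same lazy Supplier idiom as the IndicesService snippet; the ParameterizedMessage itself does no work until it is formatted. A short demonstration of that deferred substitution (MessageDemo is a hypothetical class, not part of the test):

import org.apache.logging.log4j.message.ParameterizedMessage;

public class MessageDemo {
    public static void main(String[] args) {
        // construction only stores the pattern and arguments; the {}
        // placeholder is substituted when getFormattedMessage() is called
        ParameterizedMessage m = new ParameterizedMessage("failed to execute callback in test {}", "test");
        System.out.println(m.getFormattedMessage());
        // prints: failed to execute callback in test test
    }
}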
Use of org.apache.logging.log4j.message.ParameterizedMessage in project geode by apache.
The class LogWriterAppenderJUnitTest, method testLogWriterLevels.
/**
 * Verifies that logging occurs at the levels set in the LogWriter
 */
@Test
public void testLogWriterLevels() throws IOException {
    final String loggerName = LogService.MAIN_LOGGER_NAME; // this.getClass().getName();
    // Force logging to be initialized
    LogService.getLogger();
    // Create the LogWriterLogger that will be attached to the appender
    final LogWriterLogger logWriterLogger = LogWriterLogger.create(loggerName, false);
    logWriterLogger.setLevel(Level.INFO);
    // Create the appender
    final StringWriter stringWriter = new StringWriter();
    final PureLogWriter logWriter = new PureLogWriter(InternalLogWriter.FINEST_LEVEL, new PrintWriter(stringWriter), "");
    final AppenderContext[] contexts = new AppenderContext[2];
    contexts[0] = LogService.getAppenderContext(); // root context
    contexts[1] = LogService.getAppenderContext(LogService.BASE_LOGGER_NAME); // "org.apache" context
    this.appender = LogWriterAppender.create(contexts, loggerName, logWriter, null);
    logWriter.finest("DIRECT MESSAGE");
    assertTrue(Pattern.compile(".*\\[finest .*DIRECT MESSAGE.*", Pattern.DOTALL).matcher(stringWriter.toString()).matches());
    stringWriter.getBuffer().setLength(0);
    LogEvent event = Log4jLogEvent.newBuilder().setLevel(Level.INFO).setLoggerFqcn("NAME").setLoggerName("NAME").setMessage(new ParameterizedMessage("LOGEVENT MESSAGE")).build();
    this.appender.append(event);
    assertTrue(Pattern.compile(".*\\[info .*LOGEVENT MESSAGE.*", Pattern.DOTALL).matcher(stringWriter.toString()).matches());
    stringWriter.getBuffer().setLength(0);
    logWriterLogger.finest("FINEST MESSAGE");
    assertFalse(Pattern.compile(".*\\[finest .*FINEST MESSAGE.*", Pattern.DOTALL).matcher(stringWriter.toString()).matches());
    stringWriter.getBuffer().setLength(0);
    logWriterLogger.fine("FINE MESSAGE");
    assertFalse(Pattern.compile(".*\\[fine .*FINE MESSAGE.*", Pattern.DOTALL).matcher(stringWriter.toString()).matches());
    stringWriter.getBuffer().setLength(0);
    logWriterLogger.info("INFO MESSAGE");
    assertTrue(stringWriter.toString(), Pattern.compile(".*\\[info .*INFO MESSAGE.*", Pattern.DOTALL).matcher(stringWriter.toString()).matches());
    stringWriter.getBuffer().setLength(0);
    // Change the level
    logWriterLogger.setLevel(Level.DEBUG);
    logWriterLogger.finest("FINEST MESSAGE");
    assertFalse(Pattern.compile(".*\\[finest .*FINEST MESSAGE.*", Pattern.DOTALL).matcher(stringWriter.toString()).matches());
    stringWriter.getBuffer().setLength(0);
    logWriterLogger.fine("FINE MESSAGE");
    assertTrue(Pattern.compile(".*\\[fine .*FINE MESSAGE.*", Pattern.DOTALL).matcher(stringWriter.toString()).matches());
    stringWriter.getBuffer().setLength(0);
    logWriterLogger.info("INFO MESSAGE");
    assertTrue(Pattern.compile(".*\\[info .*INFO MESSAGE.*", Pattern.DOTALL).matcher(stringWriter.toString()).matches());
    stringWriter.getBuffer().setLength(0);
    this.appender.destroy();
}
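Note how the test drives the appender directly by constructing a synthetic LogEvent rather than logging through a Logger, which lets it assert on the appender's output in isolation. The same Log4jLogEvent builder pattern, extracted as a sketch (appendDirectly is a hypothetical helper; any org.apache.logging.log4j.core.Appender will do):

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.Appender;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.impl.Log4jLogEvent;
import org.apache.logging.log4j.message.ParameterizedMessage;

static void appendDirectly(Appender appender) {
    LogEvent event = Log4jLogEvent.newBuilder()
            .setLevel(Level.INFO)
            .setLoggerFqcn("NAME")
            .setLoggerName("NAME")
            .setMessage(new ParameterizedMessage("LOGEVENT {}", "MESSAGE"))
            .build();
    // bypasses the level filtering a Logger would normally apply
    appender.append(event);
}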
Use of org.apache.logging.log4j.message.ParameterizedMessage in project logging-log4j2 by apache.
The class TraceLoggingTest, method testTraceEntryExit.
@Test
public void testTraceEntryExit() {
    currentLevel = Level.TRACE;
    final FlowMessageFactory fact = new DefaultFlowMessageFactory();
    final ParameterizedMessage paramMsg = new ParameterizedMessage("Tracy {}", "Logan");
    currentEvent = new LogEvent(ENTRY_MARKER.getName(), fact.newEntryMessage(paramMsg), null);
    final EntryMessage entry = traceEntry("Tracy {}", "Logan");
    final ReusableParameterizedMessage msg = ReusableParameterizedMessageTest.set(new ReusableParameterizedMessage(), "Tracy {}", "Logan");
    ReusableParameterizedMessageTest.set(msg, "Some other message {}", 123);
    currentEvent = new LogEvent(null, msg, null);
    trace("Some other message {}", 123);
    // ensure the original entry message is not overwritten
    assertEquals("Tracy Logan", entry.getMessage().getFormattedMessage());
    currentEvent = new LogEvent(EXIT_MARKER.getName(), fact.newExitMessage(entry), null);
    traceExit(entry);
    // ensure the original entry message is not overwritten
    assertEquals("Tracy Logan", entry.getMessage().getFormattedMessage());
}
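Outside this test harness, the same entry/exit flow tracing is available on the public Logger API: traceEntry applies ParameterizedMessage-style {} substitution and returns the EntryMessage that traceExit pairs with. A minimal sketch (FlowTraceSketch and greet are hypothetical names):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.EntryMessage;

public class FlowTraceSketch {
    private static final Logger logger = LogManager.getLogger(FlowTraceSketch.class);

    String greet(String name) {
        // logs an ENTER event with the formatted message and keeps the
        // EntryMessage so the matching EXIT event can reference it
        EntryMessage entry = logger.traceEntry("Tracy {}", name);
        String result = "Hello " + name;
        // logs the EXIT event and passes the return value through
        return logger.traceExit(entry, result);
    }
}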