
Example 26 with FutureCallback

use of com.google.common.util.concurrent.FutureCallback in project cassandra by apache.

the class LocalSessions method handlePrepareMessage.

/**
 * The PrepareConsistentRequest promotes the parent repair session to a consistent incremental
 * session, and isolates the data to be repaired from the rest of the table's data.
 *
 * No response is sent to the repair coordinator until the data preparation / isolation has completed
 * successfully. If the data preparation fails, a failure message is sent to the coordinator,
 * cancelling the session.
 */
public void handlePrepareMessage(InetAddressAndPort from, PrepareConsistentRequest request) {
    logger.trace("received {} from {}", request, from);
    UUID sessionID = request.parentSession;
    InetAddressAndPort coordinator = request.coordinator;
    Set<InetAddressAndPort> peers = request.participants;
    ActiveRepairService.ParentRepairSession parentSession;
    try {
        parentSession = getParentRepairSession(sessionID);
    } catch (Throwable e) {
        logger.error("Error retrieving ParentRepairSession for session {}, responding with failure", sessionID);
        sendMessage(coordinator, Message.out(PREPARE_CONSISTENT_RSP, new PrepareConsistentResponse(sessionID, getBroadcastAddressAndPort(), false)));
        return;
    }
    LocalSession session = createSessionUnsafe(sessionID, parentSession, peers);
    putSessionUnsafe(session);
    logger.info("Beginning local incremental repair session {}", session);
    ExecutorService executor = executorFactory().pooled("Repair-" + sessionID, parentSession.getColumnFamilyStores().size());
    KeyspaceRepairManager repairManager = parentSession.getKeyspace().getRepairManager();
    RangesAtEndpoint tokenRanges = filterLocalRanges(parentSession.getKeyspace().getName(), parentSession.getRanges());
    Future<List<Void>> repairPreparation = prepareSession(repairManager, sessionID, parentSession.getColumnFamilyStores(), tokenRanges, executor, () -> session.getState() != PREPARING);
    repairPreparation.addCallback(new FutureCallback<List<Void>>() {

        public void onSuccess(@Nullable List<Void> result) {
            try {
                logger.info("Prepare phase for incremental repair session {} completed", sessionID);
                if (session.getState() != FAILED)
                    setStateAndSave(session, PREPARED);
                else
                    logger.info("Session {} failed before anticompaction completed", sessionID);
                Message<PrepareConsistentResponse> message = Message.out(PREPARE_CONSISTENT_RSP, new PrepareConsistentResponse(sessionID, getBroadcastAddressAndPort(), session.getState() != FAILED));
                sendMessage(coordinator, message);
            } finally {
                executor.shutdown();
            }
        }

        public void onFailure(Throwable t) {
            try {
                if (Throwables.anyCauseMatches(t, (throwable) -> throwable instanceof CompactionInterruptedException))
                    logger.info("Anticompaction interrupted for session {}: {}", sessionID, t.getMessage());
                else if (Throwables.anyCauseMatches(t, (throwable) -> throwable instanceof NoSuchRepairSessionException))
                    logger.warn("No such repair session: {}", sessionID);
                else
                    logger.error("Prepare phase for incremental repair session {} failed", sessionID, t);
                sendMessage(coordinator, Message.out(PREPARE_CONSISTENT_RSP, new PrepareConsistentResponse(sessionID, getBroadcastAddressAndPort(), false)));
                failSession(sessionID, false);
            } finally {
                executor.shutdown();
            }
        }
    });
}
Also used : NoSuchRepairSessionException(org.apache.cassandra.repair.NoSuchRepairSessionException) KeyspaceRepairManager(org.apache.cassandra.repair.KeyspaceRepairManager) Date(java.util.Date) LoggerFactory(org.slf4j.LoggerFactory) STATUS_REQ(org.apache.cassandra.net.Verb.STATUS_REQ) CompactionInterruptedException(org.apache.cassandra.db.compaction.CompactionInterruptedException) ByteBuffer(java.nio.ByteBuffer) BooleanSupplier(java.util.function.BooleanSupplier) Map(java.util.Map) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) PendingStats(org.apache.cassandra.repair.consistent.admin.PendingStats) Verify(com.google.common.base.Verify) ImmutableSet(com.google.common.collect.ImmutableSet) FBUtilities(org.apache.cassandra.utils.FBUtilities) ImmutableMap(com.google.common.collect.ImmutableMap) Predicate(java.util.function.Predicate) Collection(java.util.Collection) FinalizeCommit(org.apache.cassandra.repair.messages.FinalizeCommit) Set(java.util.Set) UUID(java.util.UUID) Instant(java.time.Instant) CopyOnWriteArraySet(java.util.concurrent.CopyOnWriteArraySet) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) RangesAtEndpoint(org.apache.cassandra.locator.RangesAtEndpoint) FinalizePropose(org.apache.cassandra.repair.messages.FinalizePropose) State(org.apache.cassandra.repair.consistent.ConsistentSession.State) List(java.util.List) FAILED_SESSION_MSG(org.apache.cassandra.net.Verb.FAILED_SESSION_MSG) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Future(org.apache.cassandra.utils.concurrent.Future) Throwables(org.apache.cassandra.utils.Throwables) FINALIZE_PROMISE_MSG(org.apache.cassandra.net.Verb.FINALIZE_PROMISE_MSG) FailureDetector(org.apache.cassandra.gms.FailureDetector) SchemaConstants(org.apache.cassandra.schema.SchemaConstants) DataInputBuffer(org.apache.cassandra.io.util.DataInputBuffer) InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort) Iterables(com.google.common.collect.Iterables) PendingStat(org.apache.cassandra.repair.consistent.admin.PendingStat) PrepareConsistentResponse(org.apache.cassandra.repair.messages.PrepareConsistentResponse) DataOutputBuffer(org.apache.cassandra.io.util.DataOutputBuffer) TableId(org.apache.cassandra.schema.TableId) PREPARE_CONSISTENT_RSP(org.apache.cassandra.net.Verb.PREPARE_CONSISTENT_RSP) Range(org.apache.cassandra.dht.Range) HashMap(java.util.HashMap) Message(org.apache.cassandra.net.Message) QueryProcessor(org.apache.cassandra.cql3.QueryProcessor) FinalizePromise(org.apache.cassandra.repair.messages.FinalizePromise) SystemKeyspace(org.apache.cassandra.db.SystemKeyspace) ArrayList(java.util.ArrayList) Schema(org.apache.cassandra.schema.Schema) HashSet(java.util.HashSet) UTF8Type(org.apache.cassandra.db.marshal.UTF8Type) Token(org.apache.cassandra.dht.Token) ActiveRepairService(org.apache.cassandra.service.ActiveRepairService) Lists(com.google.common.collect.Lists) CleanupSummary(org.apache.cassandra.repair.consistent.admin.CleanupSummary) STATUS_RSP(org.apache.cassandra.net.Verb.STATUS_RSP) ExecutorService(java.util.concurrent.ExecutorService) Nullable(javax.annotation.Nullable) MessagingService(org.apache.cassandra.net.MessagingService) Logger(org.slf4j.Logger) BytesType(org.apache.cassandra.db.marshal.BytesType) StatusResponse(org.apache.cassandra.repair.messages.StatusResponse) StorageService(org.apache.cassandra.service.StorageService) IOException(java.io.IOException) PrepareConsistentRequest(org.apache.cassandra.repair.messages.PrepareConsistentRequest) 
UnknownHostException(java.net.UnknownHostException) Ints(com.google.common.primitives.Ints) FutureCallback(com.google.common.util.concurrent.FutureCallback) Replica(org.apache.cassandra.locator.Replica) FailSession(org.apache.cassandra.repair.messages.FailSession) TimeUnit(java.util.concurrent.TimeUnit) RepairMessage(org.apache.cassandra.repair.messages.RepairMessage) UUIDType(org.apache.cassandra.db.marshal.UUIDType) IPartitioner(org.apache.cassandra.dht.IPartitioner) Global.executorFactory(org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory) UntypedResultSet(org.apache.cassandra.cql3.UntypedResultSet) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) StatusRequest(org.apache.cassandra.repair.messages.StatusRequest) InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort) RangesAtEndpoint(org.apache.cassandra.locator.RangesAtEndpoint) ActiveRepairService(org.apache.cassandra.service.ActiveRepairService) PrepareConsistentResponse(org.apache.cassandra.repair.messages.PrepareConsistentResponse) Message(org.apache.cassandra.net.Message) RepairMessage(org.apache.cassandra.repair.messages.RepairMessage) CompactionInterruptedException(org.apache.cassandra.db.compaction.CompactionInterruptedException) KeyspaceRepairManager(org.apache.cassandra.repair.KeyspaceRepairManager) ExecutorService(java.util.concurrent.ExecutorService) NoSuchRepairSessionException(org.apache.cassandra.repair.NoSuchRepairSessionException) List(java.util.List) ArrayList(java.util.ArrayList) UUID(java.util.UUID)
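
A note on the shape above: the callback both replies to the coordinator and shuts the per-session executor down on every path. The same shape can be reproduced with plain Guava; the following is only a minimal sketch (Cassandra's own Future exposes addCallback directly, and the executor plus the no-op Callable standing in for the data preparation work are assumptions of the sketch, not Cassandra APIs):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class PrepareCallbackSketch {
    public static void main(String[] args) {
        // Decorate a plain pool so submit() returns a ListenableFuture,
        // mirroring the per-session "Repair-<id>" executor above.
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        ListenableFuture<Void> preparation = executor.submit(() -> {
            // stand-in for the data preparation / isolation work
            return null;
        });
        Futures.addCallback(preparation, new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void result) {
                try {
                    System.out.println("prepare completed; reply success to coordinator");
                } finally {
                    executor.shutdown(); // release the pool on both paths
                }
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    System.err.println("prepare failed; reply failure: " + t);
                } finally {
                    executor.shutdown();
                }
            }
        }, MoreExecutors.directExecutor());
    }
}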

Example 27 with FutureCallback

use of com.google.common.util.concurrent.FutureCallback in project flink by apache.

the class CassandraRowWriteAheadSink method sendValues.

@Override
protected boolean sendValues(Iterable<Row> values, long checkpointId, long timestamp) throws Exception {
    final AtomicInteger updatesCount = new AtomicInteger(0);
    final AtomicInteger updatesConfirmed = new AtomicInteger(0);
    final AtomicReference<Throwable> exception = new AtomicReference<>();
    FutureCallback<ResultSet> callback = new FutureCallback<ResultSet>() {

        @Override
        public void onSuccess(ResultSet resultSet) {
            updatesConfirmed.incrementAndGet();
            if (updatesCount.get() > 0) {
                // updatesCount is only set once the send loop has finished, so 0 means the total is not yet known
                if (updatesCount.get() == updatesConfirmed.get()) {
                    synchronized (updatesConfirmed) {
                        updatesConfirmed.notifyAll();
                    }
                }
            }
        }

        @Override
        public void onFailure(Throwable throwable) {
            if (exception.compareAndSet(null, throwable)) {
                LOG.error("Error while sending value.", throwable);
                synchronized (updatesConfirmed) {
                    updatesConfirmed.notifyAll();
                }
            }
        }
    };
    // set values for prepared statement
    int updatesSent = 0;
    for (Row value : values) {
        for (int x = 0; x < arity; x++) {
            fields[x] = value.getField(x);
        }
        // insert values and send to cassandra
        BoundStatement s = preparedStatement.bind(fields);
        s.setDefaultTimestamp(timestamp);
        ResultSetFuture result = session.executeAsync(s);
        updatesSent++;
        if (result != null) {
            // add callback to detect errors
            Futures.addCallback(result, callback);
        }
    }
    updatesCount.set(updatesSent);
    synchronized (updatesConfirmed) {
        while (exception.get() == null && updatesSent != updatesConfirmed.get()) {
            updatesConfirmed.wait();
        }
    }
    if (exception.get() != null) {
        LOG.warn("Sending a value failed.", exception.get());
        return false;
    } else {
        return true;
    }
}
Also used : ResultSetFuture(com.datastax.driver.core.ResultSetFuture) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ResultSet(com.datastax.driver.core.ResultSet) AtomicReference(java.util.concurrent.atomic.AtomicReference) Row(org.apache.flink.types.Row) BoundStatement(com.datastax.driver.core.BoundStatement) FutureCallback(com.google.common.util.concurrent.FutureCallback)
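
The sink blocks sendValues until every asynchronous write is acknowledged, using two counters plus wait/notify on the confirmation counter; updatesCount is only published after the send loop, which is why onSuccess treats 0 as "total unknown". Example 28 below is the tuple variant of the same pattern. Here is a minimal self-contained sketch of that count-and-wait barrier, with a Guava executor standing in for the driver's executeAsync (all names are illustrative):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

public class AckBarrierSketch {
    public static void main(String[] args) throws InterruptedException {
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        final AtomicInteger sent = new AtomicInteger(0);
        final AtomicInteger confirmed = new AtomicInteger(0);
        final AtomicReference<Throwable> failure = new AtomicReference<>();

        FutureCallback<Object> callback = new FutureCallback<Object>() {
            @Override
            public void onSuccess(Object result) {
                confirmed.incrementAndGet();
                synchronized (confirmed) {
                    confirmed.notifyAll(); // wake the writer, which re-checks the counts
                }
            }

            @Override
            public void onFailure(Throwable t) {
                if (failure.compareAndSet(null, t)) {
                    synchronized (confirmed) {
                        confirmed.notifyAll();
                    }
                }
            }
        };

        for (int i = 0; i < 10; i++) {
            // stand-in for session.executeAsync(boundStatement)
            ListenableFuture<Object> write = pool.submit(() -> new Object());
            sent.incrementAndGet();
            Futures.addCallback(write, callback, MoreExecutors.directExecutor());
        }

        // Block until every write is confirmed or one of them failed.
        synchronized (confirmed) {
            while (failure.get() == null && sent.get() != confirmed.get()) {
                confirmed.wait();
            }
        }
        pool.shutdown();
        System.out.println("acknowledged " + confirmed.get() + " of " + sent.get() + " writes");
    }
}

Unlike the sink, this sketch publishes the sent count eagerly; because both sides re-check the counters while holding the confirmed monitor, no wakeup is lost either way.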

Example 28 with FutureCallback

use of com.google.common.util.concurrent.FutureCallback in project flink by apache.

the class CassandraTupleWriteAheadSink method sendValues.

@Override
protected boolean sendValues(Iterable<IN> values, long checkpointId, long timestamp) throws Exception {
    final AtomicInteger updatesCount = new AtomicInteger(0);
    final AtomicInteger updatesConfirmed = new AtomicInteger(0);
    final AtomicReference<Throwable> exception = new AtomicReference<>();
    FutureCallback<ResultSet> callback = new FutureCallback<ResultSet>() {

        @Override
        public void onSuccess(ResultSet resultSet) {
            updatesConfirmed.incrementAndGet();
            if (updatesCount.get() > 0) {
                // updatesCount is only set once the send loop has finished, so 0 means the total is not yet known
                if (updatesCount.get() == updatesConfirmed.get()) {
                    synchronized (updatesConfirmed) {
                        updatesConfirmed.notifyAll();
                    }
                }
            }
        }

        @Override
        public void onFailure(Throwable throwable) {
            if (exception.compareAndSet(null, throwable)) {
                LOG.error("Error while sending value.", throwable);
                synchronized (updatesConfirmed) {
                    updatesConfirmed.notifyAll();
                }
            }
        }
    };
    // set values for prepared statement
    int updatesSent = 0;
    for (IN value : values) {
        for (int x = 0; x < value.getArity(); x++) {
            fields[x] = value.getField(x);
        }
        // insert values and send to cassandra
        BoundStatement s = preparedStatement.bind(fields);
        s.setDefaultTimestamp(timestamp);
        ResultSetFuture result = session.executeAsync(s);
        updatesSent++;
        if (result != null) {
            // add callback to detect errors
            Futures.addCallback(result, callback);
        }
    }
    updatesCount.set(updatesSent);
    synchronized (updatesConfirmed) {
        while (exception.get() == null && updatesSent != updatesConfirmed.get()) {
            updatesConfirmed.wait();
        }
    }
    if (exception.get() != null) {
        LOG.warn("Sending a value failed.", exception.get());
        return false;
    } else {
        return true;
    }
}
Also used : ResultSetFuture(com.datastax.driver.core.ResultSetFuture) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ResultSet(com.datastax.driver.core.ResultSet) AtomicReference(java.util.concurrent.atomic.AtomicReference) BoundStatement(com.datastax.driver.core.BoundStatement) FutureCallback(com.google.common.util.concurrent.FutureCallback)

Example 29 with FutureCallback

use of com.google.common.util.concurrent.FutureCallback in project Minigames by AddstarMC.

the class BackendCommand method onCommand.

@Override
public boolean onCommand(final CommandSender sender, Minigame minigame, String label, String[] args) {
    if (args == null || args.length != 2) {
        return false;
    }
    BackendManager manager = Minigames.getPlugin().getBackend();
    if (args[0].equalsIgnoreCase("export")) {
        try {
            ListenableFuture<Void> future = manager.exportTo(args[1], Minigames.getPlugin().getConfig(), new Notifier(sender));
            sender.sendMessage(ChatColor.GOLD + "Exporting backend to " + args[1] + "...");
            Futures.addCallback(future, new FutureCallback<Void>() {

                @Override
                public void onFailure(Throwable t) {
                    sender.sendMessage(ChatColor.RED + "An internal error occured while exporting.");
                }

                @Override
                public void onSuccess(Void result) {
                }
            });
        } catch (IllegalArgumentException e) {
            sender.sendMessage(ChatColor.RED + e.getMessage());
        }
    } else if (args[0].equalsIgnoreCase("switch")) {
        try {
            ListenableFuture<Void> future = manager.switchBackend(args[1], Minigames.getPlugin().getConfig());
            sender.sendMessage(ChatColor.GOLD + "Switching minigames backend to " + args[1] + "...");
            Futures.addCallback(future, new FutureCallback<Void>() {

                @Override
                public void onFailure(Throwable t) {
                    sender.sendMessage(ChatColor.RED + "An internal error occured while switching backend.");
                }

                @Override
                public void onSuccess(Void result) {
                    sender.sendMessage(ChatColor.GOLD + "The backend has been successfully switched");
                    sender.sendMessage(ChatColor.GOLD + "!!! This change is " + ChatColor.BOLD + "temporary" + ChatColor.GOLD + ". Please update the config !!!");
                }
            });
        } catch (IllegalArgumentException e) {
            sender.sendMessage(ChatColor.RED + e.getMessage());
        }
    } else {
        sender.sendMessage(ChatColor.RED + "Unknown option " + args[0]);
    }
    return true;
}
Also used : ListenableFuture(com.google.common.util.concurrent.ListenableFuture) BackendManager(au.com.mineauz.minigames.backend.BackendManager) FutureCallback(com.google.common.util.concurrent.FutureCallback) ExportNotifier(au.com.mineauz.minigames.backend.ExportNotifier)
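
One caveat worth hedging: Futures.addCallback runs the callback on whatever thread completes the future, and the two-argument overload used above is deprecated in recent Guava in favor of the three-argument form that takes an explicit Executor. Since Bukkit API calls such as sender.sendMessage are generally expected to run on the server thread, a safer variant routes the callback through an executor that hops back to it. A sketch follows, where mainThread is a hypothetical executor (for Bukkit it could be r -> Bukkit.getScheduler().runTask(plugin, r)):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.Executor;

public final class CallbackThreading {
    private CallbackThreading() {}

    // Attach a callback that is invoked on the given executor instead of on
    // whichever thread happens to complete the future.
    public static <T> void whenDone(ListenableFuture<T> future,
                                    FutureCallback<? super T> callback,
                                    Executor mainThread) {
        Futures.addCallback(future, callback, mainThread);
    }
}

Usage would mirror the command above: whenDone(manager.exportTo(...), new FutureCallback<Void>() { ... }, mainThread).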

Example 30 with FutureCallback

use of com.google.common.util.concurrent.FutureCallback in project druid by druid-io.

the class BatchAppenderator method add.

@Override
public AppenderatorAddResult add(final SegmentIdWithShardSpec identifier, final InputRow row, @Nullable final Supplier<Committer> committerSupplier, final boolean allowIncrementalPersists) throws IndexSizeExceededException, SegmentNotWritableException {
    throwPersistErrorIfExists();
    Preconditions.checkArgument(committerSupplier == null, "Batch appenderator does not need a committer!");
    Preconditions.checkArgument(allowIncrementalPersists, "Batch appenderator should always allow incremental persists!");
    if (!identifier.getDataSource().equals(schema.getDataSource())) {
        throw new IAE("Expected dataSource[%s] but was asked to insert row for dataSource[%s]?!", schema.getDataSource(), identifier.getDataSource());
    }
    final Sink sink = getOrCreateSink(identifier);
    metrics.reportMessageMaxTimestamp(row.getTimestampFromEpoch());
    final int sinkRowsInMemoryBeforeAdd = sink.getNumRowsInMemory();
    final int sinkRowsInMemoryAfterAdd;
    final long bytesInMemoryBeforeAdd = sink.getBytesInMemory();
    final long bytesInMemoryAfterAdd;
    final IncrementalIndexAddResult addResult;
    try {
        // allowIncrementalPersists is always true for batch
        addResult = sink.add(row, false);
        sinkRowsInMemoryAfterAdd = addResult.getRowCount();
        bytesInMemoryAfterAdd = addResult.getBytesInMemory();
    } catch (IndexSizeExceededException e) {
        // Uh oh, we can't do anything about this! We can't persist (commit metadata would be out of sync) and we
        // can't add the row (it just failed). This should never actually happen, though, because we check
        // sink.canAppendRow after returning from add.
        log.error(e, "Sink for segment[%s] was unexpectedly full!", identifier);
        throw e;
    }
    if (sinkRowsInMemoryAfterAdd < 0) {
        throw new SegmentNotWritableException("Attempt to add row to swapped-out sink for segment[%s].", identifier);
    }
    if (addResult.isRowAdded()) {
        rowIngestionMeters.incrementProcessed();
    } else if (addResult.hasParseException()) {
        parseExceptionHandler.handle(addResult.getParseException());
    }
    final int numAddedRows = sinkRowsInMemoryAfterAdd - sinkRowsInMemoryBeforeAdd;
    rowsCurrentlyInMemory += numAddedRows;
    bytesCurrentlyInMemory += (bytesInMemoryAfterAdd - bytesInMemoryBeforeAdd);
    totalRows += numAddedRows;
    sinksMetadata.computeIfAbsent(identifier, unused -> new SinkMetadata()).addRows(numAddedRows);
    boolean persist = false;
    List<String> persistReasons = new ArrayList<>();
    if (!sink.canAppendRow()) {
        persist = true;
        persistReasons.add("No more rows can be appended to sink");
    }
    if (rowsCurrentlyInMemory >= tuningConfig.getMaxRowsInMemory()) {
        persist = true;
        persistReasons.add(StringUtils.format("rowsCurrentlyInMemory[%d] is greater than maxRowsInMemory[%d]", rowsCurrentlyInMemory, tuningConfig.getMaxRowsInMemory()));
    }
    if (bytesCurrentlyInMemory >= maxBytesTuningConfig) {
        persist = true;
        persistReasons.add(StringUtils.format("bytesCurrentlyInMemory[%d] is greater than maxBytesInMemory[%d]", bytesCurrentlyInMemory, maxBytesTuningConfig));
    }
    if (persist) {
        // persistAll clears rowsCurrentlyInMemory, no need to update it.
        log.info("Incremental persist to disk because %s.", String.join(",", persistReasons));
        long bytesToBePersisted = 0L;
        for (Map.Entry<SegmentIdWithShardSpec, Sink> entry : sinks.entrySet()) {
            final Sink sinkEntry = entry.getValue();
            if (sinkEntry != null) {
                bytesToBePersisted += sinkEntry.getBytesInMemory();
                if (sinkEntry.swappable()) {
                    // Code for batch no longer memory maps hydrants, but they still take memory...
                    int memoryStillInUse = calculateMemoryUsedByHydrant();
                    bytesCurrentlyInMemory += memoryStillInUse;
                }
            }
        }
        if (!skipBytesInMemoryOverheadCheck && bytesCurrentlyInMemory - bytesToBePersisted > maxBytesTuningConfig) {
            // We are still over maxBytesTuningConfig even after persisting.
            // This means that we ran out of all available memory to ingest (due to overheads created as part of ingestion)
            final String alertMessage = StringUtils.format("Task has exceeded safe estimated heap usage limits, failing " + "(numSinks: [%d] numHydrantsAcrossAllSinks: [%d] totalRows: [%d])" + "(bytesCurrentlyInMemory: [%d] - bytesToBePersisted: [%d] > maxBytesTuningConfig: [%d])", sinks.size(), sinks.values().stream().mapToInt(Iterables::size).sum(), getTotalRowCount(), bytesCurrentlyInMemory, bytesToBePersisted, maxBytesTuningConfig);
            final String errorMessage = StringUtils.format("%s.\nThis can occur when the overhead from too many intermediary segment persists becomes too " + "great to have enough space to process additional input rows. This check, along with metering the overhead " + "of these objects to factor into the 'maxBytesInMemory' computation, can be disabled by setting " + "'skipBytesInMemoryOverheadCheck' to 'true' (note that doing so might allow the task to naturally encounter " + "a 'java.lang.OutOfMemoryError'). Alternatively, 'maxBytesInMemory' can be increased which will cause an " + "increase in heap footprint, but will allow for more intermediary segment persists to occur before " + "reaching this condition.", alertMessage);
            log.makeAlert(alertMessage).addData("dataSource", schema.getDataSource()).emit();
            throw new RuntimeException(errorMessage);
        }
        Futures.addCallback(persistAll(null), new FutureCallback<Object>() {

            @Override
            public void onSuccess(@Nullable Object result) {
                // do nothing
            }

            @Override
            public void onFailure(Throwable t) {
                persistError = t;
            }
        });
    }
    return new AppenderatorAddResult(identifier, sinksMetadata.get(identifier).numRowsInSegment, false);
}
Also used : Arrays(java.util.Arrays) FireDepartmentMetrics(org.apache.druid.segment.realtime.FireDepartmentMetrics) Pair(org.apache.druid.java.util.common.Pair) FileLock(java.nio.channels.FileLock) Map(java.util.Map) QueryRunner(org.apache.druid.query.QueryRunner) IAE(org.apache.druid.java.util.common.IAE) FileUtils(org.apache.druid.java.util.common.FileUtils) Function(com.google.common.base.Function) Execs(org.apache.druid.java.util.common.concurrent.Execs) ImmutableMap(com.google.common.collect.ImmutableMap) Closer(org.apache.druid.java.util.common.io.Closer) Collection(java.util.Collection) QueryableIndex(org.apache.druid.segment.QueryableIndex) StandardOpenOption(java.nio.file.StandardOpenOption) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StringUtils(org.apache.druid.java.util.common.StringUtils) ISE(org.apache.druid.java.util.common.ISE) Collectors(java.util.stream.Collectors) InputRow(org.apache.druid.data.input.InputRow) IndexSizeExceededException(org.apache.druid.segment.incremental.IndexSizeExceededException) List(java.util.List) DataSegment(org.apache.druid.timeline.DataSegment) QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) DataSegmentPusher(org.apache.druid.segment.loading.DataSegmentPusher) Iterables(com.google.common.collect.Iterables) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) ParseExceptionHandler(org.apache.druid.segment.incremental.ParseExceptionHandler) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) Stopwatch(com.google.common.base.Stopwatch) Supplier(com.google.common.base.Supplier) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) RowIngestionMeters(org.apache.druid.segment.incremental.RowIngestionMeters) ArrayList(java.util.ArrayList) BaseProgressIndicator(org.apache.druid.segment.BaseProgressIndicator) Interval(org.joda.time.Interval) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) Query(org.apache.druid.query.Query) Sink(org.apache.druid.segment.realtime.plumber.Sink) RetryUtils(org.apache.druid.java.util.common.RetryUtils) Nullable(javax.annotation.Nullable) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) Iterator(java.util.Iterator) RE(org.apache.druid.java.util.common.RE) IndexMerger(org.apache.druid.segment.IndexMerger) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) FireHydrant(org.apache.druid.segment.realtime.FireHydrant) IOException(java.io.IOException) Ints(com.google.common.primitives.Ints) ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) FutureCallback(com.google.common.util.concurrent.FutureCallback) File(java.io.File) TimeUnit(java.util.concurrent.TimeUnit) Futures(com.google.common.util.concurrent.Futures) Closeable(java.io.Closeable) Committer(org.apache.druid.data.input.Committer) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) IndexIO(org.apache.druid.segment.IndexIO) IncrementalIndexAddResult(org.apache.druid.segment.incremental.IncrementalIndexAddResult) DataSchema(org.apache.druid.segment.indexing.DataSchema) FileChannel(java.nio.channels.FileChannel) ArrayList(java.util.ArrayList) IAE(org.apache.druid.java.util.common.IAE) Iterables(com.google.common.collect.Iterables) Sink(org.apache.druid.segment.realtime.plumber.Sink) 
IncrementalIndexAddResult(org.apache.druid.segment.incremental.IncrementalIndexAddResult) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) IndexSizeExceededException(org.apache.druid.segment.incremental.IndexSizeExceededException)
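
The persist kicked off above is fire-and-forget: the callback only records the Throwable into persistError, and throwPersistErrorIfExists() at the top of add() rethrows it on the next call, so a background persist failure fails the ingestion thread instead of being silently lost. A minimal sketch of that deferred-error pattern (the RuntimeException wrapper and the AtomicReference field are assumptions of the sketch; Druid's own implementation differs in its field and exception types):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.atomic.AtomicReference;

public class DeferredPersistErrorSketch {
    private final AtomicReference<Throwable> persistError = new AtomicReference<>();

    private void throwPersistErrorIfExists() {
        Throwable t = persistError.get();
        if (t != null) {
            throw new RuntimeException("an earlier async persist failed", t);
        }
    }

    // Call when an asynchronous persist is kicked off.
    void watchPersist(ListenableFuture<Object> persistFuture) {
        Futures.addCallback(persistFuture, new FutureCallback<Object>() {
            @Override
            public void onSuccess(Object result) {
                // nothing to do on success
            }

            @Override
            public void onFailure(Throwable t) {
                // remember only the first failure; add() rethrows it
                persistError.compareAndSet(null, t);
            }
        }, MoreExecutors.directExecutor());
    }

    public void add(Object row) {
        throwPersistErrorIfExists(); // fail fast before accepting more rows
        // ... buffer the row; possibly trigger another async persist ...
    }
}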

Aggregations

FutureCallback (com.google.common.util.concurrent.FutureCallback): 98
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 56
List (java.util.List): 48
Futures (com.google.common.util.concurrent.Futures): 43
ArrayList (java.util.ArrayList): 41
Nullable (javax.annotation.Nullable): 38
MoreExecutors (com.google.common.util.concurrent.MoreExecutors): 26
Map (java.util.Map): 25
Set (java.util.Set): 25
TimeUnit (java.util.concurrent.TimeUnit): 24
IOException (java.io.IOException): 23
Collectors (java.util.stream.Collectors): 23
PostConstruct (javax.annotation.PostConstruct): 23
ExecutorService (java.util.concurrent.ExecutorService): 22
Function (com.google.common.base.Function): 21
Collections (java.util.Collections): 21
TenantId (org.thingsboard.server.common.data.id.TenantId): 21
PreDestroy (javax.annotation.PreDestroy): 20
Logger (org.slf4j.Logger): 19
LoggerFactory (org.slf4j.LoggerFactory): 19