Example 86 with Duration

Use of org.joda.time.Duration in project druid by druid-io.

In the class RetryPolicy, the method getAndIncrementRetryDelay:

public Duration getAndIncrementRetryDelay() {
    if (hasExceededRetryThreshold()) {
        return null;
    }
    Duration retVal = currRetryDelay;
    currRetryDelay = new Duration(Math.min(currRetryDelay.getMillis() * 2, maxRetryDelay.getMillis()));
    ++retryCount;
    return retVal;
}
Also used : Duration(org.joda.time.Duration)
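
A minimal, self-contained sketch of the same capped exponential-backoff pattern, runnable outside Druid. The class name BackoffSketch, the 100 ms starting delay, the 1 s cap, and the 5-attempt budget are illustrative stand-ins, not values taken from Druid's configuration.

import org.joda.time.Duration;

public class BackoffSketch {
    private Duration currRetryDelay = new Duration(100);        // illustrative starting delay
    private final Duration maxRetryDelay = new Duration(1000);  // illustrative cap
    private final int maxRetryCount = 5;                        // illustrative retry budget
    private int retryCount = 0;

    public Duration getAndIncrementRetryDelay() {
        if (retryCount >= maxRetryCount) {
            // Mirrors hasExceededRetryThreshold(): null tells the caller to give up.
            return null;
        }
        Duration retVal = currRetryDelay;
        // Double the delay for the next attempt, never exceeding the cap.
        currRetryDelay = new Duration(Math.min(currRetryDelay.getMillis() * 2, maxRetryDelay.getMillis()));
        ++retryCount;
        return retVal;
    }

    public static void main(String[] args) throws InterruptedException {
        BackoffSketch policy = new BackoffSketch();
        Duration delay;
        // Prints delays of 100, 200, 400, 800, 1000 ms, then stops.
        while ((delay = policy.getAndIncrementRetryDelay()) != null) {
            System.out.println("retrying in " + delay.getMillis() + " ms");
            Thread.sleep(delay.getMillis());
        }
    }
}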

Example 87 with Duration

Use of org.joda.time.Duration in project druid by druid-io.

In the class SeekableStreamIndexTaskClient, the method pause:

public Map<PartitionIdType, SequenceOffsetType> pause(final String id) {
    log.debug("Pause task[%s]", id);
    try {
        final StringFullResponseHolder response = submitRequestWithEmptyContent(id, HttpMethod.POST, "pause", null, true);
        final HttpResponseStatus responseStatus = response.getStatus();
        final String responseContent = response.getContent();
        if (responseStatus.equals(HttpResponseStatus.OK)) {
            log.info("Task [%s] paused successfully", id);
            return deserializeMap(responseContent, Map.class, getPartitionType(), getSequenceType());
        } else if (responseStatus.equals(HttpResponseStatus.ACCEPTED)) {
            // The task received the pause request, but its status hasn't been changed yet.
            final RetryPolicy retryPolicy = newRetryPolicy();
            while (true) {
                final SeekableStreamIndexTaskRunner.Status status = getStatus(id);
                if (status == SeekableStreamIndexTaskRunner.Status.PAUSED) {
                    return getCurrentOffsets(id, true);
                }
                final Duration delay = retryPolicy.getAndIncrementRetryDelay();
                if (delay == null) {
                    throw new ISE("Task [%s] failed to change its status from [%s] to [%s], aborting", id, status, SeekableStreamIndexTaskRunner.Status.PAUSED);
                } else {
                    final long sleepTime = delay.getMillis();
                    log.info("Still waiting for task [%s] to change its status to [%s]; will try again in [%s]", id, SeekableStreamIndexTaskRunner.Status.PAUSED, new Duration(sleepTime).toString());
                    Thread.sleep(sleepTime);
                }
            }
        } else {
            throw new ISE("Pause request for task [%s] failed with response [%s] : [%s]", id, responseStatus, responseContent);
        }
    } catch (NoTaskLocationException e) {
        log.error("Exception [%s] while pausing Task [%s]", e.getMessage(), id);
        return ImmutableMap.of();
    } catch (IOException | InterruptedException e) {
        throw new RE(e, "Exception [%s] while pausing Task [%s]", e.getMessage(), id);
    }
}
Also used : HttpResponseStatus(org.jboss.netty.handler.codec.http.HttpResponseStatus) StringFullResponseHolder(org.apache.druid.java.util.http.client.response.StringFullResponseHolder) RE(org.apache.druid.java.util.common.RE) Duration(org.joda.time.Duration) ISE(org.apache.druid.java.util.common.ISE) IOException(java.io.IOException) RetryPolicy(org.apache.druid.indexing.common.RetryPolicy)
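
The ACCEPTED branch above is a generic poll-until-state loop: probe the status, back off, and abort once the retry policy gives up. A stripped-down, self-contained sketch of that pattern; the Probe interface is a hypothetical stand-in for the getStatus(id) == PAUSED check, and the backoff values are illustrative.

import org.joda.time.Duration;

public class PollUntilState {

    // Hypothetical stand-in for the status probe; in the Druid code this is
    // getStatus(id) == SeekableStreamIndexTaskRunner.Status.PAUSED.
    interface Probe {
        boolean isDone();
    }

    static void pollUntilDone(Probe probe) throws InterruptedException {
        Duration delay = new Duration(100);            // illustrative initial backoff
        final Duration maxDelay = new Duration(1000);  // illustrative cap
        int attempts = 0;
        while (!probe.isDone()) {
            if (++attempts > 5) {
                // Same shape as the ISE thrown above when the status never changes in time.
                throw new IllegalStateException("gave up waiting for the target state");
            }
            System.out.println("still waiting; next check in " + delay);
            Thread.sleep(delay.getMillis());
            // Capped exponential backoff, as in Example 86.
            delay = new Duration(Math.min(delay.getMillis() * 2, maxDelay.getMillis()));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + 500;
        pollUntilDone(() -> System.currentTimeMillis() >= deadline);  // "paused" after ~0.5 s
    }
}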

Example 88 with Duration

Use of org.joda.time.Duration in project druid by druid-io.

In the class BaseQuery, the method getDuration:

@Override
public Duration getDuration() {
    if (duration == null) {
        Duration totalDuration = new Duration(0);
        for (Interval interval : querySegmentSpec.getIntervals()) {
            if (interval != null) {
                totalDuration = totalDuration.plus(interval.toDuration());
            }
        }
        duration = totalDuration;
    }
    return duration;
}
Also used : Duration(org.joda.time.Duration) Interval(org.joda.time.Interval)
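
The method above folds each Interval into a running total. Since Duration is immutable, plus() returns a new instance and must be reassigned. A standalone sketch with illustrative interval strings:

import java.util.Arrays;
import java.util.List;
import org.joda.time.Duration;
import org.joda.time.Interval;

public class TotalDurationSketch {
    public static void main(String[] args) {
        List<Interval> intervals = Arrays.asList(
            Interval.parse("2021-01-01/2021-01-02"),   // 1 day
            Interval.parse("2021-02-01/2021-02-03"));  // 2 days
        Duration total = new Duration(0);
        for (Interval interval : intervals) {
            // plus() does not mutate; reassign the result.
            total = total.plus(interval.toDuration());
        }
        System.out.println(total.getStandardDays());   // prints 3
    }
}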

Example 89 with Duration

Use of org.joda.time.Duration in project druid by druid-io.

In the class HttpLoadQueuePeon, the method doSegmentManagement:

private void doSegmentManagement() {
    if (stopped || !mainLoopInProgress.compareAndSet(false, true)) {
        log.trace("[%s]Ignoring tick. Either in-progress already or stopped.", serverId);
        return;
    }
    final int batchSize = config.getHttpLoadQueuePeonBatchSize();
    final List<DataSegmentChangeRequest> newRequests = new ArrayList<>(batchSize);
    synchronized (lock) {
        Iterator<Map.Entry<DataSegment, SegmentHolder>> iter = Iterators.concat(segmentsToDrop.entrySet().iterator(), segmentsToLoad.entrySet().iterator());
        while (newRequests.size() < batchSize && iter.hasNext()) {
            Map.Entry<DataSegment, SegmentHolder> entry = iter.next();
            if (entry.getValue().hasTimedOut()) {
                entry.getValue().requestFailed("timed out");
                iter.remove();
            } else {
                newRequests.add(entry.getValue().getChangeRequest());
            }
        }
    }
    if (newRequests.isEmpty()) {
        log.trace("[%s]Found no load/drop requests. SegmentsToLoad[%d], SegmentsToDrop[%d], batchSize[%d].", serverId, segmentsToLoad.size(), segmentsToDrop.size(), config.getHttpLoadQueuePeonBatchSize());
        mainLoopInProgress.set(false);
        return;
    }
    try {
        log.trace("Sending [%d] load/drop requests to Server[%s].", newRequests.size(), serverId);
        BytesAccumulatingResponseHandler responseHandler = new BytesAccumulatingResponseHandler();
        ListenableFuture<InputStream> future = httpClient.go(
            new Request(HttpMethod.POST, changeRequestURL)
                .addHeader(HttpHeaders.Names.ACCEPT, MediaType.APPLICATION_JSON)
                .addHeader(HttpHeaders.Names.CONTENT_TYPE, MediaType.APPLICATION_JSON)
                .setContent(requestBodyWriter.writeValueAsBytes(newRequests)),
            responseHandler,
            new Duration(config.getHttpLoadQueuePeonHostTimeout().getMillis() + 5000));
        Futures.addCallback(future, new FutureCallback<InputStream>() {

            @Override
            public void onSuccess(InputStream result) {
                boolean scheduleNextRunImmediately = true;
                try {
                    if (responseHandler.getStatus() == HttpServletResponse.SC_NO_CONTENT) {
                        log.trace("Received NO CONTENT reseponse from [%s]", serverId);
                    } else if (HttpServletResponse.SC_OK == responseHandler.getStatus()) {
                        try {
                            List<SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus> statuses = jsonMapper.readValue(result, RESPONSE_ENTITY_TYPE_REF);
                            log.trace("Server[%s] returned status response [%s].", serverId, statuses);
                            synchronized (lock) {
                                if (stopped) {
                                    log.trace("Ignoring response from Server[%s]. We are already stopped.", serverId);
                                    scheduleNextRunImmediately = false;
                                    return;
                                }
                                for (SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus e : statuses) {
                                    switch(e.getStatus().getState()) {
                                        case SUCCESS:
                                        case FAILED:
                                            handleResponseStatus(e.getRequest(), e.getStatus());
                                            break;
                                        case PENDING:
                                            log.trace("Request[%s] is still pending on server[%s].", e.getRequest(), serverId);
                                            break;
                                        default:
                                            scheduleNextRunImmediately = false;
                                            log.error("Server[%s] returned unknown state in status[%s].", serverId, e.getStatus());
                                    }
                                }
                            }
                        } catch (Exception ex) {
                            scheduleNextRunImmediately = false;
                            logRequestFailure(ex);
                        }
                    } else {
                        scheduleNextRunImmediately = false;
                        logRequestFailure(new RE("Unexpected Response Status."));
                    }
                } finally {
                    mainLoopInProgress.set(false);
                    if (scheduleNextRunImmediately) {
                        processingExecutor.execute(HttpLoadQueuePeon.this::doSegmentManagement);
                    }
                }
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    logRequestFailure(t);
                } finally {
                    mainLoopInProgress.set(false);
                }
            }

            private void logRequestFailure(Throwable t) {
                log.error(t, "Request[%s] Failed with status[%s]. Reason[%s].", changeRequestURL, responseHandler.getStatus(), responseHandler.getDescription());
            }
        }, processingExecutor);
    } catch (Throwable th) {
        log.error(th, "Error sending load/drop request to [%s].", serverId);
        mainLoopInProgress.set(false);
    }
}
Also used : SegmentLoadDropHandler(org.apache.druid.server.coordination.SegmentLoadDropHandler) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) Request(org.apache.druid.java.util.http.client.Request) DataSegmentChangeRequest(org.apache.druid.server.coordination.DataSegmentChangeRequest) Duration(org.joda.time.Duration) DataSegment(org.apache.druid.timeline.DataSegment) MalformedURLException(java.net.MalformedURLException) RE(org.apache.druid.java.util.common.RE) Map(java.util.Map) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap)
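
The single Duration in Example 89 is the client-side request timeout, built by padding the configured per-host timeout with a fixed 5-second margin, presumably so the client deadline fires after the server-side one. A small sketch of that construction; the 60-second base value is an illustrative stand-in for config.getHttpLoadQueuePeonHostTimeout():

import org.joda.time.Duration;

public class PaddedTimeoutSketch {
    public static void main(String[] args) {
        Duration hostTimeout = new Duration(60000);  // stand-in for the configured host timeout
        // Add a fixed 5 s margin on top of the configured timeout, as in Example 89.
        Duration requestTimeout = new Duration(hostTimeout.getMillis() + 5000);
        System.out.println(requestTimeout.getStandardSeconds());  // prints 65
    }
}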

Example 90 with Duration

Use of org.joda.time.Duration in project druid by druid-io.

In the class KafkaSupervisorTest, the method getSupervisor:

/**
 * Use when you don't want generateSequenceNumber overridden
 */
private KafkaSupervisor getSupervisor(int replicas, int taskCount, boolean useEarliestOffset, String duration, Period lateMessageRejectionPeriod, Period earlyMessageRejectionPeriod, boolean suspended, String kafkaHost, DataSchema dataSchema, KafkaSupervisorTuningConfig tuningConfig) {
    Map<String, Object> consumerProperties = new HashMap<>();
    consumerProperties.put("myCustomKey", "myCustomValue");
    consumerProperties.put("bootstrap.servers", kafkaHost);
    consumerProperties.put("isolation.level", "read_committed");
    KafkaSupervisorIOConfig kafkaSupervisorIOConfig = new KafkaSupervisorIOConfig(topic, INPUT_FORMAT, replicas, taskCount, new Period(duration), consumerProperties, null, KafkaSupervisorIOConfig.DEFAULT_POLL_TIMEOUT_MILLIS, new Period("P1D"), new Period("PT30S"), useEarliestOffset, new Period("PT30M"), lateMessageRejectionPeriod, earlyMessageRejectionPeriod, null);
    KafkaIndexTaskClientFactory taskClientFactory = new KafkaIndexTaskClientFactory(null, null) {

        @Override
        public KafkaIndexTaskClient build(TaskInfoProvider taskInfoProvider, String dataSource, int numThreads, Duration httpTimeout, long numRetries) {
            Assert.assertEquals(TEST_CHAT_THREADS, numThreads);
            Assert.assertEquals(TEST_HTTP_TIMEOUT.toStandardDuration(), httpTimeout);
            Assert.assertEquals(TEST_CHAT_RETRIES, numRetries);
            return taskClient;
        }
    };
    return new KafkaSupervisor(taskStorage, taskMaster, indexerMetadataStorageCoordinator, taskClientFactory, OBJECT_MAPPER, new KafkaSupervisorSpec(null, dataSchema, tuningConfig, kafkaSupervisorIOConfig, null, suspended, taskStorage, taskMaster, indexerMetadataStorageCoordinator, taskClientFactory, OBJECT_MAPPER, new NoopServiceEmitter(), new DruidMonitorSchedulerConfig(), rowIngestionMetersFactory, supervisorConfig), rowIngestionMetersFactory);
}
Also used : HashMap(java.util.HashMap) Period(org.joda.time.Period) Duration(org.joda.time.Duration) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) DruidMonitorSchedulerConfig(org.apache.druid.server.metrics.DruidMonitorSchedulerConfig) KafkaIndexTaskClientFactory(org.apache.druid.indexing.kafka.KafkaIndexTaskClientFactory) TaskInfoProvider(org.apache.druid.indexing.common.TaskInfoProvider)
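
Example 90 turns on the Period/Duration distinction: the supervisor config holds a Period, while the task client expects a Duration, hence the toStandardDuration() conversion inside the assertion. A minimal sketch of that conversion; the PT10S value is an illustrative stand-in for TEST_HTTP_TIMEOUT:

import org.joda.time.Duration;
import org.joda.time.Period;

public class PeriodToDurationSketch {
    public static void main(String[] args) {
        Period httpTimeout = new Period("PT10S");  // stand-in for TEST_HTTP_TIMEOUT
        // toStandardDuration() assumes standard lengths (e.g. 24-hour days) and
        // throws for year/month components, which have no fixed millisecond length.
        Duration asDuration = httpTimeout.toStandardDuration();
        System.out.println(asDuration.getMillis());  // prints 10000
    }
}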

Aggregations

Duration (org.joda.time.Duration): 272
Test (org.junit.Test): 148
Instant (org.joda.time.Instant): 66
DateTime (org.joda.time.DateTime): 32
Period (org.joda.time.Period): 27
IntervalWindow (org.apache.beam.sdk.transforms.windowing.IntervalWindow): 24
TestDruidCoordinatorConfig (org.apache.druid.server.coordinator.TestDruidCoordinatorConfig): 22
HashMap (java.util.HashMap): 18
IOException (java.io.IOException): 17
Category (org.junit.experimental.categories.Category): 16
ArrayList (java.util.ArrayList): 15
Map (java.util.Map): 15
KV (org.apache.beam.sdk.values.KV): 15
AtomicReference (java.util.concurrent.atomic.AtomicReference): 13
IndexSpec (org.apache.druid.segment.IndexSpec): 12
Set (java.util.Set): 10
GlobalWindows (org.apache.beam.sdk.transforms.windowing.GlobalWindows): 10
DynamicPartitionsSpec (org.apache.druid.indexer.partitions.DynamicPartitionsSpec): 10
Interval (org.joda.time.Interval): 10
Request (com.metamx.http.client.Request): 9