Example 71 with ISE

use of io.druid.java.util.common.ISE in project druid by druid-io.

the class QueryInterruptedExceptionTest method testErrorClass.

@Test
public void testErrorClass() {
    Assert.assertEquals("java.util.concurrent.CancellationException", new QueryInterruptedException(new QueryInterruptedException(new CancellationException())).getErrorClass());
    Assert.assertEquals("java.util.concurrent.CancellationException", new QueryInterruptedException(new CancellationException()).getErrorClass());
    Assert.assertEquals("java.lang.InterruptedException", new QueryInterruptedException(new InterruptedException()).getErrorClass());
    Assert.assertEquals("java.util.concurrent.TimeoutException", new QueryInterruptedException(new TimeoutException()).getErrorClass());
    Assert.assertEquals("io.druid.query.ResourceLimitExceededException", new QueryInterruptedException(new ResourceLimitExceededException("too many!")).getErrorClass());
    Assert.assertEquals(null, new QueryInterruptedException(null).getErrorClass());
    Assert.assertEquals("io.druid.java.util.common.ISE", new QueryInterruptedException(new ISE("Something bad!")).getErrorClass());
    Assert.assertEquals("io.druid.java.util.common.ISE", new QueryInterruptedException(new QueryInterruptedException(new ISE("Something bad!"))).getErrorClass());
}
Also used : CancellationException(java.util.concurrent.CancellationException) ISE(io.druid.java.util.common.ISE) TimeoutException(java.util.concurrent.TimeoutException) Test(org.junit.Test)
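
The assertions above pin down an unwrapping rule: when a QueryInterruptedException wraps another QueryInterruptedException, getErrorClass() reports the class of the innermost cause rather than the wrapper. A minimal sketch of that contract in use, assuming QueryInterruptedException lives in io.druid.query alongside ResourceLimitExceededException (as the test's assertions suggest); only the constructor and getter exercised by the test are used:

import java.util.concurrent.CancellationException;
import io.druid.query.QueryInterruptedException;

public class ErrorClassDemo {
    public static void main(String[] args) {
        QueryInterruptedException inner = new QueryInterruptedException(new CancellationException());
        // Wrapping the wrapper does not change the reported class.
        QueryInterruptedException outer = new QueryInterruptedException(inner);
        // Both lines print "java.util.concurrent.CancellationException".
        System.out.println(inner.getErrorClass());
        System.out.println(outer.getErrorClass());
    }
}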

Example 72 with ISE

use of io.druid.java.util.common.ISE in project druid by druid-io.

the class GenericIndexedWriter method bagSizePower.

/**
   * Tries to find the best value split (the number of elements in each value file) that can be expressed as a power of 2.
   *
   * @return the power-of-2 exponent of the value file split size
   *
   * @throws IOException if the header file cannot be read
   */
private int bagSizePower() throws IOException {
    long avgObjectSize = (valuesOut.getCount() + numWritten - 1) / numWritten;
    File f = ioPeon.getFile(makeFilename("headerLong"));
    Preconditions.checkNotNull(f, "header file missing.");
    try (RandomAccessFile headerFile = new RandomAccessFile(f, "r")) {
        for (int i = 31; i >= 0; --i) {
            if ((1L << i) * avgObjectSize <= fileSizeLimit) {
                if (actuallyFits(i, headerFile)) {
                    return i;
                }
            }
        }
    }
    throw new ISE("no value split found with fileSizeLimit [%d], avgObjectSize [%d] while serializing [%s]", fileSizeLimit, avgObjectSize, filenameBase);
}
Also used : RandomAccessFile(java.io.RandomAccessFile) ISE(io.druid.java.util.common.ISE) File(java.io.File)
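
The method combines a ceiling division, (valuesOut.getCount() + numWritten - 1) / numWritten, to over-estimate the average object size, with a descending scan for the largest exponent whose bag still fits under fileSizeLimit. A simplified, self-contained sketch of that search, with the actuallyFits header check omitted and a plain IllegalStateException standing in for druid's ISE:

// Simplified sketch of the power-of-2 split search in bagSizePower().
// The real method additionally validates each candidate against the
// header file via actuallyFits(); that check is omitted here.
public class BagSizeSketch {

    static int bagSizePower(long totalBytes, long numWritten, long fileSizeLimit) {
        // Ceiling division: round the average object size up, never down,
        // so the estimate errs on the side of smaller bags.
        long avgObjectSize = (totalBytes + numWritten - 1) / numWritten;
        for (int i = 31; i >= 0; --i) {
            // Largest exponent i such that 2^i average-sized objects fit under the limit.
            if ((1L << i) * avgObjectSize <= fileSizeLimit) {
                return i;
            }
        }
        throw new IllegalStateException("no value split found");
    }

    public static void main(String[] args) {
        // 1,000 objects totaling ~1 MB with a 256 KB file limit:
        // avg size 1,000 bytes, so 2^8 = 256 objects per file. Prints 8.
        System.out.println(bagSizePower(1_000_000, 1_000, 256_000));
    }
}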

Example 73 with ISE

use of io.druid.java.util.common.ISE in project druid by druid-io.

the class KafkaLookupExtractorFactory method start.

@Override
public boolean start() {
    synchronized (started) {
        if (started.get()) {
            LOG.warn("Already started, not starting again");
            return started.get();
        }
        if (executorService.isShutdown()) {
            LOG.warn("Already shut down, not starting again");
            return false;
        }
        final Properties kafkaProperties = new Properties();
        kafkaProperties.putAll(getKafkaProperties());
        if (kafkaProperties.containsKey("group.id")) {
            throw new IAE("Cannot set kafka property [group.id]. Property is randomly generated for you. Found [%s]", kafkaProperties.getProperty("group.id"));
        }
        if (kafkaProperties.containsKey("auto.offset.reset")) {
            throw new IAE("Cannot set kafka property [auto.offset.reset]. Property will be forced to [smallest]. Found [%s]", kafkaProperties.getProperty("auto.offset.reset"));
        }
        Preconditions.checkNotNull(kafkaProperties.getProperty("zookeeper.connect"), "zookeeper.connect required property");
        kafkaProperties.setProperty("group.id", factoryId);
        final String topic = getKafkaTopic();
        LOG.debug("About to listen to topic [%s] with group.id [%s]", topic, factoryId);
        cacheHandler = cacheManager.createCache();
        final Map<String, String> map = cacheHandler.getCache();
        mapRef.set(map);
        // Enable publish-subscribe
        kafkaProperties.setProperty("auto.offset.reset", "smallest");
        final CountDownLatch startingReads = new CountDownLatch(1);
        final ListenableFuture<?> future = executorService.submit(new Runnable() {

            @Override
            public void run() {
                while (!executorService.isShutdown()) {
                    consumerConnector = buildConnector(kafkaProperties);
                    try {
                        if (executorService.isShutdown()) {
                            break;
                        }
                        final List<KafkaStream<String, String>> streams = consumerConnector.createMessageStreamsByFilter(new Whitelist(Pattern.quote(topic)), 1, DEFAULT_STRING_DECODER, DEFAULT_STRING_DECODER);
                        if (streams == null || streams.isEmpty()) {
                            throw new IAE("Topic [%s] had no streams", topic);
                        }
                        if (streams.size() > 1) {
                            throw new ISE("Topic [%s] has %d streams! expected 1", topic, streams.size());
                        }
                        final KafkaStream<String, String> kafkaStream = streams.get(0);
                        startingReads.countDown();
                        for (final MessageAndMetadata<String, String> messageAndMetadata : kafkaStream) {
                            final String key = messageAndMetadata.key();
                            final String message = messageAndMetadata.message();
                            if (key == null || message == null) {
                                LOG.error("Bad key/message from topic [%s]: [%s]", topic, messageAndMetadata);
                                continue;
                            }
                            doubleEventCount.incrementAndGet();
                            map.put(key, message);
                            doubleEventCount.incrementAndGet();
                            LOG.trace("Placed key[%s] val[%s]", key, message);
                        }
                    } catch (Exception e) {
                        LOG.error(e, "Error reading stream for topic [%s]", topic);
                    } finally {
                        consumerConnector.shutdown();
                    }
                }
            }
        });
        Futures.addCallback(future, new FutureCallback<Object>() {

            @Override
            public void onSuccess(Object result) {
                LOG.debug("Success listening to [%s]", topic);
            }

            @Override
            public void onFailure(Throwable t) {
                if (t instanceof CancellationException) {
                    LOG.debug("Topic [%s] cancelled", topic);
                } else {
                    LOG.error(t, "Error in listening to [%s]", topic);
                }
            }
        }, MoreExecutors.sameThreadExecutor());
        this.future = future;
        final Stopwatch stopwatch = Stopwatch.createStarted();
        try {
            while (!startingReads.await(100, TimeUnit.MILLISECONDS) && connectTimeout > 0L) {
                // Don't return until we have actually connected
                if (future.isDone()) {
                    future.get();
                } else {
                    if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > connectTimeout) {
                        throw new TimeoutException("Failed to connect to kafka in sufficient time");
                    }
                }
            }
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            executorService.shutdown();
            if (!future.isDone() && !future.cancel(false)) {
                LOG.warn("Could not cancel kafka listening thread");
            }
            LOG.error(e, "Failed to start kafka extraction factory");
            cacheHandler.close();
            return false;
        }
        started.set(true);
        return true;
    }
}
Also used : MessageAndMetadata(kafka.message.MessageAndMetadata) Stopwatch(com.google.common.base.Stopwatch) KafkaStream(kafka.consumer.KafkaStream) Properties(java.util.Properties) IAE(io.druid.java.util.common.IAE) CountDownLatch(java.util.concurrent.CountDownLatch) TimeoutException(java.util.concurrent.TimeoutException) CancellationException(java.util.concurrent.CancellationException) ExecutionException(java.util.concurrent.ExecutionException) Whitelist(kafka.consumer.Whitelist) List(java.util.List) ISE(io.druid.java.util.common.ISE)
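
The tail of start() is a bounded-wait idiom worth isolating: a CountDownLatch signals the first successful read, a Guava Stopwatch enforces connectTimeout, and future.get() re-throws any failure from the listener thread. A stripped-down sketch of that idiom under hypothetical names (awaitFirstRead, connectTimeoutMs), dropping the connectTimeout > 0 guard the original uses to skip waiting entirely:

import com.google.common.base.Stopwatch;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedConnectWait {

    // Returns normally once the worker counts down the latch; otherwise
    // surfaces the worker's failure or times out, as start() does above.
    static void awaitFirstRead(CountDownLatch startingReads, Future<?> future, long connectTimeoutMs)
            throws InterruptedException, ExecutionException, TimeoutException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        // Poll in 100 ms slices so a finished (possibly failed) future is noticed promptly.
        while (!startingReads.await(100, TimeUnit.MILLISECONDS)) {
            if (future.isDone()) {
                future.get(); // re-throws the worker's exception as an ExecutionException
            } else if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > connectTimeoutMs) {
                throw new TimeoutException("Failed to connect in sufficient time");
            }
        }
    }
}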

Example 74 with ISE

use of io.druid.java.util.common.ISE in project druid by druid-io.

the class HdfsStorageAuthentication method authenticate.

/**
   * Performs authentication against a secured Hadoop cluster.
   * In case of any bug fix, make sure to fix the corresponding code in JobHelper#authenticate as well.
   */
@LifecycleStart
public void authenticate() {
    String principal = hdfsKerberosConfig.getPrincipal();
    String keytab = hdfsKerberosConfig.getKeytab();
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        UserGroupInformation.setConfiguration(hadoopConf);
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials() || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                    log.info("Trying to authenticate user [%s] with keytab [%s]..", principal, keytab);
                    UserGroupInformation.loginUserFromKeytab(principal, keytab);
                }
            } catch (IOException e) {
                throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab);
            }
        }
    }
}
Also used : ISE(io.druid.java.util.common.ISE) IOException(java.io.IOException) LifecycleStart(io.druid.java.util.common.lifecycle.LifecycleStart)
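
This example and bagSizePower above show the two constructor shapes these snippets rely on: format string plus arguments, and cause plus format string plus arguments. A minimal usage sketch, assuming those shapes (the call sites imply ISE is an unchecked exception with String.format-style messages); the path and method name here are illustrative only:

import java.io.IOException;
import io.druid.java.util.common.ISE;

public class IseUsageSketch {

    static void loadConfig(String path) {
        try {
            throw new IOException("disk unplugged");
        } catch (IOException e) {
            // Cause-wrapping form, as in authenticate(): the IOException is
            // preserved as the cause; the message is built from the format args.
            throw new ISE(e, "Failed to load config from [%s]", path);
        }
    }

    public static void main(String[] args) {
        try {
            loadConfig("/tmp/example.conf");
        } catch (ISE ise) {
            System.out.println(ise.getMessage()); // Failed to load config from [/tmp/example.conf]
            System.out.println(ise.getCause());   // java.io.IOException: disk unplugged
        }
    }
}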

Example 75 with ISE

use of io.druid.java.util.common.ISE in project druid by druid-io.

the class JobHelper method runJobs.

public static boolean runJobs(List<Jobby> jobs, HadoopDruidIndexerConfig config) {
    String failedMessage = null;
    for (Jobby job : jobs) {
        if (failedMessage == null) {
            if (!job.run()) {
                failedMessage = String.format("Job[%s] failed!", job.getClass());
            }
        }
    }
    if (!config.getSchema().getTuningConfig().isLeaveIntermediate()) {
        if (failedMessage == null || config.getSchema().getTuningConfig().isCleanupOnFailure()) {
            Path workingPath = config.makeIntermediatePath();
            log.info("Deleting path[%s]", workingPath);
            try {
                workingPath.getFileSystem(injectSystemProperties(new Configuration())).delete(workingPath, true);
            } catch (IOException e) {
                log.error(e, "Failed to cleanup path[%s]", workingPath);
            }
        }
    }
    if (failedMessage != null) {
        throw new ISE(failedMessage);
    }
    return true;
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) ISE(io.druid.java.util.common.ISE) IOException(java.io.IOException)
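
One subtlety in runJobs: after the first failure the loop keeps iterating but never calls run() on the remaining jobs, and intermediate-path cleanup happens before the failure is surfaced. A self-contained sketch of just that fail-fast loop, assuming only druid's Jobby contract (a single boolean run() method) and substituting IllegalStateException for ISE:

import java.util.Arrays;
import java.util.List;

public class RunJobsSketch {

    // Mirrors druid's Jobby: one unit of indexing work that reports success.
    interface Jobby {
        boolean run();
    }

    static boolean runJobs(List<Jobby> jobs) {
        String failedMessage = null;
        for (Jobby job : jobs) {
            // After one failure, remaining jobs are skipped rather than run.
            if (failedMessage == null && !job.run()) {
                failedMessage = String.format("Job[%s] failed!", job.getClass());
            }
        }
        // The real method deletes the intermediate working path here,
        // before the failure is thrown.
        if (failedMessage != null) {
            throw new IllegalStateException(failedMessage);
        }
        return true;
    }

    public static void main(String[] args) {
        Jobby ok = () -> true;
        Jobby bad = () -> false;
        System.out.println(runJobs(Arrays.asList(ok, ok))); // true
        runJobs(Arrays.asList(ok, bad, ok));                // throws: Job[...] failed!
    }
}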

Aggregations

ISE (io.druid.java.util.common.ISE): 158
IOException (java.io.IOException): 37
Map (java.util.Map): 23
Test (org.junit.Test): 21
File (java.io.File): 20
List (java.util.List): 19
DateTime (org.joda.time.DateTime): 18
ArrayList (java.util.ArrayList): 17
DataSegment (io.druid.timeline.DataSegment): 15
Interval (org.joda.time.Interval): 15
Function (com.google.common.base.Function): 14
TimeoutException (java.util.concurrent.TimeoutException): 12
IAE (io.druid.java.util.common.IAE): 10
HashMap (java.util.HashMap): 10
ExecutionException (java.util.concurrent.ExecutionException): 10
Stopwatch (com.google.common.base.Stopwatch): 9
DimensionSpec (io.druid.query.dimension.DimensionSpec): 9
ImmutableMap (com.google.common.collect.ImmutableMap): 8
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 8
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 8