Example 36 with IAE

Use of io.druid.java.util.common.IAE in the project druid by druid-io.

In the class VSizeIndexedInts, the method readFromByteBuffer:

public static VSizeIndexedInts readFromByteBuffer(ByteBuffer buffer) {
    byte versionFromBuffer = buffer.get();
    if (VERSION == versionFromBuffer) {
        int numBytes = buffer.get();
        int size = buffer.getInt();
        ByteBuffer bufferToUse = buffer.asReadOnlyBuffer();
        bufferToUse.limit(bufferToUse.position() + size);
        buffer.position(bufferToUse.limit());
        return new VSizeIndexedInts(bufferToUse, numBytes);
    }
    throw new IAE("Unknown version[%s]", versionFromBuffer);
}
Also used : IAE(io.druid.java.util.common.IAE) ByteBuffer(java.nio.ByteBuffer)
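
Below is a minimal, self-contained sketch (not taken from the Druid sources) of the version-byte pattern that readFromByteBuffer relies on: the writer records a version byte up front, and the reader throws IAE when it encounters a version it does not recognize. VersionedBufferDemo and its write/read helpers are hypothetical names; the only assumption is that io.druid.java.util.common.IAE is on the classpath.

import io.druid.java.util.common.IAE;
import java.nio.ByteBuffer;

public class VersionedBufferDemo {

    private static final byte VERSION = 0x0;

    // Hypothetical writer: version byte, bytes-per-value, payload size, then the payload itself.
    static ByteBuffer write(byte numBytes, byte[] payload) {
        ByteBuffer buffer = ByteBuffer.allocate(1 + 1 + Integer.BYTES + payload.length);
        buffer.put(VERSION);
        buffer.put(numBytes);
        buffer.putInt(payload.length);
        buffer.put(payload);
        buffer.flip();
        return buffer;
    }

    // Hypothetical reader mirroring the structure of readFromByteBuffer above.
    static ByteBuffer read(ByteBuffer buffer) {
        byte versionFromBuffer = buffer.get();
        if (VERSION != versionFromBuffer) {
            throw new IAE("Unknown version[%s]", versionFromBuffer);
        }
        byte numBytes = buffer.get();
        int size = buffer.getInt();
        // Slice a read-only view of exactly `size` bytes and advance the source buffer past it.
        ByteBuffer bufferToUse = buffer.asReadOnlyBuffer();
        bufferToUse.limit(bufferToUse.position() + size);
        buffer.position(bufferToUse.limit());
        return bufferToUse;
    }

    public static void main(String[] args) {
        ByteBuffer ok = write((byte) 4, new byte[] {1, 2, 3, 4});
        System.out.println("payload bytes: " + read(ok).remaining()); // payload bytes: 4

        ByteBuffer bad = ByteBuffer.wrap(new byte[] {9, 0, 0, 0, 0});
        try {
            read(bad);
        } catch (IAE e) {
            System.out.println("rejected: " + e.getMessage()); // rejected: Unknown version[9]
        }
    }
}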

Example 37 with IAE

Use of io.druid.java.util.common.IAE in the project druid by druid-io.

In the class GenericIndexed, the method createVersionTwoGenericIndexed:

private static <T> GenericIndexed<T> createVersionTwoGenericIndexed(ByteBuffer byteBuffer, ObjectStrategy<T> strategy, SmooshedFileMapper fileMapper) {
    if (fileMapper == null) {
        throw new IAE("SmooshedFileMapper can not be null for version 2.");
    }
    boolean allowReverseLookup = byteBuffer.get() == REVERSE_LOOKUP_ALLOWED;
    int logBaseTwoOfElementsPerValueFile = byteBuffer.getInt();
    int numElements = byteBuffer.getInt();
    String columnName;
    List<ByteBuffer> valueBuffersToUse;
    ByteBuffer headerBuffer;
    try {
        columnName = SERIALIZER_UTILS.readString(byteBuffer);
        valueBuffersToUse = Lists.newArrayList();
        int elementsPerValueFile = 1 << logBaseTwoOfElementsPerValueFile;
        int numberOfFilesRequired = getNumberOfFilesRequired(elementsPerValueFile, numElements);
        for (int i = 0; i < numberOfFilesRequired; i++) {
            valueBuffersToUse.add(fileMapper.mapFile(GenericIndexedWriter.generateValueFileName(columnName, i)).asReadOnlyBuffer());
        }
        headerBuffer = fileMapper.mapFile(GenericIndexedWriter.generateHeaderFileName(columnName));
    } catch (IOException e) {
        throw new RuntimeException("File mapping failed.", e);
    }
    return new GenericIndexed<T>(valueBuffersToUse, headerBuffer, strategy, allowReverseLookup, logBaseTwoOfElementsPerValueFile, numElements);
}
Also used : IOException(java.io.IOException) IAE(io.druid.java.util.common.IAE) ByteBuffer(java.nio.ByteBuffer)
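
The helper getNumberOfFilesRequired is referenced above but not shown on this page. The sketch below is a plausible stand-in, assuming the helper amounts to ceiling division of the element count by the number of elements held per value file; it is not the Druid implementation itself.

// Plausible stand-in (assumption): how many smooshed value files are needed to hold numElements
// when each value file holds elementsPerValueFile entries.
static int getNumberOfFilesRequired(int elementsPerValueFile, long numElements) {
    int numberOfFilesRequired = (int) (numElements / elementsPerValueFile);
    if (numElements % elementsPerValueFile != 0) {
        numberOfFilesRequired++;
    }
    return numberOfFilesRequired;
}

For example, with logBaseTwoOfElementsPerValueFile = 16 (65,536 elements per file) and 200,000 elements, four value files would be mapped by the loop above.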

Example 38 with IAE

Use of io.druid.java.util.common.IAE in the project druid by druid-io.

In the class DruidCoordinator, the method moveSegment:

public void moveSegment(ImmutableDruidServer fromServer, ImmutableDruidServer toServer, String segmentName, final LoadPeonCallback callback) {
    try {
        if (fromServer.getMetadata().equals(toServer.getMetadata())) {
            throw new IAE("Cannot move [%s] to and from the same server [%s]", segmentName, fromServer.getName());
        }
        final DataSegment segment = fromServer.getSegment(segmentName);
        if (segment == null) {
            throw new IAE("Unable to find segment [%s] on server [%s]", segmentName, fromServer.getName());
        }
        final LoadQueuePeon loadPeon = loadManagementPeons.get(toServer.getName());
        if (loadPeon == null) {
            throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", toServer.getName());
        }
        final LoadQueuePeon dropPeon = loadManagementPeons.get(fromServer.getName());
        if (dropPeon == null) {
            throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", fromServer.getName());
        }
        final ServerHolder toHolder = new ServerHolder(toServer, loadPeon);
        if (toHolder.getAvailableSize() < segment.getSize()) {
            throw new IAE("Not enough capacity on server [%s] for segment [%s]. Required: %,d, available: %,d.", toServer.getName(), segment, segment.getSize(), toHolder.getAvailableSize());
        }
        final String toLoadQueueSegPath = ZKPaths.makePath(ZKPaths.makePath(zkPaths.getLoadQueuePath(), toServer.getName()), segmentName);
        final String toServedSegPath = ZKPaths.makePath(ZKPaths.makePath(serverInventoryView.getInventoryManagerConfig().getInventoryPath(), toServer.getName()), segmentName);
        loadPeon.loadSegment(segment, new LoadPeonCallback() {

            @Override
            public void execute() {
                try {
                    if (curator.checkExists().forPath(toServedSegPath) != null && curator.checkExists().forPath(toLoadQueueSegPath) == null && !dropPeon.getSegmentsToDrop().contains(segment)) {
                        dropPeon.dropSegment(segment, callback);
                    } else if (callback != null) {
                        callback.execute();
                    }
                } catch (Exception e) {
                    throw Throwables.propagate(e);
                }
            }
        });
    } catch (Exception e) {
        log.makeAlert(e, "Exception moving segment %s", segmentName).emit();
        if (callback != null) {
            callback.execute();
        }
    }
}
Also used : IAE(io.druid.java.util.common.IAE) DataSegment(io.druid.timeline.DataSegment) IOException(java.io.IOException)
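
The nested ZKPaths.makePath calls above simply concatenate ZooKeeper path segments. Here is a minimal sketch of how toLoadQueueSegPath is composed; the load queue path, server name, and segment id literals are illustrative placeholders rather than values from a real cluster, and the only assumption is Curator's org.apache.curator.utils.ZKPaths on the classpath.

import org.apache.curator.utils.ZKPaths;

public class LoadQueuePathDemo {
    public static void main(String[] args) {
        String loadQueuePath = "/druid/loadQueue";                    // hypothetical zkPaths.getLoadQueuePath()
        String toServerName = "historical01:8083";                    // hypothetical toServer.getName()
        String segmentName = "wikipedia_2017-01-01_2017-01-02_v1_0";  // hypothetical segment id

        String toLoadQueueSegPath = ZKPaths.makePath(ZKPaths.makePath(loadQueuePath, toServerName), segmentName);
        System.out.println(toLoadQueueSegPath);
        // /druid/loadQueue/historical01:8083/wikipedia_2017-01-01_2017-01-02_v1_0
    }
}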

Example 39 with IAE

Use of io.druid.java.util.common.IAE in the project druid by druid-io.

In the class CoordinatorDynamicConfigTest, the method testSerdeWithKillAllDataSources:

@Test
public void testSerdeWithKillAllDataSources() throws Exception {
    String jsonStr = "{\n" + "  \"millisToWaitBeforeDeleting\": 1,\n" + "  \"mergeBytesLimit\": 1,\n" + "  \"mergeSegmentsLimit\" : 1,\n" + "  \"maxSegmentsToMove\": 1,\n" + "  \"replicantLifetime\": 1,\n" + "  \"replicationThrottleLimit\": 1,\n" + "  \"balancerComputeThreads\": 2, \n" + "  \"emitBalancingStats\": true,\n" + "  \"killAllDataSources\": true\n" + "}\n";
    ObjectMapper mapper = TestHelper.getObjectMapper();
    CoordinatorDynamicConfig actual = mapper.readValue(mapper.writeValueAsString(mapper.readValue(jsonStr, CoordinatorDynamicConfig.class)), CoordinatorDynamicConfig.class);
    Assert.assertEquals(new CoordinatorDynamicConfig(1, 1, 1, 1, 1, 1, 2, true, ImmutableSet.of(), true), actual);
    //ensure whitelist is empty when killAllDataSources is true
    try {
        jsonStr = "{\n" + "  \"killDataSourceWhitelist\": [\"test1\",\"test2\"],\n" + "  \"killAllDataSources\": true\n" + "}\n";
        mapper.readValue(jsonStr, CoordinatorDynamicConfig.class);
        Assert.fail("deserialization should fail.");
    } catch (JsonMappingException e) {
        Assert.assertTrue(e.getCause() instanceof IAE);
    }
}
Also used : CoordinatorDynamicConfig(io.druid.server.coordinator.CoordinatorDynamicConfig) JsonMappingException(com.fasterxml.jackson.databind.JsonMappingException) IAE(io.druid.java.util.common.IAE) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test)
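
The assertion on e.getCause() works because Jackson wraps an exception thrown from a @JsonCreator constructor in a JsonMappingException. The following simplified stand-in is not the real CoordinatorDynamicConfig: the class name, properties, and validation message are illustrative, but it reproduces the same wrapping behavior the test relies on.

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.druid.java.util.common.IAE;
import java.util.Set;

public class KillConfigDemo {
    private final Set<String> killDataSourceWhitelist;
    private final boolean killAllDataSources;

    @JsonCreator
    public KillConfigDemo(
            @JsonProperty("killDataSourceWhitelist") Set<String> killDataSourceWhitelist,
            @JsonProperty("killAllDataSources") boolean killAllDataSources
    ) {
        // Same style of precondition as the config class under test: reject contradictory settings.
        if (killAllDataSources && killDataSourceWhitelist != null && !killDataSourceWhitelist.isEmpty()) {
            throw new IAE("killDataSourceWhitelist must be empty when killAllDataSources is true");
        }
        this.killDataSourceWhitelist = killDataSourceWhitelist;
        this.killAllDataSources = killAllDataSources;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        try {
            mapper.readValue(
                    "{\"killDataSourceWhitelist\": [\"test1\"], \"killAllDataSources\": true}",
                    KillConfigDemo.class
            );
        } catch (JsonMappingException e) {
            System.out.println(e.getCause() instanceof IAE); // true
        }
    }
}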

Example 40 with IAE

Use of io.druid.java.util.common.IAE in the project druid by druid-io.

In the class HadoopConverterJob, the method run:

public List<DataSegment> run() throws IOException {
    final JobConf jobConf = new JobConf();
    jobConf.setKeepFailedTaskFiles(false);
    for (Map.Entry<String, String> entry : converterConfig.getHadoopProperties().entrySet()) {
        jobConf.set(entry.getKey(), entry.getValue(), "converterConfig.getHadoopProperties()");
    }
    final List<DataSegment> segments = converterConfig.getSegments();
    if (segments.isEmpty()) {
        throw new IAE("No segments found for datasource [%s]", converterConfig.getDataSource());
    }
    converterConfigIntoConfiguration(converterConfig, segments, jobConf);
    // Map only. Number of map tasks determined by input format
    jobConf.setNumReduceTasks(0);
    jobConf.setWorkingDirectory(new Path(converterConfig.getDistributedSuccessCache()));
    setJobName(jobConf, segments);
    if (converterConfig.getJobPriority() != null) {
        jobConf.setJobPriority(JobPriority.valueOf(converterConfig.getJobPriority()));
    }
    final Job job = Job.getInstance(jobConf);
    job.setInputFormatClass(ConfigInputFormat.class);
    job.setMapperClass(ConvertingMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    job.setMapSpeculativeExecution(false);
    job.setOutputFormatClass(ConvertingOutputFormat.class);
    JobHelper.setupClasspath(JobHelper.distributedClassPath(jobConf.getWorkingDirectory()), JobHelper.distributedClassPath(getJobClassPathDir(job.getJobName(), jobConf.getWorkingDirectory())), job);
    Throwable throwable = null;
    try {
        job.submit();
        log.info("Job %s submitted, status available at %s", job.getJobName(), job.getTrackingURL());
        final boolean success = job.waitForCompletion(true);
        if (!success) {
            final TaskReport[] reports = job.getTaskReports(TaskType.MAP);
            if (reports != null) {
                for (final TaskReport report : reports) {
                    log.error("Error in task [%s] : %s", report.getTaskId(), Arrays.toString(report.getDiagnostics()));
                }
            }
            return null;
        }
        try {
            loadedBytes = job.getCounters().findCounter(COUNTER_GROUP, COUNTER_LOADED).getValue();
            writtenBytes = job.getCounters().findCounter(COUNTER_GROUP, COUNTER_WRITTEN).getValue();
        } catch (IOException ex) {
            log.error(ex, "Could not fetch counters");
        }
        final JobID jobID = job.getJobID();
        final Path jobDir = getJobPath(jobID, job.getWorkingDirectory());
        final FileSystem fs = jobDir.getFileSystem(job.getConfiguration());
        final RemoteIterator<LocatedFileStatus> it = fs.listFiles(jobDir, true);
        final List<Path> goodPaths = new ArrayList<>();
        while (it.hasNext()) {
            final LocatedFileStatus locatedFileStatus = it.next();
            if (locatedFileStatus.isFile()) {
                final Path myPath = locatedFileStatus.getPath();
                if (ConvertingOutputFormat.DATA_SUCCESS_KEY.equals(myPath.getName())) {
                    goodPaths.add(new Path(myPath.getParent(), ConvertingOutputFormat.DATA_FILE_KEY));
                }
            }
        }
        if (goodPaths.isEmpty()) {
            log.warn("No good data found at [%s]", jobDir);
            return null;
        }
        final List<DataSegment> returnList = ImmutableList.copyOf(Lists.transform(goodPaths, new Function<Path, DataSegment>() {

            @Nullable
            @Override
            public DataSegment apply(final Path input) {
                try {
                    if (!fs.exists(input)) {
                        throw new ISE("Somehow [%s] was found but [%s] is missing at [%s]", ConvertingOutputFormat.DATA_SUCCESS_KEY, ConvertingOutputFormat.DATA_FILE_KEY, jobDir);
                    }
                } catch (final IOException e) {
                    throw Throwables.propagate(e);
                }
                try (final InputStream stream = fs.open(input)) {
                    return HadoopDruidConverterConfig.jsonMapper.readValue(stream, DataSegment.class);
                } catch (final IOException e) {
                    throw Throwables.propagate(e);
                }
            }
        }));
        if (returnList.size() == segments.size()) {
            return returnList;
        } else {
            throw new ISE("Tasks reported success but result length did not match! Expected %d found %d at path [%s]", segments.size(), returnList.size(), jobDir);
        }
    } catch (InterruptedException | ClassNotFoundException e) {
        RuntimeException exception = Throwables.propagate(e);
        throwable = exception;
        throw exception;
    } catch (Throwable t) {
        throwable = t;
        throw t;
    } finally {
        try {
            cleanup(job);
        } catch (IOException e) {
            if (throwable != null) {
                throwable.addSuppressed(e);
            } else {
                log.error(e, "Could not clean up job [%s]", job.getJobID());
            }
        }
    }
}
Also used : ArrayList(java.util.ArrayList) DataSegment(io.druid.timeline.DataSegment) WindowedDataSegment(io.druid.indexer.hadoop.WindowedDataSegment) Function(com.google.common.base.Function) FileSystem(org.apache.hadoop.fs.FileSystem) ISE(io.druid.java.util.common.ISE) Job(org.apache.hadoop.mapreduce.Job) JobConf(org.apache.hadoop.mapred.JobConf) Path(org.apache.hadoop.fs.Path) TaskReport(org.apache.hadoop.mapreduce.TaskReport) InputStream(java.io.InputStream) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) IOException(java.io.IOException) IAE(io.druid.java.util.common.IAE) Map(java.util.Map) JobID(org.apache.hadoop.mapreduce.JobID)
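
The finally block above is careful not to mask the primary failure: it remembers the first Throwable and attaches any cleanup IOException to it via addSuppressed. Below is a minimal, self-contained sketch of that idiom; runJob and cleanup are hypothetical placeholders for job.submit()/waitForCompletion and cleanup(job).

import java.io.IOException;

public class SuppressedCleanupDemo {

    // Hypothetical stand-in for submitting and waiting on the Hadoop job.
    static void runJob() {
        throw new RuntimeException("job failed");
    }

    // Hypothetical stand-in for cleanup(job).
    static void cleanup() throws IOException {
        throw new IOException("cleanup failed");
    }

    public static void main(String[] args) throws Throwable {
        Throwable throwable = null;
        try {
            runJob();
        } catch (Throwable t) {
            throwable = t;
            throw t;
        } finally {
            try {
                cleanup();
            } catch (IOException e) {
                if (throwable != null) {
                    // The primary stack trace will show "Suppressed: java.io.IOException: cleanup failed".
                    throwable.addSuppressed(e);
                } else {
                    e.printStackTrace(); // stand-in for log.error(e, "Could not clean up job [%s]", ...)
                }
            }
        }
    }
}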

Aggregations

IAE (io.druid.java.util.common.IAE): 50
IOException (java.io.IOException): 12
ISE (io.druid.java.util.common.ISE): 10
ByteBuffer (java.nio.ByteBuffer): 10
Function (com.google.common.base.Function): 5
DataSegment (io.druid.timeline.DataSegment): 5
URI (java.net.URI): 5
Interval (org.joda.time.Interval): 5
File (java.io.File): 4
Nullable (javax.annotation.Nullable): 4
DateTime (org.joda.time.DateTime): 4
ObjectColumnSelector (io.druid.segment.ObjectColumnSelector): 3
ArrayList (java.util.ArrayList): 3
Map (java.util.Map): 3
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException): 2
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 2
ByteSource (com.google.common.io.ByteSource): 2
Injector (com.google.inject.Injector): 2
Request (com.metamx.http.client.Request): 2