
Example 1 with CharSource

Use of com.google.common.io.CharSource in project druid by druid-io.

The class MapVirtualColumnTest, method constructorFeeder:

@Parameterized.Parameters
public static Iterable<Object[]> constructorFeeder() throws IOException {
    final Supplier<SelectQueryConfig> selectConfigSupplier = Suppliers.ofInstance(new SelectQueryConfig(true));
    SelectQueryRunnerFactory factory = new SelectQueryRunnerFactory(
        new SelectQueryQueryToolChest(
            new DefaultObjectMapper(),
            QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator(),
            selectConfigSupplier
        ),
        new SelectQueryEngine(selectConfigSupplier),
        QueryRunnerTestHelper.NOOP_QUERYWATCHER
    );
    final IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
        .withMinTimestamp(new DateTime("2011-01-12T00:00:00.000Z").getMillis())
        .withQueryGranularity(Granularities.NONE)
        .build();
    final IncrementalIndex index = new OnheapIncrementalIndex(schema, true, 10000);
    // Tab-delimited rows with "ts", "dim", "keys", "values" columns, decoded as UTF-8.
    final StringInputRowParser parser = new StringInputRowParser(
        new DelimitedParseSpec(
            new TimestampSpec("ts", "iso", null),
            new DimensionsSpec(DimensionsSpec.getDefaultSchemas(Arrays.asList("dim", "keys", "values")), null, null),
            "\t",
            ",",
            Arrays.asList("ts", "dim", "keys", "values")
        ),
        "utf8"
    );
    // The test data is wrapped in-memory rather than read from a fixture file.
    CharSource input = CharSource.wrap(
        "2011-01-12T00:00:00.000Z\ta\tkey1,key2,key3\tvalue1,value2,value3\n"
        + "2011-01-12T00:00:00.000Z\tb\tkey4,key5,key6\tvalue4\n"
        + "2011-01-12T00:00:00.000Z\tc\tkey1,key5\tvalue1,value5,value9\n"
    );
    IncrementalIndex index1 = TestIndex.loadIncrementalIndex(index, input, parser);
    QueryableIndex index2 = TestIndex.persistRealtimeAndLoadMMapped(index1);
    return transformToConstructionFeeder(Arrays.asList(
        makeQueryRunner(factory, "index1", new IncrementalIndexSegment(index1, "index1"), "incremental"),
        makeQueryRunner(factory, "index2", new QueryableIndexSegment("index2", index2), "queryable")
    ));
}
Also used: CharSource (com.google.common.io.CharSource), IncrementalIndex (io.druid.segment.incremental.IncrementalIndex), OnheapIncrementalIndex (io.druid.segment.incremental.OnheapIncrementalIndex), DelimitedParseSpec (io.druid.data.input.impl.DelimitedParseSpec), SelectQueryRunnerFactory (io.druid.query.select.SelectQueryRunnerFactory), SelectQueryConfig (io.druid.query.select.SelectQueryConfig), DateTime (org.joda.time.DateTime), SelectQueryQueryToolChest (io.druid.query.select.SelectQueryQueryToolChest), SelectQueryEngine (io.druid.query.select.SelectQueryEngine), StringInputRowParser (io.druid.data.input.impl.StringInputRowParser), TimestampSpec (io.druid.data.input.impl.TimestampSpec), DimensionsSpec (io.druid.data.input.impl.DimensionsSpec), DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper), IncrementalIndexSchema (io.druid.segment.incremental.IncrementalIndexSchema)
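
The pattern worth isolating here is CharSource.wrap(...), which turns an in-memory string into a re-openable character source so the test rows can live inline instead of in fixture files. A minimal sketch of just that Guava idiom, independent of Druid (the demo class and its rows are illustrative, not taken from the test):

import com.google.common.io.CharSource;

import java.io.IOException;
import java.util.List;

public class CharSourceWrapDemo {
    public static void main(String[] args) throws IOException {
        // Wrap an in-memory string as a CharSource; no temp file is needed.
        CharSource input = CharSource.wrap(
            "2011-01-12T00:00:00.000Z\ta\tkey1,key2\tvalue1,value2\n"
            + "2011-01-12T00:00:00.000Z\tb\tkey4\tvalue4\n"
        );
        // readLines() opens a fresh Reader and strips line terminators.
        List<String> rows = input.readLines();
        for (String row : rows) {
            System.out.println(row.split("\t").length + " fields: " + row);
        }
    }
}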

Example 2 with CharSource

Use of com.google.common.io.CharSource in project druid by druid-io.

The class MultiSegmentScanQueryTest, method setup:

@BeforeClass
public static void setup() throws IOException {
    // Each version's rows (String[] constants) become one in-memory CharSource.
    CharSource v_0112 = CharSource.wrap(StringUtils.join(V_0112, "\n"));
    CharSource v_0113 = CharSource.wrap(StringUtils.join(V_0113, "\n"));
    IncrementalIndex index0 = TestIndex.loadIncrementalIndex(newIndex("2011-01-12T00:00:00.000Z"), v_0112);
    IncrementalIndex index1 = TestIndex.loadIncrementalIndex(newIndex("2011-01-13T00:00:00.000Z"), v_0113);
    segment0 = new IncrementalIndexSegment(index0, makeIdentifier(index0, "v1"));
    segment1 = new IncrementalIndexSegment(index1, makeIdentifier(index1, "v1"));
}
Also used: CharSource (com.google.common.io.CharSource), IncrementalIndex (io.druid.segment.incremental.IncrementalIndex), OnheapIncrementalIndex (io.druid.segment.incremental.OnheapIncrementalIndex), IncrementalIndexSegment (io.druid.segment.IncrementalIndexSegment), BeforeClass (org.junit.BeforeClass)
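
The setup keeps each segment version's rows in a String[] constant and joins them into one newline-separated block before wrapping. A small sketch of that join-then-wrap step, using Guava's Joiner where the test uses Commons Lang's StringUtils.join (class name and rows below are illustrative):

import com.google.common.base.Joiner;
import com.google.common.io.CharSource;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

public class JoinedRowsDemo {
    public static void main(String[] args) throws IOException {
        // Rows held as a constant list, joined into one newline-separated block.
        List<String> rows = Arrays.asList(
            "2011-01-12T00:00:00.000Z\tspot\tautomotive",
            "2011-01-12T00:00:00.000Z\tspot\tbusiness"
        );
        CharSource source = CharSource.wrap(Joiner.on('\n').join(rows));
        System.out.println(source.readLines().size() + " rows loaded");
    }
}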

Example 3 with CharSource

Use of com.google.common.io.CharSource in project druid by druid-io.

The class DruidJsonValidator, method run:

@Override
public void run() {
    File file = new File(jsonFile);
    if (!file.exists()) {
        // Warning only: execution falls through and later branches decide
        // how to treat the missing file.
        System.out.printf("File[%s] does not exist.%n", file);
    }
    final Injector injector = makeInjector();
    final ObjectMapper jsonMapper = injector.getInstance(ObjectMapper.class);
    registerModules(
        jsonMapper,
        Iterables.concat(
            Initialization.getFromExtensions(injector.getInstance(ExtensionsConfig.class), DruidModule.class),
            Arrays.asList(
                new FirehoseModule(),
                new IndexingHadoopModule(),
                new IndexingServiceFirehoseModule(),
                new LocalDataStorageDruidModule(),
                new ParsersModule()
            )
        )
    );
    final ClassLoader loader;
    if (Thread.currentThread().getContextClassLoader() != null) {
        loader = Thread.currentThread().getContextClassLoader();
    } else {
        loader = DruidJsonValidator.class.getClassLoader();
    }
    if (toLogger) {
        logWriter = new NullWriter() {

            private final Logger logger = new Logger(DruidJsonValidator.class);

            @Override
            public void write(char[] cbuf, int off, int len) {
                logger.info(new String(cbuf, off, len));
            }
        };
    }
    try {
        if (type.equalsIgnoreCase("query")) {
            jsonMapper.readValue(file, Query.class);
        } else if (type.equalsIgnoreCase("hadoopConfig")) {
            jsonMapper.readValue(file, HadoopDruidIndexerConfig.class);
        } else if (type.equalsIgnoreCase("task")) {
            jsonMapper.readValue(file, Task.class);
        } else if (type.equalsIgnoreCase("parse")) {
            final StringInputRowParser parser;
            if (file.isFile()) {
                logWriter.write("loading parse spec from file '" + file + "'");
                parser = jsonMapper.readValue(file, StringInputRowParser.class);
            } else if (loader.getResource(jsonFile) != null) {
                logWriter.write("loading parse spec from resource '" + jsonFile + "'");
                parser = jsonMapper.readValue(loader.getResource(jsonFile), StringInputRowParser.class);
            } else {
                logWriter.write("cannot find proper spec from 'file'.. regarding it as a json spec");
                parser = jsonMapper.readValue(jsonFile, StringInputRowParser.class);
            }
            if (resource != null) {
                final CharSource source;
                if (new File(resource).isFile()) {
                    logWriter.write("loading data from file '" + resource + "'");
                    source = Resources.asByteSource(new File(resource).toURL()).asCharSource(Charset.forName(parser.getEncoding()));
                } else if (loader.getResource(resource) != null) {
                    logWriter.write("loading data from resource '" + resource + "'");
                    source = Resources.asByteSource(loader.getResource(resource)).asCharSource(Charset.forName(parser.getEncoding()));
                } else {
                    logWriter.write("cannot find proper data from 'resource'.. regarding it as data string");
                    source = CharSource.wrap(resource);
                }
                readData(parser, source);
            }
        } else {
            throw new UOE("Unknown type[%s]", type);
        }
    } catch (Exception e) {
        System.out.println("INVALID JSON!");
        throw Throwables.propagate(e);
    }
}
Also used: CharSource (com.google.common.io.CharSource), IndexingServiceFirehoseModule (io.druid.guice.IndexingServiceFirehoseModule), IndexingHadoopModule (io.druid.indexer.IndexingHadoopModule), LocalDataStorageDruidModule (io.druid.guice.LocalDataStorageDruidModule), UOE (io.druid.java.util.common.UOE), Logger (io.druid.java.util.common.logger.Logger), HadoopDruidIndexerConfig (io.druid.indexer.HadoopDruidIndexerConfig), NullWriter (org.apache.commons.io.output.NullWriter), IOException (java.io.IOException), FirehoseModule (io.druid.guice.FirehoseModule), Injector (com.google.inject.Injector), StringInputRowParser (io.druid.data.input.impl.StringInputRowParser), ParsersModule (io.druid.guice.ParsersModule), ExtensionsConfig (io.druid.guice.ExtensionsConfig), File (java.io.File), ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)
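
The reusable part of this validator is its three-way fallback for producing a CharSource: a file on disk, then a classpath resource, then the raw argument treated as literal data. A compact sketch of that resolution chain, assuming UTF-8 input (the resolver class below is hypothetical, not Druid API):

import com.google.common.io.CharSource;
import com.google.common.io.Files;
import com.google.common.io.Resources;

import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class CharSourceResolver {
    // Try the filesystem, then the classpath, then fall back to
    // treating the argument itself as the data.
    static CharSource resolve(String name) {
        File file = new File(name);
        if (file.isFile()) {
            return Files.asCharSource(file, StandardCharsets.UTF_8);
        }
        URL resource = CharSourceResolver.class.getClassLoader().getResource(name);
        if (resource != null) {
            return Resources.asByteSource(resource).asCharSource(StandardCharsets.UTF_8);
        }
        return CharSource.wrap(name);
    }

    public static void main(String[] args) throws IOException {
        // "not-a-file" matches neither branch, so it is echoed back as data.
        System.out.println(resolve("not-a-file").read());
    }
}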

Example 4 with CharSource

Use of com.google.common.io.CharSource in project druid by druid-io.

The class SearchQueryRunnerWithCaseTest, method constructorFeeder:

@Parameterized.Parameters
public static Iterable<Object[]> constructorFeeder() throws IOException {
    final SearchQueryConfig[] configs = new SearchQueryConfig[3];
    configs[0] = new SearchQueryConfig();
    configs[0].setSearchStrategy(UseIndexesStrategy.NAME);
    configs[1] = new SearchQueryConfig();
    configs[1].setSearchStrategy(CursorOnlyStrategy.NAME);
    configs[2] = new SearchQueryConfig();
    configs[2].setSearchStrategy(AutoStrategy.NAME);
    CharSource input = CharSource.wrap("2011-01-12T00:00:00.000Z\tspot\tAutoMotive\t1000\t10000.0\t100000\tPREFERRED\tapreferred\t100.000000\n" + "2011-01-12T00:00:00.000Z\tSPot\tbusiness\t1100\t11000.0\t110000\tpreferred\tbPreferred\t100.000000\n" + "2011-01-12T00:00:00.000Z\tspot\tentertainment\t1200\t12000.0\t120000\tPREFERRed\tepreferred\t100.000000\n" + "2011-01-13T00:00:00.000Z\tspot\tautomotive\t1000\t10000.0\t100000\tpreferred\tapreferred\t94.874713");
    IncrementalIndex index1 = TestIndex.makeRealtimeIndex(input);
    IncrementalIndex index2 = TestIndex.makeRealtimeIndex(input);
    QueryableIndex index3 = TestIndex.persistRealtimeAndLoadMMapped(index1);
    QueryableIndex index4 = TestIndex.persistRealtimeAndLoadMMapped(index2);
    final List<QueryRunner<Result<SearchResultValue>>> runners = Lists.newArrayList();
    for (int i = 0; i < configs.length; i++) {
        runners.addAll(Arrays.asList(
            makeQueryRunner(makeRunnerFactory(configs[i]), "index1", new IncrementalIndexSegment(index1, "index1"), "index1"),
            makeQueryRunner(makeRunnerFactory(configs[i]), "index2", new IncrementalIndexSegment(index2, "index2"), "index2"),
            makeQueryRunner(makeRunnerFactory(configs[i]), "index3", new QueryableIndexSegment("index3", index3), "index3"),
            makeQueryRunner(makeRunnerFactory(configs[i]), "index4", new QueryableIndexSegment("index4", index4), "index4")
        ));
    }
    return transformToConstructionFeeder(runners);
}
Also used: QueryableIndexSegment (io.druid.segment.QueryableIndexSegment), CharSource (com.google.common.io.CharSource), SearchQueryConfig (io.druid.query.search.search.SearchQueryConfig), IncrementalIndex (io.druid.segment.incremental.IncrementalIndex), IncrementalIndexSegment (io.druid.segment.IncrementalIndexSegment), QueryableIndex (io.druid.segment.QueryableIndex), QueryRunnerTestHelper.makeQueryRunner (io.druid.query.QueryRunnerTestHelper.makeQueryRunner), QueryRunner (io.druid.query.QueryRunner)
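
Notice that the same input feeds two makeRealtimeIndex calls. That is safe because a CharSource is a supplier of fresh Readers rather than a one-shot stream: each read opens a new pass over the underlying data. A tiny sketch of that property (class name illustrative):

import com.google.common.io.CharSource;

import java.io.IOException;

public class ReusableCharSourceDemo {
    public static void main(String[] args) throws IOException {
        CharSource input = CharSource.wrap("row1\nrow2\nrow3");
        // Both calls succeed; each readLines() opens its own Reader.
        System.out.println("first pass:  " + input.readLines().size() + " lines");
        System.out.println("second pass: " + input.readLines().size() + " lines");
    }
}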

Example 5 with CharSource

Use of com.google.common.io.CharSource in project MinecraftForge by MinecraftForge.

The class FMLDeobfuscatingRemapper, method setup:

public void setup(File mcDir, LaunchClassLoader classLoader, String deobfFileName) {
    this.classLoader = classLoader;
    try {
        List<String> srgList;
        final String gradleStartProp = System.getProperty("net.minecraftforge.gradle.GradleStart.srg.srg-mcp");
        if (Strings.isNullOrEmpty(gradleStartProp)) {
            // No GradleStart override: load the bundled LZMA-compressed
            // mappings from the classpath instead.
            InputStream classData = getClass().getResourceAsStream(deobfFileName);
            LZMAInputSupplier zis = new LZMAInputSupplier(classData);
            CharSource srgSource = zis.asCharSource(Charsets.UTF_8);
            srgList = srgSource.readLines();
            FMLRelaunchLog.fine("Loading deobfuscation resource %s with %d records", deobfFileName, srgList.size());
        } else {
            srgList = Files.readLines(new File(gradleStartProp), Charsets.UTF_8);
            FMLRelaunchLog.fine("Loading deobfuscation resource %s with %d records", gradleStartProp, srgList.size());
        }
        rawMethodMaps = Maps.newHashMap();
        rawFieldMaps = Maps.newHashMap();
        Builder<String, String> builder = ImmutableBiMap.builder();
        Splitter splitter = Splitter.on(CharMatcher.anyOf(": ")).omitEmptyStrings().trimResults();
        for (String line : srgList) {
            String[] parts = Iterables.toArray(splitter.split(line), String.class);
            String typ = parts[0];
            if ("CL".equals(typ)) {
                parseClass(builder, parts);
            } else if ("MD".equals(typ)) {
                parseMethod(parts);
            } else if ("FD".equals(typ)) {
                parseField(parts);
            }
        }
        classNameBiMap = builder.build();
    } catch (IOException ioe) {
        FMLRelaunchLog.log(Level.ERROR, ioe, "An error occurred loading the deobfuscation map data");
    }
    methodNameMaps = Maps.newHashMapWithExpectedSize(rawMethodMaps.size());
    fieldNameMaps = Maps.newHashMapWithExpectedSize(rawFieldMaps.size());
}
Also used: CharSource (com.google.common.io.CharSource), Splitter (com.google.common.base.Splitter), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), IOException (java.io.IOException), File (java.io.File)
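
The parsing idiom here, CharSource.readLines() combined with a Splitter that breaks on ':' or ' ' and drops empty tokens, is easy to exercise in isolation. A self-contained sketch with made-up SRG-style records (the mapping contents are illustrative, not real Forge data):

import com.google.common.base.CharMatcher;
import com.google.common.base.Splitter;
import com.google.common.collect.Iterables;
import com.google.common.io.CharSource;

import java.io.IOException;

public class SrgLineParseDemo {
    public static void main(String[] args) throws IOException {
        // Same splitter shape as the remapper: split on ':' or ' ',
        // trimming tokens and omitting empties, so "CL: a b" -> [CL, a, b].
        Splitter splitter = Splitter.on(CharMatcher.anyOf(": ")).omitEmptyStrings().trimResults();
        CharSource srg = CharSource.wrap("CL: aa net/minecraft/Foo\nFD: aa/a net/minecraft/Foo/bar");
        for (String line : srg.readLines()) {
            String[] parts = Iterables.toArray(splitter.split(line), String.class);
            System.out.println(parts[0] + " record, " + parts.length + " tokens");
        }
    }
}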

Aggregations

CharSource (com.google.common.io.CharSource): 13 usages
IncrementalIndex (io.druid.segment.incremental.IncrementalIndex): 5 usages
IncrementalIndexSegment (io.druid.segment.IncrementalIndexSegment): 4 usages
OnheapIncrementalIndex (io.druid.segment.incremental.OnheapIncrementalIndex): 4 usages
File (java.io.File): 4 usages
IOException (java.io.IOException): 3 usages
Splitter (com.google.common.base.Splitter): 2 usages
StringInputRowParser (io.druid.data.input.impl.StringInputRowParser): 2 usages
Segment (io.druid.segment.Segment): 2 usages
DataSegment (io.druid.timeline.DataSegment): 2 usages
VersionedIntervalTimeline (io.druid.timeline.VersionedIntervalTimeline): 2 usages
SingleElementPartitionChunk (io.druid.timeline.partition.SingleElementPartitionChunk): 2 usages
FileInputStream (java.io.FileInputStream): 2 usages
URL (java.net.URL): 2 usages
Interval (org.joda.time.Interval): 2 usages
BeforeClass (org.junit.BeforeClass): 2 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 1 usage
Injector (com.google.inject.Injector): 1 usage
DelimitedParseSpec (io.druid.data.input.impl.DelimitedParseSpec): 1 usage
DimensionsSpec (io.druid.data.input.impl.DimensionsSpec): 1 usage