
Example 66 with Handle

use of org.skife.jdbi.v2.Handle in project dropwizard by dropwizard.

From the class GuavaOptionalInstantTest, method setupTests:

@Before
public void setupTests() throws IOException {
    final DataSourceFactory dataSourceFactory = new DataSourceFactory();
    dataSourceFactory.setDriverClass("org.h2.Driver");
    dataSourceFactory.setUrl("jdbc:h2:mem:guava-instant-" + System.currentTimeMillis() + "?user=sa");
    dataSourceFactory.setInitialSize(1);
    final DBI dbi = new DBIFactory().build(env, dataSourceFactory, "test");
    try (Handle h = dbi.open()) {
        h.execute("CREATE TABLE IF NOT EXISTS tasks (" + "id INT PRIMARY KEY, " + "assignee VARCHAR(255) NOT NULL, " + "start_date TIMESTAMP, " + "end_date TIMESTAMP, " + "comments VARCHAR(1024) " + ")");
    }
    dao = dbi.onDemand(TaskDao.class);
}
Also used : DataSourceFactory(io.dropwizard.db.DataSourceFactory) DBI(org.skife.jdbi.v2.DBI) DBIFactory(io.dropwizard.jdbi.DBIFactory) Handle(org.skife.jdbi.v2.Handle) Before(org.junit.Before)
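This test binds a TaskDao with dbi.onDemand(TaskDao.class), but the interface itself is not shown. A minimal sketch of what a jdbi v2 SQL Object DAO for the tasks table above could look like — the method names, queries, and bindings are illustrative assumptions, not the actual dropwizard test code:

import org.skife.jdbi.v2.sqlobject.Bind;
import org.skife.jdbi.v2.sqlobject.SqlQuery;
import org.skife.jdbi.v2.sqlobject.SqlUpdate;

import java.sql.Timestamp;

// Hypothetical DAO for the tasks table; jdbi v2 generates the implementation
// at runtime when the interface is bound via dbi.onDemand(TaskDao.class).
public interface TaskDao {

    @SqlUpdate("INSERT INTO tasks (id, assignee, start_date, end_date, comments) " +
            "VALUES (:id, :assignee, :start_date, :end_date, :comments)")
    void insert(@Bind("id") int id,
                @Bind("assignee") String assignee,
                @Bind("start_date") Timestamp startDate,
                @Bind("end_date") Timestamp endDate,
                @Bind("comments") String comments);

    @SqlQuery("SELECT assignee FROM tasks WHERE id = :id")
    String findAssigneeById(@Bind("id") int id);
}

The two examples that follow reuse the same table and setup, varying only the temporal type under test.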

Example 67 with Handle

use of org.skife.jdbi.v2.Handle in project dropwizard by dropwizard.

From the class GuavaOptionalLocalDateTimeTest, method setupTests:

@Before
public void setupTests() throws IOException {
    final DataSourceFactory dataSourceFactory = new DataSourceFactory();
    dataSourceFactory.setDriverClass("org.h2.Driver");
    dataSourceFactory.setUrl("jdbc:h2:mem:guava-local-date-time-" + System.currentTimeMillis() + "?user=sa");
    dataSourceFactory.setInitialSize(1);
    final DBI dbi = new DBIFactory().build(env, dataSourceFactory, "test");
    try (Handle h = dbi.open()) {
        h.execute("CREATE TABLE IF NOT EXISTS tasks (" + "id INT PRIMARY KEY, " + "assignee VARCHAR(255) NOT NULL, " + "start_date TIMESTAMP, " + "end_date TIMESTAMP, " + "comments VARCHAR(1024) " + ")");
    }
    dao = dbi.onDemand(TaskDao.class);
}
Also used : DataSourceFactory(io.dropwizard.db.DataSourceFactory) DBI(org.skife.jdbi.v2.DBI) DBIFactory(io.dropwizard.jdbi.DBIFactory) Handle(org.skife.jdbi.v2.Handle) Before(org.junit.Before)

Example 68 with Handle

use of org.skife.jdbi.v2.Handle in project dropwizard by dropwizard.

From the class GuavaOptionalOffsetDateTimeTest, method setupTests:

@Before
public void setupTests() throws IOException {
    final DataSourceFactory dataSourceFactory = new DataSourceFactory();
    dataSourceFactory.setDriverClass("org.h2.Driver");
    dataSourceFactory.setUrl("jdbc:h2:mem:guava-offset-date-time-" + System.currentTimeMillis() + "?user=sa");
    dataSourceFactory.setInitialSize(1);
    final DBI dbi = new DBIFactory().build(env, dataSourceFactory, "test");
    try (Handle h = dbi.open()) {
        h.execute("CREATE TABLE IF NOT EXISTS tasks (" + "id INT PRIMARY KEY, " + "assignee VARCHAR(255) NOT NULL, " + "start_date TIMESTAMP, " + "end_date TIMESTAMP, " + "comments VARCHAR(1024) " + ")");
    }
    dao = dbi.onDemand(TaskDao.class);
}
Also used : DataSourceFactory(io.dropwizard.db.DataSourceFactory) DBI(org.skife.jdbi.v2.DBI) DBIFactory(io.dropwizard.jdbi.DBIFactory) Handle(org.skife.jdbi.v2.Handle) Before(org.junit.Before)

Example 69 with Handle

use of org.skife.jdbi.v2.Handle in project dropwizard by dropwizard.

From the class OptionalLongTest, method setupTests:

@Before
public void setupTests() throws IOException {
    final DataSourceFactory dataSourceFactory = new DataSourceFactory();
    dataSourceFactory.setDriverClass("org.h2.Driver");
    dataSourceFactory.setUrl("jdbc:h2:mem:optional-long-" + System.currentTimeMillis() + "?user=sa");
    dataSourceFactory.setInitialSize(1);
    final DBI dbi = new DBIFactory().build(env, dataSourceFactory, "test");
    try (Handle h = dbi.open()) {
        h.execute("CREATE TABLE test (id INT PRIMARY KEY, optional BIGINT)");
    }
    dao = dbi.onDemand(TestDao.class);
}
Also used : DataSourceFactory(io.dropwizard.db.DataSourceFactory) DBI(org.skife.jdbi.v2.DBI) DBIFactory(io.dropwizard.jdbi.DBIFactory) Handle(org.skife.jdbi.v2.Handle) Before(org.junit.Before)
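The setup creates a single test table with a nullable BIGINT column; the TestDao bound afterwards presumably reads and writes it through java.util.OptionalLong. A sketch of what such a jdbi v2 SQL Object interface might look like, assuming dropwizard's DBIFactory registers argument and result mappers for OptionalLong — the method names and queries are illustrative, not the actual test code:

import org.skife.jdbi.v2.sqlobject.Bind;
import org.skife.jdbi.v2.sqlobject.SqlQuery;
import org.skife.jdbi.v2.sqlobject.SqlUpdate;

import java.util.OptionalLong;

// Hypothetical DAO for the test table above. Binding and returning
// java.util.OptionalLong relies on the factories DBIFactory registers;
// this is an assumed shape, not the actual test code.
public interface TestDao {

    @SqlUpdate("INSERT INTO test (id, optional) VALUES (:id, :optional)")
    void insert(@Bind("id") int id, @Bind("optional") OptionalLong optional);

    @SqlQuery("SELECT optional FROM test WHERE id = :id")
    OptionalLong findOptionalById(@Bind("id") int id);
}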

Example 70 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

From the class HadoopConverterJobTest, method setUp:

@Before
public void setUp() throws Exception {
    final MetadataStorageUpdaterJobSpec metadataStorageUpdaterJobSpec = new MetadataStorageUpdaterJobSpec() {

        @Override
        public String getSegmentTable() {
            return derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable();
        }

        @Override
        public MetadataStorageConnectorConfig get() {
            return derbyConnectorRule.getMetadataConnectorConfig();
        }
    };
    final File scratchFileDir = temporaryFolder.newFolder();
    storageLocProperty = System.getProperty(STORAGE_PROPERTY_KEY);
    tmpSegmentDir = temporaryFolder.newFolder();
    System.setProperty(STORAGE_PROPERTY_KEY, tmpSegmentDir.getAbsolutePath());
    final URL url = Preconditions.checkNotNull(Query.class.getClassLoader().getResource("druid.sample.tsv"));
    final File tmpInputFile = temporaryFolder.newFile();
    FileUtils.retryCopy(new ByteSource() {

        @Override
        public InputStream openStream() throws IOException {
            return url.openStream();
        }
    }, tmpInputFile, FileUtils.IS_EXCEPTION, 3);
    final HadoopDruidIndexerConfig hadoopDruidIndexerConfig = new HadoopDruidIndexerConfig(
            new HadoopIngestionSpec(
                    new DataSchema(
                            DATASOURCE,
                            HadoopDruidIndexerConfig.JSON_MAPPER.convertValue(
                                    new StringInputRowParser(
                                            new DelimitedParseSpec(
                                                    new TimestampSpec("ts", "iso", null),
                                                    new DimensionsSpec(DimensionsSpec.getDefaultSchemas(Arrays.asList(TestIndex.DIMENSIONS)), null, null),
                                                    "\t",
                                                    "",
                                                    Arrays.asList(TestIndex.COLUMNS)),
                                            null),
                                    Map.class),
                            new AggregatorFactory[] {
                                    new DoubleSumAggregatorFactory(TestIndex.METRICS[0], TestIndex.METRICS[0]),
                                    new HyperUniquesAggregatorFactory("quality_uniques", "quality")
                            },
                            new UniformGranularitySpec(Granularities.MONTH, Granularities.DAY, ImmutableList.<Interval>of(interval)),
                            HadoopDruidIndexerConfig.JSON_MAPPER),
                    new HadoopIOConfig(
                            ImmutableMap.<String, Object>of("type", "static", "paths", tmpInputFile.getAbsolutePath()),
                            metadataStorageUpdaterJobSpec,
                            tmpSegmentDir.getAbsolutePath()),
                    new HadoopTuningConfig(scratchFileDir.getAbsolutePath(), null, null, null, null, null, false, false, false, false, null, false, false, null, null, null, false, false)));
    metadataStorageTablesConfigSupplier = derbyConnectorRule.metadataTablesConfigSupplier();
    connector = derbyConnectorRule.getConnector();
    try {
        connector.getDBI().withHandle(new HandleCallback<Void>() {

            @Override
            public Void withHandle(Handle handle) throws Exception {
                handle.execute("DROP TABLE druid_segments");
                return null;
            }
        });
    } catch (CallbackFailedException e) {
        // Who cares
    }
    List<Jobby> jobs = ImmutableList.of(new Jobby() {

        @Override
        public boolean run() {
            connector.createSegmentTable(metadataStorageUpdaterJobSpec.getSegmentTable());
            return true;
        }
    }, new HadoopDruidDetermineConfigurationJob(hadoopDruidIndexerConfig), new HadoopDruidIndexerJob(hadoopDruidIndexerConfig, new SQLMetadataStorageUpdaterJobHandler(connector)));
    JobHelper.runJobs(jobs, hadoopDruidIndexerConfig);
}
Also used : HadoopIngestionSpec(io.druid.indexer.HadoopIngestionSpec) HadoopTuningConfig(io.druid.indexer.HadoopTuningConfig) URL(java.net.URL) HadoopIOConfig(io.druid.indexer.HadoopIOConfig) UniformGranularitySpec(io.druid.segment.indexing.granularity.UniformGranularitySpec) TimestampSpec(io.druid.data.input.impl.TimestampSpec) SQLMetadataStorageUpdaterJobHandler(io.druid.indexer.SQLMetadataStorageUpdaterJobHandler) DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) InputStream(java.io.InputStream) DelimitedParseSpec(io.druid.data.input.impl.DelimitedParseSpec) IOException(java.io.IOException) HadoopDruidIndexerConfig(io.druid.indexer.HadoopDruidIndexerConfig) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) Handle(org.skife.jdbi.v2.Handle) DataSchema(io.druid.segment.indexing.DataSchema) Jobby(io.druid.indexer.Jobby) HadoopDruidIndexerJob(io.druid.indexer.HadoopDruidIndexerJob) StringInputRowParser(io.druid.data.input.impl.StringInputRowParser) HyperUniquesAggregatorFactory(io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) ByteSource(com.google.common.io.ByteSource) DimensionsSpec(io.druid.data.input.impl.DimensionsSpec) File(java.io.File) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HadoopDruidDetermineConfigurationJob(io.druid.indexer.HadoopDruidDetermineConfigurationJob) Interval(org.joda.time.Interval) Before(org.junit.Before)
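The examples above show jdbi v2's two handle-management styles side by side: the dropwizard tests open a Handle explicitly in try-with-resources, while the druid test hands a HandleCallback to DBI.withHandle, which opens and closes the Handle itself and wraps any callback failure in CallbackFailedException. A minimal sketch contrasting the two, using an illustrative table name:

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;
import org.skife.jdbi.v2.tweak.HandleCallback;

public final class HandleStyles {

    // Style 1: the caller owns the Handle and closes it with try-with-resources,
    // as in the dropwizard setupTests methods above.
    static void explicitHandle(DBI dbi) {
        try (Handle h = dbi.open()) {
            h.execute("CREATE TABLE IF NOT EXISTS example (id INT PRIMARY KEY)");
        }
    }

    // Style 2: jdbi owns the Handle; any exception thrown by the callback is
    // rethrown wrapped in CallbackFailedException, which the druid setUp
    // deliberately ignores because the table may not exist yet.
    static void callbackHandle(DBI dbi) {
        try {
            dbi.withHandle(new HandleCallback<Void>() {
                @Override
                public Void withHandle(Handle handle) throws Exception {
                    handle.execute("DROP TABLE example");
                    return null;
                }
            });
        } catch (CallbackFailedException e) {
            // expected when the table is absent; ignore
        }
    }
}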

Aggregations

Handle (org.skife.jdbi.v2.Handle): 103
DBI (org.skife.jdbi.v2.DBI): 28
Before (org.junit.Before): 21
IOException (java.io.IOException): 18
List (java.util.List): 17
DataSourceFactory (io.dropwizard.db.DataSourceFactory): 15
DBIFactory (io.dropwizard.jdbi.DBIFactory): 15
SQLException (java.sql.SQLException): 15
Map (java.util.Map): 14
Test (org.junit.Test): 14
Test (org.testng.annotations.Test): 14
DateTime (org.joda.time.DateTime): 13
ArrayList (java.util.ArrayList): 11
TransactionStatus (org.skife.jdbi.v2.TransactionStatus): 11
ResultSet (java.sql.ResultSet): 10
ImmutableList (com.google.common.collect.ImmutableList): 8
UUID (java.util.UUID): 8
CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException): 7
ImmutableSet (com.google.common.collect.ImmutableSet): 6
Set (java.util.Set): 6