Search in sources:

Example 81 with Handle

use of org.skife.jdbi.v2.Handle in project SpinalTap by airbnb.

the class MysqlSchemaStore method getAll.

@Override
public TreeMap<Integer, MysqlTableSchema> getAll(@NotNull final String database, @NotNull final String table) {
    TreeMap<Integer, MysqlTableSchema> allSchemaVersions = Maps.newTreeMap();
    List<String> allSchemaInfo;
    try (Handle handle = jdbi.open()) {
        allSchemaInfo = MysqlSchemaUtil.LIST_STRING_RETRYER.call(() ->
                handle.createQuery(String.format(GET_ALL_SCHEMA_BY_TABLE_QUERY, source))
                        .map(StringColumnMapper.INSTANCE)
                        .list());
        metrics.schemaStoreGetSuccess(database, table);
    } catch (Exception ex) {
        log.error(String.format("Failed to get all schema for database: %s table: %s", database, table), ex);
        metrics.schemaStoreGetFailure(database, table, ex);
        Throwables.throwIfUnchecked(ex);
        throw new RuntimeException(ex);
    }
    allSchemaInfo.stream()
            .map(MysqlSchemaStore::deserializeSchemaInfo)
            .forEach(schemaInfo -> allSchemaVersions.put(schemaInfo.getVersion(), schemaInfo));
    return allSchemaVersions;
}
Also used : Handle(org.skife.jdbi.v2.Handle)
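
Stripped of the retryer and the metrics hooks, the JDBI pattern in getAll() is: open a Handle with try-with-resources, run a query, and map a single string column with StringColumnMapper. Below is a minimal, self-contained sketch of that pattern; the schema_info table, its columns, and the H2 in-memory URL are hypothetical stand-ins, and it assumes the H2 driver is on the classpath.

import java.util.List;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.util.StringColumnMapper;

public class HandleQuerySketch {
    public static void main(String[] args) {
        // Hypothetical in-memory H2 database; DB_CLOSE_DELAY=-1 keeps it alive after the first Handle closes.
        DBI dbi = new DBI("jdbc:h2:mem:schema_store_demo;DB_CLOSE_DELAY=-1");

        // Handle is Closeable, so try-with-resources returns the connection even if the query throws.
        try (Handle handle = dbi.open()) {
            handle.execute("CREATE TABLE schema_info (source VARCHAR(64), payload VARCHAR(1024))");
            handle.execute("INSERT INTO schema_info (source, payload) VALUES (?, ?)", "demo", "{\"version\":1}");

            // Same shape as getAll(): map a single column straight into a List<String>.
            List<String> payloads = handle
                    .createQuery("SELECT payload FROM schema_info WHERE source = :source")
                    .bind("source", "demo")
                    .map(StringColumnMapper.INSTANCE)
                    .list();

            System.out.println(payloads);
        }
    }
}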

Example 82 with Handle

use of org.skife.jdbi.v2.Handle in project presto by prestodb.

the class TestOrcFileRewriter method testRewriterDropThenAddSameColumns.

/**
 * The following test drops and adds back the same columns; the legacy ORC rewriter would fail because the schema is unchanged.
 * However, if we enforce that the newly added column always gets the largest ID, this won't happen.
 */
@Test
public void testRewriterDropThenAddSameColumns() throws Exception {
    FunctionAndTypeManager functionAndTypeManager = createTestFunctionAndTypeManager();
    DBI dbi = new DBI("jdbc:h2:mem:test" + System.nanoTime() + "_" + ThreadLocalRandom.current().nextInt());
    dbi.registerMapper(new TableColumn.Mapper(functionAndTypeManager));
    Handle dummyHandle = dbi.open();
    File dataDir = Files.createTempDir();
    StorageManager storageManager = createOrcStorageManager(dbi, dataDir);
    List<Long> columnIds = ImmutableList.of(3L, 7L);
    List<Type> columnTypes = ImmutableList.of(BIGINT, createVarcharType(20));
    File file = new File(temporary, randomUUID().toString());
    try (FileWriter writer = createFileWriter(columnIds, columnTypes, file, false)) {
        List<Page> pages = rowPagesBuilder(columnTypes).row(2L, "2").build();
        writer.appendPages(pages);
    }
    // Add a column
    File newFile1 = new File(temporary, randomUUID().toString());
    FileSystem fileSystem = new LocalOrcDataEnvironment().getFileSystem(DEFAULT_RAPTOR_CONTEXT);
    OrcFileInfo info = createFileRewriter().rewrite(
            fileSystem,
            getColumnTypes(ImmutableList.of(3L, 7L, 10L), ImmutableList.of(BIGINT, createVarcharType(20), DOUBLE)),
            path(file),
            path(newFile1),
            new BitSet(5));
    assertEquals(info.getRowCount(), 1);
    // Drop a column
    File newFile2 = new File(temporary, randomUUID().toString());
    info = createFileRewriter().rewrite(
            fileSystem,
            getColumnTypes(ImmutableList.of(7L, 10L), ImmutableList.of(createVarcharType(20), DOUBLE)),
            path(newFile1),
            path(newFile2),
            new BitSet(5));
    assertEquals(info.getRowCount(), 1);
    // Add a column with the same ID but different type
    File newFile3 = new File(temporary, randomUUID().toString());
    info = createFileRewriter().rewrite(
            fileSystem,
            getColumnTypes(ImmutableList.of(7L, 10L, 3L), ImmutableList.of(createVarcharType(20), DOUBLE, createVarcharType(5))),
            path(newFile2),
            path(newFile3),
            new BitSet(5));
    assertEquals(info.getRowCount(), 1);
    // Get prepared for the final file; make sure it is accessible from storage manager
    UUID uuid = randomUUID();
    File newFile4 = getFileSystemPath(new File(dataDir, "data/storage"), uuid);
    // Optimized ORC writer does not create the file itself
    newFile4.getParentFile().mkdirs();
    newFile4.createNewFile();
    // Drop a column and add a column
    info = createFileRewriter().rewrite(
            fileSystem,
            getColumnTypes(ImmutableList.of(7L, 3L, 8L), ImmutableList.of(createVarcharType(20), createVarcharType(5), INTEGER)),
            path(newFile3),
            path(newFile4),
            new BitSet(5));
    assertEquals(info.getRowCount(), 1);
    ConnectorPageSource source = storageManager.getPageSource(
            DEFAULT_RAPTOR_CONTEXT,
            DEFAULT_HIVE_FILE_CONTEXT,
            uuid,
            Optional.empty(),
            false,
            OptionalInt.empty(),
            ImmutableList.of(3L, 7L, 8L),
            ImmutableList.of(createVarcharType(5), createVarcharType(20), INTEGER),
            TupleDomain.all(),
            READER_ATTRIBUTES);
    Page page = null;
    while (page == null) {
        page = source.getNextPage();
    }
    assertEquals(page.getPositionCount(), 1);
    try {
        // Column 3L
        Block column0 = page.getBlock(0);
        assertTrue(column0.isNull(0));
        // Column 7L
        Block column1 = page.getBlock(1);
        assertEquals(createVarcharType(20).getSlice(column1, 0), utf8Slice("2"));
        // Column 8L
        Block column2 = page.getBlock(2);
        assertTrue(column2.isNull(0));
        dummyHandle.close();
        deleteRecursively(dataDir.toPath(), ALLOW_INSECURE);
    } catch (UnsupportedOperationException e) {
        // Optimized ORC rewriter will respect the schema
        fail();
    }
}
Also used : TestOrcStorageManager.createOrcStorageManager(com.facebook.presto.raptor.storage.TestOrcStorageManager.createOrcStorageManager) BitSet(java.util.BitSet) DBI(org.skife.jdbi.v2.DBI) Page(com.facebook.presto.common.Page) ConnectorPageSource(com.facebook.presto.spi.ConnectorPageSource) TableColumn(com.facebook.presto.raptor.metadata.TableColumn) Handle(org.skife.jdbi.v2.Handle) DecimalType(com.facebook.presto.common.type.DecimalType) VarcharType.createVarcharType(com.facebook.presto.common.type.VarcharType.createVarcharType) ArrayType(com.facebook.presto.common.type.ArrayType) Type(com.facebook.presto.common.type.Type) FunctionAndTypeManager(com.facebook.presto.metadata.FunctionAndTypeManager) FunctionAndTypeManager.createTestFunctionAndTypeManager(com.facebook.presto.metadata.FunctionAndTypeManager.createTestFunctionAndTypeManager) FileSystem(org.apache.hadoop.fs.FileSystem) LocalOrcDataEnvironment(com.facebook.presto.raptor.filesystem.LocalOrcDataEnvironment) Block(com.facebook.presto.common.block.Block) UUID(java.util.UUID) UUID.randomUUID(java.util.UUID.randomUUID) File(java.io.File) Test(org.testng.annotations.Test)
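
The Handle here is never used for queries; holding it open keeps the in-memory H2 metadata database alive for the duration of the test (H2 discards a mem: database once its last connection closes), and dummyHandle.close() releases it during cleanup. A minimal sketch of that idiom, with a hypothetical demo table standing in for the Raptor metadata tables:

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;

public class DummyHandleSketch {
    public static void main(String[] args) {
        DBI dbi = new DBI("jdbc:h2:mem:test_" + System.nanoTime());

        // Keep one connection open so H2 does not discard the mem: database
        // between the short-lived Handles below.
        Handle dummyHandle = dbi.open();
        try {
            try (Handle setup = dbi.open()) {
                setup.execute("CREATE TABLE demo (id BIGINT)");
            }
            try (Handle later = dbi.open()) {
                // Still works: dummyHandle kept the database (and the table) alive.
                later.execute("INSERT INTO demo (id) VALUES (1)");
            }
        } finally {
            dummyHandle.close();
        }
    }
}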

Example 83 with Handle

use of org.skife.jdbi.v2.Handle in project presto by prestodb.

the class DatabaseShardManager method dropTable.

@Override
public void dropTable(long tableId) {
    runTransaction(dbi, (handle, status) -> {
        lockTable(handle, tableId);
        ShardDao shardDao = shardDaoSupplier.attach(handle);
        shardDao.insertDeletedShards(tableId);
        shardDao.dropShardNodes(tableId);
        shardDao.dropShards(tableId);
        handle.attach(ShardOrganizerDao.class).dropOrganizerJobs(tableId);
        MetadataDao dao = handle.attach(MetadataDao.class);
        dao.dropColumns(tableId);
        dao.dropTable(tableId);
        return null;
    });
    // It is not possible to drop the index tables in a transaction.
    try (Handle handle = dbi.open()) {
        handle.execute("DROP TABLE " + shardIndexTable(tableId));
    } catch (DBIException e) {
        log.warn(e, "Failed to drop index table %s", shardIndexTable(tableId));
    }
}
Also used : ShardOrganizerDao(com.facebook.presto.raptor.storage.organization.ShardOrganizerDao) DBIException(org.skife.jdbi.v2.exceptions.DBIException) RaptorColumnHandle(com.facebook.presto.raptor.RaptorColumnHandle) Handle(org.skife.jdbi.v2.Handle)
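
dropTable() splits the work in two: the metadata rows are removed inside a single transaction, while the per-table index table is dropped through a separate Handle, because the DROP TABLE cannot be rolled back with the rest of the transaction. Below is a stripped-down sketch of that two-step shape, using plain SQL through dbi.inTransaction() in place of the project's runTransaction() helper and DAOs; the table names are hypothetical.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.exceptions.DBIException;

public class DropTableSketch {
    public static void main(String[] args) {
        // Hypothetical schema; DB_CLOSE_DELAY=-1 keeps the mem: database alive across Handles.
        DBI dbi = new DBI("jdbc:h2:mem:drop_demo;DB_CLOSE_DELAY=-1");
        try (Handle setup = dbi.open()) {
            setup.execute("CREATE TABLE table_metadata (table_id BIGINT)");
            setup.execute("INSERT INTO table_metadata (table_id) VALUES (42)");
            setup.execute("CREATE TABLE index_42 (shard_id BIGINT)");
        }

        long tableId = 42;

        // Step 1: metadata cleanup inside one transaction (rolls back as a unit on failure).
        dbi.inTransaction((handle, status) -> {
            handle.execute("DELETE FROM table_metadata WHERE table_id = ?", tableId);
            return null;
        });

        // Step 2: drop the index table outside the transaction, downgrading failure to a warning.
        try (Handle handle = dbi.open()) {
            handle.execute("DROP TABLE index_" + tableId);
        } catch (DBIException e) {
            System.err.println("Failed to drop index table index_" + tableId + ": " + e.getMessage());
        }
    }
}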

Example 84 with Handle

use of org.skife.jdbi.v2.Handle in project presto by prestodb.

the class DatabaseShardManager method createTable.

@Override
public void createTable(long tableId, List<ColumnInfo> columns, boolean bucketed, OptionalLong temporalColumnId, boolean tableSupportsDeltaDelete) {
    StringJoiner tableColumns = new StringJoiner(",\n  ", "  ", ",\n").setEmptyValue("");
    for (ColumnInfo column : columns) {
        String columnType = sqlColumnType(column.getType());
        if (columnType != null) {
            tableColumns.add(minColumn(column.getColumnId()) + " " + columnType);
            tableColumns.add(maxColumn(column.getColumnId()) + " " + columnType);
        }
    }
    StringJoiner coveringIndexColumns = new StringJoiner(", ");
    // Add the max temporal column first to accelerate queries that usually scan recent data
    temporalColumnId.ifPresent(id -> coveringIndexColumns.add(maxColumn(id)));
    temporalColumnId.ifPresent(id -> coveringIndexColumns.add(minColumn(id)));
    if (bucketed) {
        coveringIndexColumns.add("bucket_number");
    } else {
        coveringIndexColumns.add("node_ids");
    }
    coveringIndexColumns.add("shard_id").add("shard_uuid");
    String sql = "" + "CREATE TABLE " + shardIndexTable(tableId) + " (\n" + "  shard_id BIGINT NOT NULL,\n" + "  shard_uuid BINARY(16) NOT NULL,\n" + (tableSupportsDeltaDelete ? "  delta_shard_uuid BINARY(16) DEFAULT NULL,\n" : "") + (bucketed ? "  bucket_number INT NOT NULL\n," : "  node_ids VARBINARY(128) NOT NULL,\n") + tableColumns + (bucketed ? "  PRIMARY KEY (bucket_number, shard_uuid),\n" : "  PRIMARY KEY (node_ids, shard_uuid),\n") + "  UNIQUE (shard_id),\n" + "  UNIQUE (shard_uuid),\n" + "  UNIQUE (" + coveringIndexColumns + ")\n" + ")";
    try (Handle handle = dbi.open()) {
        handle.execute(sql);
    } catch (DBIException e) {
        throw metadataError(e);
    }
}
Also used : DBIException(org.skife.jdbi.v2.exceptions.DBIException) StringJoiner(java.util.StringJoiner) RaptorColumnHandle(com.facebook.presto.raptor.RaptorColumnHandle) Handle(org.skife.jdbi.v2.Handle)
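
The notable part of createTable() is how the DDL is assembled: a StringJoiner with setEmptyValue("") lets the optional min/max column block vanish cleanly when no column is indexable, and the bucketed flag swaps the bucket_number and node_ids pieces. A reduced sketch of that assembly with hypothetical column names, executed through a short-lived Handle (H2 in-memory database assumed):

import java.util.StringJoiner;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.exceptions.DBIException;

public class CreateIndexTableSketch {
    public static void main(String[] args) {
        DBI dbi = new DBI("jdbc:h2:mem:create_demo;DB_CLOSE_DELAY=-1");

        long tableId = 7;
        boolean bucketed = true;

        // Joined with ",\n  " and a trailing ",\n"; setEmptyValue("") drops the block entirely
        // when no min/max columns are added, mirroring createTable().
        StringJoiner tableColumns = new StringJoiner(",\n  ", "  ", ",\n").setEmptyValue("");
        tableColumns.add("c1_min BIGINT");
        tableColumns.add("c1_max BIGINT");

        String sql = ""
                + "CREATE TABLE index_" + tableId + " (\n"
                + "  shard_id BIGINT NOT NULL,\n"
                + "  shard_uuid BINARY(16) NOT NULL,\n"
                + (bucketed ? "  bucket_number INT NOT NULL,\n" : "  node_ids VARBINARY(128) NOT NULL,\n")
                + tableColumns
                + "  UNIQUE (shard_id),\n"
                + "  UNIQUE (shard_uuid)\n"
                + ")";

        try (Handle handle = dbi.open()) {
            handle.execute(sql);
        } catch (DBIException e) {
            throw new RuntimeException("Failed to create index_" + tableId, e);
        }
    }
}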

Example 85 with Handle

use of org.skife.jdbi.v2.Handle in project presto by prestodb.

the class DatabaseShardManager method addColumn.

@Override
public void addColumn(long tableId, ColumnInfo column) {
    String columnType = sqlColumnType(column.getType());
    if (columnType == null) {
        return;
    }
    String sql = format("ALTER TABLE %s ADD COLUMN (%s %s, %s %s)",
            shardIndexTable(tableId),
            minColumn(column.getColumnId()), columnType,
            maxColumn(column.getColumnId()), columnType);
    int attempts = 0;
    while (true) {
        attempts++;
        try (Handle handle = dbi.open()) {
            handle.execute(sql);
        } catch (DBIException e) {
            if (isSyntaxOrAccessError(e)) {
                // exit when column already exists
                return;
            }
            if (attempts >= MAX_ADD_COLUMN_ATTEMPTS) {
                throw metadataError(e);
            }
            log.warn(e, "Failed to alter table on attempt %s, will retry. SQL: %s", attempts, sql);
            try {
                SECONDS.sleep(3);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw metadataError(ie);
            }
            // Retry the ALTER after the back-off.
            continue;
        }
        // Success: exit the retry loop.
        return;
    }
}
Also used : DBIException(org.skife.jdbi.v2.exceptions.DBIException) RaptorColumnHandle(com.facebook.presto.raptor.RaptorColumnHandle) Handle(org.skife.jdbi.v2.Handle)
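
addColumn() wraps the ALTER TABLE in a bounded retry loop: a syntax/access error is taken to mean the column already exists (an earlier attempt or another node won the race) and is treated as success, while any other failure is retried a few times with a short sleep. A minimal sketch of that retry shape around handle.execute(); the isColumnAlreadyExists() classifier and the H2-flavored single-column ALTER are hypothetical stand-ins for the project's isSyntaxOrAccessError() and MySQL syntax.

import static java.util.concurrent.TimeUnit.SECONDS;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.exceptions.DBIException;

public class AddColumnRetrySketch {
    private static final int MAX_ATTEMPTS = 3;

    public static void main(String[] args) throws Exception {
        DBI dbi = new DBI("jdbc:h2:mem:alter_demo;DB_CLOSE_DELAY=-1");
        try (Handle setup = dbi.open()) {
            setup.execute("CREATE TABLE index_7 (shard_id BIGINT NOT NULL)");
        }

        String sql = "ALTER TABLE index_7 ADD COLUMN c1_min BIGINT";
        int attempts = 0;
        while (true) {
            attempts++;
            try (Handle handle = dbi.open()) {
                handle.execute(sql);
            } catch (DBIException e) {
                if (isColumnAlreadyExists(e)) {
                    // Another attempt (or another node) already added the column.
                    return;
                }
                if (attempts >= MAX_ATTEMPTS) {
                    throw e;
                }
                System.err.printf("ALTER failed on attempt %d, retrying: %s%n", attempts, e.getMessage());
                SECONDS.sleep(3);
                continue;
            }
            // Success: stop retrying.
            return;
        }
    }

    // Hypothetical stand-in for DatabaseShardManager.isSyntaxOrAccessError().
    private static boolean isColumnAlreadyExists(DBIException e) {
        String message = String.valueOf(e.getMessage());
        return message.contains("Duplicate column") || message.contains("already exists");
    }
}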

Aggregations

Handle (org.skife.jdbi.v2.Handle): 103
DBI (org.skife.jdbi.v2.DBI): 28
Before (org.junit.Before): 21
IOException (java.io.IOException): 18
List (java.util.List): 17
DataSourceFactory (io.dropwizard.db.DataSourceFactory): 15
DBIFactory (io.dropwizard.jdbi.DBIFactory): 15
SQLException (java.sql.SQLException): 15
Map (java.util.Map): 14
Test (org.junit.Test): 14
Test (org.testng.annotations.Test): 14
DateTime (org.joda.time.DateTime): 13
ArrayList (java.util.ArrayList): 11
TransactionStatus (org.skife.jdbi.v2.TransactionStatus): 11
ResultSet (java.sql.ResultSet): 10
ImmutableList (com.google.common.collect.ImmutableList): 8
UUID (java.util.UUID): 8
CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException): 7
ImmutableSet (com.google.common.collect.ImmutableSet): 6
Set (java.util.Set): 6