Use of the JDBI Handle (org.skife.jdbi.v2.Handle) in project SpinalTap by Airbnb.
Class MysqlSchemaStore, method getAll.
/**
 * Loads every stored schema version for the given table.
 *
 * <p>Runs the GET_ALL_SCHEMA_BY_TABLE_QUERY (formatted with this store's source) through
 * the retrying executor, records success/failure metrics, and deserializes each row into
 * a {@link MysqlTableSchema} keyed by its version number.
 *
 * @param database database name, used for metrics and error reporting
 * @param table    table name, used for metrics and error reporting
 * @return schema versions keyed (and sorted) by version number
 * @throws RuntimeException wrapping any checked failure from the query; unchecked
 *                          exceptions are rethrown as-is
 */
@Override
public TreeMap<Integer, MysqlTableSchema> getAll(@NotNull final String database, @NotNull final String table) {
    final List<String> serializedSchemas;
    try (Handle handle = jdbi.open()) {
        serializedSchemas = MysqlSchemaUtil.LIST_STRING_RETRYER.call(
                () -> handle.createQuery(String.format(GET_ALL_SCHEMA_BY_TABLE_QUERY, source))
                        .map(StringColumnMapper.INSTANCE)
                        .list());
        metrics.schemaStoreGetSuccess(database, table);
    } catch (Exception ex) {
        log.error(String.format("Failed to get all schema for database: %s table: %s", database, table), ex);
        metrics.schemaStoreGetFailure(database, table, ex);
        // Rethrow unchecked exceptions untouched; wrap checked ones.
        Throwables.throwIfUnchecked(ex);
        throw new RuntimeException(ex);
    }
    final TreeMap<Integer, MysqlTableSchema> schemasByVersion = Maps.newTreeMap();
    for (String serialized : serializedSchemas) {
        MysqlTableSchema schema = deserializeSchemaInfo(serialized);
        schemasByVersion.put(schema.getVersion(), schema);
    }
    return schemasByVersion;
}
Use of the JDBI Handle (org.skife.jdbi.v2.Handle) in project Presto by prestodb.
Class TestOrcFileRewriter, method testRewriterDropThenAddSameColumns.
/**
 * The following test drop and add the same columns; the legacy ORC rewriter will fail due to unchanged schema.
 * However, if we enforce the newly added column to always have the largest ID, this won't happen.
 */
@Test
public void testRewriterDropThenAddSameColumns() throws Exception {
    FunctionAndTypeManager functionAndTypeManager = createTestFunctionAndTypeManager();
    // Unique in-memory H2 database per run so parallel tests don't collide.
    DBI dbi = new DBI("jdbc:h2:mem:test" + System.nanoTime() + "_" + ThreadLocalRandom.current().nextInt());
    dbi.registerMapper(new TableColumn.Mapper(functionAndTypeManager));
    Handle dummyHandle = dbi.open();
    File dataDir = Files.createTempDir();
    try {
        StorageManager storageManager = createOrcStorageManager(dbi, dataDir);
        List<Long> columnIds = ImmutableList.of(3L, 7L);
        List<Type> columnTypes = ImmutableList.of(BIGINT, createVarcharType(20));
        File file = new File(temporary, randomUUID().toString());
        try (FileWriter writer = createFileWriter(columnIds, columnTypes, file, false)) {
            List<Page> pages = rowPagesBuilder(columnTypes).row(2L, "2").build();
            writer.appendPages(pages);
        }
        // Add a column
        File newFile1 = new File(temporary, randomUUID().toString());
        FileSystem fileSystem = new LocalOrcDataEnvironment().getFileSystem(DEFAULT_RAPTOR_CONTEXT);
        OrcFileInfo info = createFileRewriter().rewrite(fileSystem, getColumnTypes(ImmutableList.of(3L, 7L, 10L), ImmutableList.of(BIGINT, createVarcharType(20), DOUBLE)), path(file), path(newFile1), new BitSet(5));
        assertEquals(info.getRowCount(), 1);
        // Drop a column
        File newFile2 = new File(temporary, randomUUID().toString());
        info = createFileRewriter().rewrite(fileSystem, getColumnTypes(ImmutableList.of(7L, 10L), ImmutableList.of(createVarcharType(20), DOUBLE)), path(newFile1), path(newFile2), new BitSet(5));
        assertEquals(info.getRowCount(), 1);
        // Add a column with the same ID but different type
        File newFile3 = new File(temporary, randomUUID().toString());
        info = createFileRewriter().rewrite(fileSystem, getColumnTypes(ImmutableList.of(7L, 10L, 3L), ImmutableList.of(createVarcharType(20), DOUBLE, createVarcharType(5))), path(newFile2), path(newFile3), new BitSet(5));
        assertEquals(info.getRowCount(), 1);
        // Get prepared for the final file; make sure it is accessible from storage manager
        UUID uuid = randomUUID();
        File newFile4 = getFileSystemPath(new File(dataDir, "data/storage"), uuid);
        // Optimized ORC writer does not create the file itself
        newFile4.getParentFile().mkdirs();
        newFile4.createNewFile();
        // Drop a column and add a column
        info = createFileRewriter().rewrite(fileSystem, getColumnTypes(ImmutableList.of(7L, 3L, 8L), ImmutableList.of(createVarcharType(20), createVarcharType(5), INTEGER)), path(newFile3), path(newFile4), new BitSet(5));
        assertEquals(info.getRowCount(), 1);
        try (ConnectorPageSource source = storageManager.getPageSource(DEFAULT_RAPTOR_CONTEXT, DEFAULT_HIVE_FILE_CONTEXT, uuid, Optional.empty(), false, OptionalInt.empty(), ImmutableList.of(3L, 7L, 8L), ImmutableList.of(createVarcharType(5), createVarcharType(20), INTEGER), TupleDomain.all(), READER_ATTRIBUTES)) {
            Page page = null;
            while (page == null) {
                page = source.getNextPage();
            }
            assertEquals(page.getPositionCount(), 1);
            try {
                // Column 3L was dropped and re-added, so its original value must be gone
                Block column0 = page.getBlock(0);
                assertTrue(column0.isNull(0));
                // Column 7L survived every rewrite and keeps its value
                Block column1 = page.getBlock(1);
                assertEquals(createVarcharType(20).getSlice(column1, 0), utf8Slice("2"));
                // Column 8L is brand new and therefore null
                Block column2 = page.getBlock(2);
                assertTrue(column2.isNull(0));
            } catch (UnsupportedOperationException e) {
                // Optimized ORC rewriter will respect the schema
                fail();
            }
        }
    } finally {
        // BUG FIX: cleanup previously ran inside the try block, so a failed assertion
        // (or the fail() path) leaked the H2 handle and the temp data directory.
        dummyHandle.close();
        deleteRecursively(dataDir.toPath(), ALLOW_INSECURE);
    }
}
Use of the JDBI Handle (org.skife.jdbi.v2.Handle) in project Presto by prestodb.
Class DatabaseShardManager, method dropTable.
/**
 * Drops all metadata for a table: its shards, shard-node assignments, organizer jobs,
 * column definitions, and the table row itself — all within one transaction — then
 * drops the per-table shard index table outside the transaction.
 *
 * @param tableId id of the table to drop
 */
@Override
public void dropTable(long tableId) {
    runTransaction(dbi, (txnHandle, txnStatus) -> {
        // Serialize against concurrent writers before mutating table metadata.
        lockTable(txnHandle, tableId);

        ShardDao shards = shardDaoSupplier.attach(txnHandle);
        shards.insertDeletedShards(tableId);
        shards.dropShardNodes(tableId);
        shards.dropShards(tableId);

        txnHandle.attach(ShardOrganizerDao.class).dropOrganizerJobs(tableId);

        MetadataDao metadata = txnHandle.attach(MetadataDao.class);
        metadata.dropColumns(tableId);
        metadata.dropTable(tableId);
        return null;
    });
    // It is not possible to drop the index tables in a transaction.
    try (Handle handle = dbi.open()) {
        handle.execute("DROP TABLE " + shardIndexTable(tableId));
    } catch (DBIException e) {
        // Best effort: an orphaned index table is logged, not fatal.
        log.warn(e, "Failed to drop index table %s", shardIndexTable(tableId));
    }
}
Use of the JDBI Handle (org.skife.jdbi.v2.Handle) in project Presto by prestodb.
Class DatabaseShardManager, method createTable.
/**
 * Creates the per-table shard index table (named by shardIndexTable(tableId)) that tracks
 * shard identity, placement, and per-column min/max statistics used for pruning.
 *
 * @param tableId                  id of the table whose index table is created
 * @param columns                  table columns; each SQL-mappable type contributes a min and a max column
 * @param bucketed                 if true the table is bucketed (index keyed by bucket_number);
 *                                 otherwise index carries node_ids placement info
 * @param temporalColumnId         optional temporal column whose min/max lead the covering index
 * @param tableSupportsDeltaDelete if true an optional delta_shard_uuid column is added
 * @throws RuntimeException (via metadataError) if the DDL fails
 */
@Override
public void createTable(long tableId, List<ColumnInfo> columns, boolean bucketed, OptionalLong temporalColumnId, boolean tableSupportsDeltaDelete) {
// Min/max statistics columns; setEmptyValue("") makes the joiner vanish entirely
// when no column has a SQL-mappable type.
StringJoiner tableColumns = new StringJoiner(",\n ", " ", ",\n").setEmptyValue("");
for (ColumnInfo column : columns) {
String columnType = sqlColumnType(column.getType());
// Types with no SQL mapping get no statistics columns.
if (columnType != null) {
tableColumns.add(minColumn(column.getColumnId()) + " " + columnType);
tableColumns.add(maxColumn(column.getColumnId()) + " " + columnType);
}
}
StringJoiner coveringIndexColumns = new StringJoiner(", ");
// Add the max temporal column first to accelerate queries that usually scan recent data
temporalColumnId.ifPresent(id -> coveringIndexColumns.add(maxColumn(id)));
temporalColumnId.ifPresent(id -> coveringIndexColumns.add(minColumn(id)));
// Placement column differs by layout: bucketed tables key on bucket_number,
// unbucketed ones on the packed node_ids blob.
if (bucketed) {
coveringIndexColumns.add("bucket_number");
} else {
coveringIndexColumns.add("node_ids");
}
coveringIndexColumns.add("shard_id").add("shard_uuid");
// NOTE(review): the assembled DDL is order-sensitive; the trailing commas emitted by
// tableColumns pair with the literal fragments below — do not reformat.
String sql = "" + "CREATE TABLE " + shardIndexTable(tableId) + " (\n" + " shard_id BIGINT NOT NULL,\n" + " shard_uuid BINARY(16) NOT NULL,\n" + (tableSupportsDeltaDelete ? " delta_shard_uuid BINARY(16) DEFAULT NULL,\n" : "") + (bucketed ? " bucket_number INT NOT NULL\n," : " node_ids VARBINARY(128) NOT NULL,\n") + tableColumns + (bucketed ? " PRIMARY KEY (bucket_number, shard_uuid),\n" : " PRIMARY KEY (node_ids, shard_uuid),\n") + " UNIQUE (shard_id),\n" + " UNIQUE (shard_uuid),\n" + " UNIQUE (" + coveringIndexColumns + ")\n" + ")";
try (Handle handle = dbi.open()) {
handle.execute(sql);
} catch (DBIException e) {
throw metadataError(e);
}
}
Use of the JDBI Handle (org.skife.jdbi.v2.Handle) in project Presto by prestodb.
Class DatabaseShardManager, method addColumn.
/**
 * Adds the min/max statistics columns for a newly added table column to the shard
 * index table, retrying transient failures up to MAX_ADD_COLUMN_ATTEMPTS times.
 *
 * @param tableId id of the table whose index table is altered
 * @param column  the newly added column; ignored if its type has no SQL mapping
 * @throws RuntimeException (via metadataError) after exhausting retries or on interruption
 */
@Override
public void addColumn(long tableId, ColumnInfo column) {
    String columnType = sqlColumnType(column.getType());
    if (columnType == null) {
        // No SQL mapping means no statistics columns are maintained for this type.
        return;
    }
    String sql = format("ALTER TABLE %s ADD COLUMN (%s %s, %s %s)", shardIndexTable(tableId), minColumn(column.getColumnId()), columnType, maxColumn(column.getColumnId()), columnType);
    int attempts = 0;
    while (true) {
        attempts++;
        try (Handle handle = dbi.open()) {
            handle.execute(sql);
            // BUG FIX: return on success. The original fell through, re-ran the ALTER
            // on the next iteration, and relied on the resulting duplicate-column error
            // (isSyntaxOrAccessError) to terminate — a wasted round-trip and fragile exit.
            return;
        } catch (DBIException e) {
            if (isSyntaxOrAccessError(e)) {
                // exit when column already exists
                return;
            }
            if (attempts >= MAX_ADD_COLUMN_ATTEMPTS) {
                throw metadataError(e);
            }
            log.warn(e, "Failed to alter table on attempt %s, will retry. SQL: %s", attempts, sql);
            try {
                SECONDS.sleep(3);
            } catch (InterruptedException ie) {
                // Restore the interrupt flag before surfacing the failure.
                Thread.currentThread().interrupt();
                throw metadataError(ie);
            }
        }
    }
}
Aggregations