Use of jnc.platform.win32.Handle in project presto by prestodb: class TestOrcFileRewriter, method testRewriterDropThenAddDifferentColumns.
/**
 * Exercises the ORC file rewriter across a sequence of schema changes:
 * adding columns, dropping columns, changing types under new column IDs,
 * and deleting rows — verifying row counts and readable data at each step.
 */
@Test
public void testRewriterDropThenAddDifferentColumns()
        throws Exception
{
    FunctionAndTypeManager functionAndTypeManager = createTestFunctionAndTypeManager();
    DBI dbi = new DBI("jdbc:h2:mem:test" + System.nanoTime() + "_" + ThreadLocalRandom.current().nextInt());
    dbi.registerMapper(new TableColumn.Mapper(functionAndTypeManager));
    Handle dummyHandle = dbi.open();
    File dataDir = Files.createTempDir();
    StorageManager storageManager = createOrcStorageManager(dbi, dataDir);

    // Seed file: columns 3 (BIGINT) and 7 (VARCHAR(20)) with four rows
    List<Long> columnIds = ImmutableList.of(3L, 7L);
    List<Type> columnTypes = ImmutableList.of(BIGINT, createVarcharType(20));
    File file = new File(temporary, randomUUID().toString());
    try (FileWriter writer = createFileWriter(columnIds, columnTypes, file, false)) {
        List<Page> pages = rowPagesBuilder(columnTypes)
                .row(1L, "1")
                .row(2L, "2")
                .row(3L, "3")
                .row(4L, "4")
                .build();
        writer.appendPages(pages);
    }

    // Add a column (10, DOUBLE); with no deletes the rewrite is a byte-identical copy
    File newFile1 = new File(temporary, randomUUID().toString());
    FileSystem fileSystem = new LocalOrcDataEnvironment().getFileSystem(DEFAULT_RAPTOR_CONTEXT);
    OrcFileInfo info = createFileRewriter().rewrite(
            fileSystem,
            getColumnTypes(ImmutableList.of(3L, 7L, 10L), ImmutableList.of(BIGINT, createVarcharType(20), DOUBLE)),
            path(file),
            path(newFile1),
            new BitSet(5));
    assertEquals(info.getRowCount(), 4);
    assertEquals(readAllBytes(file.toPath()), readAllBytes(newFile1.toPath()));

    // Drop column 3
    File newFile2 = new File(temporary, randomUUID().toString());
    info = createFileRewriter().rewrite(
            fileSystem,
            getColumnTypes(ImmutableList.of(7L, 10L), ImmutableList.of(createVarcharType(20), DOUBLE)),
            path(newFile1),
            path(newFile2),
            new BitSet(5));
    assertEquals(info.getRowCount(), 4);

    // Optimized writer will keep the only column
    OrcReader orcReader = new OrcReader(
            fileOrcDataSource(newFile2),
            ORC,
            new StorageOrcFileTailSource(),
            new StorageStripeMetadataSource(),
            new RaptorOrcAggregatedMemoryContext(),
            OrcTestingUtil.createDefaultTestConfig(),
            false,
            NO_ENCRYPTION,
            DwrfKeyProvider.EMPTY,
            new RuntimeStats());
    // Bug fix: the original discarded the boolean result of equals(); assert it instead
    assertEquals(orcReader.getColumnNames(), ImmutableList.of("7"));

    // Add a column with a different ID and a different type (13, VARCHAR(5));
    // with no deletes the rewrite is again a byte-identical copy
    File newFile3 = new File(temporary, randomUUID().toString());
    info = createFileRewriter().rewrite(
            fileSystem,
            getColumnTypes(ImmutableList.of(7L, 10L, 13L), ImmutableList.of(createVarcharType(20), DOUBLE, createVarcharType(5))),
            path(newFile2),
            path(newFile3),
            new BitSet(5));
    assertEquals(info.getRowCount(), 4);
    assertEquals(readAllBytes(newFile2.toPath()), readAllBytes(newFile3.toPath()));

    // Get prepared for the final file; make sure it is accessible from storage manager
    UUID uuid = randomUUID();
    File newFile4 = getFileSystemPath(new File(dataDir, "data/storage"), uuid);
    // Optimized ORC writer does not create the file itself
    newFile4.getParentFile().mkdirs();
    newFile4.createNewFile();

    // Drop a column and add a column (18, INTEGER); also delete rows 0, 1 and 3
    BitSet rowsToDelete = new BitSet(5);
    rowsToDelete.set(0);
    rowsToDelete.set(1);
    rowsToDelete.set(3);
    info = createFileRewriter().rewrite(
            fileSystem,
            getColumnTypes(ImmutableList.of(7L, 13L, 18L), ImmutableList.of(createVarcharType(20), createVarcharType(5), INTEGER)),
            path(newFile3),
            path(newFile4),
            rowsToDelete);
    assertEquals(info.getRowCount(), 1);

    // Read the surviving row back through the storage manager
    ConnectorPageSource source = storageManager.getPageSource(
            DEFAULT_RAPTOR_CONTEXT,
            DEFAULT_HIVE_FILE_CONTEXT,
            uuid,
            Optional.empty(),
            false,
            OptionalInt.empty(),
            ImmutableList.of(13L, 7L, 18L),
            ImmutableList.of(createVarcharType(5), createVarcharType(20), INTEGER),
            TupleDomain.all(),
            READER_ATTRIBUTES);
    Page page = null;
    while (page == null) {
        page = source.getNextPage();
    }
    assertEquals(page.getPositionCount(), 1);

    // Column 13L was added after the surviving row was written, so it reads as null
    Block column0 = page.getBlock(0);
    assertTrue(column0.isNull(0));
    // Column 7L holds the only non-deleted row ("3")
    Block column1 = page.getBlock(1);
    assertEquals(createVarcharType(20).getSlice(column1, 0), utf8Slice("3"));
    // Column 18L was also added later, so it reads as null (original comment mislabeled this "8L")
    Block column2 = page.getBlock(2);
    assertTrue(column2.isNull(0));

    // Remove all the columns present in the data
    File newFile5 = new File(temporary, randomUUID().toString());
    info = createFileRewriter().rewrite(
            fileSystem,
            getColumnTypes(ImmutableList.of(13L, 18L), ImmutableList.of(createVarcharType(5), INTEGER)),
            path(newFile4),
            path(newFile5),
            new BitSet(5));
    // Optimized writer will drop the file
    assertEquals(info.getRowCount(), 0);
    assertFalse(newFile5.exists());

    dummyHandle.close();
    deleteRecursively(dataDir.toPath(), ALLOW_INSECURE);
}
Use of jnc.platform.win32.Handle in project presto by prestodb: class RaptorMetadata, method beginInsert.
@Override
public ConnectorInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    // Begins an insert by snapshotting the table's column metadata and opening a shard transaction.
    RaptorTableHandle raptorHandle = (RaptorTableHandle) tableHandle;
    long tableId = raptorHandle.getTableId();

    ImmutableList.Builder<RaptorColumnHandle> handleBuilder = ImmutableList.builder();
    ImmutableList.Builder<Type> typeBuilder = ImmutableList.builder();
    for (TableColumn tableColumn : dao.listTableColumns(tableId)) {
        handleBuilder.add(new RaptorColumnHandle(connectorId, tableColumn.getColumnName(), tableColumn.getColumnId(), tableColumn.getDataType()));
        typeBuilder.add(tableColumn.getDataType());
    }
    List<RaptorColumnHandle> allColumnHandles = handleBuilder.build();

    long transactionId = shardManager.beginTransaction();
    setTransactionId(transactionId);

    Optional<String> externalBatchId = getExternalBatchId(session);
    List<RaptorColumnHandle> sortColumnHandles = getSortColumnHandles(tableId);
    List<RaptorColumnHandle> bucketColumnHandles = getBucketColumnHandles(tableId);

    // Resolve the (at most one) temporal column by its ID, if the table declares one
    Optional<RaptorColumnHandle> temporalColumnHandle = Optional.ofNullable(dao.getTemporalColumnId(tableId))
            .map(temporalColumnId -> getOnlyElement(allColumnHandles.stream()
                    .filter(columnHandle -> columnHandle.getColumnId() == temporalColumnId)
                    .collect(toList())));

    return new RaptorInsertTableHandle(
            connectorId,
            transactionId,
            tableId,
            allColumnHandles,
            typeBuilder.build(),
            externalBatchId,
            sortColumnHandles,
            nCopies(sortColumnHandles.size(), ASC_NULLS_FIRST),
            raptorHandle.getBucketCount(),
            bucketColumnHandles,
            temporalColumnHandle);
}
Use of jnc.platform.win32.Handle in project presto by prestodb: class SchemaDaoUtil, method createTablesWithRetry.
/**
 * Creates (and alters) the metadata schema tables, retrying indefinitely with
 * a fixed delay while the database connection cannot be obtained.
 */
public static void createTablesWithRetry(IDBI dbi)
{
    Duration retryDelay = new Duration(2, TimeUnit.SECONDS);
    while (true) {
        try (Handle handle = dbi.open()) {
            SchemaDao schemaDao = handle.attach(SchemaDao.class);
            createTables(schemaDao);
            alterTables(handle.getConnection(), schemaDao);
            return;
        }
        catch (UnableToObtainConnectionException | SQLTransientException e) {
            // Transient connectivity failure: log and retry after the delay
            log.warn("Failed to connect to database. Will retry again in %s. Exception: %s", retryDelay, e.getMessage());
            sleep(retryDelay);
        }
        catch (SQLException e) {
            // Non-transient SQL failure: give up immediately
            throw new RuntimeException(e);
        }
    }
}
Use of jnc.platform.win32.Handle in project presto by prestodb: class DatabaseShardManager, method getExistingShardUuids.
@Override
public Set<UUID> getExistingShardUuids(long tableId, Set<UUID> shardUuids)
{
    // Bug fix: an empty input set would build "... IN ()", which is invalid SQL
    // and would surface as a wrapped SQLException; no shards can match anyway.
    if (shardUuids.isEmpty()) {
        return ImmutableSet.of();
    }
    try (Handle handle = dbi.open()) {
        // One "?" placeholder per UUID; values are bound, never concatenated
        String args = Joiner.on(",").join(nCopies(shardUuids.size(), "?"));
        String selectShards = format("SELECT shard_uuid FROM %s WHERE shard_uuid IN (%s)", shardIndexTable(tableId), args);

        ImmutableSet.Builder<UUID> existingShards = ImmutableSet.builder();
        try (PreparedStatement statement = handle.getConnection().prepareStatement(selectShards)) {
            bindUuids(statement, shardUuids);
            try (ResultSet rs = statement.executeQuery()) {
                while (rs.next()) {
                    existingShards.add(uuidFromBytes(rs.getBytes("shard_uuid")));
                }
            }
        }
        return existingShards.build();
    }
    catch (SQLException e) {
        throw new RuntimeException(e);
    }
}
Use of jnc.platform.win32.Handle in project killbill by killbill: class EntitySqlDaoTransactionalJdbiWrapper, method execute.
/**
 * Runs the given wrapper inside a database transaction on a freshly obtained
 * JDBI handle, closing the handle when done.
 *
 * @param <ReturnType> object type to return from the transaction
 * @param requestedRO hint as to whether to use the read-only connection
 * @param entitySqlDaoTransactionWrapper transaction to execute
 * @return result from the transaction of type ReturnType
 */
public <ReturnType> ReturnType execute(final boolean requestedRO, final EntitySqlDaoTransactionWrapper<ReturnType> entitySqlDaoTransactionWrapper) {
    // Only compute debug context when debug logging is on
    final String debugInfo = logger.isDebugEnabled() ? getDebugInfo() : null;

    final Handle handle = dbRouter.getHandle(requestedRO);
    logger.debug("DBI handle created, transaction: {}", debugInfo);
    try {
        final EntitySqlDao<EntityModelDao<Entity>, Entity> entitySqlDao = handle.attach(InitialEntitySqlDao.class);

        // The transaction isolation level is now set at the pool level: this avoids 3 roundtrips for each transaction
        // Note that if the pool isn't used (tests or PostgreSQL), the transaction level will depend on the DB configuration
        // return entitySqlDao.inTransaction(TransactionIsolationLevel.READ_COMMITTED, new JdbiTransaction<ReturnType, EntityModelDao<Entity>, Entity>(handle, entitySqlDaoTransactionWrapper));
        final JdbiTransaction<ReturnType, EntityModelDao<Entity>, Entity> jdbiTransaction =
                new JdbiTransaction<ReturnType, EntityModelDao<Entity>, Entity>(handle, entitySqlDaoTransactionWrapper);

        logger.debug("Starting transaction {}", debugInfo);
        final ReturnType result = entitySqlDao.inTransaction(jdbiTransaction);
        logger.debug("Exiting transaction {}, returning {}", debugInfo, result);
        return result;
    } finally {
        // Always release the handle, even if the transaction throws
        handle.close();
        logger.debug("DBI handle closed, transaction: {}", debugInfo);
    }
}
Aggregations