Use of org.apache.hadoop.mapred.FileSplit in project presto by prestodb.
From the class TestCustomSplitConversionUtils, method testHudiRealtimeBootstrapBaseFileSplitConverter.
@Test
public void testHudiRealtimeBootstrapBaseFileSplitConverter()
        throws IOException
{
    List<String> deltaLogPaths = Arrays.asList("test1", "test2", "test3");
    String maxCommitTime = "max_commit_time";
    Path bootstrapSourceFilePath = new Path("/test/source/test.parquet");
    long bootstrapSourceSplitStartPos = 0L;
    long bootstrapSourceSplitLength = 200L;
    FileSplit baseSplit = new FileSplit(FILE_PATH, SPLIT_START_POS, SPLIT_LENGTH, SPLIT_HOSTS);
    FileSplit bootstrapSourceSplit = new FileSplit(bootstrapSourceFilePath, bootstrapSourceSplitStartPos, bootstrapSourceSplitLength, new String[0]);
    FileSplit hudiSplit = new RealtimeBootstrapBaseFileSplit(baseSplit, BASE_PATH, deltaLogPaths, maxCommitTime, bootstrapSourceSplit);

    // Test conversion of HudiSplit -> customSplitInfo
    Map<String, String> customSplitInfo = CustomSplitConversionUtils.extractCustomSplitInfo(hudiSplit);

    // Test conversion of (customSplitInfo + baseSplit) -> HudiSplit
    RealtimeBootstrapBaseFileSplit recreatedSplit = (RealtimeBootstrapBaseFileSplit) CustomSplitConversionUtils.recreateSplitWithCustomInfo(baseSplit, customSplitInfo);
    assertEquals(FILE_PATH, recreatedSplit.getPath());
    assertEquals(SPLIT_START_POS, recreatedSplit.getStart());
    assertEquals(SPLIT_LENGTH, recreatedSplit.getLength());
    assertEquals(SPLIT_HOSTS, recreatedSplit.getLocations());
    assertEquals(BASE_PATH, recreatedSplit.getBasePath());
    assertEquals(deltaLogPaths, recreatedSplit.getDeltaLogPaths());
    assertEquals(maxCommitTime, recreatedSplit.getMaxCommitTime());
    assertEquals(bootstrapSourceFilePath, recreatedSplit.getBootstrapFileSplit().getPath());
    assertEquals(bootstrapSourceSplitStartPos, recreatedSplit.getBootstrapFileSplit().getStart());
    assertEquals(bootstrapSourceSplitLength, recreatedSplit.getBootstrapFileSplit().getLength());
}
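The constants FILE_PATH, SPLIT_START_POS, SPLIT_LENGTH, SPLIT_HOSTS, and BASE_PATH are fixtures defined elsewhere in TestCustomSplitConversionUtils. A minimal sketch of what such declarations could look like, with every value chosen purely for illustration:

    // Hypothetical fixture values; the actual constants live in the real test class.
    private static final String BASE_PATH = "/test";
    private static final Path FILE_PATH = new Path(BASE_PATH, "test.parquet");
    private static final long SPLIT_START_POS = 0L;
    private static final long SPLIT_LENGTH = 100L;
    private static final String[] SPLIT_HOSTS = new String[] {"host1", "host2"};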
Use of org.apache.hadoop.mapred.FileSplit in project presto by prestodb.
From the class TestCustomSplitConversionUtils, method testHudiBootstrapBaseFileSplitConverter.
@Test
public void testHudiBootstrapBaseFileSplitConverter()
        throws IOException
{
    Path bootstrapSourceFilePath = new Path("/test/source/test.parquet");
    long bootstrapSourceSplitStartPos = 0L;
    long bootstrapSourceSplitLength = 200L;
    FileSplit baseSplit = new FileSplit(FILE_PATH, SPLIT_START_POS, SPLIT_LENGTH, SPLIT_HOSTS);
    FileSplit bootstrapSourceSplit = new FileSplit(bootstrapSourceFilePath, bootstrapSourceSplitStartPos, bootstrapSourceSplitLength, new String[0]);
    FileSplit hudiSplit = new BootstrapBaseFileSplit(baseSplit, bootstrapSourceSplit);

    // Test conversion of HudiSplit -> customSplitInfo
    Map<String, String> customSplitInfo = CustomSplitConversionUtils.extractCustomSplitInfo(hudiSplit);

    // Test conversion of (customSplitInfo + baseSplit) -> HudiSplit
    BootstrapBaseFileSplit recreatedSplit = (BootstrapBaseFileSplit) CustomSplitConversionUtils.recreateSplitWithCustomInfo(baseSplit, customSplitInfo);
    assertEquals(FILE_PATH, recreatedSplit.getPath());
    assertEquals(SPLIT_START_POS, recreatedSplit.getStart());
    assertEquals(SPLIT_LENGTH, recreatedSplit.getLength());
    assertEquals(SPLIT_HOSTS, recreatedSplit.getLocations());
    assertEquals(bootstrapSourceFilePath, recreatedSplit.getBootstrapFileSplit().getPath());
    assertEquals(bootstrapSourceSplitStartPos, recreatedSplit.getBootstrapFileSplit().getStart());
    assertEquals(bootstrapSourceSplitLength, recreatedSplit.getBootstrapFileSplit().getLength());
}
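Both tests exercise the same mechanism: extractCustomSplitInfo records the concrete split class in the map, and recreateSplitWithCustomInfo uses that entry to pick the matching converter (the HudiBootstrapBaseFileSplitConverter shown below checks exactly that key). A rough sketch of such a dispatch loop; the converter list and loop structure are assumptions about how CustomSplitConversionUtils could be organized, not its actual code:

    // Sketch only: try each registered converter until one recognizes the recorded class.
    for (CustomSplitConverter converter : converters) {
        Optional<FileSplit> recreated = converter.recreateFileSplitWithCustomInfo(split, customSplitInfo);
        if (recreated.isPresent()) {
            return recreated.get();
        }
    }
    // No converter matched; fall back to the plain base split.
    return split;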
Use of org.apache.hadoop.mapred.FileSplit in project presto by prestodb.
From the class AbstractTestHiveFileFormats, method createTestFile.
public static FileSplit createTestFile(String filePath, HiveStorageFormat storageFormat, HiveCompressionCodec compressionCodec,
        List<TestColumn> testColumns, ConnectorSession session, int numRows, HiveFileWriterFactory fileWriterFactory)
{
    // Filter out partition keys, which are not written to the file
    testColumns = ImmutableList.copyOf(filter(testColumns, not(TestColumn::isPartitionKey)));
    List<Type> types = testColumns.stream()
            .map(TestColumn::getType)
            .map(HiveType::valueOf)
            .map(type -> type.getType(FUNCTION_AND_TYPE_MANAGER))
            .collect(toList());
    PageBuilder pageBuilder = new PageBuilder(types);
    for (int rowNumber = 0; rowNumber < numRows; rowNumber++) {
        pageBuilder.declarePosition();
        for (int columnNumber = 0; columnNumber < testColumns.size(); columnNumber++) {
            serializeObject(types.get(columnNumber), pageBuilder.getBlockBuilder(columnNumber),
                    testColumns.get(columnNumber).getWriteValue(), testColumns.get(columnNumber).getObjectInspector(), false);
        }
    }
    Page page = pageBuilder.build();
    JobConf jobConf = configureCompression(new JobConf(), compressionCodec);
    Properties tableProperties = new Properties();
    tableProperties.setProperty("columns", Joiner.on(',').join(transform(testColumns, TestColumn::getName)));
    tableProperties.setProperty("columns.types", Joiner.on(',').join(transform(testColumns, TestColumn::getType)));
    Optional<HiveFileWriter> fileWriter = fileWriterFactory.createFileWriter(
            new Path(filePath),
            testColumns.stream().map(TestColumn::getName).collect(toList()),
            StorageFormat.fromHiveStorageFormat(storageFormat),
            tableProperties,
            jobConf,
            session,
            Optional.empty());
    HiveFileWriter hiveFileWriter = fileWriter.orElseThrow(() -> new IllegalArgumentException("fileWriterFactory"));
    hiveFileWriter.appendRows(page);
    Optional<Page> fileStatistics = hiveFileWriter.commit();
    assertFileStatistics(fileStatistics, hiveFileWriter.getFileSizeInBytes(), storageFormat);
    return new FileSplit(new Path(filePath), 0, new File(filePath).length(), new String[0]);
}
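A hypothetical call to this helper, matching the signature above; the path, format, codec, row count, and the testColumns, session, and fileWriterFactory variables are all placeholders standing in for whatever the enclosing test supplies:

    // Illustrative only: assumes testColumns, session, and fileWriterFactory are in scope.
    FileSplit split = createTestFile(
            "/tmp/hive-format-test/data.rc",
            HiveStorageFormat.RCBINARY,
            HiveCompressionCodec.NONE,
            testColumns,
            session,
            1000,
            fileWriterFactory);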
Use of org.apache.hadoop.mapred.FileSplit in project presto by prestodb.
From the class HudiBootstrapBaseFileSplitConverter, method recreateFileSplitWithCustomInfo.
@Override
public Optional<FileSplit> recreateFileSplitWithCustomInfo(FileSplit split, Map<String, String> customSplitInfo)
        throws IOException
{
    requireNonNull(customSplitInfo);
    String customFileSplitClass = customSplitInfo.get(CUSTOM_FILE_SPLIT_CLASS_KEY);
    if (!isNullOrEmpty(customFileSplitClass) && BootstrapBaseFileSplit.class.getName().equals(customFileSplitClass)) {
        FileSplit bootstrapFileSplit = new FileSplit(
                new Path(customSplitInfo.get(BOOTSTRAP_FILE_SPLIT_PATH_KEY)),
                parseLong(customSplitInfo.get(BOOTSTRAP_FILE_SPLIT_START_KEY)),
                parseLong(customSplitInfo.get(BOOTSTRAP_FILE_SPLIT_LEN_KEY)),
                (String[]) null);
        split = new BootstrapBaseFileSplit(split, bootstrapFileSplit);
        return Optional.of(split);
    }
    return Optional.empty();
}
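The extraction side of this converter is not shown on this page; presumably it writes the same four keys that recreateFileSplitWithCustomInfo reads back. A sketch under that assumption, not the actual Presto implementation:

    @Override
    public Map<String, String> extractCustomSplitInfo(FileSplit split)
    {
        if (split instanceof BootstrapBaseFileSplit) {
            // Record the concrete class plus the bootstrap file coordinates so the
            // split can be rebuilt later; key constants as used in the method above.
            FileSplit bootstrapFileSplit = ((BootstrapBaseFileSplit) split).getBootstrapFileSplit();
            ImmutableMap.Builder<String, String> customSplitInfo = ImmutableMap.builder();
            customSplitInfo.put(CUSTOM_FILE_SPLIT_CLASS_KEY, BootstrapBaseFileSplit.class.getName());
            customSplitInfo.put(BOOTSTRAP_FILE_SPLIT_PATH_KEY, bootstrapFileSplit.getPath().toString());
            customSplitInfo.put(BOOTSTRAP_FILE_SPLIT_START_KEY, String.valueOf(bootstrapFileSplit.getStart()));
            customSplitInfo.put(BOOTSTRAP_FILE_SPLIT_LEN_KEY, String.valueOf(bootstrapFileSplit.getLength()));
            return customSplitInfo.build();
        }
        return ImmutableMap.of();
    }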
Use of org.apache.hadoop.mapred.FileSplit in project presto by prestodb.
From the class TestHiveFileFormats, method testPageSourceFactory.
private void testPageSourceFactory(HiveBatchPageSourceFactory sourceFactory, FileSplit split, HiveStorageFormat storageFormat,
        List<TestColumn> testColumns, ConnectorSession session, int rowCount)
        throws IOException
{
    List<HivePartitionKey> partitionKeys = testColumns.stream()
            .filter(TestColumn::isPartitionKey)
            .map(TestColumn::toHivePartitionKey)
            .collect(toList());
    List<HiveColumnHandle> partitionKeyColumnHandles = getColumnHandles(
            testColumns.stream().filter(TestColumn::isPartitionKey).collect(toImmutableList()));
    List<Column> tableDataColumns = testColumns.stream()
            .filter(column -> !column.isPartitionKey())
            .map(column -> new Column(column.getName(), HiveType.valueOf(column.getType()), Optional.empty(), Optional.empty()))
            .collect(toImmutableList());
    List<HiveColumnHandle> columnHandles = getColumnHandles(testColumns);
    Optional<ConnectorPageSource> pageSource = HivePageSourceProvider.createHivePageSource(
            ImmutableSet.of(), ImmutableSet.of(sourceFactory), new Configuration(), session,
            split.getPath(), OptionalInt.empty(), split.getStart(), split.getLength(), split.getLength(), Instant.now().toEpochMilli(),
            new Storage(StorageFormat.create(storageFormat.getSerDe(), storageFormat.getInputFormat(), storageFormat.getOutputFormat()),
                    "location", Optional.empty(), false, ImmutableMap.of(), ImmutableMap.of()),
            TupleDomain.all(), columnHandles, ImmutableMap.of(), partitionKeys, DateTimeZone.getDefault(), FUNCTION_AND_TYPE_MANAGER,
            new SchemaTableName("schema", "table"), partitionKeyColumnHandles, tableDataColumns, ImmutableMap.of(),
            tableDataColumns.size(), TableToPartitionMapping.empty(), Optional.empty(), false, DEFAULT_HIVE_FILE_CONTEXT,
            TRUE_CONSTANT, false, ROW_EXPRESSION_SERVICE, Optional.empty(), ImmutableMap.of());
    assertTrue(pageSource.isPresent());
    checkPageSource(pageSource.get(), testColumns, getTypes(columnHandles), rowCount);
}
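Taken together, createTestFile and testPageSourceFactory form a write-then-read round trip: one writes rows through a HiveFileWriter and returns a FileSplit over the result, the other reads that split back through a page source factory and checks the rows. A hypothetical pairing, with every variable assumed to be in scope:

    // Illustrative composition of the two helpers above; all arguments are placeholders.
    FileSplit split = createTestFile(filePath, storageFormat, compressionCodec, testColumns, session, rowCount, fileWriterFactory);
    testPageSourceFactory(sourceFactory, split, storageFormat, testColumns, session, rowCount);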