Example use of io.prestosql.spi.PageIndexer in the project hetu-core by openlookeng,
taken from the class TestHivePageSink, method prepareHivePageSink.
// Builds a HivePageSink wired entirely to mocks, for snapshot testing. Each input page
// carries one row with one integer column, and that integer selects the partition.
private HivePageSink prepareHivePageSink() throws IOException {
    // Mock every collaborator the page sink interacts with.
    HiveWriterFactory factory = mock(HiveWriterFactory.class);
    HiveColumnHandle partitionColumn = mock(HiveColumnHandle.class);
    HdfsEnvironment environment = mock(HdfsEnvironment.class);
    PageIndexerFactory indexerFactory = mock(PageIndexerFactory.class);
    PageIndexer indexer = mock(PageIndexer.class);
    JsonCodec codec = mock(JsonCodec.class);
    ConnectorSession session = mock(ConnectorSession.class);
    HiveWriter writer = mock(HiveWriter.class);

    // Session stubs: snapshot capture is enabled and the task id is fixed at 1.
    when(session.isSnapshotEnabled()).thenReturn(true);
    when(session.getTaskId()).thenReturn(OptionalInt.of(1));

    // Writer-factory stubs: every creation path hands back the same mock writer.
    when(factory.isTxnTable()).thenReturn(false);
    when(writer.getVerificationTask()).thenReturn(Optional.empty());
    when(factory.createWriter(anyObject(), anyObject(), anyObject())).thenReturn(writer);
    when(factory.createWriterForSnapshotMerge(anyObject(), anyObject(), anyObject())).thenReturn(writer);
    when(factory.getPartitionName(anyObject(), anyInt())).thenReturn(Optional.empty());

    // Remaining uninteresting-but-required stubs.
    when(partitionColumn.isPartitionKey()).thenReturn(true);
    when(indexerFactory.createPageIndexer(anyObject())).thenReturn(indexer);
    when(codec.toJsonBytes(anyObject())).thenReturn(new byte[0]);

    // hdfsEnvironment.doAs() simply runs the supplied action inline, for both overloads.
    when(environment.doAs(anyObject(), (GenericExceptionAction) anyObject()))
            .thenAnswer(invocation -> ((GenericExceptionAction) invocation.getArguments()[1]).run());
    doAnswer(invocation -> {
        ((Runnable) invocation.getArguments()[1]).run();
        return null;
    }).when(environment).doAs(anyObject(), (Runnable) anyObject());

    // The single integer in each page doubles as the partition index: a page holding 0
    // lands in partition 0, a page holding 1 in partition 1, and so on. The most recent
    // value is kept in a one-element array so the stubs below can read it.
    int[] lastIndex = new int[1];
    when(indexer.indexPage(anyObject())).thenAnswer(invocation -> {
        lastIndex[0] = (int) ((Page) invocation.getArguments()[0]).getBlock(0).get(0);
        return new int[] {lastIndex[0]};
    });
    when(indexer.getMaxIndex()).thenAnswer(invocation -> lastIndex[0]);
    doAnswer(invocation -> {
        // A merge should receive one sub-file per partition seen so far.
        assertEquals(((List) invocation.getArguments()[0]).size(), lastIndex[0] + 1);
        return null;
    }).when(factory).mergeSubFiles(anyObject());

    return new HivePageSink(factory, Collections.singletonList(partitionColumn), Optional.empty(),
            indexerFactory, mock(TypeManager.class), environment, 10,
            mock(ListeningExecutorService.class), codec, session,
            HiveACIDWriteType.INSERT, mock(HiveWritableTableHandle.class));
}
Example use of io.prestosql.spi.PageIndexer in the project boostkit-bigdata by kunpengcompute,
taken from the class TestHivePageSink, method prepareHivePageSink.
// Builds a HivePageSink wired entirely to mocks, for snapshot testing. Each input page
// carries one row with one integer column, and that integer selects the partition.
private HivePageSink prepareHivePageSink() throws IOException {
    // Mock every collaborator the page sink interacts with.
    HiveWriterFactory factory = mock(HiveWriterFactory.class);
    HiveColumnHandle partitionColumn = mock(HiveColumnHandle.class);
    HdfsEnvironment environment = mock(HdfsEnvironment.class);
    PageIndexerFactory indexerFactory = mock(PageIndexerFactory.class);
    PageIndexer indexer = mock(PageIndexer.class);
    JsonCodec codec = mock(JsonCodec.class);
    ConnectorSession session = mock(ConnectorSession.class);
    HiveWriter writer = mock(HiveWriter.class);

    // Session stubs: snapshot capture is enabled and the task id is fixed at 1.
    when(session.isSnapshotEnabled()).thenReturn(true);
    when(session.getTaskId()).thenReturn(OptionalInt.of(1));

    // Writer-factory stubs: every creation path hands back the same mock writer.
    when(factory.isTxnTable()).thenReturn(false);
    when(writer.getVerificationTask()).thenReturn(Optional.empty());
    when(factory.createWriter(anyObject(), anyObject(), anyObject())).thenReturn(writer);
    when(factory.createWriterForSnapshotMerge(anyObject(), anyObject(), anyObject())).thenReturn(writer);
    when(factory.getPartitionName(anyObject(), anyInt())).thenReturn(Optional.empty());

    // Remaining uninteresting-but-required stubs.
    when(partitionColumn.isPartitionKey()).thenReturn(true);
    when(indexerFactory.createPageIndexer(anyObject())).thenReturn(indexer);
    when(codec.toJsonBytes(anyObject())).thenReturn(new byte[0]);

    // hdfsEnvironment.doAs() simply runs the supplied action inline, for both overloads.
    when(environment.doAs(anyObject(), (GenericExceptionAction) anyObject()))
            .thenAnswer(invocation -> ((GenericExceptionAction) invocation.getArguments()[1]).run());
    doAnswer(invocation -> {
        ((Runnable) invocation.getArguments()[1]).run();
        return null;
    }).when(environment).doAs(anyObject(), (Runnable) anyObject());

    // The single integer in each page doubles as the partition index: a page holding 0
    // lands in partition 0, a page holding 1 in partition 1, and so on. The most recent
    // value is kept in a one-element array so the stubs below can read it.
    int[] lastIndex = new int[1];
    when(indexer.indexPage(anyObject())).thenAnswer(invocation -> {
        lastIndex[0] = (int) ((Page) invocation.getArguments()[0]).getBlock(0).get(0);
        return new int[] {lastIndex[0]};
    });
    when(indexer.getMaxIndex()).thenAnswer(invocation -> lastIndex[0]);
    doAnswer(invocation -> {
        // A merge should receive one sub-file per partition seen so far.
        assertEquals(((List) invocation.getArguments()[0]).size(), lastIndex[0] + 1);
        return null;
    }).when(factory).mergeSubFiles(anyObject());

    return new HivePageSink(factory, Collections.singletonList(partitionColumn), Optional.empty(),
            indexerFactory, mock(TypeManager.class), environment, 10,
            mock(ListeningExecutorService.class), codec, session,
            HiveACIDWriteType.INSERT, mock(HiveWritableTableHandle.class));
}
Aggregations