Search in sources:

Example 11 with TempDir

use of org.junit.jupiter.api.io.TempDir in project sonarlint-core by SonarSource.

The following is from the class ConnectedStorageProblemsMediumTests, method test_no_storage.

@Test
void test_no_storage(@TempDir Path slHome, @TempDir Path baseDir) {
    // Build an engine whose SonarLint home is an empty temp dir, i.e. no storage exists.
    var globalConfig = ConnectedGlobalConfiguration.builder()
        .setConnectionId("localhost")
        .setSonarLintUserHome(slHome)
        .setLogOutput((msg, level) -> {
        })
        .build();
    sonarlint = new ConnectedSonarLintEngineImpl(globalConfig);
    // With no storage, status/lookup queries return null or empty rather than failing.
    assertThat(sonarlint.getGlobalStorageStatus()).isNull();
    assertThat(sonarlint.getProjectStorageStatus("foo")).isNull();
    assertThat(sonarlint.allProjectsByKey()).isEmpty();
    var branches = sonarlint.getServerBranches("foo");
    assertThat(branches.getBranchNames()).isEmpty();
    assertThat(branches.getMainBranchName()).isEmpty();
    // Rule detail lookup fails explicitly when nothing is stored.
    var ruleDetailsError = assertThrows(IllegalStateException.class, () -> sonarlint.getActiveRuleDetails(null, null, "rule", null));
    assertThat(ruleDetailsError).hasMessage("Unable to find rule details for 'rule'");
    // Analysis is rejected up front for a connection that has no storage.
    var analysisConfiguration = ConnectedAnalysisConfiguration.builder().setBaseDir(baseDir).build();
    var analysisError = assertThrows(StorageException.class, () -> sonarlint.analyze(analysisConfiguration, i -> {
    }, null, null));
    assertThat(analysisError).hasMessage("Missing storage for connection");
}
Also used : Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) StorageException(org.sonarsource.sonarlint.core.client.api.exceptions.StorageException) ClientInputFile(org.sonarsource.sonarlint.core.analysis.api.ClientInputFile) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) ConnectedSonarLintEngineImpl(org.sonarsource.sonarlint.core.ConnectedSonarLintEngineImpl) Language(org.sonarsource.sonarlint.core.commons.Language) IOException(java.io.IOException) FileUtils(org.apache.commons.io.FileUtils) ConnectedAnalysisConfiguration(org.sonarsource.sonarlint.core.client.api.connected.ConnectedAnalysisConfiguration) TestUtils(testutils.TestUtils) ConnectedGlobalConfiguration(org.sonarsource.sonarlint.core.client.api.connected.ConnectedGlobalConfiguration) File(java.io.File) StandardCharsets(java.nio.charset.StandardCharsets) StorageFixture.newStorage(org.sonarsource.sonarlint.core.mediumtest.fixtures.StorageFixture.newStorage) ArrayList(java.util.ArrayList) Test(org.junit.jupiter.api.Test) List(java.util.List) AfterEach(org.junit.jupiter.api.AfterEach) TempDir(org.junit.jupiter.api.io.TempDir) Issue(org.sonarsource.sonarlint.core.client.api.common.analysis.Issue) Path(java.nio.file.Path) ConnectedSonarLintEngine(org.sonarsource.sonarlint.core.client.api.connected.ConnectedSonarLintEngine) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ConnectedSonarLintEngineImpl(org.sonarsource.sonarlint.core.ConnectedSonarLintEngineImpl) Test(org.junit.jupiter.api.Test)

Example 12 with TempDir

use of org.junit.jupiter.api.io.TempDir in project gocd by gocd.

The following is from the class StandbyFileSyncServiceTest, method setUp.

@BeforeEach
void setUp(@TempDir File tempFolder) throws Exception {
    // Point every config-file location the sync service reads into the per-test temp folder.
    when(authToken.forHttp()).thenReturn("foo:bar");
    when(systemEnvironment.getWebappContextPath()).thenReturn("/go");
    when(systemEnvironment.getCruiseConfigFile()).thenReturn(new File(tempFolder, "cruise-config.xml").getAbsolutePath());
    when(systemEnvironment.getDESCipherFile()).thenReturn(new File(tempFolder, "cipher"));
    when(systemEnvironment.getAESCipherFile()).thenReturn(new File(tempFolder, "cipher.aes"));
    when(systemEnvironment.getJettyConfigFile()).thenReturn(new File(tempFolder, "jetty.xml"));
    File externalPluginsDirectory = new File(tempFolder, "external");
    FileUtils.forceMkdir(externalPluginsDirectory);
    when(systemEnvironment.getExternalPluginAbsolutePath()).thenReturn(externalPluginsDirectory.getAbsolutePath());
    // Typed Answer<Void> (was raw Answer): fakes a config-file download by writing
    // placeholder contents into the destination File passed as the second argument.
    Answer<Void> answerWithFile = invocationOnMock -> {
        ConfigFileType type = invocationOnMock.getArgument(0);
        File file = invocationOnMock.getArgument(1);
        FileUtils.writeStringToFile(file, type + " contents", UTF_8);
        return null;
    };
    // Fakes a plugin download by writing placeholder contents into the destination File.
    Answer<Void> answerWithPlugin = invocationOnMock -> {
        String pluginName = invocationOnMock.getArgument(1);
        File file = invocationOnMock.getArgument(2);
        FileUtils.writeStringToFile(file, pluginName + " contents", UTF_8);
        return null;
    };
    // lenient(): not every test in the class exercises every download stub.
    lenient().doAnswer(answerWithFile).when(primaryServerCommunicationService).downloadConfigFile(eq(ConfigFileType.CRUISE_CONFIG_XML), any(File.class));
    lenient().doAnswer(answerWithFile).when(primaryServerCommunicationService).downloadConfigFile(eq(ConfigFileType.AES_CIPHER), any(File.class));
    lenient().doAnswer(answerWithFile).when(primaryServerCommunicationService).downloadConfigFile(eq(ConfigFileType.JETTY_XML), any(File.class));
    lenient().doAnswer(answerWithPlugin).when(primaryServerCommunicationService).downloadPlugin(eq("external"), eq("external-1.jar"), any(File.class));
    lenient().doAnswer(answerWithPlugin).when(primaryServerCommunicationService).downloadPlugin(eq("external"), eq("external-2.jar"), any(File.class));
    when(addOnConfiguration.isServerInStandby()).thenReturn(true);
    lenient().when(primaryServerCommunicationService.ableToConnect()).thenReturn(true);
}
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) BeforeEach(org.junit.jupiter.api.BeforeEach) MockitoExtension(org.mockito.junit.jupiter.MockitoExtension) SystemStubsExtension(uk.org.webcompere.systemstubs.jupiter.SystemStubsExtension) java.util(java.util) ServerStatusResponse(com.thoughtworks.go.addon.businesscontinuity.primary.ServerStatusResponse) Mock(org.mockito.Mock) SystemProperties(uk.org.webcompere.systemstubs.properties.SystemProperties) UTF_8(java.nio.charset.StandardCharsets.UTF_8) FileUtils(org.apache.commons.io.FileUtils) File(java.io.File) Test(org.junit.jupiter.api.Test) Answer(org.mockito.stubbing.Answer) Mockito(org.mockito.Mockito) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) com.thoughtworks.go.addon.businesscontinuity(com.thoughtworks.go.addon.businesscontinuity) TempDir(org.junit.jupiter.api.io.TempDir) SystemEnvironment(com.thoughtworks.go.util.SystemEnvironment) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) SystemStub(uk.org.webcompere.systemstubs.jupiter.SystemStub) Answer(org.mockito.stubbing.Answer) File(java.io.File) BeforeEach(org.junit.jupiter.api.BeforeEach)

Example 13 with TempDir

use of org.junit.jupiter.api.io.TempDir in project Gaffer by gchq.

The following is from the class WriteUnsortedDataTest, method testMultipleSplitPointsCase.

// Writes an unsorted mix of entities and edges for four groups, each partitioned by two
// split points (three partitions), then verifies that every partition directory holds
// exactly one file containing the expected slice of the data in write order.
@Test
public void testMultipleSplitPointsCase(@TempDir java.nio.file.Path tempDir) throws IOException, OperationException {
    // Given
    final String tempFilesDir = tempDir.toAbsolutePath().toString();
    final SchemaUtils schemaUtils = new SchemaUtils(TestUtils.gafferSchema("schemaUsingLongVertexType"));
    final GraphPartitioner graphPartitioner = new GraphPartitioner();
    final List<Element> elements = new ArrayList<>();
    // TestGroups.ENTITY, split points are 10L and 100L. Create data with
    // VERTEX
    // 5L
    // 10L
    // 10L
    // 11L
    // 12L
    // 100L
    // 100L
    // 200L
    final List<PartitionKey> splitPointsEntity = new ArrayList<>();
    splitPointsEntity.add(new PartitionKey(new Object[] { 10L }));
    splitPointsEntity.add(new PartitionKey(new Object[] { 100L }));
    graphPartitioner.addGroupPartitioner(TestGroups.ENTITY, new GroupPartitioner(TestGroups.ENTITY, splitPointsEntity));
    elements.add(createEntityForEntityGroup(5L));
    elements.add(createEntityForEntityGroup(10L));
    elements.add(createEntityForEntityGroup(10L));
    elements.add(createEntityForEntityGroup(11L));
    elements.add(createEntityForEntityGroup(12L));
    elements.add(createEntityForEntityGroup(100L));
    elements.add(createEntityForEntityGroup(100L));
    elements.add(createEntityForEntityGroup(200L));
    // TestGroups.ENTITY_2, split points are 100L and 1000L. Create data with
    // VERTEX
    // 5L
    // 100L
    // 200L
    // 1000L
    // 5000L
    final List<PartitionKey> splitPointsEntity_2 = new ArrayList<>();
    splitPointsEntity_2.add(new PartitionKey(new Object[] { 100L }));
    splitPointsEntity_2.add(new PartitionKey(new Object[] { 1000L }));
    graphPartitioner.addGroupPartitioner(TestGroups.ENTITY_2, new GroupPartitioner(TestGroups.ENTITY_2, splitPointsEntity_2));
    elements.add(createEntityForEntityGroup_2(5L));
    elements.add(createEntityForEntityGroup_2(100L));
    elements.add(createEntityForEntityGroup_2(200L));
    elements.add(createEntityForEntityGroup_2(1000L));
    elements.add(createEntityForEntityGroup_2(5000L));
    // TestGroups.EDGE, split points are [1000L, 200L, true] and [1000L, 30000L, false]. Create data with
    // SOURCE   DESTINATION    DIRECTED
    // 5L        5000L         true
    // 5L         200L         false
    // 1000L          90L         true
    // 1000L       10000L         false
    // 1000L       30000L         false
    // 1000L      300000L         true
    // 10000L         400L         false
    final List<PartitionKey> splitPointsEdge = new ArrayList<>();
    splitPointsEdge.add(new PartitionKey(new Object[] { 1000L, 200L, true }));
    splitPointsEdge.add(new PartitionKey(new Object[] { 1000L, 30000L, false }));
    graphPartitioner.addGroupPartitioner(TestGroups.EDGE, new GroupPartitioner(TestGroups.EDGE, splitPointsEdge));
    final List<PartitionKey> splitPointsReversedEdge = new ArrayList<>();
    splitPointsReversedEdge.add(new PartitionKey(new Object[] { 100L, 1000L, true }));
    splitPointsReversedEdge.add(new PartitionKey(new Object[] { 300L, 2000L, false }));
    graphPartitioner.addGroupPartitionerForReversedEdges(TestGroups.EDGE, new GroupPartitioner(TestGroups.EDGE, splitPointsReversedEdge));
    elements.add(createEdgeForEdgeGroup(5L, 5000L, true));
    elements.add(createEdgeForEdgeGroup(5L, 200L, false));
    elements.add(createEdgeForEdgeGroup(1000L, 90L, true));
    elements.add(createEdgeForEdgeGroup(1000L, 10000L, false));
    elements.add(createEdgeForEdgeGroup(1000L, 30000L, false));
    elements.add(createEdgeForEdgeGroup(1000L, 300000L, true));
    elements.add(createEdgeForEdgeGroup(10000L, 400L, false));
    // TestGroups.EDGE_2, split points are [10L, 2000L, true] and [100L, 1000L, false]. Create data with
    // SOURCE   DESTINATION    DIRECTED
    // 5L         5000L        true
    // 10L         2000L        false
    // 10L         2000L        true
    // 10L         3000L        false
    // 100L         1000L        false
    // 100L         3000L        false
    // 100L         3000L        true
    final List<PartitionKey> splitPointsEdge_2 = new ArrayList<>();
    splitPointsEdge_2.add(new PartitionKey(new Object[] { 10L, 2000L, true }));
    splitPointsEdge_2.add(new PartitionKey(new Object[] { 100L, 1000L, false }));
    graphPartitioner.addGroupPartitioner(TestGroups.EDGE_2, new GroupPartitioner(TestGroups.EDGE_2, splitPointsEdge_2));
    final List<PartitionKey> splitPointsReversedEdge_2 = new ArrayList<>();
    splitPointsReversedEdge_2.add(new PartitionKey(new Object[] { 1000L, 1500L, true }));
    splitPointsReversedEdge_2.add(new PartitionKey(new Object[] { 2000L, 2500L, false }));
    graphPartitioner.addGroupPartitionerForReversedEdges(TestGroups.EDGE_2, new GroupPartitioner(TestGroups.EDGE_2, splitPointsReversedEdge_2));
    elements.add(createEdgeForEdgeGroup_2(5L, 5000L, true));
    elements.add(createEdgeForEdgeGroup_2(10L, 2000L, false));
    elements.add(createEdgeForEdgeGroup_2(10L, 2000L, true));
    elements.add(createEdgeForEdgeGroup_2(10L, 3000L, false));
    elements.add(createEdgeForEdgeGroup_2(100L, 1000L, false));
    elements.add(createEdgeForEdgeGroup_2(100L, 3000L, false));
    elements.add(createEdgeForEdgeGroup_2(100L, 3000L, true));
    // Partition files are laid out as <dir>/GROUP=<group>/split-<id> (and REVERSED-GROUP for reversed edges).
    final BiFunction<String, Integer, String> fileNameForGroupAndPartitionId = (group, partitionId) -> tempFilesDir + "/GROUP=" + group + "/split-" + partitionId;
    final BiFunction<String, Integer, String> fileNameForGroupAndPartitionIdForReversedEdge = (group, partitionId) -> tempFilesDir + "/REVERSED-GROUP=" + group + "/split-" + partitionId;
    final WriteUnsortedData writeUnsortedData = new WriteUnsortedData(tempFilesDir, CompressionCodecName.GZIP, schemaUtils, graphPartitioner, fileNameForGroupAndPartitionId, fileNameForGroupAndPartitionIdForReversedEdge);
    // When
    writeUnsortedData.writeElements(elements);
    // Then
    // - For each group, directories split0, split1 and split2 should exist and each contain one file
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.ENTITY + "/split-0", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.ENTITY + "/split-1", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.ENTITY + "/split-2", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.ENTITY_2 + "/split-0", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.ENTITY_2 + "/split-1", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.ENTITY_2 + "/split-2", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.EDGE + "/split-0", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.EDGE + "/split-1", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.EDGE + "/split-2", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.EDGE_2 + "/split-0", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.EDGE_2 + "/split-1", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.EDGE_2 + "/split-2", 1);
    // - Each split file should contain the data for that split in the order it was written
    for (final String group : new HashSet<>(Arrays.asList(TestGroups.ENTITY, TestGroups.ENTITY_2))) {
        testSplitFileContainsCorrectData(tempFilesDir + "/GROUP=" + group + "/split-0", group, true, false, null, graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(0), elements, schemaUtils);
        testSplitFileContainsCorrectData(tempFilesDir + "/GROUP=" + group + "/split-1", group, true, false, graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(0), graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(1), elements, schemaUtils);
        testSplitFileContainsCorrectData(tempFilesDir + "/GROUP=" + group + "/split-2", group, true, false, graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(1), null, elements, schemaUtils);
    }
    for (final String group : new HashSet<>(Arrays.asList(TestGroups.EDGE, TestGroups.EDGE_2))) {
        testSplitFileContainsCorrectData(tempFilesDir + "/GROUP=" + group + "/split-0", group, false, false, null, graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(0), elements, schemaUtils);
        testSplitFileContainsCorrectData(tempFilesDir + "/REVERSED-GROUP=" + group + "/split-0", group, false, true, null, graphPartitioner.getGroupPartitionerForReversedEdges(group).getIthPartitionKey(0), elements, schemaUtils);
        testSplitFileContainsCorrectData(tempFilesDir + "/GROUP=" + group + "/split-1", group, false, false, graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(0), graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(1), elements, schemaUtils);
        testSplitFileContainsCorrectData(tempFilesDir + "/REVERSED-GROUP=" + group + "/split-1", group, false, true, graphPartitioner.getGroupPartitionerForReversedEdges(group).getIthPartitionKey(0), graphPartitioner.getGroupPartitionerForReversedEdges(group).getIthPartitionKey(1), elements, schemaUtils);
        testSplitFileContainsCorrectData(tempFilesDir + "/GROUP=" + group + "/split-2", group, false, false, graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(1), null, elements, schemaUtils);
        testSplitFileContainsCorrectData(tempFilesDir + "/REVERSED-GROUP=" + group + "/split-2", group, false, true, graphPartitioner.getGroupPartitionerForReversedEdges(group).getIthPartitionKey(1), null, elements, schemaUtils);
    }
}
Also used : GroupPartitioner(uk.gov.gchq.gaffer.parquetstore.partitioner.GroupPartitioner) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Arrays(java.util.Arrays) FileSystem(org.apache.hadoop.fs.FileSystem) Date(java.util.Date) BiFunction(java.util.function.BiFunction) SerialisationException(uk.gov.gchq.gaffer.exception.SerialisationException) SimpleDateFormat(java.text.SimpleDateFormat) GroupPartitioner(uk.gov.gchq.gaffer.parquetstore.partitioner.GroupPartitioner) FileStatus(org.apache.hadoop.fs.FileStatus) Element(uk.gov.gchq.gaffer.data.element.Element) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) PartitionKey(uk.gov.gchq.gaffer.parquetstore.partitioner.PartitionKey) FreqMap(uk.gov.gchq.gaffer.types.FreqMap) BeforeAll(org.junit.jupiter.api.BeforeAll) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) Edge(uk.gov.gchq.gaffer.data.element.Edge) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) ParseException(java.text.ParseException) TestGroups(uk.gov.gchq.gaffer.commonutil.TestGroups) ParquetElementReader(uk.gov.gchq.gaffer.parquetstore.io.reader.ParquetElementReader) TimeZone(java.util.TimeZone) ParquetReader(org.apache.parquet.hadoop.ParquetReader) IOException(java.io.IOException) Entity(uk.gov.gchq.gaffer.data.element.Entity) Collectors(java.util.stream.Collectors) Test(org.junit.jupiter.api.Test) DataGen(uk.gov.gchq.gaffer.parquetstore.testutils.DataGen) List(java.util.List) WriteUnsortedData(uk.gov.gchq.gaffer.parquetstore.operation.handler.utilities.WriteUnsortedData) TempDir(org.junit.jupiter.api.io.TempDir) TestUtils(uk.gov.gchq.gaffer.parquetstore.testutils.TestUtils) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) GraphPartitioner(uk.gov.gchq.gaffer.parquetstore.partitioner.GraphPartitioner) CompressionCodecName(org.apache.parquet.hadoop.metadata.CompressionCodecName) OperationException(uk.gov.gchq.gaffer.operation.OperationException) 
WriteUnsortedData(uk.gov.gchq.gaffer.parquetstore.operation.handler.utilities.WriteUnsortedData) Element(uk.gov.gchq.gaffer.data.element.Element) ArrayList(java.util.ArrayList) GraphPartitioner(uk.gov.gchq.gaffer.parquetstore.partitioner.GraphPartitioner) PartitionKey(uk.gov.gchq.gaffer.parquetstore.partitioner.PartitionKey) HashSet(java.util.HashSet) Test(org.junit.jupiter.api.Test)

Example 14 with TempDir

use of org.junit.jupiter.api.io.TempDir in project Gaffer by gchq.

The following is from the class WriteUnsortedDataTest, method testOneSplitPointCase.

// Writes an unsorted mix of entities and edges for four groups, each partitioned by one
// split point (two partitions), then verifies that every partition directory (including
// the reversed-edge ones) holds exactly one file with the expected slice of the data.
@Test
public void testOneSplitPointCase(@TempDir java.nio.file.Path tempDir) throws IOException, OperationException {
    // Given
    final String tempFilesDir = tempDir.toAbsolutePath().toString();
    final SchemaUtils schemaUtils = new SchemaUtils(TestUtils.gafferSchema("schemaUsingLongVertexType"));
    final GraphPartitioner graphPartitioner = new GraphPartitioner();
    final List<Element> elements = new ArrayList<>();
    // TestGroups.ENTITY, split point is 10L. Create data with
    // VERTEX
    // 5L
    // 10L
    // 10L
    // 10L
    // 20L
    final List<PartitionKey> splitPointsEntity = new ArrayList<>();
    splitPointsEntity.add(new PartitionKey(new Object[] { 10L }));
    graphPartitioner.addGroupPartitioner(TestGroups.ENTITY, new GroupPartitioner(TestGroups.ENTITY, splitPointsEntity));
    elements.add(createEntityForEntityGroup(5L));
    elements.add(createEntityForEntityGroup(10L));
    elements.add(createEntityForEntityGroup(10L));
    elements.add(createEntityForEntityGroup(10L));
    elements.add(createEntityForEntityGroup(20L));
    // TestGroups.ENTITY_2, split point is 100L. Create data with
    // VERTEX
    // 5L
    // 100L
    // 1000L
    final List<PartitionKey> splitPointsEntity_2 = new ArrayList<>();
    splitPointsEntity_2.add(new PartitionKey(new Object[] { 100L }));
    graphPartitioner.addGroupPartitioner(TestGroups.ENTITY_2, new GroupPartitioner(TestGroups.ENTITY_2, splitPointsEntity_2));
    elements.add(createEntityForEntityGroup_2(5L));
    elements.add(createEntityForEntityGroup_2(100L));
    elements.add(createEntityForEntityGroup_2(1000L));
    // TestGroups.EDGE, split point is [1000L, 200L, true]. Create data with
    // SOURCE   DESTINATION    DIRECTED
    // 5L         5000L        true
    // 5L         200L         false
    // 1000L         100L         true
    // 1000L         200L         false
    // 1000L         200L         true
    // 1000L         300L         true
    // 10000L         400L         false
    // 10000L         400L         true
    final List<PartitionKey> splitPointsEdge = new ArrayList<>();
    splitPointsEdge.add(new PartitionKey(new Object[] { 1000L, 200L, true }));
    graphPartitioner.addGroupPartitioner(TestGroups.EDGE, new GroupPartitioner(TestGroups.EDGE, splitPointsEdge));
    final List<PartitionKey> splitPointsReversedEdge = new ArrayList<>();
    splitPointsReversedEdge.add(new PartitionKey(new Object[] { 1000L, 300L, true }));
    graphPartitioner.addGroupPartitionerForReversedEdges(TestGroups.EDGE, new GroupPartitioner(TestGroups.EDGE, splitPointsReversedEdge));
    elements.add(createEdgeForEdgeGroup(5L, 5000L, true));
    elements.add(createEdgeForEdgeGroup(5L, 200L, false));
    elements.add(createEdgeForEdgeGroup(1000L, 100L, true));
    elements.add(createEdgeForEdgeGroup(1000L, 200L, false));
    elements.add(createEdgeForEdgeGroup(1000L, 200L, true));
    elements.add(createEdgeForEdgeGroup(1000L, 300L, true));
    elements.add(createEdgeForEdgeGroup(10000L, 400L, false));
    elements.add(createEdgeForEdgeGroup(10000L, 400L, true));
    // TestGroups.EDGE_2, split point is [10L, 2000L, true]. Create data with
    // SOURCE   DESTINATION    DIRECTED
    // 5L         5000L        true
    // 5L          200L        false
    // 1000L        100L        true
    // 1000L        200L        false
    // 1000L        200L        true
    // 1000L        300L        true
    // 10000L        400L        false
    // 10000L        400L        true
    final List<PartitionKey> splitPointsEdge_2 = new ArrayList<>();
    splitPointsEdge_2.add(new PartitionKey(new Object[] { 10L, 2000L, true }));
    graphPartitioner.addGroupPartitioner(TestGroups.EDGE_2, new GroupPartitioner(TestGroups.EDGE_2, splitPointsEdge_2));
    final List<PartitionKey> splitPointsReversedEdge_2 = new ArrayList<>();
    splitPointsReversedEdge_2.add(new PartitionKey(new Object[] { 3000L, 20L, true }));
    graphPartitioner.addGroupPartitionerForReversedEdges(TestGroups.EDGE_2, new GroupPartitioner(TestGroups.EDGE_2, splitPointsReversedEdge_2));
    elements.add(createEdgeForEdgeGroup_2(5L, 5000L, true));
    elements.add(createEdgeForEdgeGroup_2(5L, 200L, false));
    elements.add(createEdgeForEdgeGroup_2(1000L, 100L, true));
    elements.add(createEdgeForEdgeGroup_2(1000L, 200L, false));
    elements.add(createEdgeForEdgeGroup_2(1000L, 200L, true));
    elements.add(createEdgeForEdgeGroup_2(1000L, 300L, true));
    elements.add(createEdgeForEdgeGroup_2(10000L, 400L, false));
    elements.add(createEdgeForEdgeGroup_2(10000L, 400L, true));
    // Partition files are laid out as <dir>/GROUP=<group>/split-<id> (and REVERSED-GROUP for reversed edges).
    final BiFunction<String, Integer, String> fileNameForGroupAndPartitionId = (group, partitionId) -> tempFilesDir + "/GROUP=" + group + "/split-" + partitionId;
    final BiFunction<String, Integer, String> fileNameForGroupAndPartitionIdForReversedEdge = (group, partitionId) -> tempFilesDir + "/REVERSED-GROUP=" + group + "/split-" + partitionId;
    final WriteUnsortedData writeUnsortedData = new WriteUnsortedData(tempFilesDir, CompressionCodecName.GZIP, schemaUtils, graphPartitioner, fileNameForGroupAndPartitionId, fileNameForGroupAndPartitionIdForReversedEdge);
    // When
    writeUnsortedData.writeElements(elements);
    // Then
    // - For each group, directories split0 and split1 should exist and each contain one file
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.ENTITY + "/split-0", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.ENTITY + "/split-1", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.ENTITY_2 + "/split-0", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.ENTITY_2 + "/split-1", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.EDGE + "/split-0", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.EDGE + "/split-1", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.EDGE_2 + "/split-0", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/GROUP=" + TestGroups.EDGE_2 + "/split-1", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/REVERSED-GROUP=" + TestGroups.EDGE + "/split-0", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/REVERSED-GROUP=" + TestGroups.EDGE + "/split-1", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/REVERSED-GROUP=" + TestGroups.EDGE_2 + "/split-0", 1);
    testExistsAndContainsNFiles(tempFilesDir + "/REVERSED-GROUP=" + TestGroups.EDGE_2 + "/split-1", 1);
    // - Each split file should contain the data for that split in the order it was written
    for (final String group : new HashSet<>(Arrays.asList(TestGroups.ENTITY, TestGroups.ENTITY_2))) {
        testSplitFileContainsCorrectData(tempFilesDir + "/GROUP=" + group + "/split-0", group, true, false, null, graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(0), elements, schemaUtils);
        testSplitFileContainsCorrectData(tempFilesDir + "/GROUP=" + group + "/split-1", group, true, false, graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(0), null, elements, schemaUtils);
    }
    for (final String group : new HashSet<>(Arrays.asList(TestGroups.EDGE, TestGroups.EDGE_2))) {
        testSplitFileContainsCorrectData(tempFilesDir + "/GROUP=" + group + "/split-0", group, false, false, null, graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(0), elements, schemaUtils);
        testSplitFileContainsCorrectData(tempFilesDir + "/REVERSED-GROUP=" + group + "/split-0", group, false, true, null, graphPartitioner.getGroupPartitionerForReversedEdges(group).getIthPartitionKey(0), elements, schemaUtils);
        testSplitFileContainsCorrectData(tempFilesDir + "/GROUP=" + group + "/split-1", group, false, false, graphPartitioner.getGroupPartitioner(group).getIthPartitionKey(0), null, elements, schemaUtils);
        testSplitFileContainsCorrectData(tempFilesDir + "/REVERSED-GROUP=" + group + "/split-1", group, false, true, graphPartitioner.getGroupPartitionerForReversedEdges(group).getIthPartitionKey(0), null, elements, schemaUtils);
    }
}
Also used : GroupPartitioner(uk.gov.gchq.gaffer.parquetstore.partitioner.GroupPartitioner) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Arrays(java.util.Arrays) FileSystem(org.apache.hadoop.fs.FileSystem) Date(java.util.Date) BiFunction(java.util.function.BiFunction) SerialisationException(uk.gov.gchq.gaffer.exception.SerialisationException) SimpleDateFormat(java.text.SimpleDateFormat) GroupPartitioner(uk.gov.gchq.gaffer.parquetstore.partitioner.GroupPartitioner) FileStatus(org.apache.hadoop.fs.FileStatus) Element(uk.gov.gchq.gaffer.data.element.Element) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) PartitionKey(uk.gov.gchq.gaffer.parquetstore.partitioner.PartitionKey) FreqMap(uk.gov.gchq.gaffer.types.FreqMap) BeforeAll(org.junit.jupiter.api.BeforeAll) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) Edge(uk.gov.gchq.gaffer.data.element.Edge) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) ParseException(java.text.ParseException) TestGroups(uk.gov.gchq.gaffer.commonutil.TestGroups) ParquetElementReader(uk.gov.gchq.gaffer.parquetstore.io.reader.ParquetElementReader) TimeZone(java.util.TimeZone) ParquetReader(org.apache.parquet.hadoop.ParquetReader) IOException(java.io.IOException) Entity(uk.gov.gchq.gaffer.data.element.Entity) Collectors(java.util.stream.Collectors) Test(org.junit.jupiter.api.Test) DataGen(uk.gov.gchq.gaffer.parquetstore.testutils.DataGen) List(java.util.List) WriteUnsortedData(uk.gov.gchq.gaffer.parquetstore.operation.handler.utilities.WriteUnsortedData) TempDir(org.junit.jupiter.api.io.TempDir) TestUtils(uk.gov.gchq.gaffer.parquetstore.testutils.TestUtils) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) GraphPartitioner(uk.gov.gchq.gaffer.parquetstore.partitioner.GraphPartitioner) CompressionCodecName(org.apache.parquet.hadoop.metadata.CompressionCodecName) OperationException(uk.gov.gchq.gaffer.operation.OperationException) 
WriteUnsortedData(uk.gov.gchq.gaffer.parquetstore.operation.handler.utilities.WriteUnsortedData) Element(uk.gov.gchq.gaffer.data.element.Element) ArrayList(java.util.ArrayList) GraphPartitioner(uk.gov.gchq.gaffer.parquetstore.partitioner.GraphPartitioner) PartitionKey(uk.gov.gchq.gaffer.parquetstore.partitioner.PartitionKey) HashSet(java.util.HashSet) Test(org.junit.jupiter.api.Test)

Example 15 with TempDir

use of org.junit.jupiter.api.io.TempDir in project flink by apache.

The following is from the class FileSystemBlobStoreTest, method fileSystemBlobStoreCallsSyncOnPut.

@Test
public void fileSystemBlobStoreCallsSyncOnPut(@TempDir Path storageDirectory) throws IOException {
    // Stream factory that records the first output stream it hands out, so the test can
    // later inspect whether sync() was invoked on it.
    final Path blobStorePath = storageDirectory.resolve("blobStore");
    final AtomicReference<TestingLocalDataOutputStream> firstOpenedStream = new AtomicReference<>();
    final FunctionWithException<org.apache.flink.core.fs.Path, FSDataOutputStream, IOException> streamFactory = path -> {
        final File targetFile = new File(path.toString());
        FileUtils.createParentDirectories(targetFile);
        final TestingLocalDataOutputStream trackingStream = new TestingLocalDataOutputStream(targetFile);
        firstOpenedStream.compareAndSet(null, trackingStream);
        return trackingStream;
    };
    try (FileSystemBlobStore blobStore = new FileSystemBlobStore(new TestFs(streamFactory), blobStorePath.toString())) {
        // Prepare a small local file and store it as a permanent blob.
        final File sourceFile = storageDirectory.resolve("localFile").toFile();
        FileUtils.createParentDirectories(sourceFile);
        FileUtils.writeStringToFile(sourceFile, "foobar", StandardCharsets.UTF_8);
        final BlobKey key = BlobKey.createKey(BlobKey.BlobType.PERMANENT_BLOB);
        blobStore.put(sourceFile, new JobID(), key);
        // put() must have flushed the data durably via sync() on the stream it opened.
        assertThat(firstOpenedStream.get().hasSyncBeenCalled()).isTrue();
    }
}
Also used : Path(java.nio.file.Path) TestFs(org.apache.flink.runtime.state.filesystem.TestFs) BeforeEach(org.junit.jupiter.api.BeforeEach) MessageDigest(java.security.MessageDigest) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) AtomicReference(java.util.concurrent.atomic.AtomicReference) TestLoggerExtension(org.apache.flink.util.TestLoggerExtension) FSDataOutputStream(org.apache.flink.core.fs.FSDataOutputStream) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) Assertions.assertThatThrownBy(org.assertj.core.api.Assertions.assertThatThrownBy) FunctionWithException(org.apache.flink.util.function.FunctionWithException) TestFileSystem(org.apache.flink.testutils.TestFileSystem) Path(java.nio.file.Path) Files(java.nio.file.Files) BufferedWriter(java.io.BufferedWriter) FileWriter(java.io.FileWriter) FileUtils(org.apache.commons.io.FileUtils) IOException(java.io.IOException) FileInputStream(java.io.FileInputStream) LocalDataOutputStream(org.apache.flink.core.fs.local.LocalDataOutputStream) Preconditions(org.apache.flink.util.Preconditions) File(java.io.File) FileNotFoundException(java.io.FileNotFoundException) StandardCharsets(java.nio.charset.StandardCharsets) Test(org.junit.jupiter.api.Test) AfterEach(org.junit.jupiter.api.AfterEach) JobID(org.apache.flink.api.common.JobID) TempDir(org.junit.jupiter.api.io.TempDir) InputStream(java.io.InputStream) TestFs(org.apache.flink.runtime.state.filesystem.TestFs) AtomicReference(java.util.concurrent.atomic.AtomicReference) IOException(java.io.IOException) FSDataOutputStream(org.apache.flink.core.fs.FSDataOutputStream) File(java.io.File) JobID(org.apache.flink.api.common.JobID) Test(org.junit.jupiter.api.Test)

Aggregations

Test (org.junit.jupiter.api.Test)15 TempDir (org.junit.jupiter.api.io.TempDir)15 File (java.io.File)10 IOException (java.io.IOException)10 ArrayList (java.util.ArrayList)9 List (java.util.List)9 Path (java.nio.file.Path)8 Assertions.assertThat (org.assertj.core.api.Assertions.assertThat)8 StandardCharsets (java.nio.charset.StandardCharsets)6 FileUtils (org.apache.commons.io.FileUtils)6 Assertions.assertThrows (org.junit.jupiter.api.Assertions.assertThrows)5 BeforeAll (org.junit.jupiter.api.BeforeAll)5 HashSet (java.util.HashSet)4 Map (java.util.Map)4 Configuration (org.apache.hadoop.conf.Configuration)4 FileSystem (org.apache.hadoop.fs.FileSystem)4 Path (org.apache.hadoop.fs.Path)4 CompressionCodecName (org.apache.parquet.hadoop.metadata.CompressionCodecName)4 AfterEach (org.junit.jupiter.api.AfterEach)4 ConnectedSonarLintEngineImpl (org.sonarsource.sonarlint.core.ConnectedSonarLintEngineImpl)4