
Example 56 with Files

Use of java.nio.file.Files in project storm by apache.

The class LogviewerLogSearchHandler, method deepSearchLogsForTopology.

/**
 * Advanced search across worker log files in a topology.
 *
 * @param topologyId topology ID
 * @param user username
 * @param search search string
 * @param numMatchesStr the maximum number of matches to return. Note that this limit applies to each port, not to each log file or to
 *     the search request as a whole
 * @param portStr worker port; null or '*' to search the logs of all worker ports
 * @param fileOffsetStr index (offset) into the list of log files
 * @param offsetStr start offset within the log file
 * @param searchArchived true to also search archived log files, false otherwise
 * @param callback callbackParameterName for JSONP
 * @param origin origin
 * @return Response containing JSON content representing search result
 */
public Response deepSearchLogsForTopology(String topologyId, String user, String search, String numMatchesStr, String portStr, String fileOffsetStr, String offsetStr, Boolean searchArchived, String callback, String origin) throws IOException {
    int numMatchedFiles = 0;
    int numScannedFiles = 0;
    Path rootDir = logRoot;
    Path absTopoDir = rootDir.resolve(topologyId).toAbsolutePath().normalize();
    Object returnValue;
    if (StringUtils.isEmpty(search) || !absTopoDir.toFile().exists() || !absTopoDir.startsWith(rootDir)) {
        returnValue = new ArrayList<>();
    } else {
        int fileOffset = ObjectReader.getInt(fileOffsetStr, 0);
        int offset = ObjectReader.getInt(offsetStr, 0);
        int numMatches = ObjectReader.getInt(numMatchesStr, 1);
        if (StringUtils.isEmpty(portStr) || portStr.equals("*")) {
            try (Stream<Path> topoDir = Files.list(absTopoDir)) {
                // check for all ports
                Stream<List<Path>> portsOfLogs = topoDir.map(portDir -> logsForPort(user, portDir)).filter(logs -> logs != null && !logs.isEmpty());
                if (BooleanUtils.isNotTrue(searchArchived)) {
                    portsOfLogs = portsOfLogs.map(fl -> Collections.singletonList(first(fl)));
                }
                final List<Matched> matchedList = portsOfLogs.map(logs -> findNMatches(logs, numMatches, 0, 0, search)).collect(toList());
                numMatchedFiles = matchedList.stream().mapToInt(match -> match.getMatches().size()).sum();
                numScannedFiles = matchedList.stream().mapToInt(match -> match.openedFiles).sum();
                returnValue = matchedList;
            }
        } else {
            int port = Integer.parseInt(portStr);
            // check just the one port
            @SuppressWarnings("unchecked") List<Integer> slotsPorts = SupervisorUtils.getSlotsPorts(stormConf);
            boolean containsPort = slotsPorts.stream().anyMatch(slotPort -> slotPort != null && (slotPort == port));
            if (!containsPort) {
                returnValue = new ArrayList<>();
            } else {
                Path absPortDir = absTopoDir.resolve(Integer.toString(port)).toAbsolutePath().normalize();
                if (!absPortDir.toFile().exists() || !absPortDir.startsWith(absTopoDir)) {
                    returnValue = new ArrayList<>();
                } else {
                    List<Path> filteredLogs = logsForPort(user, absPortDir);
                    if (BooleanUtils.isNotTrue(searchArchived)) {
                        filteredLogs = Collections.singletonList(first(filteredLogs));
                        fileOffset = 0;
                    }
                    returnValue = findNMatches(filteredLogs, numMatches, fileOffset, offset, search);
                    numMatchedFiles = ((Matched) returnValue).getMatches().size();
                    numScannedFiles = ((Matched) returnValue).openedFiles;
                }
            }
        }
    }
    if (numMatchedFiles == 0) {
        numDeepSearchNoResult.mark();
    }
    numFileScanned.update(numScannedFiles);
    return LogviewerResponseBuilder.buildSuccessJsonResponse(returnValue, callback, origin);
}
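Stripped of Storm specifics, the method above relies on two java.nio.file idioms: a resolve/normalize/startsWith guard that keeps user-supplied path segments inside the log root, and Files.list wrapped in try-with-resources because the returned Stream holds an open directory handle. Below is a minimal standalone sketch of that pattern; the class and method names are illustrative, not Storm's API.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class SafeDirListing {

    /**
     * Lists the entries of rootDir/child, or returns an empty list if the resolved
     * path escapes rootDir (e.g. child is "../etc") or is not an existing directory.
     */
    static List<Path> listChildSafely(Path rootDir, String child) throws IOException {
        Path root = rootDir.toAbsolutePath().normalize();
        Path dir = root.resolve(child).toAbsolutePath().normalize();
        // After normalization the resolved path must still start with the root,
        // otherwise the caller tried to traverse out of the allowed tree.
        if (!dir.startsWith(root) || !Files.isDirectory(dir)) {
            return Collections.emptyList();
        }
        // Files.list keeps the directory open until the stream is closed,
        // hence the try-with-resources, exactly as in deepSearchLogsForTopology.
        try (Stream<Path> entries = Files.list(dir)) {
            return entries.collect(Collectors.toList());
        }
    }

    public static void main(String[] args) throws IOException {
        Path logRoot = Paths.get("/tmp/workers-artifacts");        // hypothetical root
        System.out.println(listChildSafely(logRoot, "topo1"));     // listed if present
        System.out.println(listChildSafely(logRoot, "../../etc")); // rejected: escapes root
    }
}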
Also used : Path(java.nio.file.Path) Arrays(java.util.Arrays) GZIPInputStream(java.util.zip.GZIPInputStream) StringUtils(org.apache.commons.lang.StringUtils) BufferedInputStream(java.io.BufferedInputStream) PathUtil.truncatePathToLastElements(org.apache.storm.daemon.utils.PathUtil.truncatePathToLastElements) ListFunctionalSupport.rest(org.apache.storm.daemon.utils.ListFunctionalSupport.rest) LoggerFactory(org.slf4j.LoggerFactory) FileTime(java.nio.file.attribute.FileTime) LogviewerConstant(org.apache.storm.daemon.logviewer.LogviewerConstant) ByteBuffer(java.nio.ByteBuffer) BooleanUtils(org.apache.commons.lang.BooleanUtils) Pair(org.apache.commons.lang3.tuple.Pair) DaemonConfig(org.apache.storm.DaemonConfig) Map(java.util.Map) ListFunctionalSupport.drop(org.apache.storm.daemon.utils.ListFunctionalSupport.drop) ExceptionMeterNames(org.apache.storm.daemon.logviewer.utils.ExceptionMeterNames) Path(java.nio.file.Path) UrlBuilder(org.apache.storm.daemon.utils.UrlBuilder) Unchecked(org.jooq.lambda.Unchecked) LogviewerResponseBuilder(org.apache.storm.daemon.logviewer.utils.LogviewerResponseBuilder) StandardCharsets(java.nio.charset.StandardCharsets) List(java.util.List) Stream(java.util.stream.Stream) Response(javax.ws.rs.core.Response) Pattern(java.util.regex.Pattern) UnsupportedEncodingException(java.io.UnsupportedEncodingException) InvalidRequestException(org.apache.storm.daemon.ui.InvalidRequestException) Histogram(com.codahale.metrics.Histogram) ListFunctionalSupport.last(org.apache.storm.daemon.utils.ListFunctionalSupport.last) JSONAware(org.json.simple.JSONAware) HashMap(java.util.HashMap) SupervisorUtils(org.apache.storm.daemon.supervisor.SupervisorUtils) ArrayList(java.util.ArrayList) Meter(com.codahale.metrics.Meter) DirectoryCleaner(org.apache.storm.daemon.logviewer.utils.DirectoryCleaner) ServerUtils(org.apache.storm.utils.ServerUtils) JsonIgnore(com.fasterxml.jackson.annotation.JsonIgnore) ResourceAuthorizer(org.apache.storm.daemon.logviewer.utils.ResourceAuthorizer) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Logger(org.slf4j.Logger) ListFunctionalSupport.first(org.apache.storm.daemon.utils.ListFunctionalSupport.first) Files(java.nio.file.Files) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) IOException(java.io.IOException) Utils(org.apache.storm.utils.Utils) UnknownHostException(java.net.UnknownHostException) WorkerLogs(org.apache.storm.daemon.logviewer.utils.WorkerLogs) StreamUtil(org.apache.storm.daemon.utils.StreamUtil) Collectors.toList(java.util.stream.Collectors.toList) ObjectReader(org.apache.storm.utils.ObjectReader) JsonResponseBuilder(org.apache.storm.daemon.common.JsonResponseBuilder) Paths(java.nio.file.Paths) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) Collections(java.util.Collections) InputStream(java.io.InputStream) List(java.util.List) ArrayList(java.util.ArrayList) Collectors.toList(java.util.stream.Collectors.toList)

Example 57 with Files

Use of java.nio.file.Files in project storm by apache.

The class LogCleanerTest, method testPerWorkerDirectoryCleanup.

/**
 * The cleaner deletes the oldest files in each worker directory when that directory's files exceed the per-directory quota.
 */
@Test
public void testPerWorkerDirectoryCleanup() throws IOException {
    long nowMillis = Time.currentTimeMillis();
    try (TmpPath testDir = new TmpPath()) {
        Files.createDirectories(testDir.getFile().toPath());
        Path rootDir = createDir(testDir.getFile().toPath(), "workers-artifacts");
        Path topo1Dir = createDir(rootDir, "topo1");
        Path topo2Dir = createDir(rootDir, "topo2");
        Path port1Dir = createDir(topo1Dir, "port1");
        Path port2Dir = createDir(topo1Dir, "port2");
        Path port3Dir = createDir(topo2Dir, "port3");
        Seq.range(0, 10).forEach(idx -> createFile(port1Dir, "A" + idx, nowMillis + 100 * idx, 200));
        Seq.range(0, 10).forEach(idx -> createFile(port2Dir, "B" + idx, nowMillis + 100 * idx, 200));
        Seq.range(0, 10).forEach(idx -> createFile(port3Dir, "C" + idx, nowMillis + 100 * idx, 200));
        Map<String, Object> conf = Utils.readStormConfig();
        StormMetricsRegistry metricRegistry = new StormMetricsRegistry();
        WorkerLogs workerLogs = new WorkerLogs(conf, rootDir, metricRegistry);
        LogCleaner logCleaner = new LogCleaner(conf, workerLogs, new DirectoryCleaner(metricRegistry), rootDir, metricRegistry);
        List<Integer> deletedFiles = logCleaner.perWorkerDirCleanup(1200).stream().map(deletionMeta -> deletionMeta.deletedFiles).collect(toList());
        assertEquals(Integer.valueOf(4), deletedFiles.get(0));
        assertEquals(Integer.valueOf(4), deletedFiles.get(1));
        assertEquals(Integer.valueOf(4), deletedFiles.get(deletedFiles.size() - 1));
    }
}
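createDir and createFile are test helpers whose bodies are not shown here. A plausible minimal version using only java.nio.file is sketched below; the helper names and the (mtime, size) parameters are assumptions chosen to match how the test calls them, not the actual Storm test utilities.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.FileTime;

class LogTestFiles {

    /** Creates (if necessary) and returns the directory parent/name. */
    static Path createDir(Path parent, String name) {
        try {
            return Files.createDirectories(parent.resolve(name));
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    /** Creates a file of sizeBytes bytes (all zeros) under dir and stamps it with the given modification time. */
    static Path createFile(Path dir, String name, long mtimeMillis, int sizeBytes) {
        try {
            Path file = Files.write(dir.resolve(name), new byte[sizeBytes]);
            Files.setLastModifiedTime(file, FileTime.fromMillis(mtimeMillis));
            return file;
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}

With ten 200-byte files per port directory and a 1200-byte per-directory quota, removing the four oldest files in each directory is exactly what brings usage down to the quota, which is what the three assertions check.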
Also used : Path(java.nio.file.Path) TmpPath(org.apache.storm.testing.TmpPath) CoreMatchers.is(org.hamcrest.CoreMatchers.is) Arrays(java.util.Arrays) SortedSet(java.util.SortedSet) SUPERVISOR_WORKER_TIMEOUT_SECS(org.apache.storm.Config.SUPERVISOR_WORKER_TIMEOUT_SECS) FileTime(java.nio.file.attribute.FileTime) ArgumentMatchers.anyMap(org.mockito.ArgumentMatchers.anyMap) SupervisorUtils(org.apache.storm.daemon.supervisor.SupervisorUtils) Seq(org.jooq.lambda.Seq) TreeSet(java.util.TreeSet) Assert.assertThat(org.junit.Assert.assertThat) DirectoryStream(java.nio.file.DirectoryStream) Sets(org.mockito.internal.util.collections.Sets) Map(java.util.Map) LSWorkerHeartbeat(org.apache.storm.generated.LSWorkerHeartbeat) Path(java.nio.file.Path) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Iterator(java.util.Iterator) Files(java.nio.file.Files) Predicate(java.util.function.Predicate) ThreadLocalRandom(org.apache.storm.shade.io.netty.util.internal.ThreadLocalRandom) Assert.assertTrue(org.junit.Assert.assertTrue) Set(java.util.Set) IOException(java.io.IOException) Mockito.when(org.mockito.Mockito.when) Utils(org.apache.storm.utils.Utils) File(java.io.File) LOGVIEWER_CLEANUP_INTERVAL_SECS(org.apache.storm.DaemonConfig.LOGVIEWER_CLEANUP_INTERVAL_SECS) UncheckedIOException(java.io.UncheckedIOException) Time(org.apache.storm.utils.Time) Test(org.junit.jupiter.api.Test) Collectors.toList(java.util.stream.Collectors.toList) List(java.util.List) Paths(java.nio.file.Paths) Assert.assertFalse(org.junit.Assert.assertFalse) TmpPath(org.apache.storm.testing.TmpPath) LOGVIEWER_CLEANUP_AGE_MINS(org.apache.storm.DaemonConfig.LOGVIEWER_CLEANUP_AGE_MINS) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) Mockito.mock(org.mockito.Mockito.mock) TmpPath(org.apache.storm.testing.TmpPath) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Test(org.junit.jupiter.api.Test)

Example 58 with Files

Use of java.nio.file.Files in project neo4j by neo4j.

The class FixturesTestIT, method shouldHandleFunctionFixtures.

@Test
void shouldHandleFunctionFixtures() throws Exception {
    // Given a target folder to use as the server's home directory
    Path targetFolder = testDir.homePath();
    // When
    try (Neo4j server = getServerBuilder(targetFolder).withFixture(graphDatabaseService -> {
        try (Transaction tx = graphDatabaseService.beginTx()) {
            tx.createNode(Label.label("User"));
            tx.commit();
        }
        return null;
    }).build()) {
        // Then
        HTTP.Response response = HTTP.POST(server.httpURI() + "db/neo4j/tx/commit", quotedJson("{'statements':[{'statement':'MATCH (n:User) RETURN n'}]}"));
        assertThat(response.get("results").get(0).get("data").size()).isEqualTo(1);
    }
}
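HTTP.POST and quotedJson are Neo4j test utilities. For orientation only, the same transactional Cypher request can be issued with the JDK's built-in HttpClient; the sketch below assumes a locally running server at the usual HTTP port, which is not part of the example above.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class TxCommitCall {
    public static void main(String[] args) throws Exception {
        // Same statement as in the test, written as proper JSON rather than the quotedJson shorthand.
        String payload = "{\"statements\":[{\"statement\":\"MATCH (n:User) RETURN n\"}]}";
        HttpRequest request = HttpRequest
                .newBuilder(URI.create("http://localhost:7474/db/neo4j/tx/commit")) // hypothetical URI; the test uses server.httpURI()
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(payload))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // JSON whose results[0].data should hold the single :User node
    }
}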
Also used : Path(java.nio.file.Path) Assertions.fail(org.junit.jupiter.api.Assertions.fail) HTTP(org.neo4j.test.server.HTTP) Label(org.neo4j.graphdb.Label) Files(java.nio.file.Files) SslPolicyConfig(org.neo4j.configuration.ssl.SslPolicyConfig) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) StandardOpenOption(java.nio.file.StandardOpenOption) TestDirectory(org.neo4j.test.rule.TestDirectory) IOException(java.io.IOException) SuppressOutputExtension(org.neo4j.test.extension.SuppressOutputExtension) ResourceLock(org.junit.jupiter.api.parallel.ResourceLock) HTTPS(org.neo4j.configuration.ssl.SslPolicyScope.HTTPS) RawPayload.quotedJson(org.neo4j.test.server.HTTP.RawPayload.quotedJson) StandardCharsets(java.nio.charset.StandardCharsets) Test(org.junit.jupiter.api.Test) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) Inject(org.neo4j.test.extension.Inject) SelfSignedCertificateFactory(org.neo4j.test.ssl.SelfSignedCertificateFactory) Transaction(org.neo4j.graphdb.Transaction) TestDirectoryExtension(org.neo4j.test.extension.testdirectory.TestDirectoryExtension) Resources(org.junit.jupiter.api.parallel.Resources) Path(java.nio.file.Path) BOLT(org.neo4j.configuration.ssl.SslPolicyScope.BOLT) QueryExecutionException(org.neo4j.graphdb.QueryExecutionException) Transaction(org.neo4j.graphdb.Transaction) HTTP(org.neo4j.test.server.HTTP) Test(org.junit.jupiter.api.Test)

Example 59 with Files

Use of java.nio.file.Files in project flink by apache.

The class BlobServerPutTest, method testConcurrentPutOperations.

/**
 * [FLINK-6020] Tests that concurrent put operations will only upload the file once to the
 * {@link BlobStore} and that the files are not corrupt at any time.
 *
 * @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType) throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    final int concurrentPutOperations = 2;
    final int dataSize = 1024;
    Collection<BlobKey> persistedBlobs = ConcurrentHashMap.newKeySet();
    TestingBlobStore blobStore = new TestingBlobStoreBuilder().setPutFunction((file, jobID, blobKey) -> {
        persistedBlobs.add(blobKey);
        return true;
    }).createTestingBlobStore();
    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];
    ArrayList<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);
    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);
    try (final BlobServer server = new BlobServer(config, temporaryFolder.newFolder(), blobStore)) {
        server.start();
        for (int i = 0; i < concurrentPutOperations; i++) {
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(() -> {
                try {
                    BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                    BlobKey uploadedKey = put(server, jobId, inputStream, blobType);
                    // check the uploaded file's contents (concurrently)
                    verifyContents(server, jobId, uploadedKey, data);
                    return uploadedKey;
                } catch (IOException e) {
                    throw new CompletionException(new FlinkException("Could not upload blob.", e));
                }
            }, executor);
            allFutures.add(putFuture);
        }
        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);
        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();
        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();
        assertTrue(blobKeyIterator.hasNext());
        BlobKey blobKey = blobKeyIterator.next();
        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }
        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);
        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            assertThat(persistedBlobs).hasSameElementsAs(blobKeys);
        } else {
            // can't really verify much in the other cases other than that the put operations
            // should work and not corrupt files
            assertThat(persistedBlobs).isEmpty();
        }
    } finally {
        executor.shutdownNow();
    }
}
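verifyContents is defined elsewhere in the test class and goes through the BlobServer's get path; its essence, reading the stored blob back and comparing it byte-for-byte with the uploaded data, can be sketched with plain java.nio.file as follows. The file-based signature is an assumption made for illustration, not Flink's actual helper.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;

final class BlobChecks {

    /** Fails if the file at storedBlob does not contain exactly the expected bytes. */
    static void assertSameContents(Path storedBlob, byte[] expected) throws IOException {
        byte[] actual = Files.readAllBytes(storedBlob);
        if (!Arrays.equals(expected, actual)) {
            throw new AssertionError("Blob content differs: expected " + expected.length
                    + " bytes, found " + actual.length + " bytes");
        }
    }
}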
Also used : Arrays(java.util.Arrays) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) Random(java.util.Random) ByteArrayInputStream(java.io.ByteArrayInputStream) Path(org.apache.flink.core.fs.Path) TestLogger(org.apache.flink.util.TestLogger) Assert.fail(org.junit.Assert.fail) TRANSIENT_BLOB(org.apache.flink.runtime.blob.BlobKey.BlobType.TRANSIENT_BLOB) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) CompletionException(java.util.concurrent.CompletionException) Preconditions(org.apache.flink.util.Preconditions) InetSocketAddress(java.net.InetSocketAddress) Executors(java.util.concurrent.Executors) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Assume.assumeTrue(org.junit.Assume.assumeTrue) AccessDeniedException(java.nio.file.AccessDeniedException) FlinkException(org.apache.flink.util.FlinkException) BlobClientTest.validateGetAndClose(org.apache.flink.runtime.blob.BlobClientTest.validateGetAndClose) CompletableFuture(java.util.concurrent.CompletableFuture) ArrayList(java.util.ArrayList) BlobServerGetTest.get(org.apache.flink.runtime.blob.BlobServerGetTest.get) CheckedThread(org.apache.flink.core.testutils.CheckedThread) FutureUtils(org.apache.flink.util.concurrent.FutureUtils) Assert.assertArrayEquals(org.junit.Assert.assertArrayEquals) ExpectedException(org.junit.rules.ExpectedException) Nullable(javax.annotation.Nullable) ExecutorService(java.util.concurrent.ExecutorService) Iterator(java.util.Iterator) Files(java.nio.file.Files) Assert.assertNotNull(org.junit.Assert.assertNotNull) Configuration(org.apache.flink.configuration.Configuration) Assert.assertTrue(org.junit.Assert.assertTrue) FileUtils(org.apache.commons.io.FileUtils) Test(org.junit.Test) IOException(java.io.IOException) FileInputStream(java.io.FileInputStream) BlobKeyTest.verifyKeyDifferentHashEquals(org.apache.flink.runtime.blob.BlobKeyTest.verifyKeyDifferentHashEquals) OperatingSystem(org.apache.flink.util.OperatingSystem) File(java.io.File) ExecutionException(java.util.concurrent.ExecutionException) JobID(org.apache.flink.api.common.JobID) Rule(org.junit.Rule) Collections(java.util.Collections) TemporaryFolder(org.junit.rules.TemporaryFolder) PERMANENT_BLOB(org.apache.flink.runtime.blob.BlobKey.BlobType.PERMANENT_BLOB) Assert.assertEquals(org.junit.Assert.assertEquals) InputStream(java.io.InputStream) Configuration(org.apache.flink.configuration.Configuration) FutureUtils(org.apache.flink.util.concurrent.FutureUtils) ArrayList(java.util.ArrayList) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) FlinkException(org.apache.flink.util.FlinkException) CompletableFuture(java.util.concurrent.CompletableFuture) CompletionException(java.util.concurrent.CompletionException) ExecutorService(java.util.concurrent.ExecutorService) Collection(java.util.Collection)

Example 60 with Files

Use of java.nio.file.Files in project flink by apache.

The class SavepointDeepCopyTest, method testSavepointDeepCopy.

/**
 * Test savepoint deep copy. This method tests the savepoint deep copy by:
 *
 * <ul>
 *   <li>create {@code savepoint1} with operator {@code Operator1}, make sure it has more state
 *       files in addition to _metadata
 *   <li>create {@code savepoint2} from {@code savepoint1} by adding a new operator {@code
 *       Operator2}
 *   <li>check all state files in {@code savepoint1}'s directory are copied over to {@code
 *       savepoint2}'s directory
 *   <li>read the state of {@code Operator1} from {@code savepoint2} and make sure the number of
 *       keys remains the same
 * </ul>
 *
 * @throws Exception if anything goes wrong
 */
@Test
public void testSavepointDeepCopy() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<String> words = env.fromElements(TEXT.split(" "));
    StateBootstrapTransformation<String> transformation = OperatorTransformation.bootstrapWith(words).keyBy(e -> e).transform(new WordMapBootstrapper());
    File savepointUrl1 = createAndRegisterTempFile(new AbstractID().toHexString());
    String savepointPath1 = savepointUrl1.getPath();
    SavepointWriter.newSavepoint(backend, 128).withConfiguration(FS_SMALL_FILE_THRESHOLD, FILE_STATE_SIZE_THRESHOLD).withOperator("Operator1", transformation).write(savepointPath1);
    env.execute("bootstrap savepoint1");
    Assert.assertTrue("Failed to bootstrap savepoint1 with additional state files", Files.list(Paths.get(savepointPath1)).count() > 1);
    Set<String> stateFiles1 = Files.list(Paths.get(savepointPath1)).map(path -> path.getFileName().toString()).collect(Collectors.toSet());
    // create savepoint2 from savepoint1 created above
    File savepointUrl2 = createAndRegisterTempFile(new AbstractID().toHexString());
    String savepointPath2 = savepointUrl2.getPath();
    SavepointWriter savepoint2 = SavepointWriter.fromExistingSavepoint(savepointPath1, backend).withConfiguration(FS_SMALL_FILE_THRESHOLD, FILE_STATE_SIZE_THRESHOLD);
    savepoint2.withOperator("Operator2", transformation).write(savepointPath2);
    env.execute("create savepoint2");
    Assert.assertTrue("Failed to create savepoint2 from savepoint1 with additional state files", Files.list(Paths.get(savepointPath2)).count() > 1);
    Set<String> stateFiles2 = Files.list(Paths.get(savepointPath2)).map(path -> path.getFileName().toString()).collect(Collectors.toSet());
    assertThat("At least one state file in savepoint1 are not in savepoint2", stateFiles1, everyItem(isIn(stateFiles2)));
    // Load savepoint2 via fromExistingSavepoint and read the state of "Operator1" (which was not
    // touched/changed when savepoint2 was created), then make sure the number of keys remains the same
    long actuallyKeyNum = JobResultRetriever.collect(SavepointReader.read(env, savepointPath2, backend).readKeyedState("Operator1", new ReadFunction())).size();
    long expectedKeyNum = Arrays.stream(TEXT.split(" ")).distinct().count();
    Assert.assertEquals("Unexpected number of keys in the state of Operator1", expectedKeyNum, actuallyKeyNum);
}
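The part of this test that actually exercises java.nio.file.Files is the directory comparison: list both savepoint directories and check that every state file written for savepoint1 also shows up in savepoint2. Below is a standalone sketch of that check, with placeholder directory paths and the stream closed via try-with-resources (the test itself leaves the Files.list streams to the garbage collector).

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class SavepointFileSets {

    /** Returns the plain file names directly under dir. */
    static Set<String> fileNames(Path dir) throws IOException {
        try (Stream<Path> entries = Files.list(dir)) { // close the stream to release the directory handle
            return entries.map(p -> p.getFileName().toString()).collect(Collectors.toSet());
        }
    }

    public static void main(String[] args) throws IOException {
        Set<String> stateFiles1 = fileNames(Paths.get("/tmp/savepoint1")); // placeholder paths
        Set<String> stateFiles2 = fileNames(Paths.get("/tmp/savepoint2"));
        // The deep copy succeeded if every state file from savepoint1 was carried over to savepoint2.
        System.out.println(stateFiles2.containsAll(stateFiles1));
    }
}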
Also used : Arrays(java.util.Arrays) Tuple2(org.apache.flink.api.java.tuple.Tuple2) RunWith(org.junit.runner.RunWith) JobResultRetriever(org.apache.flink.state.api.utils.JobResultRetriever) EmbeddedRocksDBStateBackend(org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend) MemorySize(org.apache.flink.configuration.MemorySize) KeyedStateReaderFunction(org.apache.flink.state.api.functions.KeyedStateReaderFunction) Assert.assertThat(org.junit.Assert.assertThat) StateBackend(org.apache.flink.runtime.state.StateBackend) Matchers.everyItem(org.hamcrest.Matchers.everyItem) Collector(org.apache.flink.util.Collector) KeyedStateBootstrapFunction(org.apache.flink.state.api.functions.KeyedStateBootstrapFunction) Matchers.isIn(org.hamcrest.Matchers.isIn) Parameterized(org.junit.runners.Parameterized) AbstractTestBase(org.apache.flink.test.util.AbstractTestBase) Types(org.apache.flink.api.common.typeinfo.Types) Files(java.nio.file.Files) AbstractID(org.apache.flink.util.AbstractID) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) Collection(java.util.Collection) Configuration(org.apache.flink.configuration.Configuration) Set(java.util.Set) Test(org.junit.Test) FS_SMALL_FILE_THRESHOLD(org.apache.flink.configuration.CheckpointingOptions.FS_SMALL_FILE_THRESHOLD) Collectors(java.util.stream.Collectors) File(java.io.File) DataStream(org.apache.flink.streaming.api.datastream.DataStream) ValueState(org.apache.flink.api.common.state.ValueState) Paths(java.nio.file.Paths) HashMapStateBackend(org.apache.flink.runtime.state.hashmap.HashMapStateBackend) RandomStringUtils(org.apache.commons.lang3.RandomStringUtils) Assert(org.junit.Assert) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) File(java.io.File) AbstractID(org.apache.flink.util.AbstractID) Test(org.junit.Test)

Aggregations

Files (java.nio.file.Files): 243
IOException (java.io.IOException): 210
Path (java.nio.file.Path): 196
List (java.util.List): 176
Collectors (java.util.stream.Collectors): 154
Paths (java.nio.file.Paths): 133
File (java.io.File): 127
ArrayList (java.util.ArrayList): 117
Map (java.util.Map): 109
Set (java.util.Set): 96
Collections (java.util.Collections): 89
Arrays (java.util.Arrays): 81
Stream (java.util.stream.Stream): 77
HashMap (java.util.HashMap): 74
HashSet (java.util.HashSet): 58
InputStream (java.io.InputStream): 55
Collection (java.util.Collection): 55
Logger (org.slf4j.Logger): 54
Pattern (java.util.regex.Pattern): 53
Optional (java.util.Optional): 51