Use of java.nio.file.Files in project storm by apache.
The class LogviewerLogSearchHandler, method deepSearchLogsForTopology.
/**
 * Advanced search across worker log files in a topology.
 *
 * @param topologyId topology ID
 * @param user username
 * @param search search string
 * @param numMatchesStr the maximum number of matches. Note that this limit applies per port, not per log file or per
 *     search request
 * @param portStr worker port, null or '*' if the request wants to search from all worker logs
 * @param fileOffsetStr index (offset) of the log files
 * @param offsetStr start offset for log file
 * @param searchArchived true if the request wants to search also archived files, false if not
 * @param callback callback parameter name for JSONP
 * @param origin origin
 * @return Response containing JSON content representing search result
 */
public Response deepSearchLogsForTopology(String topologyId, String user, String search, String numMatchesStr,
                                          String portStr, String fileOffsetStr, String offsetStr,
                                          Boolean searchArchived, String callback, String origin) throws IOException {
    int numMatchedFiles = 0;
    int numScannedFiles = 0;
    Path rootDir = logRoot;
    Path absTopoDir = rootDir.resolve(topologyId).toAbsolutePath().normalize();
    Object returnValue;
    if (StringUtils.isEmpty(search) || !absTopoDir.toFile().exists() || !absTopoDir.startsWith(rootDir)) {
        returnValue = new ArrayList<>();
    } else {
        int fileOffset = ObjectReader.getInt(fileOffsetStr, 0);
        int offset = ObjectReader.getInt(offsetStr, 0);
        int numMatches = ObjectReader.getInt(numMatchesStr, 1);
        if (StringUtils.isEmpty(portStr) || portStr.equals("*")) {
            try (Stream<Path> topoDir = Files.list(absTopoDir)) {
                // check for all ports
                Stream<List<Path>> portsOfLogs = topoDir.map(portDir -> logsForPort(user, portDir))
                    .filter(logs -> logs != null && !logs.isEmpty());
                if (BooleanUtils.isNotTrue(searchArchived)) {
                    portsOfLogs = portsOfLogs.map(fl -> Collections.singletonList(first(fl)));
                }
                final List<Matched> matchedList = portsOfLogs
                    .map(logs -> findNMatches(logs, numMatches, 0, 0, search))
                    .collect(toList());
                numMatchedFiles = matchedList.stream().mapToInt(match -> match.getMatches().size()).sum();
                numScannedFiles = matchedList.stream().mapToInt(match -> match.openedFiles).sum();
                returnValue = matchedList;
            }
        } else {
            int port = Integer.parseInt(portStr);
            // check just the one port
            @SuppressWarnings("unchecked")
            List<Integer> slotsPorts = SupervisorUtils.getSlotsPorts(stormConf);
            boolean containsPort = slotsPorts.stream().anyMatch(slotPort -> slotPort != null && (slotPort == port));
            if (!containsPort) {
                returnValue = new ArrayList<>();
            } else {
                Path absPortDir = absTopoDir.resolve(Integer.toString(port)).toAbsolutePath().normalize();
                if (!absPortDir.toFile().exists() || !absPortDir.startsWith(absTopoDir)) {
                    returnValue = new ArrayList<>();
                } else {
                    List<Path> filteredLogs = logsForPort(user, absPortDir);
                    if (BooleanUtils.isNotTrue(searchArchived)) {
                        filteredLogs = Collections.singletonList(first(filteredLogs));
                        fileOffset = 0;
                    }
                    returnValue = findNMatches(filteredLogs, numMatches, fileOffset, offset, search);
                    numMatchedFiles = ((Matched) returnValue).getMatches().size();
                    numScannedFiles = ((Matched) returnValue).openedFiles;
                }
            }
        }
    }
    if (numMatchedFiles == 0) {
        numDeepSearchNoResult.mark();
    }
    numFileScanned.update(numScannedFiles);
    return LogviewerResponseBuilder.buildSuccessJsonResponse(returnValue, callback, origin);
}
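
The java.nio.file.Files call this example revolves around is Files.list, wrapped in try-with-resources so the underlying directory handle is released. A minimal standalone sketch of the same idiom follows; the topology log directory path and the directory filter are made up for illustration and are not part of the Storm code above.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ListPortDirs {
    public static void main(String[] args) throws IOException {
        // Hypothetical topology log directory; substitute a real path.
        Path topoDir = Paths.get("/var/log/storm/workers-artifacts/topo1");
        // Files.list returns a lazily populated Stream backed by an open
        // directory handle, so it must be closed; hence the try-with-resources.
        try (Stream<Path> entries = Files.list(topoDir)) {
            List<Path> portDirs = entries
                .filter(Files::isDirectory)   // keep only per-port subdirectories
                .collect(Collectors.toList());
            portDirs.forEach(System.out::println);
        }
    }
}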
Use of java.nio.file.Files in project storm by apache.
The class LogCleanerTest, method testPerWorkerDirectoryCleanup.
/**
 * Cleaner deletes the oldest files in each worker dir if the files exceed the per-dir quota.
 */
@Test
public void testPerWorkerDirectoryCleanup() throws IOException {
    long nowMillis = Time.currentTimeMillis();
    try (TmpPath testDir = new TmpPath()) {
        Files.createDirectories(testDir.getFile().toPath());
        Path rootDir = createDir(testDir.getFile().toPath(), "workers-artifacts");
        Path topo1Dir = createDir(rootDir, "topo1");
        Path topo2Dir = createDir(rootDir, "topo2");
        Path port1Dir = createDir(topo1Dir, "port1");
        Path port2Dir = createDir(topo1Dir, "port2");
        Path port3Dir = createDir(topo2Dir, "port3");
        Seq.range(0, 10).forEach(idx -> createFile(port1Dir, "A" + idx, nowMillis + 100 * idx, 200));
        Seq.range(0, 10).forEach(idx -> createFile(port2Dir, "B" + idx, nowMillis + 100 * idx, 200));
        Seq.range(0, 10).forEach(idx -> createFile(port3Dir, "C" + idx, nowMillis + 100 * idx, 200));
        Map<String, Object> conf = Utils.readStormConfig();
        StormMetricsRegistry metricRegistry = new StormMetricsRegistry();
        WorkerLogs workerLogs = new WorkerLogs(conf, rootDir, metricRegistry);
        LogCleaner logCleaner = new LogCleaner(conf, workerLogs, new DirectoryCleaner(metricRegistry), rootDir, metricRegistry);
        List<Integer> deletedFiles = logCleaner.perWorkerDirCleanup(1200).stream()
            .map(deletionMeta -> deletionMeta.deletedFiles)
            .collect(toList());
        assertEquals(Integer.valueOf(4), deletedFiles.get(0));
        assertEquals(Integer.valueOf(4), deletedFiles.get(1));
        assertEquals(Integer.valueOf(4), deletedFiles.get(deletedFiles.size() - 1));
    }
}
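
Besides the direct Files.createDirectories call, the test relies on createDir and createFile helpers whose bodies are not shown in this excerpt. Below is a plausible sketch of what such a createDir helper could look like on top of java.nio.file.Files; it is an assumption for illustration, not Storm's actual helper.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;

final class DirHelpers {
    private DirHelpers() {
    }

    // Hypothetical equivalent of the test's createDir helper: create and
    // return a named subdirectory under 'parent', including missing parents.
    static Path createDir(Path parent, String name) {
        try {
            return Files.createDirectories(parent.resolve(name));
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}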
Use of java.nio.file.Files in project neo4j by neo4j.
The class FixturesTestIT, method shouldHandleFunctionFixtures.
@Test
void shouldHandleFunctionFixtures() throws Exception {
    // Given two files in the root folder
    Path targetFolder = testDir.homePath();
    // When
    try (Neo4j server = getServerBuilder(targetFolder).withFixture(graphDatabaseService -> {
        try (Transaction tx = graphDatabaseService.beginTx()) {
            tx.createNode(Label.label("User"));
            tx.commit();
        }
        return null;
    }).build()) {
        // Then
        HTTP.Response response = HTTP.POST(server.httpURI() + "db/neo4j/tx/commit",
            quotedJson("{'statements':[{'statement':'MATCH (n:User) RETURN n'}]}"));
        assertThat(response.get("results").get(0).get("data").size()).isEqualTo(1);
    }
}
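
This particular snippet does not itself show a java.nio.file.Files call (the aggregation presumably picks it up elsewhere in the class). As a hedged, illustrative addition that is not part of the original test, a check along these lines could confirm that the embedded server actually created data on disk under targetFolder.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;

class ServerHomeCheck {
    // Illustrative only: report whether the server home directory contains
    // any subdirectories (for example, store or transaction log directories).
    static boolean serverWroteData(Path targetFolder) throws IOException {
        try (Stream<Path> entries = Files.list(targetFolder)) {
            return entries.anyMatch(Files::isDirectory);
        }
    }
}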
Use of java.nio.file.Files in project flink by apache.
The class BlobServerPutTest, method testConcurrentPutOperations.
/**
 * [FLINK-6020] Tests that concurrent put operations will only upload the file once to the
 * {@link BlobStore} and that the files are not corrupt at any time.
 *
 * @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    final int concurrentPutOperations = 2;
    final int dataSize = 1024;
    Collection<BlobKey> persistedBlobs = ConcurrentHashMap.newKeySet();
    TestingBlobStore blobStore = new TestingBlobStoreBuilder().setPutFunction((file, jobID, blobKey) -> {
        persistedBlobs.add(blobKey);
        return true;
    }).createTestingBlobStore();
    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];
    ArrayList<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);
    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);
    try (final BlobServer server = new BlobServer(config, temporaryFolder.newFolder(), blobStore)) {
        server.start();
        for (int i = 0; i < concurrentPutOperations; i++) {
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(() -> {
                try {
                    BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                    BlobKey uploadedKey = put(server, jobId, inputStream, blobType);
                    // check the uploaded file's contents (concurrently)
                    verifyContents(server, jobId, uploadedKey, data);
                    return uploadedKey;
                } catch (IOException e) {
                    throw new CompletionException(new FlinkException("Could not upload blob.", e));
                }
            }, executor);
            allFutures.add(putFuture);
        }
        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);
        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();
        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();
        assertTrue(blobKeyIterator.hasNext());
        BlobKey blobKey = blobKeyIterator.next();
        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }
        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);
        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            assertThat(persistedBlobs).hasSameElementsAs(blobKeys);
        } else {
            // can't really verify much in the other cases other than that the put
            // operations should work and not corrupt files
            assertThat(persistedBlobs).isEmpty();
        }
    } finally {
        executor.shutdownNow();
    }
}
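
verifyContents is a helper of the test class that is not shown in this excerpt. One plausible way to implement such a content check with java.nio.file.Files (an assumption for illustration, not Flink's actual helper) is to read the stored blob file back and compare it against the uploaded bytes.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;

final class BlobFileChecks {
    private BlobFileChecks() {
    }

    // Illustrative content check: read the blob file that the server stored
    // and compare it byte-for-byte with the data that was uploaded.
    static void assertFileContentEquals(Path storedBlobFile, byte[] expected) throws IOException {
        byte[] actual = Files.readAllBytes(storedBlobFile);
        if (!Arrays.equals(expected, actual)) {
            throw new AssertionError("Blob file content differs from the uploaded data");
        }
    }
}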
Use of java.nio.file.Files in project flink by apache.
The class SavepointDeepCopyTest, method testSavepointDeepCopy.
/**
 * Test savepoint deep copy. This method tests the savepoint deep copy by:
 *
 * <ul>
 *   <li>create {@code savepoint1} with operator {@code Operator1}, making sure it has more state
 *       files in addition to _metadata
 *   <li>create {@code savepoint2} from {@code savepoint1} by adding a new operator {@code Operator2}
 *   <li>check that all state files in {@code savepoint1}'s directory are copied over to {@code
 *       savepoint2}'s directory
 *   <li>read the state of {@code Operator1} from {@code savepoint2} and make sure the number of
 *       keys remains the same
 * </ul>
 *
 * @throws Exception throw exceptions when anything goes wrong
 */
@Test
public void testSavepointDeepCopy() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<String> words = env.fromElements(TEXT.split(" "));
    StateBootstrapTransformation<String> transformation =
        OperatorTransformation.bootstrapWith(words).keyBy(e -> e).transform(new WordMapBootstrapper());
    File savepointUrl1 = createAndRegisterTempFile(new AbstractID().toHexString());
    String savepointPath1 = savepointUrl1.getPath();
    SavepointWriter.newSavepoint(backend, 128)
        .withConfiguration(FS_SMALL_FILE_THRESHOLD, FILE_STATE_SIZE_THRESHOLD)
        .withOperator("Operator1", transformation)
        .write(savepointPath1);
    env.execute("bootstrap savepoint1");
    Assert.assertTrue("Failed to bootstrap savepoint1 with additional state files",
        Files.list(Paths.get(savepointPath1)).count() > 1);
    Set<String> stateFiles1 = Files.list(Paths.get(savepointPath1))
        .map(path -> path.getFileName().toString())
        .collect(Collectors.toSet());
    // create savepoint2 from savepoint1 created above
    File savepointUrl2 = createAndRegisterTempFile(new AbstractID().toHexString());
    String savepointPath2 = savepointUrl2.getPath();
    SavepointWriter savepoint2 = SavepointWriter.fromExistingSavepoint(savepointPath1, backend)
        .withConfiguration(FS_SMALL_FILE_THRESHOLD, FILE_STATE_SIZE_THRESHOLD);
    savepoint2.withOperator("Operator2", transformation).write(savepointPath2);
    env.execute("create savepoint2");
    Assert.assertTrue("Failed to create savepoint2 from savepoint1 with additional state files",
        Files.list(Paths.get(savepointPath2)).count() > 1);
    Set<String> stateFiles2 = Files.list(Paths.get(savepointPath2))
        .map(path -> path.getFileName().toString())
        .collect(Collectors.toSet());
    assertThat("At least one state file in savepoint1 is not in savepoint2", stateFiles1, everyItem(isIn(stateFiles2)));
    // Read the state of "Operator1" from savepoint2 (Operator1 was not touched/changed when
    // savepoint2 was created) and make sure the number of keys remains the same.
    long actuallyKeyNum = JobResultRetriever.collect(
        SavepointReader.read(env, savepointPath2, backend).readKeyedState("Operator1", new ReadFunction())).size();
    long expectedKeyNum = Arrays.stream(TEXT.split(" ")).distinct().count();
    Assert.assertEquals("Unexpected number of keys in the state of Operator1", expectedKeyNum, actuallyKeyNum);
}
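
The deep-copy check above boils down to comparing directory listings built with Files.list. A standalone sketch of the same java.nio.file.Files idiom follows, with the streams closed explicitly via try-with-resources; the savepoint directory paths are placeholders and not part of the Flink test.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class SavepointDirCompare {
    // Collect the plain file names found directly under 'dir'.
    static Set<String> fileNames(Path dir) throws IOException {
        try (Stream<Path> entries = Files.list(dir)) {
            return entries.map(p -> p.getFileName().toString()).collect(Collectors.toSet());
        }
    }

    public static void main(String[] args) throws IOException {
        // Placeholder savepoint directories; substitute real paths.
        Set<String> first = fileNames(Paths.get("/tmp/savepoint1"));
        Set<String> second = fileNames(Paths.get("/tmp/savepoint2"));
        // Every state file written for savepoint1 should also be present in savepoint2.
        System.out.println("all copied: " + second.containsAll(first));
    }
}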