Use of java.nio.file.Path in project groovy by apache.
The class NioGroovyMethods, method traverse.
private static FileVisitResult traverse(final Path self, final Map<String, Object> options, @ClosureParams(value = SimpleType.class, options = "java.nio.file.Path") final Closure closure, final int maxDepth) throws IOException {
    checkDir(self);
    // pull the traversal callbacks and filters out of the options map
    final Closure pre = (Closure) options.get("preDir");
    final Closure post = (Closure) options.get("postDir");
    final FileType type = (FileType) options.get("type");
    final Object filter = options.get("filter");
    final Object nameFilter = options.get("nameFilter");
    final Object excludeFilter = options.get("excludeFilter");
    final Object excludeNameFilter = options.get("excludeNameFilter");
    final Closure sort = (Closure) options.get("sort");
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(self)) {
        // snapshot the directory contents so they can optionally be sorted
        final Iterator<Path> itr = stream.iterator();
        List<Path> files = new LinkedList<Path>();
        while (itr.hasNext()) {
            files.add(itr.next());
        }
        if (sort != null)
            files = DefaultGroovyMethods.sort(files, sort);
        for (Path path : files) {
            if (Files.isDirectory(path)) {
                // directories: visit, then (depth permitting) recurse between the preDir/postDir callbacks
                if (type != FileType.FILES) {
                    if (closure != null && notFiltered(path, filter, nameFilter, excludeFilter, excludeNameFilter)) {
                        Object closureResult = closure.call(path);
                        if (closureResult == FileVisitResult.SKIP_SIBLINGS)
                            break;
                        if (closureResult == FileVisitResult.TERMINATE)
                            return FileVisitResult.TERMINATE;
                    }
                }
                if (maxDepth != 0) {
                    Object preResult = null;
                    if (pre != null) {
                        preResult = pre.call(path);
                    }
                    if (preResult == FileVisitResult.SKIP_SIBLINGS)
                        break;
                    if (preResult == FileVisitResult.TERMINATE)
                        return FileVisitResult.TERMINATE;
                    if (preResult != FileVisitResult.SKIP_SUBTREE) {
                        FileVisitResult terminated = traverse(path, options, closure, maxDepth - 1);
                        if (terminated == FileVisitResult.TERMINATE)
                            return terminated;
                    }
                    Object postResult = null;
                    if (post != null) {
                        postResult = post.call(path);
                    }
                    if (postResult == FileVisitResult.SKIP_SIBLINGS)
                        break;
                    if (postResult == FileVisitResult.TERMINATE)
                        return FileVisitResult.TERMINATE;
                }
            } else if (type != FileType.DIRECTORIES) {
                // non-directories are visited unless only directories were requested
                if (closure != null && notFiltered(path, filter, nameFilter, excludeFilter, excludeNameFilter)) {
                    Object closureResult = closure.call(path);
                    if (closureResult == FileVisitResult.SKIP_SIBLINGS)
                        break;
                    if (closureResult == FileVisitResult.TERMINATE)
                        return FileVisitResult.TERMINATE;
                }
            }
        }
        return FileVisitResult.CONTINUE;
    }
}
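
The SKIP_SIBLINGS / SKIP_SUBTREE / TERMINATE protocol that traverse implements by hand mirrors the JDK's own FileVisitResult contract in Files.walkFileTree. As a point of comparison, here is a minimal, self-contained Java sketch of the same control flow using the standard visitor API; the start directory, the depth limit, and the ".git" skip rule are illustrative assumptions, not part of the Groovy source:

import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.EnumSet;

public class WalkDemo {
    public static void main(String[] args) throws IOException {
        // Walk at most 2 levels below the start dir, like traverse with maxDepth = 2.
        Files.walkFileTree(Paths.get("."), EnumSet.noneOf(FileVisitOption.class), 2,
            new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
                    // SKIP_SUBTREE plays the role of the preDir closure result above.
                    return dir.getFileName().toString().equals(".git")
                        ? FileVisitResult.SKIP_SUBTREE : FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
                    System.out.println(file);
                    // Returning TERMINATE here would stop the whole walk, as in the closure above.
                    return FileVisitResult.CONTINUE;
                }
            });
    }
}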
Use of java.nio.file.Path in project groovy by apache.
The class NioGroovyMethods, method eachFileMatch.
/**
 * Invokes the closure for each file whose name (file.name) matches the given nameFilter in the given directory
 * - calling the {@link org.codehaus.groovy.runtime.DefaultGroovyMethods#isCase(Object, Object)} method to determine if a match occurs. This method can be used
 * with different kinds of filters like regular expressions, classes, ranges etc.
 * Both regular files and subdirectories may be candidates for matching depending
 * on the value of fileType.
 * <pre>
 * // collect names of files in baseDir matching supplied regex pattern
 * import static groovy.io.FileType.*
 * def names = []
 * baseDir.eachFileMatch FILES, ~/foo\d\.txt/, { names << it.name }
 * assert names == ['foo1.txt', 'foo2.txt']
 *
 * // remove all *.bak files in baseDir
 * baseDir.eachFileMatch FILES, ~/.*\.bak/, { Path bak -> bak.delete() }
 *
 * // print out files > 4K in size from baseDir
 * baseDir.eachFileMatch FILES, { baseDir.resolve(it).size() > 4096 }, { println "$it.name ${it.size()}" }
 * </pre>
 *
 * @param self a Path (must be a directory)
 * @param fileType whether normal files or directories or both should be processed
 * @param nameFilter the filter to perform on the name of the file/directory (using the {@link org.codehaus.groovy.runtime.DefaultGroovyMethods#isCase(Object, Object)} method)
 * @param closure the closure to invoke
 * @throws java.io.FileNotFoundException if the given directory does not exist
 * @throws IllegalArgumentException if the provided Path object does not represent a directory
 * @since 2.3.0
 */
public static void eachFileMatch(final Path self, final FileType fileType, final Object nameFilter, @ClosureParams(value = SimpleType.class, options = "java.nio.file.Path") final Closure closure) throws IOException {
    // may also throw FileNotFoundException or IllegalArgumentException (see javadoc)
    checkDir(self);
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(self)) {
        Iterator<Path> itr = stream.iterator();
        BooleanReturningMethodInvoker bmi = new BooleanReturningMethodInvoker("isCase");
        while (itr.hasNext()) {
            Path currentPath = itr.next();
            if ((fileType != FileType.FILES && Files.isDirectory(currentPath)) || (fileType != FileType.DIRECTORIES && Files.isRegularFile(currentPath))) {
                if (bmi.invoke(nameFilter, currentPath.getFileName().toString()))
                    closure.call(currentPath);
            }
        }
    }
}
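
Stripped of Groovy's dynamic isCase dispatch (handled here by BooleanReturningMethodInvoker), eachFileMatch boils down to a DirectoryStream plus a name test. A minimal plain-Java sketch of the same idea, assuming a fixed regex and a hypothetical baseDir directory:

import java.io.IOException;
import java.nio.file.*;
import java.util.regex.Pattern;

public class MatchDemo {
    public static void main(String[] args) throws IOException {
        Pattern nameFilter = Pattern.compile("foo\\d\\.txt");
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(Paths.get("baseDir"))) {
            for (Path p : stream) {
                // Regular files only, matching the FileType.FILES case above.
                if (Files.isRegularFile(p) && nameFilter.matcher(p.getFileName().toString()).matches()) {
                    System.out.println(p.getFileName());
                }
            }
        }
    }
}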
Use of java.nio.file.Path in project hadoop by apache.
The class NNUpgradeUtil, method doPreUpgrade.
/**
 * Perform any steps that must succeed across all storage dirs/JournalManagers
 * involved in an upgrade before proceeding onto the actual upgrade stage. If
 * a call to any JM's or local storage dir's doPreUpgrade method fails, then
 * doUpgrade will not be called for any JM. The existing current dir is
 * renamed to previous.tmp, and then a new, empty current dir is created.
 *
 * @param conf configuration for creating {@link EditLogFileOutputStream}
 * @param sd the storage directory to perform the pre-upgrade procedure
 * @throws IOException in the event of error
 */
static void doPreUpgrade(Configuration conf, StorageDirectory sd) throws IOException {
    LOG.info("Starting upgrade of storage directory " + sd.getRoot());
    // rename current to tmp
    renameCurToTmp(sd);
    final Path curDir = sd.getCurrentDir().toPath();
    final Path tmpDir = sd.getPreviousTmp().toPath();
    // depth 1: only the direct children of tmpDir; do not follow links
    Files.walkFileTree(tmpDir, Collections.<FileVisitOption>emptySet(), 1, new SimpleFileVisitor<Path>() {

        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            String name = file.getFileName().toString();
            if (Files.isRegularFile(file) && name.startsWith(NNStorage.NameNodeFile.EDITS.getName())) {
                // hard-link each edits file back into the new current dir
                Path newFile = curDir.resolve(name);
                Files.createLink(newFile, file);
            }
            return super.visitFile(file, attrs);
        }
    });
}
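
The NIO call doing the real work here is Files.createLink, which hard-links each edits file into the fresh current directory instead of copying it, so no edit-log data is duplicated on disk. A standalone sketch of that pattern, with the directory names and the "edits" prefix as assumptions standing in for the NNStorage constants:

import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.Collections;

public class LinkBackDemo {
    public static void main(String[] args) throws IOException {
        final Path curDir = Paths.get("current");        // assumed destination dir
        final Path tmpDir = Paths.get("previous.tmp");   // assumed source dir
        Files.createDirectories(curDir);
        Files.createDirectories(tmpDir);
        // Depth 1: only the direct children of tmpDir, as in doPreUpgrade.
        Files.walkFileTree(tmpDir, Collections.<FileVisitOption>emptySet(), 1,
            new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    String name = file.getFileName().toString();
                    if (name.startsWith("edits")) {
                        // Hard link: both names refer to the same file data; nothing is copied.
                        Files.createLink(curDir.resolve(name), file);
                    }
                    return FileVisitResult.CONTINUE;
                }
            });
    }
}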
Use of java.nio.file.Path in project hadoop by apache.
The class TestDatanodeManager, method HelperFunction.
/**
 * Helper function that tests the DatanodeManager's sortLocatedBlocks function.
 * We invoke this function both with and without a topology script.
 *
 * @param scriptFileName - script name or null
 *
 * @throws URISyntaxException
 * @throws IOException
 */
public void HelperFunction(String scriptFileName) throws URISyntaxException, IOException {
    // create the DatanodeManager which will be tested
    Configuration conf = new Configuration();
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    if (scriptFileName != null && !scriptFileName.isEmpty()) {
        URL shellScript = getClass().getResource(scriptFileName);
        Path resourcePath = Paths.get(shellScript.toURI());
        FileUtil.setExecutable(resourcePath.toFile(), true);
        conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, resourcePath.toString());
    }
    DatanodeManager dm = mockDatanodeManager(fsn, conf);
    // register 5 datanodes, each with a different storage ID and type
    DatanodeInfo[] locs = new DatanodeInfo[5];
    String[] storageIDs = new String[5];
    StorageType[] storageTypes = new StorageType[] { StorageType.ARCHIVE, StorageType.DEFAULT, StorageType.DISK, StorageType.RAM_DISK, StorageType.SSD };
    for (int i = 0; i < 5; i++) {
        // register new datanode
        String uuid = "UUID-" + i;
        String ip = "IP-" + i;
        DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
        Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
        Mockito.when(dr.getIpAddr()).thenReturn(ip);
        Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
        Mockito.when(dr.getXferPort()).thenReturn(9000);
        Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
        dm.registerDatanode(dr);
        // get location and storage information
        locs[i] = dm.getDatanode(uuid);
        storageIDs[i] = "storageID-" + i;
    }
    // mark the first 2 locations as decommissioned
    locs[0].setDecommissioned();
    locs[1].setDecommissioned();
    // create a LocatedBlock with the above locations
    ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
    LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
    List<LocatedBlock> blocks = new ArrayList<>();
    blocks.add(block);
    final String targetIp = locs[4].getIpAddr();
    // sort block locations
    dm.sortLocatedBlocks(targetIp, blocks);
    // check that storage IDs/types are aligned with datanode locs
    DatanodeInfo[] sortedLocs = block.getLocations();
    storageIDs = block.getStorageIDs();
    storageTypes = block.getStorageTypes();
    assertThat(sortedLocs.length, is(5));
    assertThat(storageIDs.length, is(5));
    assertThat(storageTypes.length, is(5));
    for (int i = 0; i < sortedLocs.length; i++) {
        assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageID(), is(storageIDs[i]));
        assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageType(), is(storageTypes[i]));
    }
    // Ensure the local node is first.
    assertThat(sortedLocs[0].getIpAddr(), is(targetIp));
    // Ensure the two decommissioned DNs were moved to the end.
    assertThat(sortedLocs[sortedLocs.length - 1].getAdminState(), is(DatanodeInfo.AdminStates.DECOMMISSIONED));
    assertThat(sortedLocs[sortedLocs.length - 2].getAdminState(), is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
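
One small Path idiom worth noting in this test is Paths.get(url.toURI()), which is the robust way to turn a classpath-resource URL into a filesystem Path (unlike url.getPath(), it survives spaces and other escaping). A hedged sketch of just that step, with a hypothetical resource name:

import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ResourcePathDemo {
    public static void main(String[] args) throws URISyntaxException {
        // Assumes a script named "topology-script.sh" sits next to this class on the classpath.
        URL shellScript = ResourcePathDemo.class.getResource("topology-script.sh");
        if (shellScript != null) {
            // toURI() correctly decodes the URL before it becomes a Path.
            Path resourcePath = Paths.get(shellScript.toURI());
            // The File view is still handy for legacy APIs such as setExecutable.
            resourcePath.toFile().setExecutable(true);
            System.out.println(resourcePath);
        }
    }
}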
Use of java.nio.file.Path in project storm by apache.
The class StormSqlImpl, method submit.
@Override
public void submit(String name, Iterable<String> statements, Map<String, ?> stormConf, SubmitOptions opts, StormSubmitter.ProgressListener progressListener, String asUser) throws Exception {
    Map<String, ISqlTridentDataSource> dataSources = new HashMap<>();
    for (String sql : statements) {
        StormParser parser = new StormParser(sql);
        SqlNode node = parser.impl().parseSqlStmtEof();
        if (node instanceof SqlCreateTable) {
            handleCreateTableForTrident((SqlCreateTable) node, dataSources);
        } else if (node instanceof SqlCreateFunction) {
            handleCreateFunction((SqlCreateFunction) node);
        } else {
            QueryPlanner planner = new QueryPlanner(schema);
            AbstractTridentProcessor processor = planner.compile(dataSources, sql);
            TridentTopology topo = processor.build();
            Path jarPath = null;
            try {
                // QueryPlanner on Trident mode configures the topology with compiled classes,
                // so we need to add new classes into topology jar
                // Topology will be serialized and sent to Nimbus, and deserialized and executed in workers.
                jarPath = Files.createTempFile("storm-sql", ".jar");
                System.setProperty("storm.jar", jarPath.toString());
                packageTopology(jarPath, processor);
                StormSubmitter.submitTopologyAs(name, stormConf, topo.build(), opts, progressListener, asUser);
            } finally {
                if (jarPath != null) {
                    Files.delete(jarPath);
                }
            }
        }
    }
}
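
The temp-jar handling in submit is the standard create/use/delete-in-finally lifecycle for Files.createTempFile. A minimal sketch of that lifecycle in isolation; the "storm.jar" property name comes from the snippet above, while the packaging and submission steps are elided as comments:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class TempJarDemo {
    public static void main(String[] args) throws IOException {
        Path jarPath = null;
        try {
            // Same prefix/suffix as above; the file lands in java.io.tmpdir.
            jarPath = Files.createTempFile("storm-sql", ".jar");
            System.setProperty("storm.jar", jarPath.toString());
            // ... package compiled classes into jarPath and submit the topology here ...
        } finally {
            // Always clean up, even if packaging or submission throws.
            if (jarPath != null) {
                Files.delete(jarPath);
            }
        }
    }
}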