
Example 86 with URI

use of java.net.URI in project flink by apache.

the class MockInputSplitProvider method addInputSplits.

/**
	 * Generates a set of input splits from an input path
	 * 
	 * @param path
	 *        the path of the local file to generate the input splits from
	 * @param noSplits
	 *        the number of input splits to be generated from the given input file
	 */
public void addInputSplits(final String path, final int noSplits) {
    final InputSplit[] tmp = new InputSplit[noSplits];
    final String[] hosts = { "localhost" };
    final String localPath;
    try {
        localPath = new URI(path).getPath();
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException("Path URI can not be transformed to local path.");
    }
    final File inFile = new File(localPath);
    final long splitLength = inFile.length() / noSplits;
    long pos = 0;
    for (int i = 0; i < noSplits - 1; i++) {
        tmp[i] = new FileInputSplit(i, new Path(path), pos, splitLength, hosts);
        pos += splitLength;
    }
    tmp[noSplits - 1] = new FileInputSplit(noSplits - 1, new Path(path), pos, inFile.length() - pos, hosts);
    this.inputSplits = tmp;
}
Also used : Path(org.apache.flink.core.fs.Path) FileInputSplit(org.apache.flink.core.fs.FileInputSplit) URISyntaxException(java.net.URISyntaxException) InputSplit(org.apache.flink.core.io.InputSplit) URI(java.net.URI) File(java.io.File)
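
A minimal, standalone sketch (not part of the Flink source; the temp-file name is made up) showing the java.net.URI step the method relies on: getPath() strips the scheme from a file URI so the result can be handed to java.io.File before the splits are sized.

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class UriToLocalPathDemo {
    public static void main(String[] args) throws URISyntaxException, IOException {
        // A file URI such as a test would pass to addInputSplits(...)
        File tmp = File.createTempFile("splits", ".txt");
        String path = tmp.toURI().toString();          // e.g. file:/tmp/splits123.txt
        // getPath() drops the scheme so java.io.File can open the local file
        String localPath = new URI(path).getPath();    // e.g. /tmp/splits123.txt
        System.out.println(path + " -> " + localPath);
        System.out.println("length = " + new File(localPath).length());
        tmp.delete();
    }
}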

Example 87 with URI

use of java.net.URI in project hadoop by apache.

the class FTPFileSystem method isParentOf.

/**
   * Probe for a path being a parent of another
   * @param parent parent path
   * @param child possible child path
   * @return true if the parent's path matches the start of the child's
   */
private boolean isParentOf(Path parent, Path child) {
    URI parentURI = parent.toUri();
    String parentPath = parentURI.getPath();
    if (!parentPath.endsWith("/")) {
        parentPath += "/";
    }
    URI childURI = child.toUri();
    String childPath = childURI.getPath();
    return childPath.startsWith(parentPath);
}
Also used : URI(java.net.URI)
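
A self-contained sketch (not Hadoop code) of the same prefix check written against plain java.net.URI. It illustrates why the trailing slash is appended: without it, "/data" would wrongly look like a parent of "/database".

import java.net.URI;

public class ParentPrefixDemo {
    // Same idea as FTPFileSystem#isParentOf, using java.net.URI directly
    static boolean isParentOf(URI parent, URI child) {
        String parentPath = parent.getPath();
        if (!parentPath.endsWith("/")) {
            parentPath += "/";   // guard against "/data" matching "/database"
        }
        return child.getPath().startsWith(parentPath);
    }

    public static void main(String[] args) {
        URI data = URI.create("ftp://host/data");
        System.out.println(isParentOf(data, URI.create("ftp://host/data/file.txt"))); // true
        System.out.println(isParentOf(data, URI.create("ftp://host/database/x")));    // false
    }
}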

Example 88 with URI

use of java.net.URI in project hadoop by apache.

the class PathData method expandAsGlob.

/**
   * Expand the given path as a glob pattern.  Non-existent paths do not
   * throw an exception because creation commands like touch and mkdir need
   * to create them.  The "stat" field will be null if the path does not
   * exist.
   * @param pattern the pattern to expand as a glob
   * @param conf the hadoop configuration
   * @return list of {@link PathData} objects.  if the pattern is not a glob,
   * and does not exist, the list will contain a single PathData with a null
   * stat 
   * @throws IOException anything else goes wrong...
   */
public static PathData[] expandAsGlob(String pattern, Configuration conf) throws IOException {
    Path globPath = new Path(pattern);
    FileSystem fs = globPath.getFileSystem(conf);
    FileStatus[] stats = fs.globStatus(globPath);
    PathData[] items = null;
    if (stats == null) {
        // remove any quoting in the glob pattern
        pattern = pattern.replaceAll("\\\\(.)", "$1");
        // not a glob & file not found, so add the path with a null stat
        items = new PathData[] { new PathData(fs, pattern, null) };
    } else {
        // figure out what type of glob path was given, will convert globbed
        // paths to match the type to preserve relativity
        PathType globType;
        URI globUri = globPath.toUri();
        if (globUri.getScheme() != null) {
            globType = PathType.HAS_SCHEME;
        } else if (!globUri.getPath().isEmpty() && new Path(globUri.getPath()).isAbsolute()) {
            globType = PathType.SCHEMELESS_ABSOLUTE;
        } else {
            globType = PathType.RELATIVE;
        }
        // convert stats to PathData
        items = new PathData[stats.length];
        int i = 0;
        for (FileStatus stat : stats) {
            URI matchUri = stat.getPath().toUri();
            String globMatch = null;
            switch(globType) {
                case HAS_SCHEME:
                    // use as-is, but remove authority if necessary
                    if (globUri.getAuthority() == null) {
                        matchUri = removeAuthority(matchUri);
                    }
                    globMatch = uriToString(matchUri, false);
                    break;
                case SCHEMELESS_ABSOLUTE:
                    // take just the uri's path
                    globMatch = matchUri.getPath();
                    break;
                case RELATIVE:
                    // make it relative to the current working dir
                    URI cwdUri = fs.getWorkingDirectory().toUri();
                    globMatch = relativize(cwdUri, matchUri, stat.isDirectory());
                    break;
            }
            items[i++] = new PathData(fs, globMatch, stat);
        }
    }
    Arrays.sort(items);
    return items;
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) URI(java.net.URI)
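
A rough, JDK-only sketch (not Hadoop code) of the PathType decision made above, using only java.net.URI. Hadoop's Path class does extra normalization that is skipped here; the patterns in main are made-up examples.

import java.net.URI;

public class GlobTypeDemo {
    enum PathType { HAS_SCHEME, SCHEMELESS_ABSOLUTE, RELATIVE }

    // Approximation of the classification in expandAsGlob
    static PathType classify(String pattern) {
        URI uri = URI.create(pattern);
        if (uri.getScheme() != null) {
            return PathType.HAS_SCHEME;
        } else if (uri.getPath() != null && uri.getPath().startsWith("/")) {
            return PathType.SCHEMELESS_ABSOLUTE;
        } else {
            return PathType.RELATIVE;
        }
    }

    public static void main(String[] args) {
        System.out.println(classify("hdfs://nn:8020/user/*.txt")); // HAS_SCHEME
        System.out.println(classify("/user/*.txt"));               // SCHEMELESS_ABSOLUTE
        System.out.println(classify("logs/*.txt"));                // RELATIVE
    }
}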

Example 89 with URI

use of java.net.URI in project hadoop by apache.

the class InodeTree method createLink.

private void createLink(final String src, final String target, final boolean isLinkMerge, final UserGroupInformation aUgi) throws URISyntaxException, IOException, FileAlreadyExistsException, UnsupportedFileSystemException {
    // Validate that src is valid absolute path
    final Path srcPath = new Path(src);
    if (!srcPath.isAbsoluteAndSchemeAuthorityNull()) {
        throw new IOException("ViewFs: Non absolute mount name in config:" + src);
    }
    final String[] srcPaths = breakIntoPathComponents(src);
    INodeDir<T> curInode = root;
    int i;
    // Ignore first initial slash, process all except last component
    for (i = 1; i < srcPaths.length - 1; i++) {
        final String iPath = srcPaths[i];
        INode<T> nextInode = curInode.resolveInternal(iPath);
        if (nextInode == null) {
            INodeDir<T> newDir = curInode.addDir(iPath, aUgi);
            newDir.InodeDirFs = getTargetFileSystem(newDir);
            nextInode = newDir;
        }
        if (nextInode instanceof INodeLink) {
            // Error - expected a dir but got a link
            throw new FileAlreadyExistsException("Path " + nextInode.fullPath + " already exists as link");
        } else {
            assert (nextInode instanceof INodeDir);
            curInode = (INodeDir<T>) nextInode;
        }
    }
    // Now process the last component
    // Add the link in 2 cases: does not exist or a link exists
    // last component
    String iPath = srcPaths[i];
    if (curInode.resolveInternal(iPath) != null) {
        //  directory/link already exists
        StringBuilder strB = new StringBuilder(srcPaths[0]);
        for (int j = 1; j <= i; ++j) {
            strB.append('/').append(srcPaths[j]);
        }
        throw new FileAlreadyExistsException("Path " + strB + " already exists as dir; cannot create link here");
    }
    final INodeLink<T> newLink;
    final String fullPath = curInode.fullPath + (curInode == root ? "" : "/") + iPath;
    if (isLinkMerge) {
        // Target is list of URIs
        String[] targetsList = StringUtils.getStrings(target);
        URI[] targetsListURI = new URI[targetsList.length];
        int k = 0;
        for (String itarget : targetsList) {
            targetsListURI[k++] = new URI(itarget);
        }
        newLink = new INodeLink<T>(fullPath, aUgi, getTargetFileSystem(targetsListURI), targetsListURI);
    } else {
        newLink = new INodeLink<T>(fullPath, aUgi, getTargetFileSystem(new URI(target)), new URI(target));
    }
    curInode.addLink(iPath, newLink);
    mountPoints.add(new MountPoint<T>(src, newLink));
}
Also used : Path(org.apache.hadoop.fs.Path) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) IOException(java.io.IOException) URI(java.net.URI)
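
A standalone sketch (not Hadoop code) of the merge-link branch: a comma-separated target string is split and each entry is turned into a java.net.URI. The target URIs are invented, and plain String.split stands in for Hadoop's StringUtils.getStrings.

import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;

public class MergeLinkTargetsDemo {
    public static void main(String[] args) throws URISyntaxException {
        // A merge link maps one mount point to several target URIs
        String target = "hdfs://nn1:8020/projects,hdfs://nn2:8020/projects";
        String[] targetsList = target.split(",");
        URI[] targetsListURI = new URI[targetsList.length];
        int k = 0;
        for (String itarget : targetsList) {
            targetsListURI[k++] = new URI(itarget.trim());
        }
        System.out.println(Arrays.toString(targetsListURI));
    }
}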

Example 90 with URI

use of java.net.URI in project hadoop by apache.

the class ViewFileSystem method initialize.

/**
   * Called after a new FileSystem instance is constructed.
   * @param theUri a uri whose authority section names the host, port, etc. for
   *        this FileSystem
   * @param conf the configuration
   */
@Override
public void initialize(final URI theUri, final Configuration conf) throws IOException {
    super.initialize(theUri, conf);
    setConf(conf);
    config = conf;
    // Now build  client side view (i.e. client side mount table) from config.
    final String authority = theUri.getAuthority();
    try {
        myUri = new URI(FsConstants.VIEWFS_SCHEME, authority, "/", null, null);
        fsState = new InodeTree<FileSystem>(conf, authority) {

            @Override
            protected FileSystem getTargetFileSystem(final URI uri) throws URISyntaxException, IOException {
                return new ChRootedFileSystem(uri, config);
            }

            @Override
            protected FileSystem getTargetFileSystem(final INodeDir<FileSystem> dir) throws URISyntaxException {
                return new InternalDirOfViewFs(dir, creationTime, ugi, myUri);
            }

            @Override
            protected FileSystem getTargetFileSystem(URI[] mergeFsURIList) throws URISyntaxException, UnsupportedFileSystemException {
                throw new UnsupportedFileSystemException("mergefs not implemented");
            // return MergeFs.createMergeFs(mergeFsURIList, config);
            }
        };
        workingDir = this.getHomeDirectory();
    } catch (URISyntaxException e) {
        throw new IOException("URISyntax exception: " + theUri);
    }
}
Also used : FileSystem(org.apache.hadoop.fs.FileSystem) UnsupportedFileSystemException(org.apache.hadoop.fs.UnsupportedFileSystemException) URISyntaxException(java.net.URISyntaxException) IOException(java.io.IOException) URI(java.net.URI)
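
A minimal sketch (not Hadoop code) of the URI construction in initialize(): the five-argument java.net.URI constructor builds the client-side viewfs URI from the authority (mount-table name). The authority "clusterX" is a made-up example, and the literal "viewfs" stands in for FsConstants.VIEWFS_SCHEME.

import java.net.URI;
import java.net.URISyntaxException;

public class ViewFsUriDemo {
    public static void main(String[] args) throws URISyntaxException {
        String authority = "clusterX";  // example mount-table name
        // Constructor arguments: scheme, authority, path, query, fragment
        URI myUri = new URI("viewfs", authority, "/", null, null);
        System.out.println(myUri);                 // viewfs://clusterX/
        System.out.println(myUri.getAuthority());  // clusterX
    }
}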

Aggregations

URI (java.net.URI) 5680
Test (org.junit.Test) 1852
URISyntaxException (java.net.URISyntaxException) 1016
IOException (java.io.IOException) 749
File (java.io.File) 531
HashMap (java.util.HashMap) 458
ArrayList (java.util.ArrayList) 452
Test (org.testng.annotations.Test) 394
Configuration (org.apache.hadoop.conf.Configuration) 321
Path (org.apache.hadoop.fs.Path) 267
URL (java.net.URL) 266
Map (java.util.Map) 262
Response (javax.ws.rs.core.Response) 218
List (java.util.List) 184
InputStream (java.io.InputStream) 154
HashSet (java.util.HashSet) 136
FileSystem (org.apache.hadoop.fs.FileSystem) 135
RequestContext (com.linkedin.r2.message.RequestContext) 129
RestRequestBuilder (com.linkedin.r2.message.rest.RestRequestBuilder) 128
RestRequest (com.linkedin.r2.message.rest.RestRequest) 112