
Example 51 with Path

Use of org.apache.hadoop.fs.Path in project hadoop by apache.

From the class LevelDBCacheTimelineStore, method serviceStop:

@Override
protected synchronized void serviceStop() throws Exception {
    IOUtils.cleanup(LOG, entityDb);
    Path dbPath = new Path(configuration.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH), dbId + CACHED_LDB_FILE_PREFIX);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(configuration);
        if (!localFS.delete(dbPath, true)) {
            throw new IOException("Couldn't delete data file for leveldb " + "timeline store " + dbPath);
        }
    } finally {
        IOUtils.cleanup(LOG, localFS);
    }
    super.serviceStop();
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException)
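
The same cleanup pattern can be exercised outside YARN. Below is a minimal, self-contained sketch that deletes a local directory through Hadoop's local FileSystem and closes the handle with IOUtils.cleanup in a finally block; the path /tmp/leveldb-cache-demo and the class name LocalCleanupDemo are made up for illustration, not YARN defaults.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class LocalCleanupDemo {
    public static void main(String[] args) throws IOException {
        // hypothetical directory standing in for the leveldb cache path
        Path dbPath = new Path("/tmp/leveldb-cache-demo");
        FileSystem localFS = null;
        try {
            localFS = FileSystem.getLocal(new Configuration());
            localFS.mkdirs(dbPath); // ensure there is something to delete
            // delete(path, true) removes the path recursively and returns
            // false if the deletion did not happen
            if (!localFS.delete(dbPath, true)) {
                throw new IOException("Couldn't delete " + dbPath);
            }
        } finally {
            // closes the filesystem handle, logging any secondary exception
            IOUtils.cleanup(null, localFS);
        }
    }
}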

Example 52 with Path

Use of org.apache.hadoop.fs.Path in project hadoop by apache.

From the class LevelDBCacheTimelineStore, method serviceInit:

@Override
protected synchronized void serviceInit(Configuration conf) throws Exception {
    configuration = conf;
    Options options = new Options();
    options.createIfMissing(true);
    options.cacheSize(conf.getLong(
        YarnConfiguration.TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE));
    JniDBFactory factory = new JniDBFactory();
    Path dbPath = new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH), dbId + CACHED_LDB_FILE_PREFIX);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(conf);
        if (!localFS.exists(dbPath)) {
            if (!localFS.mkdirs(dbPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dbPath);
            }
            localFS.setPermission(dbPath, LeveldbUtils.LEVELDB_DIR_UMASK);
        }
    } finally {
        IOUtils.cleanup(LOG, localFS);
    }
    LOG.info("Using leveldb path " + dbPath);
    entityDb = factory.open(new File(dbPath.toString()), options);
    entities = new LevelDBMapAdapter<>(entityDb);
    super.serviceInit(conf);
}
Also used: Path (org.apache.hadoop.fs.Path), Options (org.iq80.leveldb.Options), FileSystem (org.apache.hadoop.fs.FileSystem), JniDBFactory (org.fusesource.leveldbjni.JniDBFactory), IOException (java.io.IOException), File (java.io.File)
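
For readers unfamiliar with the leveldbjni API used here, the following self-contained sketch opens a database with the same Options pattern, writes a key, and reads it back. The directory /tmp/leveldb-open-demo and the 10 MB cache size are arbitrary demo values, not the YARN defaults.

import java.io.File;
import java.io.IOException;

import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;

public class LevelDbOpenDemo {
    public static void main(String[] args) throws IOException {
        Options options = new Options();
        options.createIfMissing(true);       // create the db if absent
        options.cacheSize(10 * 1024 * 1024); // read cache size in bytes
        JniDBFactory factory = new JniDBFactory();
        DB db = factory.open(new File("/tmp/leveldb-open-demo"), options);
        try {
            db.put("key".getBytes(), "value".getBytes());
            System.out.println(new String(db.get("key".getBytes())));
        } finally {
            db.close(); // releases the native leveldb handle
        }
    }
}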

Example 53 with Path

Use of org.apache.hadoop.fs.Path in project hadoop by apache.

From the class DomainLogInfo, method parseForStore:

public long parseForStore(TimelineDataManager tdm, Path appDirPath, boolean appCompleted, JsonFactory jsonFactory, ObjectMapper objMapper, FileSystem fs) throws IOException {
    LOG.debug("Parsing for log dir {} on attempt {}", appDirPath, attemptDirName);
    Path logPath = getPath(appDirPath);
    FileStatus status = fs.getFileStatus(logPath);
    long numParsed = 0;
    if (status != null) {
        long startTime = Time.monotonicNow();
        try {
            LOG.debug("Parsing {} at offset {}", logPath, offset);
            long count = parsePath(tdm, logPath, appCompleted, jsonFactory, objMapper, fs);
            LOG.info("Parsed {} entities from {} in {} msec", count, logPath, Time.monotonicNow() - startTime);
            numParsed += count;
        } catch (RuntimeException e) {
            // If AppLogs cannot parse this log, it may be corrupted or just empty
            if (e.getCause() instanceof JsonParseException && (status.getLen() > 0 || offset > 0)) {
            // log parse problems only if the file has been read in the past or
                // is visibly non-empty
                LOG.info("Log {} appears to be corrupted. Skip. ", logPath);
            }
        }
    } else {
        LOG.warn("{} no longer exists. Skip for scanning. ", logPath);
    }
    return numParsed;
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), JsonParseException (com.fasterxml.jackson.core.JsonParseException)
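
Note that on most FileSystem implementations getFileStatus throws FileNotFoundException rather than returning null, so a portable existence probe catches the exception instead of testing for null. A small sketch of that pattern, with a hypothetical path, follows.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Time;

public class StatusProbeDemo {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path logPath = new Path("/tmp/app-log-demo"); // hypothetical log path
        long startTime = Time.monotonicNow();
        try {
            FileStatus status = fs.getFileStatus(logPath);
            System.out.println(logPath + " has length " + status.getLen());
        } catch (FileNotFoundException e) {
            System.out.println(logPath + " no longer exists, skipping");
        }
        System.out.println("Probe took " + (Time.monotonicNow() - startTime) + " msec");
    }
}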

Example 54 with Path

Use of org.apache.hadoop.fs.Path in project hadoop by apache.

From the class PluginStoreTestUtils, method prepareFileSystemForPluginStore:

/**
   * For a given file system, set up the directories needed to test the plugin storage.
   *
   * @param fs a {@link FileSystem} object that the plugin storage will work with
   * @return the same file system, with its directories ready for plugin storage tests.
   * @throws IOException if the directories cannot be created
   */
public static FileSystem prepareFileSystemForPluginStore(FileSystem fs) throws IOException {
    Path activeDir = new Path(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR_DEFAULT);
    Path doneDir = new Path(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR_DEFAULT);
    fs.mkdirs(activeDir);
    fs.mkdirs(doneDir);
    return fs;
}
Also used: Path (org.apache.hadoop.fs.Path)
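
A hedged usage sketch of the same mkdirs pattern, run against the local filesystem with made-up directory names in place of the YARN defaults:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PrepareDirsDemo {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        // mkdirs creates the full directory chain and succeeds quietly
        // if the directories already exist
        fs.mkdirs(new Path("/tmp/timeline-demo/active"));
        fs.mkdirs(new Path("/tmp/timeline-demo/done"));
    }
}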

Example 55 with Path

Use of org.apache.hadoop.fs.Path in project hbase by apache.

From the class ProtobufUtil, method toFlushDescriptor:

public static FlushDescriptor toFlushDescriptor(FlushAction action, HRegionInfo hri, long flushSeqId, Map<byte[], List<Path>> committedFiles) {
    FlushDescriptor.Builder desc = FlushDescriptor.newBuilder()
        .setAction(action)
        .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes()))
        .setRegionName(UnsafeByteOperations.unsafeWrap(hri.getRegionName()))
        .setFlushSequenceNumber(flushSeqId)
        .setTableName(UnsafeByteOperations.unsafeWrap(hri.getTable().getName()));
    for (Map.Entry<byte[], List<Path>> entry : committedFiles.entrySet()) {
        WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder builder =
            WALProtos.FlushDescriptor.StoreFlushDescriptor.newBuilder()
                .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey()))
                // the store home dir is recorded relative to the region directory
                .setStoreHomeDir(Bytes.toString(entry.getKey()));
        if (entry.getValue() != null) {
            for (Path path : entry.getValue()) {
                builder.addFlushOutput(path.getName());
            }
        }
        desc.addStoreFlushes(builder);
    }
    return desc.build();
}
Also used: Path (org.apache.hadoop.fs.Path), ArrayList (java.util.ArrayList), List (java.util.List), WALProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos), FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor), Map (java.util.Map), HashMap (java.util.HashMap)
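
The loop above relies on Path.getName() returning only the final path component, which is why each flush output is recorded as a bare store file name relative to the store home dir. A tiny sketch of that behavior, using a made-up HBase-style layout:

import org.apache.hadoop.fs.Path;

public class PathNameDemo {
    public static void main(String[] args) {
        // hypothetical store file path, not a real HBase layout guarantee
        Path path = new Path("/hbase/data/ns/table/region/cf/storefile-0001");
        System.out.println(path.getName());   // storefile-0001
        System.out.println(path.getParent()); // /hbase/data/ns/table/region/cf
    }
}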

Aggregations

Path (org.apache.hadoop.fs.Path): 11752
Test (org.junit.Test): 4193
FileSystem (org.apache.hadoop.fs.FileSystem): 3587
IOException (java.io.IOException): 2631
Configuration (org.apache.hadoop.conf.Configuration): 2621
FileStatus (org.apache.hadoop.fs.FileStatus): 1568
ArrayList (java.util.ArrayList): 1145
File (java.io.File): 987
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 924
HashMap (java.util.HashMap): 570
Job (org.apache.hadoop.mapreduce.Job): 492
JobConf (org.apache.hadoop.mapred.JobConf): 477
URI (java.net.URI): 465
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 462
FileNotFoundException (java.io.FileNotFoundException): 441
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 375
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 362
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 355
Map (java.util.Map): 326
List (java.util.List): 316