Use of org.apache.hadoop.fs.Path in project hadoop by apache.
Class LevelDBCacheTimelineStore, method serviceStop().
@Override
protected synchronized void serviceStop() throws Exception {
  IOUtils.cleanup(LOG, entityDb);
  Path dbPath = new Path(
      configuration.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH),
      dbId + CACHED_LDB_FILE_PREFIX);
  FileSystem localFS = null;
  try {
    localFS = FileSystem.getLocal(configuration);
    if (!localFS.delete(dbPath, true)) {
      throw new IOException("Couldn't delete data file for leveldb "
          + "timeline store " + dbPath);
    }
  } finally {
    IOUtils.cleanup(LOG, localFS);
  }
  super.serviceStop();
}
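For reference, a minimal standalone sketch of the same cleanup pattern: resolve a child Path under a configured parent directory, delete it recursively on the local file system, and release resources with IOUtils.cleanup. The class, method, and parameter names below are illustrative placeholders, not the store's actual fields.

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class LocalDbCleanup {
  private static final Log LOG = LogFactory.getLog(LocalDbCleanup.class);

  // Deletes <parentDir>/<dbName> on the local file system, mirroring serviceStop() above.
  public static void deleteLocalDb(Configuration conf, String parentDir, String dbName)
      throws IOException {
    Path dbPath = new Path(parentDir, dbName);   // Path(parent, child) joins the two segments
    FileSystem localFS = null;
    try {
      localFS = FileSystem.getLocal(conf);
      if (!localFS.delete(dbPath, true)) {       // true = delete recursively
        throw new IOException("Couldn't delete " + dbPath);
      }
    } finally {
      IOUtils.cleanup(LOG, localFS);             // closes the file system, logging any secondary error
    }
  }
}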
Use of org.apache.hadoop.fs.Path in project hadoop by apache.
Class LevelDBCacheTimelineStore, method serviceInit().
@Override
protected synchronized void serviceInit(Configuration conf) throws Exception {
  configuration = conf;
  Options options = new Options();
  options.createIfMissing(true);
  options.cacheSize(conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE,
      YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE));
  JniDBFactory factory = new JniDBFactory();
  Path dbPath = new Path(
      conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH),
      dbId + CACHED_LDB_FILE_PREFIX);
  FileSystem localFS = null;
  try {
    localFS = FileSystem.getLocal(conf);
    if (!localFS.exists(dbPath)) {
      if (!localFS.mkdirs(dbPath)) {
        throw new IOException("Couldn't create directory for leveldb "
            + "timeline store " + dbPath);
      }
      localFS.setPermission(dbPath, LeveldbUtils.LEVELDB_DIR_UMASK);
    }
  } finally {
    IOUtils.cleanup(LOG, localFS);
  }
  LOG.info("Using leveldb path " + dbPath);
  entityDb = factory.open(new File(dbPath.toString()), options);
  entities = new LevelDBMapAdapter<>(entityDb);
  super.serviceInit(conf);
}
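The exists/mkdirs/setPermission sequence above can be isolated into a small helper. The sketch below is a simplified stand-in: the directory names are made up, and the 0700 permission is a placeholder for the store's actual LeveldbUtils.LEVELDB_DIR_UMASK constant.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class LocalDbDirSetup {
  // Ensures <parentDir>/<dbName> exists on the local file system with owner-only access.
  public static Path ensureLocalDbDir(Configuration conf, String parentDir, String dbName)
      throws IOException {
    Path dbPath = new Path(parentDir, dbName);
    FileSystem localFS = FileSystem.getLocal(conf);
    if (!localFS.exists(dbPath)) {
      if (!localFS.mkdirs(dbPath)) {
        throw new IOException("Couldn't create " + dbPath);
      }
      // Placeholder permission; the real store applies LeveldbUtils.LEVELDB_DIR_UMASK
      localFS.setPermission(dbPath, new FsPermission((short) 0700));
    }
    // The store itself closes the local FS afterwards via IOUtils.cleanup(LOG, localFS)
    return dbPath;
  }
}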
Use of org.apache.hadoop.fs.Path in project hadoop by apache.
Class DomainLogInfo, method parseForStore().
public long parseForStore(TimelineDataManager tdm, Path appDirPath,
    boolean appCompleted, JsonFactory jsonFactory, ObjectMapper objMapper,
    FileSystem fs) throws IOException {
  LOG.debug("Parsing for log dir {} on attempt {}", appDirPath, attemptDirName);
  Path logPath = getPath(appDirPath);
  FileStatus status = fs.getFileStatus(logPath);
  long numParsed = 0;
  if (status != null) {
    long startTime = Time.monotonicNow();
    try {
      LOG.debug("Parsing {} at offset {}", logPath, offset);
      long count = parsePath(tdm, logPath, appCompleted, jsonFactory, objMapper, fs);
      LOG.info("Parsed {} entities from {} in {} msec", count, logPath,
          Time.monotonicNow() - startTime);
      numParsed += count;
    } catch (RuntimeException e) {
      // If AppLogs cannot parse this log, it may be corrupted or just empty
      if (e.getCause() instanceof JsonParseException
          && (status.getLen() > 0 || offset > 0)) {
        // Only log parse problems if the file has been read in the past or
        // is visibly non-empty
        LOG.info("Log {} appears to be corrupted. Skip. ", logPath);
      }
    }
  } else {
    LOG.warn("{} no longer exists. Skip for scanning. ", logPath);
  }
  return numParsed;
}
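The method keeps a running offset and only reparses when the log has new content. A simplified, self-contained check of that idea (the class and method names here are illustrative, not part of DomainLogInfo):

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IncrementalReadCheck {
  // Returns true if logPath still exists and has grown past the last parsed offset.
  public static boolean hasNewData(FileSystem fs, Path logPath, long lastOffset)
      throws IOException {
    if (!fs.exists(logPath)) {
      return false;                        // file was removed since the last scan
    }
    FileStatus status = fs.getFileStatus(logPath);
    return status.getLen() > lastOffset;   // only parse when new bytes were appended
  }
}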
Use of org.apache.hadoop.fs.Path in project hadoop by apache.
Class PluginStoreTestUtils, method prepareFileSystemForPluginStore().
/**
 * For a given file system, set up the directories needed to test the plugin storage.
 *
 * @param fs a {@link FileSystem} object that the plugin storage will work with
 * @return the same file system, ready for plugin storage tests
 * @throws IOException
 */
public static FileSystem prepareFileSystemForPluginStore(FileSystem fs)
    throws IOException {
  Path activeDir = new Path(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR_DEFAULT);
  Path doneDir = new Path(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR_DEFAULT);
  fs.mkdirs(activeDir);
  fs.mkdirs(doneDir);
  return fs;
}
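A possible way to call this test helper, assuming a plain local file system rather than a MiniDFSCluster (the surrounding class and output line are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class PluginStoreSetupExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    FileSystem fs = FileSystem.getLocal(conf);
    // Creates the default active/done directories used by the entity-group FS store
    FileSystem ready = PluginStoreTestUtils.prepareFileSystemForPluginStore(fs);
    System.out.println("Plugin store directories created on " + ready.getUri());
  }
}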
Use of org.apache.hadoop.fs.Path in project hbase by apache.
Class ProtobufUtil, method toFlushDescriptor().
public static FlushDescriptor toFlushDescriptor(FlushAction action, HRegionInfo hri,
    long flushSeqId, Map<byte[], List<Path>> committedFiles) {
  FlushDescriptor.Builder desc = FlushDescriptor.newBuilder()
      .setAction(action)
      .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes()))
      .setRegionName(UnsafeByteOperations.unsafeWrap(hri.getRegionName()))
      .setFlushSequenceNumber(flushSeqId)
      .setTableName(UnsafeByteOperations.unsafeWrap(hri.getTable().getName()));
  for (Map.Entry<byte[], List<Path>> entry : committedFiles.entrySet()) {
    WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder builder =
        WALProtos.FlushDescriptor.StoreFlushDescriptor.newBuilder()
            .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey()))
            .setStoreHomeDir(Bytes.toString(entry.getKey())); // relative to region
    if (entry.getValue() != null) {
      for (Path path : entry.getValue()) {
        builder.addFlushOutput(path.getName());
      }
    }
    desc.addStoreFlushes(builder);
  }
  return desc.build();
}
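The only Path usage here is path.getName(), which strips the directory portion so the descriptor records just the store file name. A small illustration with made-up store file paths:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.Path;

public class FlushOutputNames {
  public static void main(String[] args) {
    // Committed store files arrive as full Paths; only the final name component
    // is what addFlushOutput() above would receive.
    List<Path> committed = Arrays.asList(
        new Path("/hbase/data/default/t1/region1/cf1/abc123"),
        new Path("/hbase/data/default/t1/region1/cf1/def456"));
    for (Path p : committed) {
      System.out.println(p.getName());   // prints abc123, then def456
    }
  }
}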