Use of org.apache.hadoop.fs.Path in project hbase by apache.
The class SnapshotReferenceUtil, method verifyStoreFile.
/**
 * Verify the validity of the snapshot store file
 *
 * @param conf The current {@link Configuration} instance.
 * @param fs {@link FileSystem}
 * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify
 * @param snapshot the {@link SnapshotDescription} of the snapshot to verify
 * @param regionInfo {@link HRegionInfo} of the region that contains the store file
 * @param family family that contains the store file
 * @param storeFile the store file to verify
 * @throws CorruptedSnapshotException if the snapshot is corrupted
 * @throws IOException if an error occurred while scanning the directory
 */
private static void verifyStoreFile(final Configuration conf, final FileSystem fs,
    final Path snapshotDir, final SnapshotDescription snapshot, final HRegionInfo regionInfo,
    final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
  TableName table = TableName.valueOf(snapshot.getTable());
  String fileName = storeFile.getName();

  Path refPath = null;
  if (StoreFileInfo.isReference(fileName)) {
    // If this is a reference file, check that the parent hfile is present in the snapshot
    refPath = new Path(new Path(regionInfo.getEncodedName(), family), fileName);
    refPath = StoreFileInfo.getReferredToFile(refPath);
    String refRegion = refPath.getParent().getParent().getName();
    refPath = HFileLink.createPath(table, refRegion, family, refPath.getName());
    if (!HFileLink.buildFromHFileLinkPattern(conf, refPath).exists(fs)) {
      throw new CorruptedSnapshotException(
          "Missing parent hfile for: " + fileName + " path=" + refPath,
          ProtobufUtil.createSnapshotDesc(snapshot));
    }
    if (storeFile.hasReference()) {
      // we already have the Reference information embedded here.
      return;
    }
  }

  Path linkPath;
  if (refPath != null && HFileLink.isHFileLink(refPath)) {
    linkPath = new Path(family, refPath.getName());
  } else if (HFileLink.isHFileLink(fileName)) {
    linkPath = new Path(family, fileName);
  } else {
    linkPath = new Path(family,
        HFileLink.createHFileLinkName(table, regionInfo.getEncodedName(), fileName));
  }

  // check if the linked file exists (in the archive, or in the table dir)
  HFileLink link = null;
  if (MobUtils.isMobRegionInfo(regionInfo)) {
    // for a mob region, build the link against the mob root dir and the archive
    link = HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf),
        HFileArchiveUtil.getArchivePath(conf), linkPath);
  } else {
    // not a mob region
    link = HFileLink.buildFromHFileLinkPattern(conf, linkPath);
  }

  try {
    FileStatus fstat = link.getFileStatus(fs);
    if (storeFile.hasFileSize() && storeFile.getFileSize() != fstat.getLen()) {
      String msg = "hfile: " + fileName + " size does not match with the expected one. "
          + " found=" + fstat.getLen() + " expected=" + storeFile.getFileSize();
      LOG.error(msg);
      throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
    }
  } catch (FileNotFoundException e) {
    String msg = "Can't find hfile: " + fileName + " in the real (" + link.getOriginPath()
        + ") or archive (" + link.getArchivePath() + ") directory for the primary table.";
    LOG.error(msg);
    throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
  }
}
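The reference-file branch above leans entirely on Path composition: new Path(parent, child) nests the region, family, and file name, and getParent().getParent().getName() walks back up to recover the region component. A minimal standalone sketch of just that Path arithmetic follows; the region, family, and file names are hypothetical placeholders, not values from a real snapshot, and the StoreFileInfo/HFileLink resolution steps are omitted.

import org.apache.hadoop.fs.Path;

public class ReferencePathDemo {
  public static void main(String[] args) {
    // Hypothetical encoded region name, family, and file name, used only to
    // illustrate the Path arithmetic in verifyStoreFile.
    String encodedRegion = "1588230740abc";
    String family = "cf";
    String fileName = "d2f8a6e94c614f0ba58a81d4a43e4a4b";

    // Same composition as above: region/family/file
    Path storeFilePath = new Path(new Path(encodedRegion, family), fileName);
    System.out.println(storeFilePath); // 1588230740abc/cf/d2f8a6e94c614f0ba58a81d4a43e4a4b

    // Walking two levels up from the file recovers the region component,
    // which is how the referred-to region name is extracted above.
    System.out.println(storeFilePath.getParent().getParent().getName()); // 1588230740abc
  }
}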
Use of org.apache.hadoop.fs.Path in project hbase by apache.
The class TestBackupLogCleaner, method getListOfWALFiles.
private List<FileStatus> getListOfWALFiles(Configuration c) throws IOException {
  Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME);
  FileSystem fs = FileSystem.get(c);
  RemoteIterator<LocatedFileStatus> it = fs.listFiles(logRoot, true);
  List<FileStatus> logFiles = new ArrayList<FileStatus>();
  while (it.hasNext()) {
    LocatedFileStatus lfs = it.next();
    if (lfs.isFile() && !AbstractFSWALProvider.isMetaFile(lfs.getPath())) {
      logFiles.add(lfs);
      LOG.info(lfs);
    }
  }
  return logFiles;
}
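The iteration pattern here, fs.listFiles(dir, true) plus a RemoteIterator, is the standard way to walk a directory tree recursively and see only files. Below is a minimal standalone sketch of the same pattern against the local file system; the /tmp/wal-demo path is a hypothetical placeholder and the WAL-specific meta-file check is omitted.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListFilesDemo {
  public static void main(String[] args) throws IOException {
    // Hypothetical directory; substitute any directory that exists on your machine.
    Path root = new Path("/tmp/wal-demo");

    // The local file system stands in for the cluster FileSystem used in the test.
    FileSystem fs = FileSystem.getLocal(new Configuration());

    // listFiles(root, true) walks the tree recursively and yields only files,
    // the same iteration pattern as getListOfWALFiles above.
    List<FileStatus> files = new ArrayList<>();
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true);
    while (it.hasNext()) {
      LocatedFileStatus lfs = it.next();
      if (lfs.isFile()) {
        files.add(lfs);
      }
    }
    files.forEach(f -> System.out.println(f.getPath()));
  }
}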
Use of org.apache.hadoop.fs.Path in project hbase by apache.
The class TestHFileArchiving, method testRemovesRegionDirOnArchive.
@Test
public void testRemovesRegionDirOnArchive() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  UTIL.createTable(tableName, TEST_FAM);
  final Admin admin = UTIL.getAdmin();

  // get the current store files for the region
  List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(tableName);
  // make sure we only have 1 region serving this table
  assertEquals(1, servingRegions.size());
  HRegion region = servingRegions.get(0);

  // and load the table
  UTIL.loadRegion(region, TEST_FAM);

  // shutdown the table so we can manipulate the files
  admin.disableTable(tableName);

  FileSystem fs = UTIL.getTestFileSystem();

  // now attempt to depose the region
  Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
  Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());

  HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

  // check for the existence of the archive directory and some files in it
  Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
  assertTrue(fs.exists(archiveDir));

  // check to make sure the store directory was copied
  FileStatus[] stores = fs.listStatus(archiveDir, new PathFilter() {

    @Override
    public boolean accept(Path p) {
      if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) {
        return false;
      }
      return true;
    }
  });
  assertTrue(stores.length == 1);

  // make sure we archived the store files
  FileStatus[] storeFiles = fs.listStatus(stores[0].getPath());
  assertTrue(storeFiles.length > 0);

  // then ensure the region's directory isn't present
  assertFalse(fs.exists(regionDir));

  UTIL.deleteTable(tableName);
}
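The anonymous PathFilter above simply rejects recovered.edits entries while listing the archive directory. Since PathFilter has a single accept(Path) method, the same filter can be written as a lambda; the sketch below assumes a hypothetical local directory standing in for the region archive dir and hard-codes the "recovered.edits" name rather than using HConstants.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class FilteredListingDemo {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // Hypothetical directory standing in for the region archive dir.
    Path archiveDir = new Path("/tmp/archive-demo");

    // Lambda equivalent of the anonymous PathFilter in the test: skip
    // "recovered.edits" entries, keep everything else.
    PathFilter skipRecoveredEdits = p -> !p.getName().contains("recovered.edits");

    FileStatus[] stores = fs.listStatus(archiveDir, skipRecoveredEdits);
    for (FileStatus store : stores) {
      System.out.println(store.getPath().getName());
    }
  }
}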
Use of org.apache.hadoop.fs.Path in project hive by apache.
The class TextMetaDataFormatter, method showTableStatus.
@Override
public void showTableStatus(DataOutputStream outStream, Hive db, HiveConf conf, List<Table> tbls, Map<String, String> part, Partition par) throws HiveException {
try {
Iterator<Table> iterTables = tbls.iterator();
while (iterTables.hasNext()) {
// create a row per table name
Table tbl = iterTables.next();
String tableName = tbl.getTableName();
String tblLoc = null;
String inputFormattCls = null;
String outputFormattCls = null;
if (part != null) {
if (par != null) {
if (par.getLocation() != null) {
tblLoc = par.getDataLocation().toString();
}
inputFormattCls = par.getInputFormatClass().getName();
outputFormattCls = par.getOutputFormatClass().getName();
}
} else {
if (tbl.getPath() != null) {
tblLoc = tbl.getDataLocation().toString();
}
inputFormattCls = tbl.getInputFormatClass().getName();
outputFormattCls = tbl.getOutputFormatClass().getName();
}
String owner = tbl.getOwner();
List<FieldSchema> cols = tbl.getCols();
String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols);
boolean isPartitioned = tbl.isPartitioned();
String partitionCols = "";
if (isPartitioned) {
partitionCols = MetaStoreUtils.getDDLFromFieldSchema("partition_columns", tbl.getPartCols());
}
outStream.write(("tableName:" + tableName).getBytes("UTF-8"));
outStream.write(terminator);
outStream.write(("owner:" + owner).getBytes("UTF-8"));
outStream.write(terminator);
outStream.write(("location:" + tblLoc).getBytes("UTF-8"));
outStream.write(terminator);
outStream.write(("inputformat:" + inputFormattCls).getBytes("UTF-8"));
outStream.write(terminator);
outStream.write(("outputformat:" + outputFormattCls).getBytes("UTF-8"));
outStream.write(terminator);
outStream.write(("columns:" + ddlCols).getBytes("UTF-8"));
outStream.write(terminator);
outStream.write(("partitioned:" + isPartitioned).getBytes("UTF-8"));
outStream.write(terminator);
outStream.write(("partitionColumns:" + partitionCols).getBytes("UTF-8"));
outStream.write(terminator);
// output file system information
Path tblPath = tbl.getPath();
List<Path> locations = new ArrayList<Path>();
if (isPartitioned) {
if (par == null) {
for (Partition curPart : db.getPartitions(tbl)) {
if (curPart.getLocation() != null) {
locations.add(new Path(curPart.getLocation()));
}
}
} else {
if (par.getLocation() != null) {
locations.add(new Path(par.getLocation()));
}
}
} else {
if (tblPath != null) {
locations.add(tblPath);
}
}
if (!locations.isEmpty()) {
writeFileSystemStats(outStream, conf, locations, tblPath, false, 0);
}
outStream.write(terminator);
}
} catch (IOException e) {
throw new HiveException(e);
}
}
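writeFileSystemStats itself is not shown in this snippet. As a rough, assumption-labeled stand-in, the sketch below totals size and file count over the collected locations with FileSystem.getContentSummary and writes one line per metric, mirroring the key:value style used above; the method name and the exact metrics are illustrative, not the formatter's actual output.

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemStatsSketch {

  // Hypothetical stand-in for writeFileSystemStats: totals size and file count
  // across the collected locations and writes one line per metric.
  static void writeStats(DataOutputStream out, Configuration conf, List<Path> locations)
      throws IOException {
    long totalSize = 0;
    long totalFiles = 0;
    for (Path location : locations) {
      FileSystem fs = location.getFileSystem(conf);
      ContentSummary summary = fs.getContentSummary(location);
      totalSize += summary.getLength();
      totalFiles += summary.getFileCount();
    }
    out.write(("totalFileSize:" + totalSize + "\n").getBytes("UTF-8"));
    out.write(("totalNumberFiles:" + totalFiles + "\n").getBytes("UTF-8"));
  }
}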
Use of org.apache.hadoop.fs.Path in project hive by apache.
The class AbstractBucketJoinProc, method getBucketFilePathsOfPartition.
public static List<String> getBucketFilePathsOfPartition(Path location,
    ParseContext pGraphContext) throws SemanticException {
  List<String> fileNames = new ArrayList<String>();
  try {
    FileSystem fs = location.getFileSystem(pGraphContext.getConf());
    FileStatus[] files = fs.listStatus(new Path(location.toString()),
        FileUtils.HIDDEN_FILES_PATH_FILTER);
    if (files != null) {
      for (FileStatus file : files) {
        fileNames.add(file.getPath().toString());
      }
    }
  } catch (IOException e) {
    throw new SemanticException(e);
  }
  return fileNames;
}
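FileUtils.HIDDEN_FILES_PATH_FILTER skips the bookkeeping files that Hive and Hadoop jobs drop next to data (by convention, names starting with "_" or ".", such as _SUCCESS or .crc files). If that helper is not at hand, an equivalent inline filter is easy to write; the sketch below is a self-contained variant of the listing logic that takes a plain Configuration instead of a ParseContext, with hypothetical class and method names.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class BucketFileListingSketch {

  // Inline equivalent of a hidden-files filter: names starting with "_" or "."
  // are treated as non-data files and skipped.
  private static final PathFilter SKIP_HIDDEN =
      p -> !p.getName().startsWith("_") && !p.getName().startsWith(".");

  public static List<String> listBucketFiles(Path location, Configuration conf)
      throws IOException {
    List<String> fileNames = new ArrayList<>();
    FileSystem fs = location.getFileSystem(conf);
    FileStatus[] files = fs.listStatus(location, SKIP_HIDDEN);
    if (files != null) {
      for (FileStatus file : files) {
        fileNames.add(file.getPath().toString());
      }
    }
    return fileNames;
  }
}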