Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache:
the class ReplChangeManager, method getFileStatus.
/**
 * Retrieve the file identified by src and chksumString. If the original file
 * still exists and its checksum matches (or cannot be computed), return its
 * status; otherwise fall back to the copy archived under cmroot, located via
 * the checksum.
 * @param src original file location
 * @param chksumString checksum of the original file, or null to skip verification
 * @param conf configuration used to resolve the file system
 * @return FileStatus of the original file or of its cmroot copy
 * @throws MetaException wrapping any underlying IOException
 */
public static FileStatus getFileStatus(Path src, String chksumString, HiveConf conf) throws MetaException {
try {
FileSystem srcFs = src.getFileSystem(conf);
// No checksum supplied: nothing to verify, report on the original path.
if (chksumString == null) {
return srcFs.getFileStatus(src);
}
if (srcFs.exists(src)) {
String actualChksum = getChksumString(src, srcFs);
// An unavailable checksum is treated as a match; only divert to the
// cmroot copy when the checksums definitely differ.
boolean matches = actualChksum == null || chksumString.equals(actualChksum);
if (matches) {
return srcFs.getFileStatus(src);
}
return srcFs.getFileStatus(getCMPath(src, conf, chksumString));
}
// Original file is gone; resolve the archived copy by checksum.
return srcFs.getFileStatus(getCMPath(src, conf, chksumString));
} catch (IOException e) {
throw new MetaException(StringUtils.stringifyException(e));
}
}
Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache:
the class ReplChangeManager, method recycle.
/***
 * Move a path into cmroot. If the path is a directory (of a partition, or table if nonpartitioned),
 * recursively move files inside directory to cmroot. Note the table must be managed table
 * @param path a single file or directory
 * @param ifPurge if the file should skip Trash when delete
 * @return the number of individual files moved (or already archived) into cmroot
 * @throws MetaException wrapping any IOException raised by the file system
 */
public int recycle(Path path, boolean ifPurge) throws MetaException {
// Change management is disabled: nothing to archive.
if (!enabled) {
return 0;
}
try {
int count = 0;
if (fs.isDirectory(path)) {
// Recurse into the directory (hidden files filtered out) and sum the moves.
FileStatus[] files = fs.listStatus(path, hiddenFileFilter);
for (FileStatus file : files) {
count += recycle(file.getPath(), ifPurge);
}
} else {
// The cmroot destination is derived from the file's checksum, so two files
// with identical content map to the same cmPath.
Path cmPath = getCMPath(path, hiveConf, getChksumString(path, fs));
if (LOG.isDebugEnabled()) {
LOG.debug("Moving " + path.toString() + " to " + cmPath.toString());
}
// set timestamp before moving to cmroot, so we can
// avoid a race condition where CM removes the file before the
// timestamp has been set
long now = System.currentTimeMillis();
fs.setTimes(path, now, now);
boolean succ = fs.rename(path, cmPath);
// We might want to setXAttr for the new location in the future
if (!succ) {
// Rename failed: a file with identical content (same checksum, hence the
// same cmPath) is already archived, so this one is dropped.
if (LOG.isDebugEnabled()) {
LOG.debug("A file with the same content of " + path.toString() + " already exists, ignore");
}
// Need to extend the tenancy if we saw a newer file with the same content
fs.setTimes(cmPath, now, now);
} else {
// set the file owner to hive (or the id metastore run as)
fs.setOwner(cmPath, msUser, msGroup);
// Record the original location as an xattr in case it becomes important
// later; not every file system supports xattrs.
try {
fs.setXAttr(cmPath, ORIG_LOC_TAG, path.toString().getBytes());
} catch (UnsupportedOperationException e) {
LOG.warn("Error setting xattr for " + path.toString());
}
count++;
}
// When not purging, tag the archived copy so any claim remaining in trash
// would be granted.
if (!ifPurge) {
try {
fs.setXAttr(cmPath, REMAIN_IN_TRASH_TAG, new byte[] { 0 });
} catch (UnsupportedOperationException e) {
LOG.warn("Error setting xattr for " + cmPath.toString());
}
}
}
return count;
} catch (IOException e) {
throw new MetaException(StringUtils.stringifyException(e));
}
}
Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache:
the class HBaseStore, method getFileMetadataByExpr.
/**
 * Evaluate a file-metadata expression against the given file ids, delegating
 * to the handler registered for the metadata type.
 * @param fileIds ids of the files whose metadata is consulted
 * @param type metadata type selecting the handler from fmHandlers
 * @param expr serialized expression to evaluate
 * @param metadatas output: raw metadata per file id
 * @param results output: expression results per file id
 * @param eliminated output: whether each file was eliminated by the expression
 * @throws MetaException if the handler fails with an IOException
 */
@Override
public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr, ByteBuffer[] metadatas, ByteBuffer[] results, boolean[] eliminated) throws MetaException {
FileMetadataHandler fmh = fmHandlers.get(type);
boolean commit = true;
try {
fmh.getFileMetadataByExpr(fileIds, expr, metadatas, results, eliminated);
} catch (IOException e) {
LOG.error("Unable to get file metadata by expr", e);
commit = false;
// Fix: the original concatenated the cause directly onto the text
// ("...by exprDisk failure"); add a separator so the message is readable.
throw new MetaException("Error reading file metadata by expr: " + e.getMessage());
} finally {
commitOrRoleBack(commit);
}
}
Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache:
the class HBaseStore, method getPartitions.
/**
 * Scan up to max partitions of the given table from the HBase-backed store.
 * @param dbName database name (normalized before lookup)
 * @param tableName table name (normalized before lookup)
 * @param max maximum number of partitions to return
 * @return the partitions found for the table
 * @throws MetaException if the underlying HBase scan fails
 * @throws NoSuchObjectException declared for interface compatibility
 */
@Override
public List<Partition> getPartitions(String dbName, String tableName, int max) throws MetaException, NoSuchObjectException {
boolean commit = false;
openTransaction();
try {
String normalizedDb = HiveStringUtils.normalizeIdentifier(dbName);
String normalizedTable = HiveStringUtils.normalizeIdentifier(tableName);
List<Partition> partitions = getHBase().scanPartitionsInTable(normalizedDb, normalizedTable, max);
// Mark success only after the scan completed so the finally block
// commits rather than rolls back.
commit = true;
return partitions;
} catch (IOException e) {
LOG.error("Unable to get partitions", e);
throw new MetaException("Error scanning partitions");
} finally {
commitOrRoleBack(commit);
}
}
Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache:
the class HBaseStore, method listPartitionNames.
/**
 * List external partition names for a table, scanning up to max_parts
 * partitions from the HBase-backed store.
 * @param db_name database name (normalized before lookup)
 * @param tbl_name table name (normalized before lookup)
 * @param max_parts maximum number of partitions to scan
 * @return the external partition names, or null if the scan returned null
 * @throws MetaException if the underlying HBase access fails
 */
@Override
public List<String> listPartitionNames(String db_name, String tbl_name, short max_parts) throws MetaException {
boolean commit = false;
openTransaction();
try {
String db = HiveStringUtils.normalizeIdentifier(db_name);
String tbl = HiveStringUtils.normalizeIdentifier(tbl_name);
List<Partition> partitions = getHBase().scanPartitionsInTable(db, tbl, max_parts);
// Preserve the original contract: a null scan result yields null,
// not an empty list.
if (partitions == null) {
return null;
}
Table table = getHBase().getTable(db, tbl);
List<String> names = new ArrayList<String>(partitions.size());
for (Partition partition : partitions) {
names.add(buildExternalPartName(table, partition));
}
commit = true;
return names;
} catch (IOException e) {
LOG.error("Unable to get partitions", e);
throw new MetaException("Error scanning partitions");
} finally {
commitOrRoleBack(commit);
}
}
Aggregations