use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project SSM by Intel-bigdata.
the class DBAdapter method getFile.
public HdfsFileStatus getFile(long fid) throws SQLException {
  String sql = "SELECT * FROM files WHERE fid = " + fid;
  QueryHelper queryHelper = new QueryHelper(sql);
  try {
    ResultSet result = queryHelper.executeQuery();
    List<HdfsFileStatus> ret = convertFilesTableItem(result);
    return ret.size() > 0 ? ret.get(0) : null;
  } finally {
    queryHelper.close();
  }
}
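For context, a minimal sketch of how such a lookup might be consumed. The helper below is hypothetical (how the DBAdapter is constructed is out of scope here); only standard HdfsFileStatus accessors are used.

// Hypothetical helper: prints a file's metadata given its metastore file id.
static void printFile(DBAdapter adapter, long fid) throws SQLException {
  HdfsFileStatus status = adapter.getFile(fid);
  if (status == null) {
    System.out.println("no row in 'files' for fid=" + fid);
  } else {
    // Standard HdfsFileStatus accessors.
    System.out.println("name=" + status.getLocalName()
        + " len=" + status.getLen()
        + " replication=" + status.getReplication());
  }
}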
use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hbase by apache.
the class TestBlockReorder method testHBaseCluster.
/**
 * Test that the hook works within HBase, including when there are multiple blocks.
 */
@Test()
public void testHBaseCluster() throws Exception {
  byte[] sb = "sb".getBytes();
  htu.startMiniZKCluster();
  MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
  hbm.waitForActiveAndReadyMaster();
  HRegionServer targetRs = hbm.getMaster();
  // We want to have a datanode with the same name as the region server, so
  // we're going to get the region server name, and start a new datanode with this name.
  String host4 = targetRs.getServerName().getHostname();
  LOG.info("Starting a new datanode with the name=" + host4);
  cluster.startDataNodes(conf, 1, true, null, new String[] { "/r4" },
    new String[] { host4 }, null);
  cluster.waitClusterUp();
  final int repCount = 3;
  // We use the regionserver file system & conf as we expect it to have the hook.
  conf = targetRs.getConfiguration();
  HFileSystem rfs = (HFileSystem) targetRs.getFileSystem();
  Table h = htu.createTable(TableName.valueOf(name.getMethodName()), sb);
  // Now, we have 4 datanodes and a replication count of 3. So we don't know if the datanode
  // with the same name will be used. We can't really stop an existing datanode, this would
  // make us fall in nasty hdfs bugs/issues. So we're going to try multiple times.
  // Now we need to find the log file, its locations, and look at it.
  String rootDir = new Path(FSUtils.getRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME
    + "/" + targetRs.getServerName().toString()).toUri().getPath();
  DistributedFileSystem mdfs =
    (DistributedFileSystem) hbm.getMaster().getMasterFileSystem().getFileSystem();
  int nbTest = 0;
  while (nbTest < 10) {
    final List<Region> regions = targetRs.getOnlineRegions(h.getName());
    final CountDownLatch latch = new CountDownLatch(regions.size());
    // listen for successful log rolls
    final WALActionsListener listener = new WALActionsListener.Base() {
      @Override
      public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
        latch.countDown();
      }
    };
    for (Region region : regions) {
      ((HRegion) region).getWAL().registerWALActionsListener(listener);
    }
    htu.getAdmin().rollWALWriter(targetRs.getServerName());
    // wait
    try {
      latch.await();
    } catch (InterruptedException exception) {
      LOG.warn("Interrupted while waiting for the wal of '" + targetRs + "' to roll. If later "
        + "tests fail, it's probably because we should still be waiting.");
      Thread.currentThread().interrupt();
    }
    for (Region region : regions) {
      ((HRegion) region).getWAL().unregisterWALActionsListener(listener);
    }
    // We need a sleep as the namenode is informed asynchronously
    Thread.sleep(100);
    // insert one put to ensure a minimal size
    Put p = new Put(sb);
    p.addColumn(sb, sb, sb);
    h.put(p);
    DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);
    HdfsFileStatus[] hfs = dl.getPartialListing();
    // As we wrote a put, we should have at least one log file.
    Assert.assertTrue(hfs.length >= 1);
    for (HdfsFileStatus hf : hfs) {
      // Because this is a live cluster, log files might get archived while we're processing
      try {
        LOG.info("Log file found: " + hf.getLocalName() + " in " + rootDir);
        String logFile = rootDir + "/" + hf.getLocalName();
        FileStatus fsLog = rfs.getFileStatus(new Path(logFile));
        LOG.info("Checking log file: " + logFile);
        // Now checking that the hook is up and running
        // We can't call getBlockLocations directly, it's not available in HFileSystem
        // We're trying multiple times to be sure, as the order is random
        BlockLocation[] bls = rfs.getFileBlockLocations(fsLog, 0, 1);
        if (bls.length > 0) {
          BlockLocation bl = bls[0];
          LOG.info(bl.getHosts().length + " replicas for block 0 in " + logFile + " ");
          for (int i = 0; i < bl.getHosts().length - 1; i++) {
            LOG.info(bl.getHosts()[i] + " " + logFile);
            Assert.assertNotSame(bl.getHosts()[i], host4);
          }
          String last = bl.getHosts()[bl.getHosts().length - 1];
          LOG.info(last + " " + logFile);
          if (host4.equals(last)) {
            nbTest++;
            LOG.info(logFile + " is on the new datanode and is ok");
            if (bl.getHosts().length == 3) {
              // We can test this case from the file system as well
              // Checking the underlying file system. Multiple times as the order is random
              testFromDFS(dfs, logFile, repCount, host4);
              // now from the master
              testFromDFS(mdfs, logFile, repCount, host4);
            }
          }
        }
      } catch (FileNotFoundException exception) {
        LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was "
          + "archived out from under us so we'll ignore and retry. If this test hangs "
          + "indefinitely you should treat this failure as a symptom.", exception);
      } catch (RemoteException exception) {
        if (exception.unwrapRemoteException() instanceof FileNotFoundException) {
          LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was "
            + "archived out from under us so we'll ignore and retry. If this test hangs "
            + "indefinitely you should treat this failure as a symptom.", exception);
        } else {
          throw exception;
        }
      }
    }
  }
}
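The listPaths call above only asks the DFSClient for the first batch of directory entries, which is enough for a WAL directory in a test. A minimal sketch of paging through a larger directory with the same API, assuming dfs is a DistributedFileSystem and using an illustrative directory path:

// Illustrative: iterate a directory in batches using the start-after cookie.
static void listAll(DistributedFileSystem dfs, String dir) throws IOException {
  byte[] startAfter = HdfsFileStatus.EMPTY_NAME;
  DirectoryListing listing;
  do {
    listing = dfs.getClient().listPaths(dir, startAfter);
    if (listing == null) {
      return;  // directory does not exist
    }
    for (HdfsFileStatus entry : listing.getPartialListing()) {
      System.out.println(entry.getFullName(dir));
    }
    startAfter = listing.getLastName();  // cookie for the next batch
  } while (listing.hasMore());
}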
use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hbase by apache.
the class FanOutOneBlockAsyncDFSOutputHelper method createOutput.
private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
    boolean overwrite, boolean createParent, short replication, long blockSize,
    EventLoop eventLoop) throws IOException {
  Configuration conf = dfs.getConf();
  FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
  DFSClient client = dfs.getClient();
  String clientName = client.getClientName();
  ClientProtocol namenode = client.getNamenode();
  HdfsFileStatus stat;
  try {
    stat = namenode.create(src,
      FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
      new EnumSetWritable<>(overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)),
      createParent, replication, blockSize, CryptoProtocolVersion.supported());
  } catch (Exception e) {
    if (e instanceof RemoteException) {
      throw (RemoteException) e;
    } else {
      throw new NameNodeException(e);
    }
  }
  beginFileLease(client, stat.getFileId());
  boolean succ = false;
  LocatedBlock locatedBlock = null;
  List<Future<Channel>> futureList = null;
  try {
    DataChecksum summer = createChecksum(client);
    locatedBlock = BLOCK_ADDER.addBlock(namenode, src, client.getClientName(), null, null,
      stat.getFileId(), null);
    List<Channel> datanodeList = new ArrayList<>();
    futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L,
      PIPELINE_SETUP_CREATE, summer, eventLoop);
    for (Future<Channel> future : futureList) {
      // fail the creation if there are connection failures since we are fail-fast. The upper
      // layer should retry itself if needed.
      datanodeList.add(future.syncUninterruptibly().getNow());
    }
    Encryptor encryptor = createEncryptor(conf, stat, client);
    FanOutOneBlockAsyncDFSOutput output = new FanOutOneBlockAsyncDFSOutput(conf, fsUtils, dfs,
      client, namenode, clientName, src, stat.getFileId(), locatedBlock, encryptor, eventLoop,
      datanodeList, summer, ALLOC);
    succ = true;
    return output;
  } finally {
    if (!succ) {
      if (futureList != null) {
        for (Future<Channel> f : futureList) {
          f.addListener(new FutureListener<Channel>() {
            @Override
            public void operationComplete(Future<Channel> future) throws Exception {
              if (future.isSuccess()) {
                future.getNow().close();
              }
            }
          });
        }
      }
      endFileLease(client, stat.getFileId());
      fsUtils.recoverFileLease(dfs, new Path(src), conf, new CancelOnClose(client));
    }
  }
}
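Since this page tracks HdfsFileStatus, note that the stat returned by namenode.create is what carries the file id used by beginFileLease, the addBlock call, and the output itself. A small, purely illustrative helper that logs the fields this code path relies on; all accessors are standard HdfsFileStatus methods, but the helper is not part of the original class:

// Illustrative only: dump the HdfsFileStatus fields createOutput depends on.
private static void logCreatedFile(String src, HdfsFileStatus stat) {
  LOG.debug("created " + src
      + " fileId=" + stat.getFileId()          // keys the lease and addBlock calls
      + " blockSize=" + stat.getBlockSize()
      + " replication=" + stat.getReplication());
}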
use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hbase by apache.
the class HFileSystem method getStoragePolicyForOldHDFSVersion.
/**
 * Before Hadoop 2.8.0 the FileSystem interface has no getStoragePolicy method, and we need
 * to stay compatible with it. See HADOOP-12161 for more details.
 * @param path Path to get storage policy against
 * @return the storage policy name
 */
private String getStoragePolicyForOldHDFSVersion(Path path) {
  try {
    if (this.fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) this.fs;
      HdfsFileStatus status = dfs.getClient().getFileInfo(path.toUri().getPath());
      if (null != status) {
        if (unspecifiedStoragePolicyId < 0) {
          // Get the unspecified id field through reflection to avoid compilation error.
          // In later versions BlockStoragePolicySuite#ID_UNSPECIFIED is moved to
          // HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED
          Field idUnspecified = BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
          unspecifiedStoragePolicyId = idUnspecified.getByte(BlockStoragePolicySuite.class);
        }
        byte storagePolicyId = status.getStoragePolicy();
        if (storagePolicyId != unspecifiedStoragePolicyId) {
          BlockStoragePolicy[] policies = dfs.getStoragePolicies();
          for (BlockStoragePolicy policy : policies) {
            if (policy.getId() == storagePolicyId) {
              return policy.getName();
            }
          }
        }
      }
    }
  } catch (Throwable e) {
    LOG.warn("failed to get block storage policy of [" + path + "]", e);
  }
  return null;
}
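On Hadoop 2.8.0 and later the reflection above is unnecessary, because the FileSystem interface exposes getStoragePolicy directly. A minimal sketch of that newer code path; the method name and the surrounding fallback wiring are assumptions, only the FileSystem call itself is standard:

// Hadoop 2.8.0+: FileSystem#getStoragePolicy returns the effective policy directly.
private String getStoragePolicyForNewHDFSVersion(Path path) {
  try {
    BlockStoragePolicySpi policy = this.fs.getStoragePolicy(path);
    return policy == null ? null : policy.getName();
  } catch (Exception e) {
    // Also covers file systems that throw UnsupportedOperationException here.
    LOG.warn("failed to get block storage policy of [" + path + "]", e);
    return null;
  }
}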
use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
the class DistributedFileSystem method getLinkTarget.
@Override
public Path getLinkTarget(final Path f) throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_LINK_TARGET);
  final Path absF = fixRelativePart(f);
  return new FileSystemLinkResolver<Path>() {

    @Override
    public Path doCall(final Path p) throws IOException {
      HdfsFileStatus fi = dfs.getFileLinkInfo(getPathName(p));
      if (fi != null) {
        return fi.makeQualified(getUri(), p).getSymlink();
      } else {
        throw new FileNotFoundException("File does not exist: " + p);
      }
    }

    @Override
    public Path next(final FileSystem fs, final Path p) throws IOException {
      return fs.getLinkTarget(p);
    }
  }.resolve(this, absF);
}
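A brief usage sketch: the paths are illustrative, and the symlink is created through FileContext, the usual client entry point for symlink creation. Against HDFS, the getLinkTarget call lands in the doCall above (getFileLinkInfo returns an HdfsFileStatus whose getSymlink() is the answer):

Configuration conf = new Configuration();
FileContext fc = FileContext.getFileContext(conf);
// Create /user/alice/link pointing at /user/alice/data (illustrative paths).
fc.createSymlink(new Path("/user/alice/data"), new Path("/user/alice/link"), false);

FileSystem fs = FileSystem.get(conf);
Path target = fs.getLinkTarget(new Path("/user/alice/link"));
System.out.println("link resolves to " + target);  // expected: /user/alice/data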