use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.
the class TestDiskError method testReplicationError.
/**
 * Test that when there is a failure replicating a block the temporary
 * and meta files are cleaned up and subsequent replication succeeds.
 */
@Test
public void testReplicationError() throws Exception {
  // create a file of replication factor of 1
  final Path fileName = new Path("/test.txt");
  final int fileLen = 1;
  DFSTestUtil.createFile(fs, fileName, 1, (short) 1, 1L);
  DFSTestUtil.waitReplication(fs, fileName, (short) 1);
  // get the block belonging to the created file
  LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), fileName.toString(), 0, (long) fileLen);
  assertEquals("Should only find 1 block", blocks.locatedBlockCount(), 1);
  LocatedBlock block = blocks.get(0);
  // bring up a second datanode
  cluster.startDataNodes(conf, 1, true, null, null);
  cluster.waitActive();
  final int sndNode = 1;
  DataNode datanode = cluster.getDataNodes().get(sndNode);
  FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(datanode);
  // replicate the block to the second datanode
  InetSocketAddress target = datanode.getXferAddress();
  Socket s = new Socket(target.getAddress(), target.getPort());
  // write the header.
  DataOutputStream out = new DataOutputStream(s.getOutputStream());
  DataChecksum checksum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512);
  new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
      BlockTokenSecretManager.DUMMY_TOKEN, "", new DatanodeInfo[0], new StorageType[0],
      null, BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L, checksum,
      CachingStrategy.newDefaultStrategy(), false, false, null);
  out.flush();
  // close the connection before sending the content of the block
  out.close();
  // the temporary block & meta files should be deleted
  String bpid = cluster.getNamesystem().getBlockPoolId();
  while (utils.getStoredReplicas(bpid).hasNext()) {
    Thread.sleep(100);
  }
  // then increase the file's replication factor
  fs.setReplication(fileName, (short) 2);
  // replication to the new factor of 2 should now succeed
  DFSTestUtil.waitReplication(fs, fileName, (short) 2);
  // clean up the file
  fs.delete(fileName, false);
}
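All of the snippets on this page revolve around reading replica information off a LocatedBlock. For reference, here is a minimal, hypothetical helper (not part of TestDiskError or any of the projects above) that inspects every block of a file through DFSClient.getLocatedBlocks, using only the LocatedBlock accessors that appear elsewhere on this page (getBlock, getLocations, getBlockSize, getStorageTypes). Imports from org.apache.hadoop.hdfs and java.util are elided to match the snippet style of this page.

// Hypothetical helper: dump per-block replica locations and storage media for a file.
// Assumes an already-initialized DFSClient; only uses accessors shown in the snippets above.
static void printBlockLocations(DFSClient dfsClient, String path) throws IOException {
  LocatedBlocks located = dfsClient.getLocatedBlocks(path, 0);
  for (LocatedBlock blk : located.getLocatedBlocks()) {
    DatanodeInfo[] locs = blk.getLocations();      // datanodes holding a replica
    StorageType[] types = blk.getStorageTypes();   // storage medium of each replica
    System.out.println(blk.getBlock().getBlockName()
        + " size=" + blk.getBlockSize()
        + " replicas=" + locs.length
        + " storageTypes=" + Arrays.toString(types));
  }
}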
use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project SSM by Intel-bigdata.
the class TestAllSsdFileAction method testAllSsd.
@Test
public void testAllSsd() throws Exception {
  final String file = "/testAllSsd/file";
  Path dir = new Path("/testAllSsd");
  dfs.mkdirs(dir);
  // write to DISK
  dfs.setStoragePolicy(dir, "HOT");
  final FSDataOutputStream out = dfs.create(new Path(file));
  out.writeChars("testAllSSD");
  out.close();
  // schedule move to SSD
  AllSsdFileAction action = new AllSsdFileAction();
  action.setDfsClient(dfsClient);
  action.setContext(smartContext);
  action.init(file);
  ActionStatus status = action.getActionStatus();
  action.run();
  while (!status.isFinished()) {
    System.out.println("Mover running time : " + StringUtils.formatTime(status.getRunningTime()));
    Thread.sleep(1000);
  }
  // verify after movement
  Assert.assertTrue(status.isSuccessful());
  LocatedBlock lb = dfsClient.getLocatedBlocks(file, 0).get(0);
  StorageType[] storageTypes = lb.getStorageTypes();
  for (StorageType storageType : storageTypes) {
    Assert.assertTrue(StorageType.SSD == storageType);
  }
}
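The polling loop above sleeps until the action reports completion, with no upper bound, so the test hangs indefinitely if the mover never finishes. A bounded variant along the following lines is one option; it is a hypothetical sketch, not part of SSM, and uses only the ActionStatus methods that already appear in the test (isFinished, getRunningTime).

// Hypothetical bounded wait: polls the same ActionStatus methods used above,
// but gives up after a deadline instead of looping indefinitely.
static void awaitFinished(ActionStatus status, long timeoutMillis) throws InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMillis;
  while (!status.isFinished()) {
    if (System.currentTimeMillis() > deadline) {
      throw new IllegalStateException("action did not finish within " + timeoutMillis + " ms");
    }
    System.out.println("Mover running time : " + StringUtils.formatTime(status.getRunningTime()));
    Thread.sleep(1000);
  }
}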
use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project SSM by Intel-bigdata.
the class TestOneSsdFileAction method testAllSsd.
@Test
public void testAllSsd() throws Exception {
  final String file = "/testArchive/file";
  Path dir = new Path("/testArchive");
  dfs.mkdirs(dir);
  // write to DISK
  dfs.setStoragePolicy(dir, "HOT");
  final FSDataOutputStream out = dfs.create(new Path(file));
  out.writeChars("testArchive");
  out.close();
  // schedule move to ONE_SSD
  OneSsdFileAction action = new OneSsdFileAction();
  action.setDfsClient(dfsClient);
  action.setContext(smartContext);
  action.init(file);
  ActionStatus status = action.getActionStatus();
  action.run();
  while (!status.isFinished()) {
    System.out.println("Mover running time : " + StringUtils.formatTime(status.getRunningTime()));
    Thread.sleep(1000);
  }
  // verify after movement
  Assert.assertTrue(status.isSuccessful());
  LocatedBlock lb = dfsClient.getLocatedBlocks(file, 0).get(0);
  StorageType[] storageTypes = lb.getStorageTypes();
  int ssdCount = 0;
  int hddCount = 0;
  for (StorageType storageType : storageTypes) {
    if (storageType == StorageType.SSD) {
      ssdCount++;
    } else if (storageType == StorageType.DISK) {
      hddCount++;
    }
  }
  Assert.assertEquals(1, ssdCount);
  Assert.assertEquals(2, hddCount);
}
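Both SSM tests verify placement the same way: read back the file's first LocatedBlock and inspect getStorageTypes(). A small hypothetical helper (not part of SSM) makes the expected outcomes explicit, using the same DFSClient and LocatedBlock calls as the tests above.

// Hypothetical helper: count replicas of the first block that live on a given storage type.
static int countReplicasOfType(DFSClient dfsClient, String file, StorageType wanted)
    throws IOException {
  LocatedBlock lb = dfsClient.getLocatedBlocks(file, 0).get(0);
  int count = 0;
  for (StorageType type : lb.getStorageTypes()) {
    if (type == wanted) {
      count++;
    }
  }
  return count;
}

With this helper, the ALL_SSD test above expects every replica to be StorageType.SSD, while the ONE_SSD test expects a count of 1 for StorageType.SSD and 2 for StorageType.DISK.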
use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hbase by apache.
the class FanOutOneBlockAsyncDFSOutputHelper method createOutput.
private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
    boolean overwrite, boolean createParent, short replication, long blockSize,
    EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
    StreamSlowMonitor monitor) throws IOException {
  Configuration conf = dfs.getConf();
  DFSClient client = dfs.getClient();
  String clientName = client.getClientName();
  ClientProtocol namenode = client.getNamenode();
  int createMaxRetries =
      conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
  ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager();
  Set<DatanodeInfo> toExcludeNodes = new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet());
  for (int retry = 0; ; retry++) {
    LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src,
        toExcludeNodes, retry);
    HdfsFileStatus stat;
    try {
      stat = FILE_CREATOR.create(namenode, src,
          FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
          getCreateFlags(overwrite), createParent, replication, blockSize,
          CryptoProtocolVersion.supported());
    } catch (Exception e) {
      if (e instanceof RemoteException) {
        throw (RemoteException) e;
      } else {
        throw new NameNodeException(e);
      }
    }
    beginFileLease(client, stat.getFileId());
    boolean succ = false;
    LocatedBlock locatedBlock = null;
    List<Future<Channel>> futureList = null;
    try {
      DataChecksum summer = createChecksum(client);
      locatedBlock = namenode.addBlock(src, client.getClientName(), null,
          toExcludeNodes.toArray(new DatanodeInfo[0]), stat.getFileId(), null, null);
      Map<Channel, DatanodeInfo> datanodes = new IdentityHashMap<>();
      futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L,
          PIPELINE_SETUP_CREATE, summer, eventLoopGroup, channelClass);
      for (int i = 0, n = futureList.size(); i < n; i++) {
        DatanodeInfo datanodeInfo = locatedBlock.getLocations()[i];
        try {
          datanodes.put(futureList.get(i).syncUninterruptibly().getNow(), datanodeInfo);
        } catch (Exception e) {
          // exclude the broken DN next time
          toExcludeNodes.add(datanodeInfo);
          excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "connect error");
          throw e;
        }
      }
      Encryptor encryptor = createEncryptor(conf, stat, client);
      FanOutOneBlockAsyncDFSOutput output =
          new FanOutOneBlockAsyncDFSOutput(conf, dfs, client, namenode, clientName, src,
              stat.getFileId(), locatedBlock, encryptor, datanodes, summer, ALLOC, monitor);
      succ = true;
      return output;
    } catch (RemoteException e) {
      LOG.warn("create fan-out dfs output {} failed, retry = {}", src, retry, e);
      if (shouldRetryCreate(e)) {
        if (retry >= createMaxRetries) {
          throw e.unwrapRemoteException();
        }
      } else {
        throw e.unwrapRemoteException();
      }
    } catch (IOException e) {
      LOG.warn("create fan-out dfs output {} failed, retry = {}", src, retry, e);
      if (retry >= createMaxRetries) {
        throw e;
      }
      // overwrite the old broken file.
      overwrite = true;
      try {
        Thread.sleep(ConnectionUtils.getPauseTime(100, retry));
      } catch (InterruptedException ie) {
        throw new InterruptedIOException();
      }
    } finally {
      if (!succ) {
        if (futureList != null) {
          for (Future<Channel> f : futureList) {
            f.addListener(new FutureListener<Channel>() {
              @Override
              public void operationComplete(Future<Channel> future) throws Exception {
                if (future.isSuccess()) {
                  future.getNow().close();
                }
              }
            });
          }
        }
        endFileLease(client, stat.getFileId());
      }
    }
  }
}
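This private method is reached through the helper's public createOutput overload, which takes a Path instead of a String. The sketch below shows one way such an output might be used; it is a hypothetical illustration, not code from HBase, and the exact names of the public overload and of the write/flush/close methods are assumptions to verify against the HBase version in use.

// Hypothetical usage sketch (assumed API): create a fan-out output, write a payload,
// flush it to all datanodes in the single-block pipeline, then close the file.
static void writeSmallFile(DistributedFileSystem dfs, Path path, byte[] payload,
    EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
    StreamSlowMonitor monitor) throws IOException {
  FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(
      dfs, path, true /* overwrite */, false /* createParent */, (short) 3,
      dfs.getDefaultBlockSize(), eventLoopGroup, channelClass, monitor);
  try {
    out.write(payload);          // buffered locally
    out.flush(false).join();     // fanned out to every datanode in the pipeline
  } finally {
    out.close();                 // completes the block and releases the file lease
  }
}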
use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hbase by apache.
the class FSUtils method computeHDFSBlocksDistribution.
/**
 * Compute HDFS block distribution of a given HdfsDataInputStream. All HdfsDataInputStreams
 * are backed by a series of LocatedBlocks, which are fetched periodically from the namenode.
 * This method retrieves those blocks from the input stream and uses them to calculate
 * HDFSBlocksDistribution.
 *
 * The underlying method in DFSInputStream does attempt to use locally cached blocks, but
 * may hit the namenode if the cache is determined to be incomplete. The method also involves
 * making copies of all LocatedBlocks rather than returning the underlying blocks themselves.
 */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(HdfsDataInputStream inputStream)
    throws IOException {
  List<LocatedBlock> blocks = inputStream.getAllBlocks();
  HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
  for (LocatedBlock block : blocks) {
    String[] hosts = getHostsForLocations(block);
    long len = block.getBlockSize();
    StorageType[] storageTypes = block.getStorageTypes();
    blocksDistribution.addHostsAndBlockWeight(hosts, len, storageTypes);
  }
  return blocksDistribution;
}
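A caller would feed this method a stream opened on HDFS. The following sketch is hypothetical rather than taken from HBase: the cast only succeeds for streams backed by HDFS, and the HDFSBlocksDistribution accessors used here (getTopHosts, getUniqueBlocksTotalWeight) are assumptions about the HBase API rather than part of the snippet above.

// Hypothetical caller: open a file, compute its block distribution, and report locality.
static void reportLocality(FileSystem fs, Path path) throws IOException {
  try (FSDataInputStream in = fs.open(path)) {
    if (in instanceof HdfsDataInputStream) {
      HDFSBlocksDistribution dist =
          FSUtils.computeHDFSBlocksDistribution((HdfsDataInputStream) in);
      System.out.println("top hosts: " + dist.getTopHosts()
          + ", total block weight: " + dist.getUniqueBlocksTotalWeight());
    }
  }
}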