use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
the class TestStripedINodeFile, method testUnsuitableStoragePoliciesWithECStripedMode.
/**
 * Tests that block placement for files created in EC striped mode ignores
 * the directory's storage policy when that policy is not suitable. The
 * supported storage policies for EC striped mode are HOT, COLD and ALL_SSD;
 * any other policy is ignored and the default policy is used instead.
 */
@Test(timeout = 60000)
public void testUnsuitableStoragePoliciesWithECStripedMode() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  int defaultStripedBlockSize = testECPolicy.getCellSize() * 4;
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1L);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());

  // start 10 datanodes
  int numOfDatanodes = 10;
  int storagesPerDatanode = 2;
  long capacity = 10 * defaultStripedBlockSize;
  long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
  for (int i = 0; i < numOfDatanodes; i++) {
    for (int j = 0; j < storagesPerDatanode; j++) {
      capacities[i][j] = capacity;
    }
  }

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numOfDatanodes)
      .storagesPerDatanode(storagesPerDatanode)
      .storageTypes(new StorageType[][] {
          { StorageType.SSD, StorageType.DISK },
          { StorageType.SSD, StorageType.DISK },
          { StorageType.SSD, StorageType.DISK },
          { StorageType.SSD, StorageType.DISK },
          { StorageType.SSD, StorageType.DISK },
          { StorageType.DISK, StorageType.SSD },
          { StorageType.DISK, StorageType.SSD },
          { StorageType.DISK, StorageType.SSD },
          { StorageType.DISK, StorageType.SSD },
          { StorageType.DISK, StorageType.SSD } })
      .storageCapacities(capacities)
      .build();

  try {
    cluster.waitActive();

    // set "/foo" directory with ONE_SSD storage policy.
    ClientProtocol client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
    String fooDir = "/foo";
    client.mkdirs(fooDir, new FsPermission((short) 777), true);
    client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

    // set an EC policy on "/foo" directory
    client.setErasureCodingPolicy(fooDir,
        StripedFileTestUtil.getDefaultECPolicy().getName());

    // write file to fooDir
    final String barFile = "/foo/bar";
    long fileLen = 20 * defaultStripedBlockSize;
    DFSTestUtil.createFile(cluster.getFileSystem(), new Path(barFile),
        fileLen, (short) 3, 0);

    // verify storage types and locations
    LocatedBlocks locatedBlocks = client.getBlockLocations(barFile, 0, fileLen);
    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (StorageType type : lb.getStorageTypes()) {
        Assert.assertEquals(StorageType.DISK, type);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
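For readers who want to see at a glance where the blocks of a file such as /foo/bar actually landed, a small helper in this spirit can tally block locations per storage type. This is an illustrative sketch, not part of the test above: the class StorageTypeTally and method countByType are hypothetical names, and the code relies only on the LocatedBlocks and LocatedBlock accessors already used in the assertion loop.

import java.util.EnumMap;
import java.util.Map;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public final class StorageTypeTally {

  private StorageTypeTally() {
  }

  /** Count how many block locations of a file sit on each storage type. */
  public static Map<StorageType, Integer> countByType(LocatedBlocks blocks) {
    Map<StorageType, Integer> counts = new EnumMap<>(StorageType.class);
    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
      for (StorageType type : lb.getStorageTypes()) {
        counts.merge(type, 1, Integer::sum);
      }
    }
    return counts;
  }
}

In the scenario above, every location would be counted under StorageType.DISK, since the ONE_SSD policy is not suitable for the striped file and is ignored.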
use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
the class TestStripedBlockUtil, method createDummyLocatedBlock.
private LocatedStripedBlock createDummyLocatedBlock(int bgSize) {
  final long blockGroupID = -1048576;
  DatanodeInfo[] locs = new DatanodeInfo[groupSize];
  String[] storageIDs = new String[groupSize];
  StorageType[] storageTypes = new StorageType[groupSize];
  byte[] indices = new byte[groupSize];
  for (int i = 0; i < groupSize; i++) {
    indices[i] = (byte) ((i + 2) % dataBlocks);
    // The location port always equals the logical index of the block,
    // for easier verification.
    locs[i] = DFSTestUtil.getLocalDatanodeInfo(indices[i]);
    storageIDs[i] = locs[i].getDatanodeUuid();
    storageTypes[i] = StorageType.DISK;
  }
  return new LocatedStripedBlock(
      new ExtendedBlock("pool", blockGroupID, bgSize, 1001),
      locs, storageIDs, storageTypes, indices, 0, false, null);
}
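A hypothetical companion check (not part of TestStripedBlockUtil) that a dummy block group built this way is internally consistent: because DFSTestUtil.getLocalDatanodeInfo(...) is called with the block index as the port, each location's transfer port should match the index recorded for it, and every storage type should be DISK.

private void verifyDummyLocatedBlock(LocatedStripedBlock bg) {
  byte[] blockIndices = bg.getBlockIndices();
  DatanodeInfo[] dnLocs = bg.getLocations();
  StorageType[] types = bg.getStorageTypes();
  for (int i = 0; i < dnLocs.length; i++) {
    // The xfer port doubles as the logical block index, as set up above.
    Assert.assertEquals(blockIndices[i], dnLocs[i].getXferPort());
    Assert.assertEquals(StorageType.DISK, types[i]);
  }
}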
use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
the class DFSInputStream, method blockSeekTo.
/**
 * Open a DataInputStream to a DataNode so that it can be read from.
 * The block ID and the IDs of the destination DataNodes are obtained from
 * the NameNode at startup.
 */
private synchronized DatanodeInfo blockSeekTo(long target) throws IOException {
  if (target >= getFileLength()) {
    throw new IOException("Attempted to read past end of file");
  }

  // Will be getting a new BlockReader.
  closeCurrentBlockReaders();

  //
  // Connect to best DataNode for desired Block, with potential offset
  //
  DatanodeInfo chosenNode;
  // only need to get a new access token once
  int refetchToken = 1;
  // only need to get a new encryption key once
  int refetchEncryptionKey = 1;
  boolean connectFailedOnce = false;
  while (true) {
    //
    // Compute desired block
    //
    LocatedBlock targetBlock = getBlockAt(target);

    // update current position
    this.pos = target;
    this.blockEnd = targetBlock.getStartOffset()
        + targetBlock.getBlockSize() - 1;
    this.currentLocatedBlock = targetBlock;

    long offsetIntoBlock = target - targetBlock.getStartOffset();

    DNAddrPair retval = chooseDataNode(targetBlock, null);
    chosenNode = retval.info;
    InetSocketAddress targetAddr = retval.addr;
    StorageType storageType = retval.storageType;

    try {
      blockReader = getBlockReader(targetBlock, offsetIntoBlock,
          targetBlock.getBlockSize() - offsetIntoBlock, targetAddr,
          storageType, chosenNode);
      if (connectFailedOnce) {
        DFSClient.LOG.info("Successfully connected to " + targetAddr
            + " for " + targetBlock.getBlock());
      }
      return chosenNode;
    } catch (IOException ex) {
      checkInterrupted(ex);
      if (ex instanceof InvalidEncryptionKeyException
          && refetchEncryptionKey > 0) {
        DFSClient.LOG.info("Will fetch a new encryption key and retry, "
            + "encryption key was invalid when connecting to " + targetAddr
            + " : " + ex);
        // The encryption key used is invalid.
        refetchEncryptionKey--;
        dfsClient.clearDataEncryptionKey();
      } else if (refetchToken > 0 && tokenRefetchNeeded(ex, targetAddr)) {
        refetchToken--;
        fetchBlockAt(target);
      } else {
        connectFailedOnce = true;
        DFSClient.LOG.warn("Failed to connect to " + targetAddr + " for block"
            + ", add to deadNodes and continue. " + ex, ex);
        // Put chosen node into dead list, continue
        addToDeadNodes(chosenNode);
      }
    }
  }
}
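The loop above follows a common choose-try-blacklist pattern. The generic sketch below (not HDFS code; all names are illustrative) isolates that pattern: pick a candidate that is not on the dead list, try to open it, allow at most one credential refresh, and otherwise add the candidate to the dead list and keep trying.

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Function;

final class RetryingConnector<N, R> {

  /** A connection attempt that may fail with an IOException. */
  interface ConnectFn<N, R> {
    R apply(N node) throws IOException;
  }

  private final Set<N> deadNodes = new HashSet<>();

  R connect(Function<Set<N>, N> chooseNode, ConnectFn<N, R> open,
      Runnable refreshCredential) throws IOException {
    // Refresh stale credentials at most once, mirroring refetchEncryptionKey.
    int refetchCredential = 1;
    while (true) {
      // chooseNode is expected to fail (e.g. throw an unchecked exception)
      // once every candidate is on the dead list.
      N node = chooseNode.apply(deadNodes);
      try {
        return open.apply(node);
      } catch (SecurityException e) {
        // Stands in for the invalid-encryption-key case above.
        if (refetchCredential-- > 0) {
          refreshCredential.run();
        } else {
          deadNodes.add(node);
        }
      } catch (IOException e) {
        // Any other failure: blacklist the node and pick another one.
        deadNodes.add(node);
      }
    }
  }
}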
use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
the class PBHelperClient, method getBuilder.
private static HdfsProtos.StorageTypeQuotaInfosProto.Builder getBuilder(
    QuotaUsage qu) {
  HdfsProtos.StorageTypeQuotaInfosProto.Builder isb =
      HdfsProtos.StorageTypeQuotaInfosProto.newBuilder();
  for (StorageType t : StorageType.getTypesSupportingQuota()) {
    HdfsProtos.StorageTypeQuotaInfoProto info =
        HdfsProtos.StorageTypeQuotaInfoProto.newBuilder()
            .setType(convertStorageType(t))
            .setConsumed(qu.getTypeConsumed(t))
            .setQuota(qu.getTypeQuota(t))
            .build();
    isb.addTypeQuotaInfo(info);
  }
  return isb;
}
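The conversion iterates exactly the storage types that support quotas. As a hypothetical debugging companion (QuotaUsagePrinter is not a Hadoop class), the same iteration can be used to dump a QuotaUsage in human-readable form with the accessors seen above.

import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;

final class QuotaUsagePrinter {

  private QuotaUsagePrinter() {
  }

  static void print(QuotaUsage qu) {
    for (StorageType t : StorageType.getTypesSupportingQuota()) {
      // Same accessors used when building StorageTypeQuotaInfoProto above.
      System.out.printf("%-10s consumed=%d quota=%d%n",
          t, qu.getTypeConsumed(t), qu.getTypeQuota(t));
    }
  }
}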
use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
the class StripedBlockUtil, method constructInternalBlock.
/**
* This method creates an internal block at the given index of a block group.
*
* @param idxInReturnedLocs The index in the stored locations in the
* {@link LocatedStripedBlock} object
* @param idxInBlockGroup The logical index in the striped block group
* @return The constructed internal block
*/
public static LocatedBlock constructInternalBlock(LocatedStripedBlock bg,
    int idxInReturnedLocs, int cellSize, int dataBlkNum, int idxInBlockGroup) {
  final ExtendedBlock blk = constructInternalBlock(
      bg.getBlock(), cellSize, dataBlkNum, idxInBlockGroup);
  final LocatedBlock locatedBlock;
  if (idxInReturnedLocs < bg.getLocations().length) {
    locatedBlock = new LocatedBlock(blk,
        new DatanodeInfo[] { bg.getLocations()[idxInReturnedLocs] },
        new String[] { bg.getStorageIDs()[idxInReturnedLocs] },
        new StorageType[] { bg.getStorageTypes()[idxInReturnedLocs] },
        bg.getStartOffset(), bg.isCorrupt(), null);
  } else {
    locatedBlock = new LocatedBlock(blk, null, null, null,
        bg.getStartOffset(), bg.isCorrupt(), null);
  }
  Token<BlockTokenIdentifier>[] blockTokens = bg.getBlockTokens();
  if (idxInReturnedLocs < blockTokens.length) {
    locatedBlock.setBlockToken(blockTokens[idxInReturnedLocs]);
  }
  return locatedBlock;
}
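A hedged usage sketch of this helper (not taken from Hadoop): expanding an entire block group into its internal blocks, one call per stored location. The RS(6,3) layout and 1 MB cell size are assumptions for illustration; real callers would take these values from the file's erasure coding policy.

static LocatedBlock[] expandBlockGroup(LocatedStripedBlock bg) {
  // Assumed layout for illustration only: 6 data + 3 parity, 1 MB cells.
  final int dataBlkNum = 6;
  final int parityBlkNum = 3;
  final int cellSize = 1024 * 1024;

  byte[] blockIndices = bg.getBlockIndices();
  LocatedBlock[] internal = new LocatedBlock[dataBlkNum + parityBlkNum];
  for (int i = 0; i < blockIndices.length; i++) {
    // i is the position in the stored locations (idxInReturnedLocs);
    // blockIndices[i] is the logical index inside the block group.
    internal[blockIndices[i]] = StripedBlockUtil.constructInternalBlock(
        bg, i, cellSize, dataBlkNum, blockIndices[i]);
  }
  // Positions with no reported location remain null in the returned array.
  return internal;
}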