Use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
The class RSRawDecoderLegacy, method doDecode.
@Override
protected void doDecode(ByteBufferDecodingState decodingState) {
  int dataLen = decodingState.decodeLength;
  CoderUtil.resetOutputBuffers(decodingState.outputs, dataLen);
  /**
   * The passed parameters are friendly to callers but not to the underlying
   * implementations, so we have to adjust them before calling doDecodeImpl.
   */
  int[] erasedOrNotToReadIndexes =
      CoderUtil.getNullIndexes(decodingState.inputs);
  ByteBuffer[] directBuffers = new ByteBuffer[getNumParityUnits()];
  ByteBuffer[] adjustedDirectBufferOutputsParameter =
      new ByteBuffer[getNumParityUnits()];
  // Use the caller-passed buffers in the erasedIndexes positions.
  for (int outputIdx = 0, i = 0; i < decodingState.erasedIndexes.length; i++) {
    boolean found = false;
    for (int j = 0; j < erasedOrNotToReadIndexes.length; j++) {
      // Use the passed output buffer to avoid copying data afterwards.
      if (decodingState.erasedIndexes[i] == erasedOrNotToReadIndexes[j]) {
        found = true;
        adjustedDirectBufferOutputsParameter[j] =
            CoderUtil.resetBuffer(decodingState.outputs[outputIdx++], dataLen);
      }
    }
    if (!found) {
      throw new HadoopIllegalArgumentException(
          "Inputs not fully corresponding to erasedIndexes in null places");
    }
  }
  // Use shared buffers for the other positions (not set yet).
  for (int bufferIdx = 0, i = 0; i < erasedOrNotToReadIndexes.length; i++) {
    if (adjustedDirectBufferOutputsParameter[i] == null) {
      ByteBuffer buffer = checkGetDirectBuffer(directBuffers, bufferIdx, dataLen);
      buffer.position(0);
      buffer.limit(dataLen);
      adjustedDirectBufferOutputsParameter[i] = CoderUtil.resetBuffer(buffer, dataLen);
      bufferIdx++;
    }
  }
  doDecodeImpl(decodingState.inputs, erasedOrNotToReadIndexes,
      adjustedDirectBufferOutputsParameter);
}
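The HadoopIllegalArgumentException above enforces an input contract: every index listed in erasedIndexes must also appear among the null (not-to-read) positions of inputs. A minimal standalone sketch of that check, with hypothetical names (ErasedIndexCheckSketch, validateErasedIndexes) and a plain IllegalArgumentException so it runs without Hadoop on the classpath:

import java.util.Arrays;
import java.util.stream.IntStream;

public class ErasedIndexCheckSketch {

  // Hypothetical stand-in for CoderUtil.getNullIndexes: positions of null inputs.
  static int[] nullIndexes(Object[] inputs) {
    return IntStream.range(0, inputs.length)
        .filter(i -> inputs[i] == null)
        .toArray();
  }

  // Mirrors the validation in doDecode: every erased index must be a null input slot.
  static void validateErasedIndexes(Object[] inputs, int[] erasedIndexes) {
    int[] nulls = nullIndexes(inputs);
    for (int erased : erasedIndexes) {
      if (!Arrays.stream(nulls).anyMatch(j -> j == erased)) {
        throw new IllegalArgumentException(
            "Inputs not fully corresponding to erasedIndexes in null places");
      }
    }
  }

  public static void main(String[] args) {
    Object[] inputs = {new Object(), null, new Object(), null};
    validateErasedIndexes(inputs, new int[] {1, 3});   // passes: both are null slots
    try {
      validateErasedIndexes(inputs, new int[] {0});    // slot 0 still holds data
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}

The first call passes because indexes 1 and 3 are null slots; the second fails because slot 0 still holds data, which is the situation the Hadoop decoder rejects.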
Use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
The class LightWeightGSet, method put.
@Override
public E put(final E element) {
  // validate element
  if (element == null) {
    throw new NullPointerException("Null element is not supported.");
  }
  LinkedElement e = null;
  try {
    e = (LinkedElement) element;
  } catch (ClassCastException ex) {
    throw new HadoopIllegalArgumentException(
        "!(element instanceof LinkedElement), element.getClass()=" + element.getClass());
  }
  // find index
  final int index = getIndex(element);
  // remove if it already exists
  final E existing = remove(index, element);
  // insert the element to the head of the linked list
  modification++;
  size++;
  e.setNext(entries[index]);
  entries[index] = e;
  return existing;
}
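put rejects any element that does not implement LinkedElement because the set chains elements through a pointer the elements carry themselves, avoiding a wrapper allocation per entry. A self-contained sketch of that intrusive-chaining idea, with hypothetical names (Node, IntrusiveSet) that are not Hadoop APIs:

public class IntrusiveSetSketch {

  // The element carries its own chain pointer, like LightWeightGSet.LinkedElement.
  static class Node {
    final int key;
    Node next;                       // intrusive "next" pointer
    Node(int key) { this.key = key; }
  }

  static class IntrusiveSet {
    private final Node[] buckets = new Node[16];

    private int index(int key) {
      return (key & 0x7fffffff) % buckets.length;
    }

    // Insert at the head of the bucket's chain; no per-entry wrapper is allocated.
    void put(Node n) {
      if (n == null) {
        throw new NullPointerException("Null element is not supported.");
      }
      int i = index(n.key);
      n.next = buckets[i];
      buckets[i] = n;
    }

    boolean contains(int key) {
      for (Node cur = buckets[index(key)]; cur != null; cur = cur.next) {
        if (cur.key == key) {
          return true;
        }
      }
      return false;
    }
  }

  public static void main(String[] args) {
    IntrusiveSet set = new IntrusiveSet();
    set.put(new Node(42));
    System.out.println(set.contains(42));  // true
    System.out.println(set.contains(7));   // false
  }
}

The head insertion mirrors e.setNext(entries[index]); entries[index] = e; in the Hadoop method.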
Use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
The class DFSUtil, method getSuffixIDs.
/**
 * Returns the nameservice Id and namenode Id when the local host matches the
 * configuration parameter {@code addressKey}.<nameservice Id>.<namenode Id>
 *
 * @param conf Configuration
 * @param addressKey configuration key corresponding to the address
 * @param knownNsId only look at configs for the given nameservice, if not null
 * @param knownNNId only look at configs for the given namenode, if not null
 * @param matcher matching criteria for matching the address
 * @return Array with nameservice Id and namenode Id on success. The first
 *         element in the array is the nameservice Id and the second element
 *         is the namenode Id. A null value indicates that the configuration
 *         does not have the Id.
 * @throws HadoopIllegalArgumentException on error
 */
static String[] getSuffixIDs(final Configuration conf, final String addressKey,
    String knownNsId, String knownNNId, final AddressMatcher matcher) {
  String nameserviceId = null;
  String namenodeId = null;
  int found = 0;
  Collection<String> nsIds = DFSUtilClient.getNameServiceIds(conf);
  for (String nsId : DFSUtilClient.emptyAsSingletonNull(nsIds)) {
    if (knownNsId != null && !knownNsId.equals(nsId)) {
      continue;
    }
    Collection<String> nnIds = DFSUtilClient.getNameNodeIds(conf, nsId);
    for (String nnId : DFSUtilClient.emptyAsSingletonNull(nnIds)) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(String.format("addressKey: %s nsId: %s nnId: %s",
            addressKey, nsId, nnId));
      }
      if (knownNNId != null && !knownNNId.equals(nnId)) {
        continue;
      }
      String key = addKeySuffixes(addressKey, nsId, nnId);
      String addr = conf.get(key);
      if (addr == null) {
        continue;
      }
      InetSocketAddress s = null;
      try {
        s = NetUtils.createSocketAddr(addr);
      } catch (Exception e) {
        LOG.warn("Exception in creating socket address " + addr, e);
        continue;
      }
      if (!s.isUnresolved() && matcher.match(s)) {
        nameserviceId = nsId;
        namenodeId = nnId;
        found++;
      }
    }
  }
  if (found > 1) {
    // Only one address must match the local address
    String msg = "Configuration has multiple addresses that match "
        + "local node's address. Please configure the system with "
        + DFS_NAMESERVICE_ID + " and " + DFS_HA_NAMENODE_ID_KEY;
    throw new HadoopIllegalArgumentException(msg);
  }
  return new String[] { nameserviceId, namenodeId };
}
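getSuffixIDs scans configuration keys suffixed with a nameservice Id and a namenode Id, and throws HadoopIllegalArgumentException only when more than one suffixed address resolves to the local node. A minimal sketch of the key layout being probed, using a hypothetical addKeySuffixes stand-in rather than Hadoop's own helper, and illustrative host names:

import java.util.LinkedHashMap;
import java.util.Map;

public class SuffixKeySketch {

  // Hypothetical stand-in for DFSUtil.addKeySuffixes: appends the non-null suffixes.
  static String addKeySuffixes(String key, String... suffixes) {
    StringBuilder sb = new StringBuilder(key);
    for (String suffix : suffixes) {
      if (suffix != null) {
        sb.append('.').append(suffix);
      }
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    // An HA layout with one nameservice and two namenodes.
    Map<String, String> conf = new LinkedHashMap<>();
    conf.put("dfs.namenode.rpc-address.ns1.nn1", "host1.example.com:8020");
    conf.put("dfs.namenode.rpc-address.ns1.nn2", "host2.example.com:8020");

    // The scan in getSuffixIDs effectively probes keys built like this:
    String key = addKeySuffixes("dfs.namenode.rpc-address", "ns1", "nn1");
    System.out.println(key + " -> " + conf.get(key));
  }
}

At most one of these per-namenode addresses should match the local host; otherwise the cluster must be pinned down explicitly with DFS_NAMESERVICE_ID and DFS_HA_NAMENODE_ID_KEY, as the exception message says.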
Use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
The class FSDirTruncateOp, method truncate.
/**
 * Truncate a file to a given size.
 *
 * @param fsn namespace
 * @param srcArg path name
 * @param newLength the target file size
 * @param clientName client name
 * @param clientMachine client machine info
 * @param mtime modification time
 * @param toRemoveBlocks blocks to be removed
 * @param pc permission checker to check fs permission
 * @return truncate result
 * @throws IOException
 */
static TruncateResult truncate(final FSNamesystem fsn, final String srcArg,
    final long newLength, final String clientName, final String clientMachine,
    final long mtime, final BlocksMapUpdateInfo toRemoveBlocks,
    final FSPermissionChecker pc) throws IOException, UnresolvedLinkException {
  assert fsn.hasWriteLock();
  FSDirectory fsd = fsn.getFSDirectory();
  final String src;
  final INodesInPath iip;
  final boolean onBlockBoundary;
  Block truncateBlock = null;
  fsd.writeLock();
  try {
    iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
    src = iip.getPath();
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }
    INodeFile file = INodeFile.valueOf(iip.getLastINode(), src);
    // Truncating a file with striped blocks is not supported.
    if (file.isStriped()) {
      throw new UnsupportedOperationException(
          "Cannot truncate file with striped block " + src);
    }
    final BlockStoragePolicy lpPolicy = fsd.getBlockManager()
        .getStoragePolicy("LAZY_PERSIST");
    if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
      throw new UnsupportedOperationException(
          "Cannot truncate lazy persist file " + src);
    }
    // Check if the file is already being truncated to the same length.
    final BlockInfo last = file.getLastBlock();
    if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
      final Block truncatedBlock = last.getUnderConstructionFeature().getTruncateBlock();
      if (truncatedBlock != null) {
        final long truncateLength = file.computeFileSize(false, false)
            + truncatedBlock.getNumBytes();
        if (newLength == truncateLength) {
          return new TruncateResult(false, fsd.getAuditFileInfo(iip));
        }
      }
    }
    // Opening an existing file for truncate. May need lease recovery.
    fsn.recoverLeaseInternal(RecoverLeaseOp.TRUNCATE_FILE, iip, src,
        clientName, clientMachine, false);
    // Truncate length check.
    long oldLength = file.computeFileSize();
    if (oldLength == newLength) {
      return new TruncateResult(true, fsd.getAuditFileInfo(iip));
    }
    if (oldLength < newLength) {
      throw new HadoopIllegalArgumentException(
          "Cannot truncate to a larger file size. Current size: " + oldLength
              + ", truncate size: " + newLength + ".");
    }
    // Perform INodeFile truncation.
    final QuotaCounts delta = new QuotaCounts.Builder().build();
    onBlockBoundary = unprotectedTruncate(fsn, iip, newLength, toRemoveBlocks,
        mtime, delta);
    if (!onBlockBoundary) {
      // Open file for write, but don't log into edits
      long lastBlockDelta = file.computeFileSize() - newLength;
      assert lastBlockDelta > 0 : "delta is 0 only if on block boundary";
      truncateBlock = prepareFileForTruncate(fsn, iip, clientName,
          clientMachine, lastBlockDelta, null);
    }
    // Update the quota: use the preferred block size for the UC block.
    fsd.updateCountNoQuotaCheck(iip, iip.length() - 1, delta);
  } finally {
    fsd.writeUnlock();
  }
  fsn.getEditLog().logTruncate(src, clientName, clientMachine, newLength,
      mtime, truncateBlock);
  return new TruncateResult(onBlockBoundary, fsd.getAuditFileInfo(iip));
}
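A client-side sketch of the call that ends up in this method, assuming a running HDFS cluster reachable through fs.defaultFS and an existing file at an illustrative path: FileSystem.truncate returns true when the request lands on a block boundary, and a length larger than the current file size is rejected with the HadoopIllegalArgumentException message built above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();        // picks up fs.defaultFS
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/truncate-demo.txt");  // illustrative path

    long currentLength = fs.getFileStatus(file).getLen();

    // true means the new length fell on a block boundary and the truncate
    // finished immediately; false means last-block recovery is still running.
    boolean onBoundary = fs.truncate(file, currentLength / 2);
    System.out.println("truncate completed immediately: " + onBoundary);

    try {
      fs.truncate(file, currentLength * 2);          // larger than the file
    } catch (Exception e) {
      // The NameNode rejects this with HadoopIllegalArgumentException:
      // "Cannot truncate to a larger file size. ..."
      System.out.println("rejected: " + e.getMessage());
    }
  }
}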
Use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
The class FSDirErasureCodingOp, method setErasureCodingPolicy.
/**
 * Set an erasure coding policy on the given path.
 *
 * @param fsn The namespace
 * @param srcArg The path of the target directory.
 * @param ecPolicyName The erasure coding policy name to set on the target
 *          directory.
 * @param pc The permission checker used to verify write access.
 * @param logRetryCache whether to record RPC ids in editlog for retry
 *          cache rebuilding
 * @return {@link HdfsFileStatus}
 * @throws IOException
 * @throws HadoopIllegalArgumentException if the policy is not enabled
 * @throws AccessControlException if the user does not have write access
 */
static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn,
    final String srcArg, final String ecPolicyName,
    final FSPermissionChecker pc, final boolean logRetryCache)
    throws IOException, AccessControlException {
  assert fsn.hasWriteLock();
  String src = srcArg;
  FSDirectory fsd = fsn.getFSDirectory();
  final INodesInPath iip;
  List<XAttr> xAttrs;
  fsd.writeLock();
  try {
    ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
        .getEnabledPolicyByName(ecPolicyName);
    if (ecPolicy == null) {
      final String sysPolicies = Arrays.asList(
          fsn.getErasureCodingPolicyManager().getEnabledPolicies())
          .stream()
          .map(ErasureCodingPolicy::getName)
          .collect(Collectors.joining(", "));
      final String message = String.format("Policy '%s' does not match any "
          + "enabled erasure coding policies: [%s]. The set of enabled "
          + "erasure coding policies can be configured at '%s'.",
          ecPolicyName, sysPolicies,
          DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY);
      throw new HadoopIllegalArgumentException(message);
    }
    iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
    // Write access is required to set erasure coding policy
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }
    src = iip.getPath();
    xAttrs = setErasureCodingPolicyXAttr(fsn, iip, ecPolicy);
  } finally {
    fsd.writeUnlock();
  }
  fsn.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
  return fsd.getAuditFileInfo(iip);
}
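A client-side sketch, assuming a Hadoop 3.x cluster where the RS-6-3-1024k policy has been enabled and using an illustrative directory path: DistributedFileSystem.setErasureCodingPolicy succeeds for an enabled policy, while an unknown policy name surfaces the HadoopIllegalArgumentException message built in this method.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetEcPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path dir = new Path("/data/ec-dir");             // illustrative path
    dfs.mkdirs(dir);

    // Succeeds only if the policy is enabled on the NameNode
    // (see DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY).
    dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");

    try {
      dfs.setErasureCodingPolicy(dir, "NO-SUCH-POLICY");
    } catch (Exception e) {
      // Surfaces the server-side HadoopIllegalArgumentException:
      // "Policy 'NO-SUCH-POLICY' does not match any enabled erasure coding policies..."
      System.out.println("rejected: " + e.getMessage());
    }
  }
}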