Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.
The class DatanodeProtocolClientSideTranslatorPB, method blockReport:
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports,
    BlockReportContext context) throws IOException {
  BlockReportRequestProto.Builder builder =
      BlockReportRequestProto.newBuilder()
          .setRegistration(PBHelper.convert(registration))
          .setBlockPoolId(poolId);
  boolean useBlocksBuffer = registration.getNamespaceInfo()
      .isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS);
  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder =
        StorageBlockReportProto.newBuilder()
            .setStorage(PBHelperClient.convert(r.getStorage()));
    BlockListAsLongs blocks = r.getBlocks();
    if (useBlocksBuffer) {
      reportBuilder.setNumberOfBlocks(blocks.getNumberOfBlocks());
      reportBuilder.addAllBlocksBuffers(blocks.getBlocksBuffers());
    } else {
      for (long value : blocks.getBlockListAsLongs()) {
        reportBuilder.addBlocks(value);
      }
    }
    builder.addReports(reportBuilder.build());
  }
  builder.setContext(PBHelper.convert(context));
  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
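Before this translation runs, the DataNode has already assembled one BlockListAsLongs per storage. A hedged sketch of that shape, using only methods that appear elsewhere in this section (builder(maxDataLength), add(ReplicaInfo), build(), getNumberOfBlocks(), getBlockListAsLongs(), getBlocksBuffers()); the method name and its parameters are illustrative, not part of the Hadoop code.

// Sketch only: assemble a per-storage report, then show the two wire forms
// that the client-side translator above serializes. 'replicas' and
// 'maxDataLength' are assumed inputs, not taken from the original snippet.
static BlockListAsLongs buildStorageReport(Iterable<ReplicaInfo> replicas,
    int maxDataLength) {
  BlockListAsLongs.Builder builder = BlockListAsLongs.builder(maxDataLength);
  for (ReplicaInfo replica : replicas) {
    builder.add(replica);
  }
  BlockListAsLongs blocks = builder.build();
  // Legacy wire form: a flat long[] streamed into repeated "blocks" fields.
  long[] longs = blocks.getBlockListAsLongs();
  // Buffer wire form: ByteStrings sent as "blocksBuffers" when the NameNode
  // advertises the STORAGE_BLOCK_REPORT_BUFFERS capability.
  List<ByteString> buffers = blocks.getBlocksBuffers();
  return blocks;
}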
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.
The class DatanodeProtocolServerSideTranslatorPB, method blockReport:
@Override
public BlockReportResponseProto blockReport(RpcController controller,
    BlockReportRequestProto request) throws ServiceException {
  DatanodeCommand cmd = null;
  StorageBlockReport[] report =
      new StorageBlockReport[request.getReportsCount()];
  int index = 0;
  for (StorageBlockReportProto s : request.getReportsList()) {
    final BlockListAsLongs blocks;
    if (s.hasNumberOfBlocks()) {
      // new style buffer based reports
      int num = (int) s.getNumberOfBlocks();
      Preconditions.checkState(s.getBlocksCount() == 0,
          "cannot send both blocks list and buffers");
      blocks = BlockListAsLongs.decodeBuffers(num, s.getBlocksBuffersList(),
          maxDataLength);
    } else {
      blocks = BlockListAsLongs.decodeLongs(s.getBlocksList(), maxDataLength);
    }
    report[index++] = new StorageBlockReport(
        PBHelperClient.convert(s.getStorage()), blocks);
  }
  try {
    cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), report,
        request.hasContext() ? PBHelper.convert(request.getContext()) : null);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  BlockReportResponseProto.Builder builder =
      BlockReportResponseProto.newBuilder();
  if (cmd != null) {
    builder.setCmd(PBHelper.convert(cmd));
  }
  return builder.build();
}
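A hedged round-trip sketch of the legacy encoding handled by the else branch above: re-encode a report through the flat long list and decode it with BlockListAsLongs.decodeLongs(), mirroring what this translator does with s.getBlocksList(). The helper name is illustrative.

// Illustrative round trip (not taken from Hadoop): re-encode a report through
// the legacy long list and check that decodeLongs recovers the same count.
static BlockListAsLongs roundTripLongs(BlockListAsLongs original,
    int maxDataLength) {
  long[] encoded = original.getBlockListAsLongs();
  List<Long> asList = new ArrayList<>(encoded.length);
  for (long value : encoded) {
    asList.add(value);
  }
  BlockListAsLongs decoded =
      BlockListAsLongs.decodeLongs(asList, maxDataLength);
  Preconditions.checkState(
      decoded.getNumberOfBlocks() == original.getNumberOfBlocks(),
      "round trip changed the block count");
  return decoded;
}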
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.
The class FsDatasetImpl, method shutdownBlockPool:
@Override
public void shutdownBlockPool(String bpid) {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    LOG.info("Removing block pool " + bpid);
    Map<DatanodeStorage, BlockListAsLongs> blocksPerVolume =
        getBlockReports(bpid);
    volumeMap.cleanUpBlockPool(bpid);
    volumes.removeBlockPool(bpid, blocksPerVolume);
  }
}
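Note that the report map is captured before cleanUpBlockPool runs, so removeBlockPool can still see what each storage held for the pool being dropped. A hypothetical sanity check over that map (not part of FsDatasetImpl), using only getNumberOfBlocks() from the snippets above:

// Hypothetical check, not the FsVolumeList implementation: warn if a storage
// still reports blocks for the pool that is about to be removed.
for (Map.Entry<DatanodeStorage, BlockListAsLongs> entry :
    blocksPerVolume.entrySet()) {
  int remaining = entry.getValue().getNumberOfBlocks();
  if (remaining > 0) {
    LOG.warn("Block pool " + bpid + " still has " + remaining
        + " block(s) on storage " + entry.getKey().getStorageID());
  }
}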
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.
The class FsDatasetImpl, method getBlockReports:
@Override
public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
  Map<DatanodeStorage, BlockListAsLongs> blockReportsMap =
      new HashMap<DatanodeStorage, BlockListAsLongs>();
  Map<String, BlockListAsLongs.Builder> builders =
      new HashMap<String, BlockListAsLongs.Builder>();
  List<FsVolumeImpl> curVolumes = null;
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    curVolumes = volumes.getVolumes();
    for (FsVolumeSpi v : curVolumes) {
      builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength));
    }
    Set<String> missingVolumesReported = new HashSet<>();
    for (ReplicaInfo b : volumeMap.replicas(bpid)) {
      String volStorageID = b.getVolume().getStorageID();
      if (!builders.containsKey(volStorageID)) {
        if (!missingVolumesReported.contains(volStorageID)) {
          LOG.warn("Storage volume: " + volStorageID + " missing for the"
              + " replica block: " + b + ". Probably being removed!");
          missingVolumesReported.add(volStorageID);
        }
        continue;
      }
      switch (b.getState()) {
        case FINALIZED:
        case RBW:
        case RWR:
          builders.get(b.getVolume().getStorageID()).add(b);
          break;
        case RUR:
          ReplicaInfo orig = b.getOriginalReplica();
          builders.get(b.getVolume().getStorageID()).add(orig);
          break;
        case TEMPORARY:
          break;
        default:
          assert false : "Illegal ReplicaInfo state.";
      }
    }
  }
  for (FsVolumeImpl v : curVolumes) {
    blockReportsMap.put(v.toDatanodeStorage(),
        builders.get(v.getStorageID()).build());
  }
  return blockReportsMap;
}
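A hedged sketch of how a caller could turn this map into the StorageBlockReport[] that the blockReport translator at the top of this section expects. The real assembly happens in BPServiceActor, which additionally handles splitting large reports across multiple RPCs; the code below is only an illustration.

// Sketch, loosely modeled on the DataNode's report assembly: convert the
// per-storage map into the StorageBlockReport[] consumed by blockReport().
Map<DatanodeStorage, BlockListAsLongs> perVolume = getBlockReports(bpid);
StorageBlockReport[] reports = new StorageBlockReport[perVolume.size()];
int i = 0;
for (Map.Entry<DatanodeStorage, BlockListAsLongs> entry :
    perVolume.entrySet()) {
  reports[i++] = new StorageBlockReport(entry.getKey(), entry.getValue());
}
// 'reports' can then be passed to DatanodeProtocol#blockReport together with
// the registration, the block pool id, and a BlockReportContext.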
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.
The class BlockPoolSlice, method readReplicasFromCache:
private boolean readReplicasFromCache(ReplicaMap volumeMap,
    final RamDiskReplicaTracker lazyWriteReplicaMap) {
  ReplicaMap tmpReplicaMap = new ReplicaMap(new AutoCloseableLock());
  File replicaFile = new File(currentDir, REPLICA_CACHE_FILE);
  // Check whether the file exists or not.
  if (!replicaFile.exists()) {
    LOG.info("Replica Cache file: " + replicaFile.getPath()
        + " doesn't exist ");
    return false;
  }
  long fileLastModifiedTime = replicaFile.lastModified();
  if (System.currentTimeMillis() > fileLastModifiedTime + replicaCacheExpiry) {
    LOG.info("Replica Cache file: " + replicaFile.getPath()
        + " has gone stale");
    // Just to make findbugs happy
    if (!replicaFile.delete()) {
      LOG.info("Replica Cache file: " + replicaFile.getPath()
          + " cannot be deleted");
    }
    return false;
  }
  FileInputStream inputStream = null;
  try {
    inputStream = fileIoProvider.getFileInputStream(volume, replicaFile);
    BlockListAsLongs blocksList =
        BlockListAsLongs.readFrom(inputStream, maxDataLength);
    if (blocksList == null) {
      return false;
    }
    for (BlockReportReplica replica : blocksList) {
      switch (replica.getState()) {
        case FINALIZED:
          addReplicaToReplicasMap(replica, tmpReplicaMap,
              lazyWriteReplicaMap, true);
          break;
        case RUR:
        case RBW:
        case RWR:
          addReplicaToReplicasMap(replica, tmpReplicaMap,
              lazyWriteReplicaMap, false);
          break;
        default:
          break;
      }
    }
    inputStream.close();
    // In case of any exception while parsing this cache file, fall back
    // to scan all the files on disk.
    for (Iterator<ReplicaInfo> iter =
        tmpReplicaMap.replicas(bpid).iterator(); iter.hasNext(); ) {
      ReplicaInfo info = iter.next();
      // We use a lightweight GSet to store replicaInfo, we need to remove
      // it from one GSet before adding to another.
      iter.remove();
      volumeMap.add(bpid, info);
    }
    LOG.info("Successfully read replica from cache file : "
        + replicaFile.getPath());
    return true;
  } catch (Exception e) {
    // On any exception, fall back to reading from disk:
    // log the error and return false.
    LOG.info("Exception occurred while reading the replicas cache file: "
        + replicaFile.getPath(), e);
    return false;
  } finally {
    if (!fileIoProvider.delete(volume, replicaFile)) {
      LOG.info("Failed to delete replica cache file: "
          + replicaFile.getPath());
    }
    // close the inputStream
    IOUtils.closeStream(inputStream);
  }
}
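The read path above has a write-side counterpart in BlockPoolSlice (saveReplicas persists the replica map when the pool shuts down). A minimal sketch of that side, assuming BlockListAsLongs#writeTo(OutputStream) as the mirror of the readFrom(InputStream, maxDataLength) call used above; the method name is illustrative, and the real code goes through FileIoProvider and a temporary file.

// Simplified sketch of persisting a replica cache file. writeTo(OutputStream)
// is assumed to be the counterpart of readFrom(InputStream, maxDataLength);
// the real BlockPoolSlice#saveReplicas also handles temp files and IO hooks.
private void persistReplicaCache(BlockListAsLongs blocksList, File replicaFile)
    throws IOException {
  try (FileOutputStream out = new FileOutputStream(replicaFile)) {
    blocksList.writeTo(out);
  }
}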