
Example 1 with ChecksumProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto in project hbase by apache.

In class FanOutOneBlockAsyncDFSOutputHelper, method connectToDataNodes:

private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client,
        String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
        BlockConstructionStage stage, DataChecksum summer, EventLoop eventLoop) {
    Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
        .setBaseHeader(BaseHeaderProto.newBuilder()
            .setBlock(PB_HELPER.convert(blockCopy))
            .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
        .setClientName(clientName)
        .build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
        .setHeader(header)
        .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
        .setPipelineSize(1)
        .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
        .setMaxBytesRcvd(maxBytesRcvd)
        .setLatestGenerationStamp(latestGS)
        .setRequestedChecksum(checksumProto)
        .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        DatanodeInfo dnInfo = datanodeInfos[i];
        Enum<?> storageType = storageTypes[i];
        Promise<Channel> promise = eventLoop.newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoop).channel(NioSocketChannel.class)
            .option(CONNECT_TIMEOUT_MILLIS, timeoutMs)
            .handler(new ChannelInitializer<Channel>() {

            @Override
            protected void initChannel(Channel ch) throws Exception {
                // We need the remote address of the channel, so we can only proceed once the
                // channel has connected. The implementation is left empty because Netty does
                // not allow a null handler.
            }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, timeoutMs, client, locatedBlock.getBlockToken(), promise);
                } else {
                    promise.tryFailure(future.cause());
                }
            }
        });
    }
    return futureList;
}
Also used : ChannelFuture (io.netty.channel.ChannelFuture), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), NioSocketChannel (io.netty.channel.socket.nio.NioSocketChannel), Channel (io.netty.channel.Channel), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), ArrayList (java.util.ArrayList), ChecksumProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto), OpWriteBlockProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto), ChannelFutureListener (io.netty.channel.ChannelFutureListener), UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException), LeaseExpiredException (org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException), InvalidBlockTokenException (org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException), InvocationTargetException (java.lang.reflect.InvocationTargetException), IOException (java.io.IOException), RemoteException (org.apache.hadoop.ipc.RemoteException), Future (io.netty.util.concurrent.Future), Bootstrap (io.netty.bootstrap.Bootstrap), ClientOperationHeaderProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)
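
A minimal sketch (not part of the original source) of how the returned futures might be consumed. The variable names and the PIPELINE_SETUP_CREATE stage argument are assumptions for illustration:

// Hypothetical caller: connect to every datanode in the pipeline and collect the
// channels before writing; a failed connect surfaces through the corresponding future.
List<Future<Channel>> futures = connectToDataNodes(conf, client, clientName, locatedBlock,
    0L, 0L, BlockConstructionStage.PIPELINE_SETUP_CREATE, summer, eventLoop);
List<Channel> channels = new ArrayList<>(futures.size());
for (Future<Channel> future : futures) {
    // syncUninterruptibly() rethrows any failure recorded via promise.tryFailure(...)
    channels.add(future.syncUninterruptibly().getNow());
}

Because each element is a Promise completed from the connect listener, callers can also attach their own listeners instead of blocking, which is what the async output stream does in practice.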

Example 2 with ChecksumProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto in project hbase by apache.

In class FanOutOneBlockAsyncDFSOutputHelper, method connectToDataNodes:

private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client,
        String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
        BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup,
        Class<? extends Channel> channelClass) {
    StorageType[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
        .setBaseHeader(BaseHeaderProto.newBuilder()
            .setBlock(PBHelperClient.convert(blockCopy))
            .setToken(PBHelperClient.convert(locatedBlock.getBlockToken())))
        .setClientName(clientName)
        .build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
        .setHeader(header)
        .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
        .setPipelineSize(1)
        .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
        .setMaxBytesRcvd(maxBytesRcvd)
        .setLatestGenerationStamp(latestGS)
        .setRequestedChecksum(checksumProto)
        .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        DatanodeInfo dnInfo = datanodeInfos[i];
        StorageType storageType = storageTypes[i];
        Promise<Channel> promise = eventLoopGroup.next().newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoopGroup).channel(channelClass)
            .option(CONNECT_TIMEOUT_MILLIS, timeoutMs)
            .handler(new ChannelInitializer<Channel>() {

            @Override
            protected void initChannel(Channel ch) throws Exception {
                // We need the remote address of the channel, so we can only proceed once the
                // channel has connected. The implementation is left empty because Netty does
                // not allow a null handler.
            }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, timeoutMs, client, locatedBlock.getBlockToken(), promise);
                } else {
                    promise.tryFailure(future.cause());
                }
            }
        });
    }
    return futureList;
}
Also used : ChannelFuture (org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), Channel (org.apache.hbase.thirdparty.io.netty.channel.Channel), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), ArrayList (java.util.ArrayList), ChecksumProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto), OpWriteBlockProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto), ChannelFutureListener (org.apache.hbase.thirdparty.io.netty.channel.ChannelFutureListener), UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException), LeaseExpiredException (org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException), InvalidBlockTokenException (org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException), InvocationTargetException (java.lang.reflect.InvocationTargetException), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), RemoteException (org.apache.hadoop.ipc.RemoteException), Future (org.apache.hbase.thirdparty.io.netty.util.concurrent.Future), Bootstrap (org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap), ClientOperationHeaderProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)
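
Unlike Example 1, this variant is parameterized over the EventLoopGroup and channel class instead of hard-coding NIO, which lets the caller pick the native epoll transport when it is available. A hedged sketch of how a caller might choose them, assuming the Netty epoll classes are on the classpath (the helper's real selection logic lives elsewhere and the call arguments are illustrative):

// Illustrative only: prefer the native epoll transport when available, otherwise
// fall back to NIO; the shaded org.apache.hbase.thirdparty classes mirror these.
EventLoopGroup group;
Class<? extends Channel> channelClass;
if (Epoll.isAvailable()) {
    group = new EpollEventLoopGroup();
    channelClass = EpollSocketChannel.class;
} else {
    group = new NioEventLoopGroup();
    channelClass = NioSocketChannel.class;
}
List<Future<Channel>> futures = connectToDataNodes(conf, client, clientName, locatedBlock,
    0L, 0L, BlockConstructionStage.PIPELINE_SETUP_CREATE, summer, group, channelClass);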

Example 3 with ChecksumProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto in project hadoop by apache.

In class Sender, method writeBlock:

@Override
public void writeBlock(final ExtendedBlock blk, final StorageType storageType,
        final Token<BlockTokenIdentifier> blockToken, final String clientName,
        final DatanodeInfo[] targets, final StorageType[] targetStorageTypes,
        final DatanodeInfo source, final BlockConstructionStage stage, final int pipelineSize,
        final long minBytesRcvd, final long maxBytesRcvd, final long latestGenerationStamp,
        DataChecksum requestedChecksum, final CachingStrategy cachingStrategy,
        final boolean allowLazyPersist, final boolean pinning,
        final boolean[] targetPinnings) throws IOException {
    ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(requestedChecksum);
    OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
        .setHeader(header)
        .setStorageType(PBHelperClient.convertStorageType(storageType))
        .addAllTargets(PBHelperClient.convert(targets, 1))
        .addAllTargetStorageTypes(PBHelperClient.convertStorageTypes(targetStorageTypes, 1))
        .setStage(toProto(stage))
        .setPipelineSize(pipelineSize)
        .setMinBytesRcvd(minBytesRcvd)
        .setMaxBytesRcvd(maxBytesRcvd)
        .setLatestGenerationStamp(latestGenerationStamp)
        .setRequestedChecksum(checksumProto)
        .setCachingStrategy(getCachingStrategy(cachingStrategy))
        .setAllowLazyPersist(allowLazyPersist)
        .setPinning(pinning)
        .addAllTargetPinnings(PBHelperClient.convert(targetPinnings, 1));
    if (source != null) {
        proto.setSource(PBHelperClient.convertDatanodeInfo(source));
    }
    send(out, Op.WRITE_BLOCK, proto.build());
}
Also used : OpBlockGroupChecksumProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto), ChecksumProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto), OpBlockChecksumProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto), OpWriteBlockProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto), ClientOperationHeaderProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)
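
In all three examples, DataTransferProtoUtil.toProto maps a DataChecksum to the ChecksumProto carried in the write-block request. A minimal sketch of that conversion (the CRC32C algorithm and 512-byte chunk size are example values, not taken from the source):

// The proto carries only the checksum algorithm and the bytes-per-checksum chunk
// size, which is all the receiving datanode needs to verify incoming packets.
DataChecksum summer = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
assert checksumProto.getBytesPerChecksum() == 512;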

Aggregations

ChecksumProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto): 3 usages
ClientOperationHeaderProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto): 3 usages
OpWriteBlockProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto): 3 usages
IOException (java.io.IOException): 2 usages
InvocationTargetException (java.lang.reflect.InvocationTargetException): 2 usages
ArrayList (java.util.ArrayList): 2 usages
UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException): 2 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2 usages
InvalidBlockTokenException (org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException): 2 usages
LeaseExpiredException (org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): 2 usages
RemoteException (org.apache.hadoop.ipc.RemoteException): 2 usages
Bootstrap (io.netty.bootstrap.Bootstrap): 1 usage
Channel (io.netty.channel.Channel): 1 usage
ChannelFuture (io.netty.channel.ChannelFuture): 1 usage
ChannelFutureListener (io.netty.channel.ChannelFutureListener): 1 usage
NioSocketChannel (io.netty.channel.socket.nio.NioSocketChannel): 1 usage
Future (io.netty.util.concurrent.Future): 1 usage
InterruptedIOException (java.io.InterruptedIOException): 1 usage
StorageType (org.apache.hadoop.fs.StorageType): 1 usage