Example 61 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

From the class HBaseAdmin, method getCompactionState:

/**
   * {@inheritDoc}
   */
@Override
public CompactionState getCompactionState(final TableName tableName, CompactType compactType) throws IOException {
    AdminProtos.GetRegionInfoResponse.CompactionState state = AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
    checkTableExists(tableName);
    // TODO: There is no timeout on this controller. Set one!
    final HBaseRpcController rpcController = rpcControllerFactory.newController();
    switch(compactType) {
        case MOB:
            final AdminProtos.AdminService.BlockingInterface masterAdmin = this.connection.getAdmin(getMasterAddress());
            Callable<AdminProtos.GetRegionInfoResponse.CompactionState> callable = new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {

                @Override
                public AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
                    HRegionInfo info = getMobRegionInfo(tableName);
                    GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
                    GetRegionInfoResponse response = masterAdmin.getRegionInfo(rpcController, request);
                    return response.getCompactionState();
                }
            };
            state = ProtobufUtil.call(callable);
            break;
        case NORMAL:
        default:
            ZooKeeperWatcher zookeeper = null;
            try {
                List<Pair<HRegionInfo, ServerName>> pairs;
                if (TableName.META_TABLE_NAME.equals(tableName)) {
                    zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(), new ThrowableAbortable());
                    pairs = new MetaTableLocator().getMetaRegionsAndLocations(zookeeper);
                } else {
                    pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
                }
                for (Pair<HRegionInfo, ServerName> pair : pairs) {
                    if (pair.getFirst().isOffline())
                        continue;
                    if (pair.getSecond() == null)
                        continue;
                    final ServerName sn = pair.getSecond();
                    final byte[] regionName = pair.getFirst().getRegionName();
                    final AdminService.BlockingInterface snAdmin = this.connection.getAdmin(sn);
                    try {
                        Callable<GetRegionInfoResponse> regionInfoCallable = new Callable<GetRegionInfoResponse>() {

                            @Override
                            public GetRegionInfoResponse call() throws Exception {
                                GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(regionName, true);
                                return snAdmin.getRegionInfo(rpcController, request);
                            }
                        };
                        GetRegionInfoResponse response = ProtobufUtil.call(regionInfoCallable);
                        switch(response.getCompactionState()) {
                            case MAJOR_AND_MINOR:
                                return CompactionState.MAJOR_AND_MINOR;
                            case MAJOR:
                                if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MINOR) {
                                    return CompactionState.MAJOR_AND_MINOR;
                                }
                                state = AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR;
                                break;
                            case MINOR:
                                if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR) {
                                    return CompactionState.MAJOR_AND_MINOR;
                                }
                                state = AdminProtos.GetRegionInfoResponse.CompactionState.MINOR;
                                break;
                            case NONE:
                            // nothing, continue
                            default:
                        }
                    } catch (NotServingRegionException e) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": " + StringUtils.stringifyException(e));
                        }
                    } catch (RemoteException e) {
                        if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
                            if (LOG.isDebugEnabled()) {
                                LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": " + StringUtils.stringifyException(e));
                            }
                        } else {
                            throw e;
                        }
                    }
                }
            } finally {
                if (zookeeper != null) {
                    zookeeper.close();
                }
            }
            break;
    }
    if (state != null) {
        return ProtobufUtil.createCompactionState(state);
    }
    return null;
}
Also used: AdminService(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) Callable(java.util.concurrent.Callable) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) MetaTableLocator(org.apache.hadoop.hbase.zookeeper.MetaTableLocator) GetRegionInfoRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest) GetRegionInfoResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) AdminProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos) ServerName(org.apache.hadoop.hbase.ServerName) RemoteException(org.apache.hadoop.ipc.RemoteException) Pair(org.apache.hadoop.hbase.util.Pair) NameStringPair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair)
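
The catch block above recognizes a NotServingRegionException that arrived wrapped in a RemoteException by searching the exception message for the class name. As a rough illustration (the class and method names below are made up, not part of HBaseAdmin), the same check can be written against the message or against the exception that unwrapRemoteException() reconstructs:

import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.ipc.RemoteException;

public final class RemoteExceptionChecks {

    // Message-based check, as used in getCompactionState() above.
    static boolean wrapsNotServingRegionByMessage(RemoteException e) {
        String msg = e.getMessage();
        return msg != null && msg.contains(NotServingRegionException.class.getName());
    }

    // Class-based check: unwrapRemoteException() re-creates the remote exception type when it
    // is loadable on the client classpath and returns the RemoteException itself otherwise.
    static boolean wrapsNotServingRegionByClass(RemoteException e) {
        return e.unwrapRemoteException() instanceof NotServingRegionException;
    }

    private RemoteExceptionChecks() {
    }
}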

Example 62 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

From the class LogRoller, method run:

@Override
public void run() {
    while (running) {
        long now = System.currentTimeMillis();
        boolean periodic = false;
        if (!rollLog.get()) {
            periodic = (now - this.lastrolltime) > this.rollperiod;
            if (!periodic) {
                synchronized (rollLog) {
                    try {
                        if (!rollLog.get()) {
                            rollLog.wait(this.threadWakeFrequency);
                        }
                    } catch (InterruptedException e) {
                    // Fall through
                    }
                }
                continue;
            }
            // Time for periodic roll
            if (LOG.isDebugEnabled()) {
                LOG.debug("Wal roll period " + this.rollperiod + "ms elapsed");
            }
        } else if (LOG.isDebugEnabled()) {
            LOG.debug("WAL roll requested");
        }
        // FindBugs UL_UNRELEASED_LOCK_EXCEPTION_PATH
        rollLock.lock();
        try {
            this.lastrolltime = now;
            for (Entry<WAL, Boolean> entry : walNeedsRoll.entrySet()) {
                final WAL wal = entry.getKey();
                // Force the roll if the logroll.period is elapsed or if a roll was requested.
                // The returned value is an array of actual region names.
                final byte[][] regionsToFlush = wal.rollWriter(periodic || entry.getValue().booleanValue());
                walNeedsRoll.put(wal, Boolean.FALSE);
                if (regionsToFlush != null) {
                    for (byte[] r : regionsToFlush) scheduleFlush(r);
                }
            }
        } catch (FailedLogCloseException e) {
            server.abort("Failed log close in log roller", e);
        } catch (java.net.ConnectException e) {
            server.abort("Failed log close in log roller", e);
        } catch (IOException ex) {
            // Abort if we get here.  We probably won't recover an IOE. HBASE-1132
            server.abort("IOE in log roller", ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex);
        } catch (Exception ex) {
            LOG.error("Log rolling failed", ex);
            server.abort("Log rolling failed", ex);
        } finally {
            try {
                rollLog.set(false);
            } finally {
                rollLock.unlock();
            }
        }
    }
    LOG.info("LogRoller exiting.");
}
Also used: WAL(org.apache.hadoop.hbase.wal.WAL) FailedLogCloseException(org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean)
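
The IOException branch above unwraps a RemoteException before handing it to server.abort(), so the abort reason records the actual server-side exception rather than the IPC wrapper. A minimal standalone sketch of that unwrap step, with a hypothetical helper class and a locally constructed RemoteException for demonstration:

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

public final class UnwrapExample {

    // Returns the server-side exception when it can be re-created locally; otherwise the
    // RemoteException (or the original IOException) is returned unchanged.
    static IOException unwrapIfRemote(IOException ex) {
        return ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
    }

    public static void main(String[] args) {
        IOException remote = new RemoteException("java.io.FileNotFoundException", "no such WAL");
        // Prints java.io.FileNotFoundException: the wrapped type is rebuilt on the client side.
        System.out.println(unwrapIfRemote(remote).getClass().getName());
    }
}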

Example 63 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

From the class TestBlockReorder, method testHBaseCluster:

/**
   * Test that the hook works within HBase, including when there are multiple blocks.
   */
@Test()
public void testHBaseCluster() throws Exception {
    byte[] sb = "sb".getBytes();
    htu.startMiniZKCluster();
    MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
    hbm.waitForActiveAndReadyMaster();
    HRegionServer targetRs = hbm.getMaster();
    // We want to have a datanode with the same name as the region server, so
    //  we're going to get the regionservername, and start a new datanode with this name.
    String host4 = targetRs.getServerName().getHostname();
    LOG.info("Starting a new datanode with the name=" + host4);
    cluster.startDataNodes(conf, 1, true, null, new String[] { "/r4" }, new String[] { host4 }, null);
    cluster.waitClusterUp();
    final int repCount = 3;
    // We use the regionserver file system & conf as we expect it to have the hook.
    conf = targetRs.getConfiguration();
    HFileSystem rfs = (HFileSystem) targetRs.getFileSystem();
    Table h = htu.createTable(TableName.valueOf(name.getMethodName()), sb);
    // Now, we have 4 datanodes and a replication count of 3. So we don't know if the datanode
    // with the same node will be used. We can't really stop an existing datanode, this would
    // make us fall in nasty hdfs bugs/issues. So we're going to try multiple times.
    // Now we need to find the log file, its locations, and look at it
    String rootDir = new Path(FSUtils.getRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME + "/" + targetRs.getServerName().toString()).toUri().getPath();
    DistributedFileSystem mdfs = (DistributedFileSystem) hbm.getMaster().getMasterFileSystem().getFileSystem();
    int nbTest = 0;
    while (nbTest < 10) {
        final List<Region> regions = targetRs.getOnlineRegions(h.getName());
        final CountDownLatch latch = new CountDownLatch(regions.size());
        // listen for successful log rolls
        final WALActionsListener listener = new WALActionsListener.Base() {

            @Override
            public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
                latch.countDown();
            }
        };
        for (Region region : regions) {
            ((HRegion) region).getWAL().registerWALActionsListener(listener);
        }
        htu.getAdmin().rollWALWriter(targetRs.getServerName());
        // wait
        try {
            latch.await();
        } catch (InterruptedException exception) {
            LOG.warn("Interrupted while waiting for the wal of '" + targetRs + "' to roll. If later " + "tests fail, it's probably because we should still be waiting.");
            Thread.currentThread().interrupt();
        }
        for (Region region : regions) {
            ((HRegion) region).getWAL().unregisterWALActionsListener(listener);
        }
        // We need a sleep as the namenode is informed asynchronously
        Thread.sleep(100);
        // insert one put to ensure a minimal size
        Put p = new Put(sb);
        p.addColumn(sb, sb, sb);
        h.put(p);
        DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);
        HdfsFileStatus[] hfs = dl.getPartialListing();
        // As we wrote a put, we should have at least one log file.
        Assert.assertTrue(hfs.length >= 1);
        for (HdfsFileStatus hf : hfs) {
            // Because this is a live cluster, log files might get archived while we're processing
            try {
                LOG.info("Log file found: " + hf.getLocalName() + " in " + rootDir);
                String logFile = rootDir + "/" + hf.getLocalName();
                FileStatus fsLog = rfs.getFileStatus(new Path(logFile));
                LOG.info("Checking log file: " + logFile);
                // Now checking that the hook is up and running
                // We can't call directly getBlockLocations, it's not available in HFileSystem
                // We're trying multiple times to be sure, as the order is random
                BlockLocation[] bls = rfs.getFileBlockLocations(fsLog, 0, 1);
                if (bls.length > 0) {
                    BlockLocation bl = bls[0];
                    LOG.info(bl.getHosts().length + " replicas for block 0 in " + logFile + " ");
                    for (int i = 0; i < bl.getHosts().length - 1; i++) {
                        LOG.info(bl.getHosts()[i] + "    " + logFile);
                        Assert.assertNotSame(bl.getHosts()[i], host4);
                    }
                    String last = bl.getHosts()[bl.getHosts().length - 1];
                    LOG.info(last + "    " + logFile);
                    if (host4.equals(last)) {
                        nbTest++;
                        LOG.info(logFile + " is on the new datanode and is ok");
                        if (bl.getHosts().length == 3) {
                            // We can test this case from the file system as well
                            // Checking the underlying file system. Multiple times as the order is random
                            testFromDFS(dfs, logFile, repCount, host4);
                            // now from the master
                            testFromDFS(mdfs, logFile, repCount, host4);
                        }
                    }
                }
            } catch (FileNotFoundException exception) {
                LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " + "archived out from under us so we'll ignore and retry. If this test hangs " + "indefinitely you should treat this failure as a symptom.", exception);
            } catch (RemoteException exception) {
                if (exception.unwrapRemoteException() instanceof FileNotFoundException) {
                    LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " + "archived out from under us so we'll ignore and retry. If this test hangs " + "indefinitely you should treat this failure as a symptom.", exception);
                } else {
                    throw exception;
                }
            }
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) Table(org.apache.hadoop.hbase.client.Table) FileStatus(org.apache.hadoop.fs.FileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FileNotFoundException(java.io.FileNotFoundException) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) WALActionsListener(org.apache.hadoop.hbase.regionserver.wal.WALActionsListener) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) CountDownLatch(java.util.concurrent.CountDownLatch) BlockLocation(org.apache.hadoop.fs.BlockLocation) Put(org.apache.hadoop.hbase.client.Put) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Region(org.apache.hadoop.hbase.regionserver.Region) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
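
The test above treats a RemoteException that unwraps to FileNotFoundException the same as a plain FileNotFoundException, because on a live cluster a WAL file can be archived between listing it and reading it. A small sketch of that normalization, assuming a hypothetical statusOrNull helper that is not part of TestBlockReorder:

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RemoteException;

public final class MaybeStat {

    // Returns the file status, or null if the file vanished, whether the miss surfaced as a
    // local FileNotFoundException or as a RemoteException wrapping one.
    static FileStatus statusOrNull(FileSystem fs, Path path) throws IOException {
        try {
            return fs.getFileStatus(path);
        } catch (FileNotFoundException e) {
            return null;
        } catch (RemoteException e) {
            if (e.unwrapRemoteException() instanceof FileNotFoundException) {
                return null;
            }
            throw e;
        }
    }

    private MaybeStat() {
    }
}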

Example 64 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

From the class FanOutOneBlockAsyncDFSOutputHelper, method createOutput:

private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src, boolean overwrite, boolean createParent, short replication, long blockSize, EventLoop eventLoop) throws IOException {
    Configuration conf = dfs.getConf();
    FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
    DFSClient client = dfs.getClient();
    String clientName = client.getClientName();
    ClientProtocol namenode = client.getNamenode();
    HdfsFileStatus stat;
    try {
        stat = namenode.create(src, FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName, new EnumSetWritable<>(overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)), createParent, replication, blockSize, CryptoProtocolVersion.supported());
    } catch (Exception e) {
        if (e instanceof RemoteException) {
            throw (RemoteException) e;
        } else {
            throw new NameNodeException(e);
        }
    }
    beginFileLease(client, stat.getFileId());
    boolean succ = false;
    LocatedBlock locatedBlock = null;
    List<Future<Channel>> futureList = null;
    try {
        DataChecksum summer = createChecksum(client);
        locatedBlock = BLOCK_ADDER.addBlock(namenode, src, client.getClientName(), null, null, stat.getFileId(), null);
        List<Channel> datanodeList = new ArrayList<>();
        futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L, PIPELINE_SETUP_CREATE, summer, eventLoop);
        for (Future<Channel> future : futureList) {
            // fail the creation if there are connection failures since we are fail-fast. The upper
            // layer should retry itself if needed.
            datanodeList.add(future.syncUninterruptibly().getNow());
        }
        Encryptor encryptor = createEncryptor(conf, stat, client);
        FanOutOneBlockAsyncDFSOutput output = new FanOutOneBlockAsyncDFSOutput(conf, fsUtils, dfs, client, namenode, clientName, src, stat.getFileId(), locatedBlock, encryptor, eventLoop, datanodeList, summer, ALLOC);
        succ = true;
        return output;
    } finally {
        if (!succ) {
            if (futureList != null) {
                for (Future<Channel> f : futureList) {
                    f.addListener(new FutureListener<Channel>() {

                        @Override
                        public void operationComplete(Future<Channel> future) throws Exception {
                            if (future.isSuccess()) {
                                future.getNow().close();
                            }
                        }
                    });
                }
            }
            endFileLease(client, stat.getFileId());
            fsUtils.recoverFileLease(dfs, new Path(src), conf, new CancelOnClose(client));
        }
    }
}
Also used: Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor(org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor) Encryptor(org.apache.hadoop.crypto.Encryptor) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FSUtils(org.apache.hadoop.hbase.util.FSUtils) DFSClient(org.apache.hadoop.hdfs.DFSClient) EnumSetWritable(org.apache.hadoop.io.EnumSetWritable) Path(org.apache.hadoop.fs.Path) NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel) Channel(io.netty.channel.Channel) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) LeaseExpiredException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException) InvalidBlockTokenException(org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException) InvocationTargetException(java.lang.reflect.InvocationTargetException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) DataChecksum(org.apache.hadoop.util.DataChecksum) ChannelFuture(io.netty.channel.ChannelFuture) Future(io.netty.util.concurrent.Future) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)
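
In this helper the RemoteException is rethrown as-is, since it already extends IOException and carries the server-side class name, while any other failure from the NameNode call is wrapped exactly once. A hedged sketch of that wrap-or-rethrow shape; the class names below are invented stand-ins for the helper's own NameNodeException, not the real ones:

import java.io.IOException;
import java.util.concurrent.Callable;
import org.apache.hadoop.ipc.RemoteException;

public final class NameNodeCallWrapper {

    // Hypothetical wrapper used when the failure did not come back as a RemoteException.
    static class NameNodeCallException extends IOException {
        NameNodeCallException(Throwable cause) {
            super(cause);
        }
    }

    static <T> T call(Callable<T> nameNodeCall) throws IOException {
        try {
            return nameNodeCall.call();
        } catch (Exception e) {
            if (e instanceof RemoteException) {
                // Keep the RPC error and the remote class name it declares intact.
                throw (RemoteException) e;
            }
            throw new NameNodeCallException(e);
        }
    }

    private NameNodeCallWrapper() {
    }
}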

Example 65 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project storm by apache.

From the class TestHdfsSemantics, method testDoubleCreateSemantics:

@Test
public void testDoubleCreateSemantics() throws Exception {
    //1 create an already existing open file w/o override flag
    Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1");
    FSDataOutputStream os1 = fs.create(file1, false);
    try {
        // should fail
        fs.create(file1, false);
        Assert.assertTrue("Create did not throw an exception", false);
    } catch (RemoteException e) {
        Assert.assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass());
    }
    //2 close file and retry creation
    os1.close();
    try {
        // should still fail
        fs.create(file1, false);
    } catch (FileAlreadyExistsException e) {
    // expecting this exception
    }
    //3 delete file and retry creation
    fs.delete(file1, false);
    // should pass
    FSDataOutputStream os2 = fs.create(file1, false);
    Assert.assertNotNull(os2);
    os2.close();
}
Also used: Path(org.apache.hadoop.fs.Path) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) AlreadyBeingCreatedException(org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
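
The assertion above inspects the class recovered by unwrapRemoteException() instead of the RemoteException itself, which keeps the test independent of how the RPC layer reports the failure. A minimal self-contained JUnit sketch of the same idea; the test class, test name, and wrapped exception are chosen purely for illustration:

import java.io.FileNotFoundException;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.Assert;
import org.junit.Test;

public class RemoteExceptionUnwrapTest {

    @Test
    public void unwrapRestoresDeclaredClass() {
        RemoteException re = new RemoteException(FileNotFoundException.class.getName(), "gone");
        // The class named inside the RemoteException is re-instantiated on the client side.
        Assert.assertEquals(FileNotFoundException.class, re.unwrapRemoteException().getClass());
    }
}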

Aggregations

RemoteException (org.apache.hadoop.ipc.RemoteException): 99
IOException (java.io.IOException): 53
Test (org.junit.Test): 39
Path (org.apache.hadoop.fs.Path): 36
Configuration (org.apache.hadoop.conf.Configuration): 20
FileNotFoundException (java.io.FileNotFoundException): 19
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
FileSystem (org.apache.hadoop.fs.FileSystem): 12
InterruptedIOException (java.io.InterruptedIOException): 10
AccessControlException (org.apache.hadoop.security.AccessControlException): 10
ServerName (org.apache.hadoop.hbase.ServerName): 9
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 8
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 7
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 7
EOFException (java.io.EOFException): 6
ArrayList (java.util.ArrayList): 6
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 6
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 6