Example 26 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class ServerCrashProcedure method verifyAndAssignMetaWithRetries.

/**
   * If hbase:meta is not assigned already, assign.
   * @throws IOException
   */
private void verifyAndAssignMetaWithRetries(final MasterProcedureEnv env) throws IOException {
    MasterServices services = env.getMasterServices();
    int iTimes = services.getConfiguration().getInt(KEY_RETRIES_ON_META, DEFAULT_RETRIES_ON_META);
    // Just reuse the same wait time as the short wait on meta. Adding another config is overkill.
    long waitTime = services.getConfiguration().getLong(KEY_SHORT_WAIT_ON_META, DEFAULT_SHORT_WAIT_ON_META);
    int iFlag = 0;
    while (true) {
        try {
            verifyAndAssignMeta(env);
            break;
        } catch (KeeperException e) {
            services.abort("In server shutdown processing, assigning meta", e);
            throw new IOException("Aborting", e);
        } catch (Exception e) {
            if (iFlag >= iTimes) {
                services.abort("verifyAndAssignMeta failed after" + iTimes + " retries, aborting", e);
                throw new IOException("Aborting", e);
            }
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e1) {
                LOG.warn("Interrupted when is the thread sleep", e1);
                Thread.currentThread().interrupt();
                throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
            }
            iFlag++;
        }
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) MasterServices(org.apache.hadoop.hbase.master.MasterServices) IOException(java.io.IOException) KeeperException(org.apache.zookeeper.KeeperException) ProcedureYieldException(org.apache.hadoop.hbase.procedure2.ProcedureYieldException)
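
The common shape here is a bounded retry loop in which an interrupt during the backoff sleep is converted to an InterruptedIOException after restoring the thread's interrupt flag. A minimal standalone sketch of that pattern, with illustrative names (Action, runWithRetries) that are not part of the HBase API:

import java.io.IOException;
import java.io.InterruptedIOException;

public final class RetrySketch {

    interface Action {
        void run() throws IOException;
    }

    // Run the action, retrying up to maxRetries times with a fixed backoff.
    static void runWithRetries(Action action, int maxRetries, long waitMillis)
            throws IOException {
        int attempt = 0;
        while (true) {
            try {
                action.run();
                return; // success
            } catch (IOException e) {
                if (attempt >= maxRetries) {
                    throw e; // out of retries; rethrow the last failure
                }
                try {
                    Thread.sleep(waitMillis);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt(); // restore the interrupt flag
                    throw (InterruptedIOException) new InterruptedIOException().initCause(ie);
                }
                attempt++;
            }
        }
    }
}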

Example 27 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class SnapshotManifestV1 method loadRegionManifests.

static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf, final Executor executor, final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException {
    FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
    if (regions == null) {
        LOG.debug("No regions under directory:" + snapshotDir);
        return null;
    }
    final ExecutorCompletionService<SnapshotRegionManifest> completionService = new ExecutorCompletionService<>(executor);
    for (final FileStatus region : regions) {
        completionService.submit(new Callable<SnapshotRegionManifest>() {

            @Override
            public SnapshotRegionManifest call() throws IOException {
                HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, region.getPath());
                return buildManifestFromDisk(conf, fs, snapshotDir, hri);
            }
        });
    }
    ArrayList<SnapshotRegionManifest> regionsManifest = new ArrayList<>(regions.length);
    try {
        for (int i = 0; i < regions.length; ++i) {
            regionsManifest.add(completionService.take().get());
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
    return regionsManifest;
}
Also used : InterruptedIOException(java.io.InterruptedIOException) FileStatus(org.apache.hadoop.fs.FileStatus) ArrayList(java.util.ArrayList) SnapshotRegionManifest(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) IOException(java.io.IOException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) ExecutionException(java.util.concurrent.ExecutionException) FSUtils(org.apache.hadoop.hbase.util.FSUtils)
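
The pattern here is fan-out via an ExecutorCompletionService followed by a blocking collect, with InterruptedException translated to InterruptedIOException and ExecutionException unwrapped to its real cause. A generic sketch of that shape (unlike the original, it also chains the cause and restores the interrupt flag):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorCompletionService;

final class CollectSketch {

    static <T> List<T> collectAll(Executor executor, List<Callable<T>> tasks)
            throws IOException {
        ExecutorCompletionService<T> completionService = new ExecutorCompletionService<>(executor);
        for (Callable<T> task : tasks) {
            completionService.submit(task);
        }
        List<T> results = new ArrayList<>(tasks.size());
        try {
            for (int i = 0; i < tasks.size(); i++) {
                // take() blocks until any submitted task completes
                results.add(completionService.take().get());
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw (InterruptedIOException) new InterruptedIOException(e.getMessage()).initCause(e);
        } catch (ExecutionException e) {
            // unwrap the task's real failure
            throw new IOException(e.getCause());
        }
        return results;
    }
}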

Example 28 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class TableNamespaceManager method start.

public void start() throws IOException {
    if (!MetaTableAccessor.tableExists(masterServices.getConnection(), TableName.NAMESPACE_TABLE_NAME)) {
        LOG.info("Namespace table not found. Creating...");
        createNamespaceTable(masterServices);
    }
    try {
        // Wait for the namespace table to be initialized.
        long startTime = EnvironmentEdgeManager.currentTime();
        int timeout = conf.getInt(NS_INIT_TIMEOUT, DEFAULT_NS_INIT_TIMEOUT);
        while (!isTableAvailableAndInitialized()) {
            if (EnvironmentEdgeManager.currentTime() - startTime + 100 > timeout) {
                // We can't do anything if ns is not online.
                throw new IOException("Timedout " + timeout + "ms waiting for namespace table to " + "be assigned and enabled: " + getTableState());
            }
            Thread.sleep(100);
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) TimeoutIOException(org.apache.hadoop.hbase.exceptions.TimeoutIOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException)
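
The shape here is a bounded polling loop: sleep in short increments, fail with a plain IOException on timeout, and convert an interrupt to InterruptedIOException. A sketch with the condition and timeout as plain parameters (the original reads them from the master configuration):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.function.BooleanSupplier;

final class WaitSketch {

    static void waitUntil(BooleanSupplier condition, long timeoutMillis) throws IOException {
        long start = System.currentTimeMillis();
        try {
            while (!condition.getAsBoolean()) {
                // Fail if the next 100ms sleep would take us past the deadline.
                if (System.currentTimeMillis() - start + 100 > timeoutMillis) {
                    throw new IOException("Timed out after " + timeoutMillis + "ms waiting for condition");
                }
                Thread.sleep(100);
            }
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }
}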

Example 29 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class MergeTableRegionsProcedure method MoveRegionsToSameRS.

/**
   * Move all regions to the same region server
   * @param env MasterProcedureEnv
   * @return whether the target regions are hosted by the same RS
   * @throws IOException
   */
private boolean MoveRegionsToSameRS(final MasterProcedureEnv env) throws IOException {
    // Make sure the regions are on the same region server before sending the
    // merge request to that region server.
    boolean onSameRS = isRegionsOnTheSameServer(env);
    if (!onSameRS) {
        // Note: the following logic assumes that we only have 2 regions to merge. In the future,
        // if we want to extend this to more than 2 regions, the code will need to be modified a bit.
        //
        RegionStates regionStates = getAssignmentManager(env).getRegionStates();
        ServerName regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);
        RegionLoad loadOfRegionA = getRegionLoad(env, regionLocation, regionsToMerge[0]);
        RegionLoad loadOfRegionB = getRegionLoad(env, regionLocation2, regionsToMerge[1]);
        if (loadOfRegionA != null && loadOfRegionB != null && loadOfRegionA.getRequestsCount() < loadOfRegionB.getRequestsCount()) {
            // switch regionsToMerge[0] and regionsToMerge[1]
            HRegionInfo tmpRegion = this.regionsToMerge[0];
            this.regionsToMerge[0] = this.regionsToMerge[1];
            this.regionsToMerge[1] = tmpRegion;
            ServerName tmpLocation = regionLocation;
            regionLocation = regionLocation2;
            regionLocation2 = tmpLocation;
        }
        long startTime = EnvironmentEdgeManager.currentTime();
        RegionPlan regionPlan = new RegionPlan(regionsToMerge[1], regionLocation2, regionLocation);
        LOG.info("Moving regions to same server for merge: " + regionPlan.toString());
        getAssignmentManager(env).balance(regionPlan);
        do {
            try {
                Thread.sleep(20);
                // Make sure to check RIT first, then get the region location; otherwise
                // we could get a wrong result if the region comes online between getting
                // the region location and checking RIT.
                boolean isRIT = regionStates.isRegionInTransition(regionsToMerge[1]);
                regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);
                onSameRS = regionLocation.equals(regionLocation2);
                if (onSameRS || !isRIT) {
                    // Regions are on the same server, or the region is not in transition any more.
                    break;
                }
            } catch (InterruptedException e) {
                InterruptedIOException iioe = new InterruptedIOException();
                iioe.initCause(e);
                throw iioe;
            }
        } while ((EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env));
    }
    return onSameRS;
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) InterruptedIOException(java.io.InterruptedIOException) RegionLoad(org.apache.hadoop.hbase.RegionLoad) RegionStates(org.apache.hadoop.hbase.master.RegionStates) RegionPlan(org.apache.hadoop.hbase.master.RegionPlan) ServerName(org.apache.hadoop.hbase.ServerName)
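
This example spells out the same conversion that Examples 26 and 28 write as a one-liner. The two idioms are equivalent; the cast in the compact form is needed only because initCause is declared to return Throwable. Side by side, with illustrative names:

import java.io.InterruptedIOException;

final class WrapIdioms {

    // One-liner with a cast, as in Examples 26 and 28.
    static InterruptedIOException wrapCompact(InterruptedException e) {
        return (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    // Explicit three-line form, as in Example 29.
    static InterruptedIOException wrapExplicit(InterruptedException e) {
        InterruptedIOException iioe = new InterruptedIOException();
        iioe.initCause(e);
        return iioe;
    }
}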

Example 30 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class Compactor method performCompaction.

/**
   * Performs the compaction.
   * @param fd FileDetails of cell sink writer
   * @param scanner Where to read from.
   * @param writer Where to write to.
   * @param smallestReadPoint Smallest read point.
   * @param cleanSeqId When true, remove seqId (used to be mvcc) values which are <=
   *          smallestReadPoint
   * @param major Is a major compaction.
   * @param numofFilesToCompact the number of files to compact
   * @return Whether compaction ended; false if it was interrupted for some reason.
   */
protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer, long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, boolean major, int numofFilesToCompact) throws IOException {
    assert writer instanceof ShipperListener;
    long bytesWrittenProgressForCloseCheck = 0;
    long bytesWrittenProgressForLog = 0;
    long bytesWrittenProgressForShippedCall = 0;
    // Since scanner.next() can return 'false' but still be delivering data,
    // we have to use a do/while loop.
    List<Cell> cells = new ArrayList<>();
    long closeCheckSizeLimit = HStore.getCloseCheckInterval();
    long lastMillis = 0;
    if (LOG.isDebugEnabled()) {
        lastMillis = EnvironmentEdgeManager.currentTime();
    }
    String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction");
    long now = 0;
    boolean hasMore;
    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
    throughputController.start(compactionName);
    KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? (KeyValueScanner) scanner : null;
    long shippedCallSizeLimit = (long) numofFilesToCompact * this.store.getFamily().getBlocksize();
    try {
        do {
            hasMore = scanner.next(cells, scannerContext);
            if (LOG.isDebugEnabled()) {
                now = EnvironmentEdgeManager.currentTime();
            }
            // output to writer:
            Cell lastCleanCell = null;
            long lastCleanCellSeqId = 0;
            for (Cell c : cells) {
                if (cleanSeqId && c.getSequenceId() <= smallestReadPoint) {
                    lastCleanCell = c;
                    lastCleanCellSeqId = c.getSequenceId();
                    CellUtil.setSequenceId(c, 0);
                } else {
                    lastCleanCell = null;
                    lastCleanCellSeqId = 0;
                }
                writer.append(c);
                int len = KeyValueUtil.length(c);
                ++progress.currentCompactedKVs;
                progress.totalCompactedSize += len;
                bytesWrittenProgressForShippedCall += len;
                if (LOG.isDebugEnabled()) {
                    bytesWrittenProgressForLog += len;
                }
                throughputController.control(compactionName, len);
                // check periodically to see if a system stop is requested
                if (closeCheckSizeLimit > 0) {
                    bytesWrittenProgressForCloseCheck += len;
                    if (bytesWrittenProgressForCloseCheck > closeCheckSizeLimit) {
                        bytesWrittenProgressForCloseCheck = 0;
                        if (!store.areWritesEnabled()) {
                            progress.cancel();
                            return false;
                        }
                    }
                }
                if (kvs != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) {
                    if (lastCleanCell != null) {
                        // HBASE-16931, set back sequence id to avoid affecting scan order unexpectedly.
                        // ShipperListener will clone the last cells it refers to, so we need to set
                        // the sequence id back before ShipperListener.beforeShipped
                        CellUtil.setSequenceId(lastCleanCell, lastCleanCellSeqId);
                    }
                    // Clone the cells that are in the writer so that they are freed of references,
                    // if they are holding any.
                    ((ShipperListener) writer).beforeShipped();
                    // The SHARED block references being read for compaction are kept in the prevBlocks
                    // list (see HFileScannerImpl#prevBlocks). In the scan flow, after each set of cells
                    // is returned to the client, we call shipped(), which can clear this list. Here we
                    // do a similar thing: during the compaction, after every N cells written (with a
                    // collective size of 'shippedCallSizeLimit'), we call shipped(), which may clear the
                    // prevBlocks list.
                    kvs.shipped();
                    bytesWrittenProgressForShippedCall = 0;
                }
            }
            if (lastCleanCell != null) {
                // HBASE-16931, set back sequence id to avoid affecting scan order unexpectedly
                CellUtil.setSequenceId(lastCleanCell, lastCleanCellSeqId);
            }
            // logging at DEBUG level
            if (LOG.isDebugEnabled()) {
                if ((now - lastMillis) >= COMPACTION_PROGRESS_LOG_INTERVAL) {
                    LOG.debug("Compaction progress: " + compactionName + " " + progress + String.format(", rate=%.2f kB/sec", (bytesWrittenProgressForLog / 1024.0) / ((now - lastMillis) / 1000.0)) + ", throughputController is " + throughputController);
                    lastMillis = now;
                    bytesWrittenProgressForLog = 0;
                }
            }
            cells.clear();
        } while (hasMore);
    } catch (InterruptedException e) {
        progress.cancel();
        throw new InterruptedIOException("Interrupted while control throughput of compacting " + compactionName);
    } finally {
        throughputController.finish(compactionName);
    }
    progress.complete();
    return true;
}
Also used : InterruptedIOException(java.io.InterruptedIOException) ArrayList(java.util.ArrayList) KeyValueScanner(org.apache.hadoop.hbase.regionserver.KeyValueScanner) Cell(org.apache.hadoop.hbase.Cell) ScannerContext(org.apache.hadoop.hbase.regionserver.ScannerContext) ShipperListener(org.apache.hadoop.hbase.regionserver.ShipperListener)
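
The interruption handling in this example cancels the progress tracker before converting to InterruptedIOException, and (unlike the earlier examples) carries only a message, not the cause. A sketch of that cancel-then-convert shape, with Progress and the explicit interrupt check as stand-ins for the throughput controller that raises the interrupt in the original:

import java.io.InterruptedIOException;

final class CancelSketch {

    interface Progress {
        void cancel();
    }

    static void runSteps(Progress progress, Runnable step, int iterations)
            throws InterruptedIOException {
        try {
            for (int i = 0; i < iterations; i++) {
                if (Thread.interrupted()) {
                    throw new InterruptedException();
                }
                step.run();
            }
        } catch (InterruptedException e) {
            progress.cancel(); // mark the work as cancelled before rethrowing
            throw new InterruptedIOException("Interrupted while running steps");
        }
    }
}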

Aggregations

InterruptedIOException (java.io.InterruptedIOException): 274
IOException (java.io.IOException): 186
Test (org.junit.Test): 39
ArrayList (java.util.ArrayList): 27
Socket (java.net.Socket): 26
ConnectException (java.net.ConnectException): 22
ExecutionException (java.util.concurrent.ExecutionException): 22
InputStream (java.io.InputStream): 21
InetSocketAddress (java.net.InetSocketAddress): 21
ByteBuffer (java.nio.ByteBuffer): 21
Path (org.apache.hadoop.fs.Path): 20
NoRouteToHostException (java.net.NoRouteToHostException): 19
ServletException (javax.servlet.ServletException): 17
CountDownLatch (java.util.concurrent.CountDownLatch): 16
SocketTimeoutException (java.net.SocketTimeoutException): 15
HttpServletRequest (javax.servlet.http.HttpServletRequest): 15
HttpServletResponse (javax.servlet.http.HttpServletResponse): 15
EOFException (java.io.EOFException): 14
SocketException (java.net.SocketException): 14
OutputStream (java.io.OutputStream): 13