Example 56 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class MergeTableRegionsProcedure method MoveRegionsToSameRS.

/**
   * Move all regions to the same region server
   * @param env MasterProcedureEnv
   * @return whether the target regions are hosted by the same RS
   * @throws IOException
   */
private boolean MoveRegionsToSameRS(final MasterProcedureEnv env) throws IOException {
    // Make sure the regions are on the same region server before sending the
    // merge regions request to the region server.
    //
    boolean onSameRS = isRegionsOnTheSameServer(env);
    if (!onSameRS) {
        // Note: the following logic assumes that we only have 2 regions to merge.  In the future,
        // if we want to extend to more than 2 regions, this code will need to be modified a bit.
        //
        RegionStates regionStates = getAssignmentManager(env).getRegionStates();
        ServerName regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);
        RegionLoad loadOfRegionA = getRegionLoad(env, regionLocation, regionsToMerge[0]);
        RegionLoad loadOfRegionB = getRegionLoad(env, regionLocation2, regionsToMerge[1]);
        if (loadOfRegionA != null && loadOfRegionB != null && loadOfRegionA.getRequestsCount() < loadOfRegionB.getRequestsCount()) {
            // switch regionsToMerge[0] and regionsToMerge[1]
            HRegionInfo tmpRegion = this.regionsToMerge[0];
            this.regionsToMerge[0] = this.regionsToMerge[1];
            this.regionsToMerge[1] = tmpRegion;
            ServerName tmpLocation = regionLocation;
            regionLocation = regionLocation2;
            regionLocation2 = tmpLocation;
        }
        long startTime = EnvironmentEdgeManager.currentTime();
        RegionPlan regionPlan = new RegionPlan(regionsToMerge[1], regionLocation2, regionLocation);
        LOG.info("Moving regions to same server for merge: " + regionPlan.toString());
        getAssignmentManager(env).balance(regionPlan);
        do {
            try {
                Thread.sleep(20);
                // Make sure to check RIT first and only then get the region location; otherwise
                // we could get a wrong result if the region comes online between getting the
                // region location and checking RIT
                boolean isRIT = regionStates.isRegionInTransition(regionsToMerge[1]);
                regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);
                onSameRS = regionLocation.equals(regionLocation2);
                if (onSameRS || !isRIT) {
                    // Regions are on the same RS, or the region is no longer in RegionInTransition
                    break;
                }
            } catch (InterruptedException e) {
                InterruptedIOException iioe = new InterruptedIOException();
                iioe.initCause(e);
                throw iioe;
            }
        } while ((EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env));
    }
    return onSameRS;
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) InterruptedIOException(java.io.InterruptedIOException) RegionLoad(org.apache.hadoop.hbase.RegionLoad) RegionStates(org.apache.hadoop.hbase.master.RegionStates) RegionPlan(org.apache.hadoop.hbase.master.RegionPlan) ServerName(org.apache.hadoop.hbase.ServerName)
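Each of these examples follows the same conversion idiom: catch the checked InterruptedException, wrap it in an InterruptedIOException via initCause, and rethrow it through an IOException-only method signature. Below is a minimal standalone sketch of the idiom; the class and method names are illustrative, not from the HBase source, and unlike the snippet above the sketch also restores the thread's interrupt flag, which is generally recommended.

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptibleWait {

    /**
     * Sleeps inside an IOException-only API, converting any
     * InterruptedException into an InterruptedIOException.
     */
    static void pause(long millis) throws IOException {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            // Re-assert the interrupt status for callers further up the stack.
            Thread.currentThread().interrupt();
            InterruptedIOException iioe = new InterruptedIOException("interrupted while pausing");
            iioe.initCause(e);
            throw iioe;
        }
    }

    public static void main(String[] args) throws IOException {
        pause(20);
        System.out.println("done");
    }
}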

Example 57 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class Compactor method performCompaction.

/**
   * Performs the compaction.
   * @param fd FileDetails of cell sink writer
   * @param scanner Where to read from.
   * @param writer Where to write to.
   * @param smallestReadPoint Smallest read point.
   * @param cleanSeqId When true, remove seqId (used to be mvcc) value which is <=
   *          smallestReadPoint
   * @param major Is a major compaction.
   * @param numofFilesToCompact the number of files to compact
   * @return Whether compaction ended; false if it was interrupted for some reason.
   */
protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer, long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, boolean major, int numofFilesToCompact) throws IOException {
    assert writer instanceof ShipperListener;
    long bytesWrittenProgressForCloseCheck = 0;
    long bytesWrittenProgressForLog = 0;
    long bytesWrittenProgressForShippedCall = 0;
    // Since scanner.next() can return 'false' but still be delivering data,
    // we have to use a do/while loop.
    List<Cell> cells = new ArrayList<>();
    long closeCheckSizeLimit = HStore.getCloseCheckInterval();
    long lastMillis = 0;
    if (LOG.isDebugEnabled()) {
        lastMillis = EnvironmentEdgeManager.currentTime();
    }
    String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction");
    long now = 0;
    boolean hasMore;
    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
    throughputController.start(compactionName);
    KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? (KeyValueScanner) scanner : null;
    long shippedCallSizeLimit = (long) numofFilesToCompact * this.store.getFamily().getBlocksize();
    try {
        do {
            hasMore = scanner.next(cells, scannerContext);
            if (LOG.isDebugEnabled()) {
                now = EnvironmentEdgeManager.currentTime();
            }
            // output to writer:
            Cell lastCleanCell = null;
            long lastCleanCellSeqId = 0;
            for (Cell c : cells) {
                if (cleanSeqId && c.getSequenceId() <= smallestReadPoint) {
                    lastCleanCell = c;
                    lastCleanCellSeqId = c.getSequenceId();
                    CellUtil.setSequenceId(c, 0);
                } else {
                    lastCleanCell = null;
                    lastCleanCellSeqId = 0;
                }
                writer.append(c);
                int len = KeyValueUtil.length(c);
                ++progress.currentCompactedKVs;
                progress.totalCompactedSize += len;
                bytesWrittenProgressForShippedCall += len;
                if (LOG.isDebugEnabled()) {
                    bytesWrittenProgressForLog += len;
                }
                throughputController.control(compactionName, len);
                // check periodically to see if a system stop is requested
                if (closeCheckSizeLimit > 0) {
                    bytesWrittenProgressForCloseCheck += len;
                    if (bytesWrittenProgressForCloseCheck > closeCheckSizeLimit) {
                        bytesWrittenProgressForCloseCheck = 0;
                        if (!store.areWritesEnabled()) {
                            progress.cancel();
                            return false;
                        }
                    }
                }
                if (kvs != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) {
                    if (lastCleanCell != null) {
                        // HBASE-16931, set back sequence id to avoid affecting scan order unexpectedly.
                        // ShipperListener will do a clone of the last cells it refers to, so we need to
                        // set back the sequence id before ShipperListener.beforeShipped
                        CellUtil.setSequenceId(lastCleanCell, lastCleanCellSeqId);
                    }
                    // Clone the cells that are in the writer so that they are freed of references,
                    // if they are holding any.
                    ((ShipperListener) writer).beforeShipped();
                    // The SHARED block references being read for compaction will be kept in the
                    // prevBlocks list (see HFileScannerImpl#prevBlocks). In the scan flow, after each
                    // set of cells is returned to the client, we call shipped(), which can clear this
                    // list. Here we do the same thing during the compaction: after every N cells
                    // written (with a collective size of 'shippedCallSizeLimit') we call shipped(),
                    // which may clear the prevBlocks list.
                    kvs.shipped();
                    bytesWrittenProgressForShippedCall = 0;
                }
            }
            if (lastCleanCell != null) {
                // HBASE-16931, set back sequence id to avoid affecting scan order unexpectedly
                CellUtil.setSequenceId(lastCleanCell, lastCleanCellSeqId);
            }
            // logging at DEBUG level
            if (LOG.isDebugEnabled()) {
                if ((now - lastMillis) >= COMPACTION_PROGRESS_LOG_INTERVAL) {
                    LOG.debug("Compaction progress: " + compactionName + " " + progress + String.format(", rate=%.2f kB/sec", (bytesWrittenProgressForLog / 1024.0) / ((now - lastMillis) / 1000.0)) + ", throughputController is " + throughputController);
                    lastMillis = now;
                    bytesWrittenProgressForLog = 0;
                }
            }
            cells.clear();
        } while (hasMore);
    } catch (InterruptedException e) {
        progress.cancel();
        throw new InterruptedIOException("Interrupted while control throughput of compacting " + compactionName);
    } finally {
        throughputController.finish(compactionName);
    }
    progress.complete();
    return true;
}
Also used : InterruptedIOException(java.io.InterruptedIOException) ArrayList(java.util.ArrayList) KeyValueScanner(org.apache.hadoop.hbase.regionserver.KeyValueScanner) Cell(org.apache.hadoop.hbase.Cell) ScannerContext(org.apache.hadoop.hbase.regionserver.ScannerContext) ShipperListener(org.apache.hadoop.hbase.regionserver.ShipperListener)
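The catch block above throws a message-only InterruptedIOException and discards the original exception, which loses the interrupting stack trace. Here is a sketch of a throttled write loop that attaches the cause instead; the Throttle interface is a hypothetical stand-in for HBase's ThroughputController, not its real API.

import java.io.InterruptedIOException;

public class ThrottledCopy {

    /** Hypothetical stand-in for a throughput controller. */
    interface Throttle {
        void control(String name, long bytes) throws InterruptedException;
    }

    /**
     * Writes chunks under a throttle; an interrupt during throttling is
     * surfaced as an InterruptedIOException with the cause attached.
     */
    static long copy(byte[][] chunks, Throttle throttle) throws InterruptedIOException {
        long written = 0;
        try {
            for (byte[] chunk : chunks) {
                // ... write the chunk to the sink here ...
                written += chunk.length;
                throttle.control("copy", chunk.length);
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            InterruptedIOException iioe =
                new InterruptedIOException("Interrupted after writing " + written + " bytes");
            iioe.initCause(e);
            throw iioe;
        }
        return written;
    }
}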

Example 58 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class RegionReplicaFlushHandler method triggerFlushInPrimaryRegion.

void triggerFlushInPrimaryRegion(final HRegion region) throws IOException, RuntimeException {
    long pause = connection.getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int maxAttempts = getRetriesCount(connection.getConfiguration());
    RetryCounter counter = new RetryCounterFactory(maxAttempts, (int) pause).create();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Attempting to do an RPC to the primary region replica " + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + " of region " + region.getRegionInfo().getEncodedName() + " to trigger a flush");
    }
    while (!region.isClosing() && !region.isClosed() && !server.isAborted() && !server.isStopped()) {
        FlushRegionCallable flushCallable = new FlushRegionCallable(connection, rpcControllerFactory, RegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()), true);
        // TODO: flushRegion() is a blocking call waiting for the flush to complete. Ideally we
        // do not have to wait for the whole flush here, just initiate it.
        FlushRegionResponse response = null;
        try {
            response = rpcRetryingCallerFactory.<FlushRegionResponse>newCaller().callWithRetries(flushCallable, this.operationTimeout);
        } catch (IOException ex) {
            if (ex instanceof TableNotFoundException || connection.isTableDisabled(region.getRegionInfo().getTable())) {
                return;
            }
            throw ex;
        }
        if (response.getFlushed()) {
            // The flush was triggered; reads stay blocked until we observe a complete
            // flush cycle or the replay of a region open event
            if (LOG.isDebugEnabled()) {
                LOG.debug("Successfully triggered a flush of primary region replica " + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and blocking reads until observing a full flush cycle");
            }
            break;
        } else {
            if (response.hasWroteFlushWalMarker()) {
                if (response.getWroteFlushWalMarker()) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary " + "region replica " + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and " + "blocking reads until observing a flush marker");
                    }
                    break;
                } else {
                    // The primary was not able to flush; it may be closing or already flushing.
                    // Retry the flush again after some sleep.
                    if (!counter.shouldRetry()) {
                        throw new IOException("Cannot cause primary to flush or drop a wal marker after " + "retries. Failing opening of this region replica " + region.getRegionInfo().getEncodedName());
                    }
                }
            } else {
                // nothing to do. Are we dealing with an old server?
                LOG.warn("Was not able to trigger a flush from primary region due to old server version? " + "Continuing to open the secondary region replica: " + region.getRegionInfo().getEncodedName());
                region.setReadsEnabled(true);
                break;
            }
        }
        try {
            counter.sleepUntilNextRetry();
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.getMessage());
        }
    }
}
Also used : TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) InterruptedIOException(java.io.InterruptedIOException) RetryCounterFactory(org.apache.hadoop.hbase.util.RetryCounterFactory) RetryCounter(org.apache.hadoop.hbase.util.RetryCounter) FlushRegionCallable(org.apache.hadoop.hbase.client.FlushRegionCallable) FlushRegionResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse) IOException(java.io.IOException)
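Example 58 drives its retries with HBase's RetryCounter, whose sleepUntilNextRetry() can itself be interrupted. Below is a reduced sketch of the same shape using only JDK types; the names and the fixed-pause policy are assumptions for illustration, not HBase's actual retry schedule.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.Callable;

public class RetryLoop {

    /**
     * Retries an action up to maxAttempts times with a fixed pause between
     * attempts, converting an interrupt during the pause into an
     * InterruptedIOException.
     */
    static <T> T callWithRetries(Callable<T> action, int maxAttempts, long pauseMillis)
            throws IOException {
        IOException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return action.call();
            } catch (IOException e) {
                last = e;  // remember the failure and fall through to the pause
            } catch (Exception e) {
                throw new IOException(e);
            }
            try {
                Thread.sleep(pauseMillis);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                InterruptedIOException iioe =
                    new InterruptedIOException("Interrupted between retry attempts");
                iioe.initCause(e);
                throw iioe;
            }
        }
        throw last != null ? last : new IOException("No attempts made");
    }
}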

Example 59 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class HTable method existsAll.

/**
   * {@inheritDoc}
   */
@Override
public boolean[] existsAll(final List<Get> gets) throws IOException {
    if (gets.isEmpty())
        return new boolean[] {};
    if (gets.size() == 1)
        return new boolean[] { exists(gets.get(0)) };
    ArrayList<Get> exists = new ArrayList<>(gets.size());
    for (Get g : gets) {
        Get ge = new Get(g);
        ge.setCheckExistenceOnly(true);
        exists.add(ge);
    }
    Object[] r1 = new Object[exists.size()];
    try {
        batch(exists, r1, readRpcTimeout);
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
    // Translate the batched Results into a boolean array.
    boolean[] results = new boolean[r1.length];
    int i = 0;
    for (Object o : r1) {
        // batch ensures if there is a failure we get an exception instead
        results[i++] = ((Result) o).getExists();
    }
    return results;
}
Also used : InterruptedIOException(java.io.InterruptedIOException) ArrayList(java.util.ArrayList)
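existsAll uses the compact one-liner "throw (InterruptedIOException) new InterruptedIOException().initCause(e);". The cast is needed because Throwable.initCause returns Throwable rather than the concrete type. A hypothetical helper (not part of HBase) that hides that cast:

import java.io.InterruptedIOException;

public final class Interrupts {

    private Interrupts() {
        // static utility, no instances
    }

    /**
     * Wraps an InterruptedException in an InterruptedIOException,
     * preserving both the message and the cause.
     */
    public static InterruptedIOException asInterruptedIOException(InterruptedException e) {
        InterruptedIOException iioe = new InterruptedIOException(e.getMessage());
        iioe.initCause(e);
        return iioe;
    }
}

A caller would then write: throw Interrupts.asInterruptedIOException(e);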

Example 60 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class HBaseAdmin method execProcedure.

@Override
public void execProcedure(String signature, String instance, Map<String, String> props) throws IOException {
    ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
    builder.setSignature(signature).setInstance(instance);
    for (Entry<String, String> entry : props.entrySet()) {
        NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey()).setValue(entry.getValue()).build();
        builder.addConfiguration(pair);
    }
    final ExecProcedureRequest request = ExecProcedureRequest.newBuilder().setProcedure(builder.build()).build();
    // run the procedure on the master
    ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(getConnection(), getRpcControllerFactory()) {

        @Override
        protected ExecProcedureResponse rpcCall() throws Exception {
            return master.execProcedure(getRpcController(), request);
        }
    });
    long start = EnvironmentEdgeManager.currentTime();
    long max = response.getExpectedTimeout();
    long maxPauseTime = max / this.numRetries;
    int tries = 0;
    LOG.debug("Waiting a max of " + max + " ms for procedure '" + signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)");
    boolean done = false;
    while (tries == 0 || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
        try {
            // sleep a backoff <= pauseTime amount
            long sleep = getPauseTime(tries++);
            sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
            LOG.debug("(#" + tries + ") Sleeping: " + sleep + "ms while waiting for procedure completion.");
            Thread.sleep(sleep);
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
        }
        LOG.debug("Getting current status of procedure from master...");
        done = isProcedureFinished(signature, instance, props);
    }
    if (!done) {
        throw new IOException("Procedure '" + signature + " : " + instance + "' wasn't completed in expectedTime:" + max + " ms");
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) TimeoutIOException(org.apache.hadoop.hbase.exceptions.TimeoutIOException) ExecProcedureResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse) RestoreSnapshotException(org.apache.hadoop.hbase.snapshot.RestoreSnapshotException) SnapshotCreationException(org.apache.hadoop.hbase.snapshot.SnapshotCreationException) ZooKeeperConnectionException(org.apache.hadoop.hbase.ZooKeeperConnectionException) ExecutionException(java.util.concurrent.ExecutionException) TableNotDisabledException(org.apache.hadoop.hbase.TableNotDisabledException) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) HBaseSnapshotException(org.apache.hadoop.hbase.snapshot.HBaseSnapshotException) TimeoutException(java.util.concurrent.TimeoutException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) UnknownRegionException(org.apache.hadoop.hbase.UnknownRegionException) FailedLogCloseException(org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException) MasterNotRunningException(org.apache.hadoop.hbase.MasterNotRunningException) NamespaceNotFoundException(org.apache.hadoop.hbase.NamespaceNotFoundException) TableExistsException(org.apache.hadoop.hbase.TableExistsException) KeeperException(org.apache.zookeeper.KeeperException) UnknownSnapshotException(org.apache.hadoop.hbase.snapshot.UnknownSnapshotException) RemoteException(org.apache.hadoop.ipc.RemoteException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) NameStringPair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair) ProcedureDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription) ExecProcedureRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest)
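execProcedure caps each backoff pause at max / numRetries so the total wait stays within the procedure's expected timeout. Here is a sketch of that capped-backoff sleep; the multiplier table below is an assumption for illustration, not HBase's actual schedule (which lives in its getPauseTime/HConstants internals).

import java.io.IOException;
import java.io.InterruptedIOException;

public class CappedBackoff {

    // Illustrative pause schedule, loosely modeled on exponential backoff;
    // these multipliers are assumptions, not the HBase values.
    private static final long[] BACKOFF_MULTIPLIERS = {1, 2, 3, 5, 10, 20, 40, 100};

    /** Returns base * multiplier for the given retry, saturating at the last multiplier. */
    static long pauseTime(long baseMillis, int tries) {
        int idx = Math.min(tries, BACKOFF_MULTIPLIERS.length - 1);
        return baseMillis * BACKOFF_MULTIPLIERS[idx];
    }

    /** Sleeps for the backoff pause, capped at maxPauseMillis, converting interrupts. */
    static void backoffSleep(long baseMillis, int tries, long maxPauseMillis) throws IOException {
        long sleep = Math.min(pauseTime(baseMillis, tries), maxPauseMillis);
        try {
            Thread.sleep(sleep);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw (InterruptedIOException)
                new InterruptedIOException("Interrupted during backoff").initCause(e);
        }
    }
}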

Aggregations

InterruptedIOException (java.io.InterruptedIOException) 286
IOException (java.io.IOException) 195
Test (org.junit.Test) 40
Socket (java.net.Socket) 28
ArrayList (java.util.ArrayList) 27
InputStream (java.io.InputStream) 23
ExecutionException (java.util.concurrent.ExecutionException) 23
ConnectException (java.net.ConnectException) 22
InetSocketAddress (java.net.InetSocketAddress) 21
ByteBuffer (java.nio.ByteBuffer) 21
Path (org.apache.hadoop.fs.Path) 20
NoRouteToHostException (java.net.NoRouteToHostException) 19
EOFException (java.io.EOFException) 17
OutputStream (java.io.OutputStream) 17
SocketTimeoutException (java.net.SocketTimeoutException) 17
ServletException (javax.servlet.ServletException) 17
CountDownLatch (java.util.concurrent.CountDownLatch) 16
SocketException (java.net.SocketException) 15
HttpServletRequest (javax.servlet.http.HttpServletRequest) 15
HttpServletResponse (javax.servlet.http.HttpServletResponse) 15