Example 21 with Connection

Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

From the class IntegrationTestDDLMasterFailover, method verifyNamespaces.

protected void verifyNamespaces() throws IOException {
    Connection connection = getConnection();
    Admin admin = connection.getAdmin();
    // namespaceMap is a concurrent map, so iterating it while other workers mutate it is safe
    for (String nsName : namespaceMap.keySet()) {
        try {
            Assert.assertTrue("Namespace: " + nsName + " in namespaceMap does not exist", admin.getNamespaceDescriptor(nsName) != null);
        } catch (NamespaceNotFoundException nsnfe) {
            Assert.fail("Namespace: " + nsName + " in namespaceMap does not exist: " + nsnfe.getMessage());
        }
    }
    admin.close();
}
Also used : Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin)
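
For readers new to the API, the lifecycle conventions behind this example are worth spelling out: a Connection is heavyweight and thread-safe (create one and share it), while Admin is lightweight (obtain per use and close promptly). A minimal, self-contained sketch of that lifecycle, assuming HBase configuration on the classpath (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespaces {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Connection is heavyweight and thread-safe: create once, share, close at shutdown.
        // Admin is lightweight: obtain per use and close promptly.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                System.out.println(ns.getName());
            }
        }
    }
}

With try-with-resources, both handles are released even if the listing throws, which the explicit admin.close() in the test above does not guarantee when an assertion fails.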

Example 22 with Connection

Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

From the class RegionsResource, method get.

@GET
@Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response get(@Context final UriInfo uriInfo) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("GET " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    try {
        TableName tableName = TableName.valueOf(tableResource.getName());
        TableInfoModel model = new TableInfoModel(tableName.getNameAsString());
        Connection connection = ConnectionFactory.createConnection(servlet.getConfiguration());
        @SuppressWarnings("deprecation") Map<HRegionInfo, ServerName> regions = MetaTableAccessor.allTableRegions(connection, tableName);
        connection.close();
        for (Map.Entry<HRegionInfo, ServerName> e : regions.entrySet()) {
            HRegionInfo hri = e.getKey();
            ServerName addr = e.getValue();
            model.add(new TableRegionModel(tableName.getNameAsString(), hri.getRegionId(), hri.getStartKey(), hri.getEndKey(), addr.getHostAndPort()));
        }
        ResponseBuilder response = Response.ok(model);
        response.cacheControl(cacheControl);
        servlet.getMetrics().incrementSucessfulGetRequests(1);
        return response.build();
    } catch (TableNotFoundException e) {
        servlet.getMetrics().incrementFailedGetRequests(1);
        return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT).entity("Not found" + CRLF).build();
    } catch (IOException e) {
        servlet.getMetrics().incrementFailedGetRequests(1);
        return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT).entity("Unavailable" + CRLF).build();
    }
}
Also used : Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) TableInfoModel(org.apache.hadoop.hbase.rest.model.TableInfoModel) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) ServerName(org.apache.hadoop.hbase.ServerName) TableRegionModel(org.apache.hadoop.hbase.rest.model.TableRegionModel) ResponseBuilder(javax.ws.rs.core.Response.ResponseBuilder) Map(java.util.Map) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET)
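
One subtlety above: connection.close() runs before the regions map is iterated, which is safe only because MetaTableAccessor.allTableRegions returns a fully materialized Map rather than a lazy view backed by the connection. A try-with-resources sketch of the same lookup (same calls, only the lifetime made explicit):

TableName tableName = TableName.valueOf(tableResource.getName());
Map<HRegionInfo, ServerName> regions;
try (Connection connection = ConnectionFactory.createConnection(servlet.getConfiguration())) {
    // The returned map is fully materialized, so it stays valid after close().
    regions = MetaTableAccessor.allTableRegions(connection, tableName);
}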

Example 23 with Connection

Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

From the class PartitionedMobCompactor, method compactMobFiles.

/**
   * Compacts the selected small mob files and all the del files.
   * @param request The compaction request.
   * @return The paths of new mob files after compactions.
   * @throws IOException if IO failure is encountered
   */
protected List<Path> compactMobFiles(final PartitionedMobCompactionRequest request) throws IOException {
    Collection<CompactionPartition> partitions = request.compactionPartitions;
    if (partitions == null || partitions.isEmpty()) {
        LOG.info("No partitions of mob files");
        return Collections.emptyList();
    }
    List<Path> paths = new ArrayList<>();
    final Connection c = ConnectionFactory.createConnection(conf);
    final Table table = c.getTable(tableName);
    try {
        Map<CompactionPartitionId, Future<List<Path>>> results = new HashMap<>();
        // compact the mob files by partitions in parallel.
        for (final CompactionPartition partition : partitions) {
            // How to efficiently come up with the list of delFiles for one partition?
            // Search the delPartitions and collect all the delFiles for the partition.
            // One possible optimization: if there are no del files, we do not need to
            // come up with a startKey/endKey at all.
            List<StoreFile> delFiles = getListOfDelFilesForPartition(partition, request.getDelPartitions());
            results.put(partition.getPartitionId(), pool.submit(new Callable<List<Path>>() {

                @Override
                public List<Path> call() throws Exception {
                    LOG.info("Compacting mob files for partition " + partition.getPartitionId());
                    return compactMobFilePartition(request, partition, delFiles, c, table);
                }
            }));
        }
        // wait for each partition's compaction to finish and collect the results.
        List<CompactionPartitionId> failedPartitions = new ArrayList<>();
        for (Entry<CompactionPartitionId, Future<List<Path>>> result : results.entrySet()) {
            try {
                paths.addAll(result.getValue().get());
            } catch (Exception e) {
                // just log the error
                LOG.error("Failed to compact the partition " + result.getKey(), e);
                failedPartitions.add(result.getKey());
            }
        }
        if (!failedPartitions.isEmpty()) {
            // if any partition fails in the compaction, directly throw an exception.
            throw new IOException("Failed to compact the partitions " + failedPartitions);
        }
    } finally {
        try {
            table.close();
        } catch (IOException e) {
            LOG.error("Failed to close the Table", e);
        }
        // also release the Connection created at the top of this method
        try {
            c.close();
        } catch (IOException e) {
            LOG.error("Failed to close the Connection", e);
        }
    }
    return paths;
}
Also used : Path(org.apache.hadoop.fs.Path) CompactionPartition(org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition) Table(org.apache.hadoop.hbase.client.Table) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Connection(org.apache.hadoop.hbase.client.Connection) CompactionPartitionId(org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartitionId) IOException(java.io.IOException) Callable(java.util.concurrent.Callable) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) Future(java.util.concurrent.Future)
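
The submit-then-drain pattern above (one Callable per partition, then iterate the Futures, recording failures and failing once at the end) is plain java.util.concurrent; a self-contained sketch with hypothetical task bodies:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FanOutFanIn {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<String>> futures = new ArrayList<>();
        // Fan out: one task per "partition".
        for (int i = 0; i < 8; i++) {
            final int id = i;
            futures.add(pool.submit(() -> "result-" + id));
        }
        // Fan in: drain every Future, recording failures instead of aborting early.
        List<Integer> failed = new ArrayList<>();
        for (int i = 0; i < futures.size(); i++) {
            try {
                System.out.println(futures.get(i).get());
            } catch (ExecutionException e) {
                failed.add(i);
            }
        }
        pool.shutdown();
        // Fail once at the end, like compactMobFiles does with failedPartitions.
        if (!failed.isEmpty()) {
            throw new IllegalStateException("Tasks failed: " + failed);
        }
    }
}

The key design choice, mirrored above, is to drain every Future before throwing, so one bad partition does not hide the results or errors of the others.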

Example 24 with Connection

Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

From the class ExpiredMobFileCleaner, method run.

@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", justification = "Intentional")
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        printUsage();
        return 1;
    }
    String tableName = args[0];
    String familyName = args[1];
    TableName tn = TableName.valueOf(tableName);
    HBaseAdmin.available(getConf());
    Connection connection = ConnectionFactory.createConnection(getConf());
    Admin admin = connection.getAdmin();
    try {
        HTableDescriptor htd = admin.getTableDescriptor(tn);
        HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
        if (family == null || !family.isMobEnabled()) {
            throw new IOException("Column family " + familyName + " is not a MOB column family");
        }
        if (family.getMinVersions() > 0) {
            throw new IOException("The minVersions of the column family is not 0, could not be handled by this cleaner");
        }
        cleanExpiredMobFiles(tableName, family);
        return 0;
    } finally {
        try {
            admin.close();
        } catch (IOException e) {
            LOG.error("Failed to close the HBaseAdmin.", e);
        }
        try {
            connection.close();
        } catch (IOException e) {
            LOG.error("Failed to close the connection.", e);
        }
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) Admin(org.apache.hadoop.hbase.client.Admin) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
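
Because run(String[]) and getConf() follow the Hadoop Tool contract, the cleaner is normally launched through ToolRunner; a minimal launcher sketch (the wrapper class name is illustrative, package names as in the hbase codebase of this era):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner;
import org.apache.hadoop.util.ToolRunner;

public class CleanerLauncher {
    public static void main(String[] args) throws Exception {
        // args[0] = table name, args[1] = MOB-enabled column family name
        int exit = ToolRunner.run(HBaseConfiguration.create(),
                new ExpiredMobFileCleaner(), args);
        System.exit(exit);
    }
}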

Example 25 with Connection

Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

From the class RestoreSnapshotProcedure, method updateMETA.

/**
   * Apply changes to hbase:meta
   * @param env MasterProcedureEnv
   * @throws IOException
   **/
private void updateMETA(final MasterProcedureEnv env) throws IOException {
    try {
        Connection conn = env.getMasterServices().getConnection();
        // 1. Forces all the RegionStates to be offline
        //
        // The AssignmentManager keeps all the region states around
        // with no possibility to remove them, until the master is restarted.
        // This means that a region marked as SPLIT before the restore will never be assigned again.
        // To avoid keeping all those states around, all the regions are switched to the OFFLINE state,
        // which is the same state that the regions will be after a delete table.
        forceRegionsOffline(env, regionsToAdd);
        forceRegionsOffline(env, regionsToRestore);
        forceRegionsOffline(env, regionsToRemove);
        getMonitorStatus().setStatus("Preparing to restore each region");
        // 2. Remove from META the regions whose entries are no longer correct after the restore.
        if (regionsToRemove != null) {
            MetaTableAccessor.deleteRegions(conn, regionsToRemove);
        }
        // 3. Add the snapshot's regions to META, using the region info stored in the snapshot folder.
        if (regionsToAdd != null) {
            MetaTableAccessor.addRegionsToMeta(conn, regionsToAdd, modifiedHTableDescriptor.getRegionReplication());
        }
        if (regionsToRestore != null) {
            MetaTableAccessor.overwriteRegions(conn, regionsToRestore, modifiedHTableDescriptor.getRegionReplication());
        }
        RestoreSnapshotHelper.RestoreMetaChanges metaChanges = new RestoreSnapshotHelper.RestoreMetaChanges(modifiedHTableDescriptor, parentsToChildrenPairMap);
        metaChanges.updateMetaParentRegions(conn, regionsToAdd);
        // At this point the restore is complete.
        LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) + " on table=" + getTableName() + " completed!");
    } catch (IOException e) {
        final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
        String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) + " failed in meta update. Try re-running the restore command.";
        LOG.error(msg, e);
        monitorException.receive(new ForeignException(env.getMasterServices().getServerName().toString(), e));
        throw new IOException(msg, e);
    }
    monitorStatus.markComplete("Restore snapshot '" + snapshot.getName() + "'!");
    MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
    metricsSnapshot.addSnapshotRestore(monitorStatus.getCompletionTimestamp() - monitorStatus.getStartTime());
}
Also used : MetricsSnapshot(org.apache.hadoop.hbase.master.MetricsSnapshot) ForeignException(org.apache.hadoop.hbase.errorhandling.ForeignException) Connection(org.apache.hadoop.hbase.client.Connection) RestoreSnapshotHelper(org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) ForeignExceptionDispatcher(org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher)
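
The catch block above encodes a reusable failure convention: log with context, wrap the cause in a ForeignException and hand it to a dispatcher so watchers are notified, then rethrow so the caller records the failure. A distilled sketch (doMetaUpdate() and serverName are hypothetical stand-ins for the META mutations and the master's server name above):

String serverName = "master-hostname";  // hypothetical; the real code asks MasterServices
try {
    doMetaUpdate();  // hypothetical stand-in for the MetaTableAccessor calls above
} catch (IOException e) {
    String msg = "restore failed in meta update. Try re-running the restore command.";
    LOG.error(msg, e);
    // Notify any watchers of the failure, attributing it to this server...
    new ForeignExceptionDispatcher().receive(new ForeignException(serverName, e));
    // ...then rethrow with context so the caller sees the failure too.
    throw new IOException(msg, e);
}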

Aggregations

Connection (org.apache.hadoop.hbase.client.Connection): 297
Table (org.apache.hadoop.hbase.client.Table): 191
Test (org.junit.Test): 171
IOException (java.io.IOException): 113
TableName (org.apache.hadoop.hbase.TableName): 103
Result (org.apache.hadoop.hbase.client.Result): 101
Admin (org.apache.hadoop.hbase.client.Admin): 86
Scan (org.apache.hadoop.hbase.client.Scan): 79
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 75
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 71
Put (org.apache.hadoop.hbase.client.Put): 68
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 57
Delete (org.apache.hadoop.hbase.client.Delete): 55
Configuration (org.apache.hadoop.conf.Configuration): 53
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 51
Get (org.apache.hadoop.hbase.client.Get): 48
InterruptedIOException (java.io.InterruptedIOException): 45
Cell (org.apache.hadoop.hbase.Cell): 41
CellScanner (org.apache.hadoop.hbase.CellScanner): 34
ArrayList (java.util.ArrayList): 25