Use of org.apache.hadoop.hbase.client.Connection in project hbase by Apache.
The class IntegrationTestDDLMasterFailover, method verifyNamespaces.
protected void verifyNamespaces() throws IOException {
  Connection connection = getConnection();
  Admin admin = connection.getAdmin();
  // iterating concurrent map
  for (String nsName : namespaceMap.keySet()) {
    try {
      Assert.assertTrue("Namespace: " + nsName + " in namespaceMap does not exist",
        admin.getNamespaceDescriptor(nsName) != null);
    } catch (NamespaceNotFoundException nsnfe) {
      Assert.fail("Namespace: " + nsName + " in namespaceMap does not exist: " + nsnfe.getMessage());
    }
  }
  admin.close();
}
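A minimal standalone sketch of the same existence check outside the test harness: it opens its own Connection from the default configuration and probes a single namespace. This assumes an hbase-site.xml on the classpath pointing at a reachable cluster; the namespace name is a placeholder.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceExistenceCheck {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // try-with-resources closes the Admin and the Connection even if the check fails
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      String nsName = "my_namespace"; // hypothetical namespace name
      try {
        admin.getNamespaceDescriptor(nsName);
        System.out.println("Namespace exists: " + nsName);
      } catch (NamespaceNotFoundException e) {
        System.out.println("Namespace does not exist: " + nsName);
      }
    }
  }
}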
Use of org.apache.hadoop.hbase.client.Connection in project hbase by Apache.
The class RegionsResource, method get.
@GET
@Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response get(@Context final UriInfo uriInfo) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("GET " + uriInfo.getAbsolutePath());
  }
  servlet.getMetrics().incrementRequests(1);
  try {
    TableName tableName = TableName.valueOf(tableResource.getName());
    TableInfoModel model = new TableInfoModel(tableName.getNameAsString());
    Connection connection = ConnectionFactory.createConnection(servlet.getConfiguration());
    @SuppressWarnings("deprecation")
    Map<HRegionInfo, ServerName> regions = MetaTableAccessor.allTableRegions(connection, tableName);
    connection.close();
    for (Map.Entry<HRegionInfo, ServerName> e : regions.entrySet()) {
      HRegionInfo hri = e.getKey();
      ServerName addr = e.getValue();
      model.add(new TableRegionModel(tableName.getNameAsString(), hri.getRegionId(),
        hri.getStartKey(), hri.getEndKey(), addr.getHostAndPort()));
    }
    ResponseBuilder response = Response.ok(model);
    response.cacheControl(cacheControl);
    servlet.getMetrics().incrementSucessfulGetRequests(1);
    return response.build();
  } catch (TableNotFoundException e) {
    servlet.getMetrics().incrementFailedGetRequests(1);
    return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT)
      .entity("Not found" + CRLF).build();
  } catch (IOException e) {
    servlet.getMetrics().incrementFailedGetRequests(1);
    return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT)
      .entity("Unavailable" + CRLF).build();
  }
}
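For comparison, a table's region-to-server mapping can also be obtained through the public client API instead of reading hbase:meta directly. This is a hedged sketch using RegionLocator, not the REST resource's own approach; the table name is a placeholder.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListTableRegions {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("my_table"); // hypothetical table name
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator = connection.getRegionLocator(tableName)) {
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        // each location pairs a region with the server currently hosting it
        System.out.println(location.getRegionInfo().getRegionNameAsString()
            + " -> " + location.getServerName());
      }
    }
  }
}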
Use of org.apache.hadoop.hbase.client.Connection in project hbase by Apache.
The class PartitionedMobCompactor, method compactMobFiles.
/**
 * Compacts the selected small mob files and all the del files.
 * @param request The compaction request.
 * @return The paths of new mob files after compactions.
 * @throws IOException if IO failure is encountered
 */
protected List<Path> compactMobFiles(final PartitionedMobCompactionRequest request) throws IOException {
  Collection<CompactionPartition> partitions = request.compactionPartitions;
  if (partitions == null || partitions.isEmpty()) {
    LOG.info("No partitions of mob files");
    return Collections.emptyList();
  }
  List<Path> paths = new ArrayList<>();
  final Connection c = ConnectionFactory.createConnection(conf);
  final Table table = c.getTable(tableName);
  try {
    Map<CompactionPartitionId, Future<List<Path>>> results = new HashMap<>();
    // compact the mob files by partitions in parallel.
    for (final CompactionPartition partition : partitions) {
      // How to efficiently come up with a list of delFiles for one partition?
      // Search the delPartitions and collect all the delFiles for the partition.
      // One optimization we can do: if there is no del file, we do not need to
      // come up with startKey/endKey.
      List<StoreFile> delFiles = getListOfDelFilesForPartition(partition, request.getDelPartitions());
      results.put(partition.getPartitionId(), pool.submit(new Callable<List<Path>>() {
        @Override
        public List<Path> call() throws Exception {
          LOG.info("Compacting mob files for partition " + partition.getPartitionId());
          return compactMobFilePartition(request, partition, delFiles, c, table);
        }
      }));
    }
    // collect the results of the parallel partition compactions.
    List<CompactionPartitionId> failedPartitions = new ArrayList<>();
    for (Entry<CompactionPartitionId, Future<List<Path>>> result : results.entrySet()) {
      try {
        paths.addAll(result.getValue().get());
      } catch (Exception e) {
        // just log the error
        LOG.error("Failed to compact the partition " + result.getKey(), e);
        failedPartitions.add(result.getKey());
      }
    }
    if (!failedPartitions.isEmpty()) {
      // if any partition fails in the compaction, directly throw an exception.
      throw new IOException("Failed to compact the partitions " + failedPartitions);
    }
  } finally {
    try {
      table.close();
    } catch (IOException e) {
      LOG.error("Failed to close the Table", e);
    }
  }
  return paths;
}
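The submit-then-collect pattern used above, reduced to a JDK-only sketch so the control flow is easier to see: one Callable per partition is submitted to a pool, results are collected from the Futures, and failures are remembered and reported only after every result has been examined. The partition names and returned paths are made up for illustration.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitThenCollect {
  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    Map<String, Future<List<String>>> results = new HashMap<>();
    // submit one task per "partition"; each task returns the paths it produced
    for (String partition : Arrays.asList("p0", "p1", "p2")) {
      Callable<List<String>> task = () -> Arrays.asList(partition + "/newfile");
      results.put(partition, pool.submit(task));
    }
    List<String> paths = new ArrayList<>();
    List<String> failedPartitions = new ArrayList<>();
    for (Map.Entry<String, Future<List<String>>> result : results.entrySet()) {
      try {
        paths.addAll(result.getValue().get()); // blocks until this task finishes
      } catch (Exception e) {
        failedPartitions.add(result.getKey()); // remember the failure, keep collecting
      }
    }
    pool.shutdown();
    if (!failedPartitions.isEmpty()) {
      throw new IllegalStateException("Failed to compact the partitions " + failedPartitions);
    }
    System.out.println("New files: " + paths);
  }
}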
Use of org.apache.hadoop.hbase.client.Connection in project hbase by Apache.
The class ExpiredMobFileCleaner, method run.
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", justification = "Intentional")
public int run(String[] args) throws Exception {
  if (args.length != 2) {
    printUsage();
    return 1;
  }
  String tableName = args[0];
  String familyName = args[1];
  TableName tn = TableName.valueOf(tableName);
  HBaseAdmin.available(getConf());
  Connection connection = ConnectionFactory.createConnection(getConf());
  Admin admin = connection.getAdmin();
  try {
    HTableDescriptor htd = admin.getTableDescriptor(tn);
    HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
    if (family == null || !family.isMobEnabled()) {
      throw new IOException("Column family " + familyName + " is not a MOB column family");
    }
    if (family.getMinVersions() > 0) {
      throw new IOException("The minVersions of the column family is not 0, could not be handled by this cleaner");
    }
    cleanExpiredMobFiles(tableName, family);
    return 0;
  } finally {
    try {
      admin.close();
    } catch (IOException e) {
      LOG.error("Failed to close the HBaseAdmin.", e);
    }
    try {
      connection.close();
    } catch (IOException e) {
      LOG.error("Failed to close the connection.", e);
    }
  }
}
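A small sketch of how the cleaner might be invoked programmatically, assuming ExpiredMobFileCleaner is run as a Hadoop Tool via ToolRunner (as the run(String[]) signature above suggests); the table and column family names are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner;
import org.apache.hadoop.util.ToolRunner;

public class RunExpiredMobFileCleaner {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // arguments are <table name> <mob column family name>; both names here are placeholders
    int exitCode = ToolRunner.run(conf, new ExpiredMobFileCleaner(),
        new String[] { "my_table", "mob_cf" });
    System.exit(exitCode);
  }
}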
Use of org.apache.hadoop.hbase.client.Connection in project hbase by Apache.
The class RestoreSnapshotProcedure, method updateMETA.
/**
 * Apply changes to hbase:meta
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void updateMETA(final MasterProcedureEnv env) throws IOException {
  try {
    Connection conn = env.getMasterServices().getConnection();
    // 1. Forces all the RegionStates to be offline
    //
    // The AssignmentManager keeps all the region states around
    // with no possibility to remove them, until the master is restarted.
    // This means that a region marked as SPLIT before the restore will never be assigned again.
    // To avoid keeping all those states around, all the regions are switched to the OFFLINE state,
    // which is the same state the regions would be in after a table delete.
    forceRegionsOffline(env, regionsToAdd);
    forceRegionsOffline(env, regionsToRestore);
    forceRegionsOffline(env, regionsToRemove);
    getMonitorStatus().setStatus("Preparing to restore each region");
    // 2. Remove from hbase:meta the regions that are not correct after the restore.
    if (regionsToRemove != null) {
      MetaTableAccessor.deleteRegions(conn, regionsToRemove);
    }
    // 3. Add the new set of regions, as described by the region info found in the snapshot folder.
    if (regionsToAdd != null) {
      MetaTableAccessor.addRegionsToMeta(conn, regionsToAdd, modifiedHTableDescriptor.getRegionReplication());
    }
    if (regionsToRestore != null) {
      MetaTableAccessor.overwriteRegions(conn, regionsToRestore, modifiedHTableDescriptor.getRegionReplication());
    }
    RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
      new RestoreSnapshotHelper.RestoreMetaChanges(modifiedHTableDescriptor, parentsToChildrenPairMap);
    metaChanges.updateMetaParentRegions(conn, regionsToAdd);
    // At this point the restore is complete.
    LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
      " on table=" + getTableName() + " completed!");
  } catch (IOException e) {
    final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
    String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
      " failed in meta update. Try re-running the restore command.";
    LOG.error(msg, e);
    monitorException.receive(new ForeignException(env.getMasterServices().getServerName().toString(), e));
    throw new IOException(msg, e);
  }
  monitorStatus.markComplete("Restore snapshot '" + snapshot.getName() + "'!");
  MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
  metricsSnapshot.addSnapshotRestore(monitorStatus.getCompletionTimestamp() - monitorStatus.getStartTime());
}