Example use of org.apache.hadoop.hbase.HTableDescriptor in the Apache HBase project: the ChangeCompressionAction class, perform method.
@Override
public void perform() throws Exception {
    // Fetch the current schema; a table with no column families has nothing to change.
    HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
    HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();
    if (columnDescriptors == null || columnDescriptors.length == 0) {
        return;
    }
    // Possible compression algorithms. If an algorithm is not supported,
    // modifyTable will fail, so there is no harm.
    Algorithm[] candidates = Algorithm.values();
    // Since not every compression algorithm is supported,
    // let's use the same algorithm for all column families.
    // If an unsupported compression algorithm is chosen, pick a different one.
    // This is to work around the issue that modifyTable() does not throw remote
    // exception. Probing a Compressor locally tells us whether the codec works
    // in this JVM before we commit to it.
    Algorithm chosen = null;
    while (chosen == null) {
        Algorithm candidate = candidates[random.nextInt(candidates.length)];
        try {
            Compressor probe = candidate.getCompressor();
            // call returnCompressor() to release the Compressor
            candidate.returnCompressor(probe);
            chosen = candidate;
        } catch (Throwable t) {
            LOG.info("Performing action: Changing compression algorithms to " + candidate + " is not supported, pick another one");
        }
    }
    LOG.debug("Performing action: Changing compression algorithms on " + tableName.getNameAsString() + " to " + chosen);
    // Randomly apply the codec to either the compaction or the store-file
    // compression setting of each family.
    for (HColumnDescriptor descriptor : columnDescriptors) {
        if (random.nextBoolean()) {
            descriptor.setCompactionCompressionType(chosen);
        } else {
            descriptor.setCompressionType(chosen);
        }
    }
    // Don't try the modify if we're stopping
    if (context.isStopping()) {
        return;
    }
    admin.modifyTable(tableName, tableDescriptor);
}
Example use of org.apache.hadoop.hbase.HTableDescriptor in the Apache HBase project: the SchemaResource class, update method.
/**
 * Applies the column-family definitions in {@code model} to the existing table
 * {@code name}: families already present are modified, new ones are added.
 * The table is disabled for the duration of the change and re-enabled in a
 * finally block.
 *
 * @param name the table whose schema is updated
 * @param model the schema model carrying the column-family attributes to apply
 * @param uriInfo request URI info (unused here; kept for signature parity)
 * @param admin the Admin used for the schema operations
 * @return 403 when the servlet is read-only, 503 when a family operation fails,
 *         200 on success, or the mapped error response for any other exception
 */
private Response update(final TableName name, final TableSchemaModel model, final UriInfo uriInfo, final Admin admin) {
    if (servlet.isReadOnly()) {
        return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Forbidden" + CRLF).build();
    }
    try {
        HTableDescriptor htd = admin.getTableDescriptor(name);
        admin.disableTable(name);
        try {
            for (ColumnSchemaModel family : model.getColumns()) {
                HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
                // Copy every attribute from the model onto the descriptor.
                for (Map.Entry<QName, Object> e : family.getAny().entrySet()) {
                    hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
                }
                if (htd.hasFamily(hcd.getName())) {
                    admin.modifyColumnFamily(name, hcd);
                } else {
                    admin.addColumnFamily(name, hcd);
                }
            }
        } catch (IOException e) {
            return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT).entity("Unavailable" + CRLF).build();
        } finally {
            // Re-enable the exact table we disabled above. Previously this
            // re-resolved the name via tableResource.getName(), which could
            // diverge from the 'name' parameter and enable the wrong table.
            admin.enableTable(name);
        }
        servlet.getMetrics().incrementSucessfulPutRequests(1);
        return Response.ok().build();
    } catch (Exception e) {
        servlet.getMetrics().incrementFailedPutRequests(1);
        return processException(e);
    }
}
Example use of org.apache.hadoop.hbase.HTableDescriptor in the Apache HBase project: the SchemaResource class, replace method.
/**
 * Replaces the full schema of table {@code name} with the one described by
 * {@code model}. If the table exists it is disabled, modified, and re-enabled;
 * otherwise it is created.
 *
 * @param name the table to replace or create
 * @param model the complete schema model (table attributes plus families)
 * @param uriInfo used to build the Location of the created resource
 * @param admin the Admin used for the schema operations
 * @return 403 when read-only, 304 when a concurrent creator won the race,
 *         201 on success, or the mapped error response for any other exception
 */
private Response replace(final TableName name, final TableSchemaModel model, final UriInfo uriInfo, final Admin admin) {
    if (servlet.isReadOnly()) {
        return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Forbidden" + CRLF).build();
    }
    try {
        // Build a fresh descriptor entirely from the model.
        HTableDescriptor htd = new HTableDescriptor(name);
        for (Map.Entry<QName, Object> e : model.getAny().entrySet()) {
            htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
        }
        for (ColumnSchemaModel family : model.getColumns()) {
            HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
            for (Map.Entry<QName, Object> e : family.getAny().entrySet()) {
                hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
            }
            htd.addFamily(hcd);
        }
        if (admin.tableExists(name)) {
            admin.disableTable(name);
            // NOTE(review): if modifyTable throws here the table stays disabled
            // and the exception is reported through processException; preserved
            // as-is to keep the existing error contract.
            admin.modifyTable(name, htd);
            admin.enableTable(name);
            servlet.getMetrics().incrementSucessfulPutRequests(1);
        } else {
            try {
                admin.createTable(htd);
                servlet.getMetrics().incrementSucessfulPutRequests(1);
            } catch (TableExistsException e) {
                // race, someone else created a table with the same name
                return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT).entity("Not modified" + CRLF).build();
            }
        }
        return Response.created(uriInfo.getAbsolutePath()).build();
    } catch (Exception e) {
        servlet.getMetrics().incrementFailedPutRequests(1);
        return processException(e);
    }
}
Example use of org.apache.hadoop.hbase.HTableDescriptor in the Apache HBase project: the TableSchemaModel class, getTableDescriptor method.
/**
 * Builds an {@link HTableDescriptor} from this model: table-level attributes
 * first, then one {@link HColumnDescriptor} per column-family model.
 *
 * @return a table descriptor
 */
@JsonIgnore
public HTableDescriptor getTableDescriptor() {
    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(getName()));
    // Copy table-level attributes onto the descriptor.
    for (Map.Entry<QName, Object> attr : getAny().entrySet()) {
        descriptor.setValue(attr.getKey().getLocalPart(), attr.getValue().toString());
    }
    // Translate each column-family model into a family descriptor.
    for (ColumnSchemaModel family : getColumns()) {
        HColumnDescriptor familyDescriptor = new HColumnDescriptor(family.getName());
        for (Map.Entry<QName, Object> attr : family.getAny().entrySet()) {
            familyDescriptor.setValue(attr.getKey().getLocalPart(), attr.getValue().toString());
        }
        descriptor.addFamily(familyDescriptor);
    }
    return descriptor;
}
Example use of org.apache.hadoop.hbase.HTableDescriptor in the Apache HBase project: the ExpiredMobFileCleaner class, run method.
/**
 * Command-line entry point: cleans expired MOB files for one column family.
 * Expects exactly two arguments: the table name and the family name. The
 * family must be MOB-enabled and have minVersions == 0.
 *
 * @param args [0] table name, [1] column family name
 * @return 0 on success, 1 on bad usage
 * @throws Exception when validation fails or the cleanup itself errors out
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", justification = "Intentional")
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        printUsage();
        return 1;
    }
    String tableName = args[0];
    String familyName = args[1];
    TableName tn = TableName.valueOf(tableName);
    HBaseAdmin.available(getConf());
    Connection connection = ConnectionFactory.createConnection(getConf());
    try {
        // Acquire the Admin inside the try block so the connection is closed
        // even when getAdmin() throws (the previous code leaked it there).
        Admin admin = connection.getAdmin();
        try {
            HTableDescriptor htd = admin.getTableDescriptor(tn);
            HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
            if (family == null || !family.isMobEnabled()) {
                throw new IOException("Column family " + familyName + " is not a MOB column family");
            }
            if (family.getMinVersions() > 0) {
                throw new IOException("The minVersions of the column family is not 0, could not be handled by this cleaner");
            }
            cleanExpiredMobFiles(tableName, family);
            return 0;
        } finally {
            try {
                admin.close();
            } catch (IOException e) {
                LOG.error("Failed to close the HBaseAdmin.", e);
            }
        }
    } finally {
        try {
            connection.close();
        } catch (IOException e) {
            LOG.error("Failed to close the connection.", e);
        }
    }
}
Aggregations