use of org.apache.hadoop.hbase.util.Bytes in project hbase by apache.
the class HTableDescriptor method addCoprocessorToMap.
/**
 * Add coprocessor to values Map
 * @param specStr The Coprocessor specification all in one String, formatted so that it matches
 * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
 * @return Returns <code>this</code>
 */
private HTableDescriptor addCoprocessorToMap(final String specStr) {
  if (specStr == null)
    return this;
  // generate a coprocessor key
  int maxCoprocessorNumber = 0;
  Matcher keyMatcher;
  for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
    keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
    if (!keyMatcher.matches()) {
      continue;
    }
    maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
  }
  maxCoprocessorNumber++;
  String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
  this.values.put(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
  return this;
}
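addCoprocessorToMap is private; it appears to be the helper behind the public addCoprocessor(...) overloads, which build the spec string and store it under a generated coprocessor$N key. A minimal sketch of observing those keys through the public API, using a hypothetical coprocessor class name (the class is not loaded when the attribute is recorded):

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class CoprocessorKeyDemo {
  public static void main(String[] args) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
    // "org.example.DemoRegionObserver" is a hypothetical class name; only the
    // spec string is stored in the descriptor's values map at this point.
    htd.addCoprocessor("org.example.DemoRegionObserver");
    for (Map.Entry<Bytes, Bytes> e : htd.getValues().entrySet()) {
      String key = Bytes.toString(e.getKey().get());
      if (key.startsWith("coprocessor$")) {
        // prints the generated key and the stored coprocessor specification
        System.out.println(key + " -> " + Bytes.toString(e.getValue().get()));
      }
    }
  }
}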
use of org.apache.hadoop.hbase.util.Bytes in project hbase by apache.
the class CompoundConfiguration method addBytesMap.
/**
 * Add Bytes map to config list. This map is generally
 * created by HTableDescriptor or HColumnDescriptor, but can be used
 * more generally. The added configuration overrides the previous ones if
 * there are name collisions.
 *
 * @param map
 *          Bytes map
 * @return this, for builder pattern
 */
public CompoundConfiguration addBytesMap(final Map<Bytes, Bytes> map) {
  freezeMutableConf();
  // put new map at the front of the list (top priority)
  this.configs.add(0, new ImmutableConfigMap() {

    private final Map<Bytes, Bytes> m = map;

    @Override
    public Iterator<Map.Entry<String, String>> iterator() {
      Map<String, String> ret = new HashMap<>();
      for (Map.Entry<Bytes, Bytes> entry : map.entrySet()) {
        String key = Bytes.toString(entry.getKey().get());
        String val = entry.getValue() == null ? null : Bytes.toString(entry.getValue().get());
        ret.put(key, val);
      }
      return ret.entrySet().iterator();
    }

    @Override
    public String get(String key) {
      Bytes ibw = new Bytes(Bytes.toBytes(key));
      if (!m.containsKey(ibw))
        return null;
      Bytes value = m.get(ibw);
      if (value == null || value.get() == null)
        return null;
      return Bytes.toString(value.get());
    }

    @Override
    public String getRaw(String key) {
      return get(key);
    }

    @Override
    public Class<?> getClassByName(String name) throws ClassNotFoundException {
      return null;
    }

    @Override
    public int size() {
      return m.size();
    }

    @Override
    public String toString() {
      return m.toString();
    }
  });
  return this;
}
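A minimal usage sketch, assuming server-side code (CompoundConfiguration is an internal class): the descriptor's Bytes/Bytes values map is layered on top of the cluster configuration, so table-level values win on key collisions. The configuration key below is only an illustrative choice.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class CompoundConfigurationDemo {
  public static void main(String[] args) {
    Configuration cluster = HBaseConfiguration.create();
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
    // table-level override stored in the descriptor's Bytes/Bytes values map
    htd.setValue("hbase.hstore.blockingStoreFiles", "20");
    Configuration merged = new CompoundConfiguration()
        .add(cluster)                  // lower priority
        .addBytesMap(htd.getValues()); // added at index 0, so highest priority
    // the table-level value shadows whatever the cluster configuration holds
    System.out.println(merged.get("hbase.hstore.blockingStoreFiles"));
  }
}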
use of org.apache.hadoop.hbase.util.Bytes in project hbase by apache.
the class Constraints method getConstraints.
/**
 * Get the constraints stored in the table descriptor
 *
 * @param desc
 *          To read from
 * @param classloader
 *          To use when loading classes. If a special classloader is used on a
 *          region, for instance, then that should be the classloader used to
 *          load the constraints. This could also apply to a unit-testing
 *          situation, where we want to ensure that the class is or is not reloaded.
 * @return List of configured {@link Constraint Constraints}
 * @throws IOException
 *           if any part of reading/arguments fails
 */
static List<? extends Constraint> getConstraints(HTableDescriptor desc, ClassLoader classloader) throws IOException {
  List<Constraint> constraints = new ArrayList<>();
  // loop through all the key, values looking for constraints
  for (Map.Entry<Bytes, Bytes> e : desc.getValues().entrySet()) {
    // read out the constraint
    String key = Bytes.toString(e.getKey().get()).trim();
    String[] className = CONSTRAINT_HTD_ATTR_KEY_PATTERN.split(key);
    if (className.length == 2) {
      key = className[1];
      if (LOG.isDebugEnabled()) {
        LOG.debug("Loading constraint:" + key);
      }
      // read in the rest of the constraint
      Configuration conf;
      try {
        conf = readConfiguration(e.getValue().get());
      } catch (IOException e1) {
        // log that we don't have a valid configuration stored, and move on.
        LOG.warn("Corrupted configuration found for key:" + key + ", skipping it.");
        continue;
      }
      // if it is not enabled, skip it
      if (!conf.getBoolean(ENABLED_KEY, false)) {
        if (LOG.isDebugEnabled())
          LOG.debug("Constraint: " + key + " is DISABLED - skipping it");
        // go to the next constraint
        continue;
      }
      try {
        // add the constraint, now that we expect it to be valid.
        Class<? extends Constraint> clazz = classloader.loadClass(key).asSubclass(Constraint.class);
        Constraint constraint = clazz.newInstance();
        constraint.setConf(conf);
        constraints.add(constraint);
      } catch (ClassNotFoundException e1) {
        throw new IOException(e1);
      } catch (InstantiationException e1) {
        throw new IOException(e1);
      } catch (IllegalAccessException e1) {
        throw new IOException(e1);
      }
    }
  }
  // sort them, based on the priorities
  Collections.sort(constraints, constraintComparator);
  return constraints;
}
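getConstraints is package-private; it is the path by which the server side rebuilds Constraint instances from the descriptor's attribute map. A minimal sketch of the public side that produces those attributes, assuming a hypothetical constraint implementation named NonEmptyPutConstraint:

import java.io.IOException;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.constraint.BaseConstraint;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.constraint.Constraints;

public class ConstraintDemo {

  /** Hypothetical constraint: reject Puts that carry no cells. */
  public static class NonEmptyPutConstraint extends BaseConstraint {
    @Override
    public void check(Put p) throws ConstraintException {
      if (p.isEmpty()) {
        throw new ConstraintException("Put must contain at least one cell");
      }
    }
  }

  public static void main(String[] args) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
    // records the constraint class and its configuration in the descriptor's
    // values map; getConstraints(desc, classloader) later reads those entries back
    Constraints.add(desc, NonEmptyPutConstraint.class);
  }
}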
use of org.apache.hadoop.hbase.util.Bytes in project hbase by apache.
the class Constraints method remove.
/**
 * Remove all {@link Constraint Constraints} that have been added to the table
 * and turn off the constraint processing.
 * <p>
 * All {@link Configuration Configurations} and their associated
 * {@link Constraint} are removed.
 *
 * @param desc
 *          {@link HTableDescriptor} to remove {@link Constraint Constraints}
 *          from.
 */
public static void remove(HTableDescriptor desc) {
  // disable constraints
  disable(desc);
  // remove all the constraint settings
  List<Bytes> keys = new ArrayList<>();
  // loop through all the key, values looking for constraints
  for (Map.Entry<Bytes, Bytes> e : desc.getValues().entrySet()) {
    String key = Bytes.toString((e.getKey().get()));
    String[] className = CONSTRAINT_HTD_ATTR_KEY_PATTERN.split(key);
    if (className.length == 2) {
      keys.add(e.getKey());
    }
  }
  // now remove all the keys we found
  for (Bytes key : keys) {
    desc.remove(key);
  }
}
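A short sketch of the removal flow, reusing the hypothetical NonEmptyPutConstraint from the getConstraints sketch above: add a constraint, then strip all constraint entries and turn constraint processing off.

import java.io.IOException;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.constraint.Constraints;

public class ConstraintRemovalDemo {
  public static void main(String[] args) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
    // hypothetical constraint class from the previous sketch
    Constraints.add(desc, ConstraintDemo.NonEmptyPutConstraint.class);
    System.out.println("descriptor values before remove: " + desc.getValues().size());
    // disables constraint processing and deletes every constraint key from the values map
    Constraints.remove(desc);
    System.out.println("descriptor values after remove: " + desc.getValues().size());
  }
}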
use of org.apache.hadoop.hbase.util.Bytes in project hbase by apache.
the class ProtobufUtil method convertToTableSchema.
/**
 * Converts an HTableDescriptor to TableSchema
 * @param htd the HTableDescriptor
 * @return the pb TableSchema instance built from the given {@link HTableDescriptor}
 */
public static TableSchema convertToTableSchema(HTableDescriptor htd) {
  TableSchema.Builder builder = TableSchema.newBuilder();
  builder.setTableName(toProtoTableName(htd.getTableName()));
  for (Map.Entry<Bytes, Bytes> e : htd.getValues().entrySet()) {
    BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
    aBuilder.setFirst(UnsafeByteOperations.unsafeWrap(e.getKey().get()));
    aBuilder.setSecond(UnsafeByteOperations.unsafeWrap(e.getValue().get()));
    builder.addAttributes(aBuilder.build());
  }
  for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
    builder.addColumnFamilies(convertToColumnFamilySchema(hcd));
  }
  for (Map.Entry<String, String> e : htd.getConfiguration().entrySet()) {
    NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
    aBuilder.setName(e.getKey());
    aBuilder.setValue(e.getValue());
    builder.addConfiguration(aBuilder.build());
  }
  return builder.build();
}