Use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.
From the class TestMultiRowResource, method setUpBeforeClass:
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  conf.setBoolean(RESTServer.REST_CSRF_ENABLED_KEY, csrfEnabled);
  if (csrfEnabled) {
    conf.set(RESTServer.REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY, ".*");
  }
  extraHdr = new BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, "");
  TEST_UTIL.startMiniCluster();
  REST_TEST_UTIL.startServletContainer(conf);
  context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class);
  marshaller = context.createMarshaller();
  unmarshaller = context.createUnmarshaller();
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  Admin admin = TEST_UTIL.getAdmin();
  if (admin.tableExists(TABLE)) {
    return;
  }
  TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE);
  ColumnFamilyDescriptor columnFamilyDescriptor =
    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build();
  tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor);
  columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build();
  tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor);
  admin.createTable(tableDescriptorBuilder.build());
}
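The same table can also be created with the fluent, chained form of the builder API, which avoids mutating local builder variables. A minimal sketch, assuming an Admin handle and the same TABLE, CFA, and CFB constants as above:

private static void createTestTable(Admin admin) throws IOException {
  // Chain the family additions instead of reassigning a local descriptor variable.
  admin.createTable(TableDescriptorBuilder.newBuilder(TABLE)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CFA))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CFB))
    .build());
}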
Use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.
From the class TableSchemaModel, method getTableDescriptor:
/**
 * @return a table descriptor
 */
@JsonIgnore
public TableDescriptor getTableDescriptor() {
  TableDescriptorBuilder tableDescriptorBuilder =
    TableDescriptorBuilder.newBuilder(TableName.valueOf(getName()));
  for (Map.Entry<QName, Object> e : getAny().entrySet()) {
    tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
  }
  for (ColumnSchemaModel column : getColumns()) {
    ColumnFamilyDescriptorBuilder cfdb =
      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column.getName()));
    for (Map.Entry<QName, Object> e : column.getAny().entrySet()) {
      cfdb.setValue(e.getKey().getLocalPart(), e.getValue().toString());
    }
    tableDescriptorBuilder.setColumnFamily(cfdb.build());
  }
  return tableDescriptorBuilder.build();
}
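A sketch of how this conversion might be exercised from the REST model side, assuming TableSchemaModel's setName/addAttribute/addColumnFamily setters from the hbase-rest model package (the table, family, and attribute names here are illustrative):

// Hypothetical round-trip: populate a schema model, then convert it
// to a client TableDescriptor via getTableDescriptor().
TableSchemaModel model = new TableSchemaModel();
model.setName("example_table");
ColumnSchemaModel family = new ColumnSchemaModel();
family.setName("cf");
// Attributes land in getAny() and are copied verbatim by getTableDescriptor().
family.addAttribute("VERSIONS", "3");
model.addColumnFamily(family);
TableDescriptor desc = model.getTableDescriptor();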
Use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.
From the class CloneSnapshotProcedure, method updateTableDescriptorWithSFT:
/**
 * If a StoreFileTracker is specified, strip any previous SFT config from the
 * TableDescriptor and its column families, and set the specified SFT at the table level.
 */
private void updateTableDescriptorWithSFT() {
  if (StringUtils.isEmpty(customSFT)) {
    return;
  }
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor);
  builder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, customSFT);
  for (ColumnFamilyDescriptor family : tableDescriptor.getColumnFamilies()) {
    ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family);
    cfBuilder.setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, null);
    cfBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, null);
    builder.modifyColumnFamily(cfBuilder.build());
  }
  tableDescriptor = builder.build();
}
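The two null-valued calls above rely on the builders treating null as a delete: passing null for TRACKER_IMPL drops any family-level override, so only the new table-level value survives. A minimal sketch of that semantics, assuming the standard ColumnFamilyDescriptorBuilder behavior and an illustrative "FILE" tracker value:

ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
  .setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE")
  .build();
// A null value removes the key rather than storing a null.
ColumnFamilyDescriptor stripped = ColumnFamilyDescriptorBuilder.newBuilder(cf)
  .setValue(StoreFileTrackerFactory.TRACKER_IMPL, (String) null)
  .build();
assert stripped.getValue(StoreFileTrackerFactory.TRACKER_IMPL) == null;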
Use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.
From the class ModifyTableStoreFileTrackerProcedure, method createFinishTableDescriptor:
@Override
protected TableDescriptor createFinishTableDescriptor(TableDescriptor current) {
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(current);
  finish(builder::setValue, builder::removeValue);
  return builder.build();
}
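Here finish(...) receives the builder's mutators as method references, so the hook can set or remove descriptor values without handling the builder itself. A hypothetical sketch of that callback shape, assuming java.util.function imports; the real HBase signature, the dstSFT field, and the migration key name are placeholders and may differ:

// Hypothetical: record the destination tracker and clear a transient migration key.
private void finish(BiConsumer<String, String> setValue, Consumer<String> removeValue) {
  setValue.accept(StoreFileTrackerFactory.TRACKER_IMPL, dstSFT);
  removeValue.accept("hbase.store.file-tracker.migration.src.impl"); // hypothetical key
}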
Use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.
From the class TestHFileOutputFormat2, method testColumnFamilySettings:
/**
 * Test that {@link HFileOutputFormat2} RecordWriter uses compression and
 * bloom filter settings from the column family descriptor.
 */
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
public void testColumnFamilySettings() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  RecordWriter<ImmutableBytesWritable, Cell> writer = null;
  TaskAttemptContext context = null;
  Path dir = util.getDataTestDir("testColumnFamilySettings");
  // Setup table descriptor
  Table table = Mockito.mock(Table.class);
  RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
  TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]);
  for (ColumnFamilyDescriptor hcd : HBaseTestingUtil.generateColumnDescriptors()) {
    tableDescriptorBuilder.setColumnFamily(hcd);
  }
  // Stub getDescriptor() only after all families are added; building the
  // descriptor before the loop would hand the mock a snapshot with no families.
  Mockito.doReturn(tableDescriptorBuilder.build()).when(table).getDescriptor();
  // set up the table to return some mock keys
  setupMockStartKeys(regionLocator);
  try {
    // partial MapReduce setup to get an operational writer for testing
    // We turn off the sequence file compression, because DefaultCodec
    // pollutes the GZip codec pool with an incompatible compressor.
    conf.set("io.seqfile.compression.type", "NONE");
    conf.set("hbase.fs.tmp.dir", dir.toString());
    // turn locality off to eliminate getRegionLocation fail-and-retry time when writing kvs
    conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, false);
    Job job = new Job(conf, "testLocalMRIncrementalLoad");
    job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings"));
    setupRandomGeneratorMapper(job, false);
    HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
    FileOutputFormat.setOutputPath(job, dir);
    context = createTestTaskAttemptContext(job);
    HFileOutputFormat2 hof = new HFileOutputFormat2();
    writer = hof.getRecordWriter(context);
    // write out random rows
    writeRandomKeyValues(writer, context, tableDescriptorBuilder.build().getColumnFamilyNames(),
      ROWSPERSPLIT);
    writer.close(context);
    // Make sure that a directory was created for every CF
    FileSystem fs = dir.getFileSystem(conf);
    // commit so that the filesystem has one directory per column family
    hof.getOutputCommitter(context).commitTask(context);
    hof.getOutputCommitter(context).commitJob(context);
    FileStatus[] families = CommonFSUtils.listStatus(fs, dir, new FSUtils.FamilyDirFilter(fs));
    assertEquals(tableDescriptorBuilder.build().getColumnFamilies().length, families.length);
    for (FileStatus f : families) {
      String familyStr = f.getPath().getName();
      ColumnFamilyDescriptor hcd =
        tableDescriptorBuilder.build().getColumnFamily(Bytes.toBytes(familyStr));
      // verify that the compression on this file matches the configured compression
      Path dataFilePath = fs.listStatus(f.getPath())[0].getPath();
      Reader reader = HFile.createReader(fs, dataFilePath, new CacheConfig(conf), true, conf);
      Map<byte[], byte[]> fileInfo = reader.getHFileInfo();
      byte[] bloomFilter = fileInfo.get(BLOOM_FILTER_TYPE_KEY);
      if (bloomFilter == null) {
        bloomFilter = Bytes.toBytes("NONE");
      }
      assertEquals(
        "Incorrect bloom filter used for column family " + familyStr + " (reader: " + reader + ")",
        hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter)));
      assertEquals(
        "Incorrect compression used for column family " + familyStr + " (reader: " + reader + ")",
        hcd.getCompressionType(), reader.getFileContext().getCompression());
    }
  } finally {
    dir.getFileSystem(conf).delete(dir, true);
  }
}
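The families supplied by HBaseTestingUtil.generateColumnDescriptors() vary the compression and bloom filter settings per family; a hand-rolled sketch of two such families, assuming imports of java.util.Arrays, java.util.List, and org.apache.hadoop.hbase.io.compress.Compression (names and algorithm choices are illustrative):

// Two families whose HFiles the assertions above could tell apart:
// one GZ-compressed with a ROW bloom, one uncompressed with a ROWCOL bloom.
List<ColumnFamilyDescriptor> families = Arrays.asList(
  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("gz_row"))
    .setCompressionType(Compression.Algorithm.GZ)
    .setBloomFilterType(BloomType.ROW)
    .build(),
  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("none_rowcol"))
    .setCompressionType(Compression.Algorithm.NONE)
    .setBloomFilterType(BloomType.ROWCOL)
    .build());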