Example usage of org.apache.hadoop.hbase.rest.client.Cluster in the Apache HBase project, taken from the setUpBeforeClass method of the TestVersionResource class.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // The JAXB context does not depend on the cluster, so build it up front.
  context = JAXBContext.newInstance(VersionModel.class, StorageClusterVersionModel.class);
  // Bring up the HBase mini cluster and the REST servlet container on top of it.
  TEST_UTIL.startMiniCluster();
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  // REST client pointed at the servlet container that was just started locally.
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
}
Example usage of org.apache.hadoop.hbase.rest.client.Cluster in the Apache HBase project, taken from the setUpBeforeClass method of the TestScannersWithFilters class.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Bring up a 3-node mini cluster plus the REST servlet container, and set up
  // the JAXB machinery used to (un)marshal scanner/cell models over REST.
  TEST_UTIL.startMiniCluster(3);
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class, ScannerModel.class);
  marshaller = context.createMarshaller();
  unmarshaller = context.createUnmarshaller();
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  Admin admin = TEST_UTIL.getAdmin();
  // Table data is only seeded once; later runs against an existing table skip it.
  if (admin.tableExists(TABLE)) {
    return;
  }
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
  htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
  admin.createTable(htd);
  // try-with-resources: the original code leaked the Table handle if any
  // put/delete below threw before reaching the final close().
  try (Table table = TEST_UTIL.getConnection().getTable(TABLE)) {
    // Insert first half: group-one rows into family 0, group-two rows into family 1.
    for (byte[] ROW : ROWS_ONE) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL); // test data; skip WAL for speed
      for (byte[] QUALIFIER : QUALIFIERS_ONE) {
        p.addColumn(FAMILIES[0], QUALIFIER, VALUES[0]);
      }
      table.put(p);
    }
    for (byte[] ROW : ROWS_TWO) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL);
      for (byte[] QUALIFIER : QUALIFIERS_TWO) {
        p.addColumn(FAMILIES[1], QUALIFIER, VALUES[1]);
      }
      table.put(p);
    }
    // Insert second half (reverse families) so every row has data in both families.
    for (byte[] ROW : ROWS_ONE) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL);
      for (byte[] QUALIFIER : QUALIFIERS_ONE) {
        p.addColumn(FAMILIES[1], QUALIFIER, VALUES[0]);
      }
      table.put(p);
    }
    for (byte[] ROW : ROWS_TWO) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL);
      for (byte[] QUALIFIER : QUALIFIERS_TWO) {
        p.addColumn(FAMILIES[0], QUALIFIER, VALUES[1]);
      }
      table.put(p);
    }
    // Delete the second qualifier from all rows and families.
    for (byte[] ROW : ROWS_ONE) {
      Delete d = new Delete(ROW);
      d.addColumns(FAMILIES[0], QUALIFIERS_ONE[1]);
      d.addColumns(FAMILIES[1], QUALIFIERS_ONE[1]);
      table.delete(d);
    }
    for (byte[] ROW : ROWS_TWO) {
      Delete d = new Delete(ROW);
      d.addColumns(FAMILIES[0], QUALIFIERS_TWO[1]);
      d.addColumns(FAMILIES[1], QUALIFIERS_TWO[1]);
      table.delete(d);
    }
    // One qualifier removed per family pair above.
    colsPerRow -= 2;
    // Delete the second rows from both groups, one column at a time.
    for (byte[] QUALIFIER : QUALIFIERS_ONE) {
      Delete d = new Delete(ROWS_ONE[1]);
      d.addColumns(FAMILIES[0], QUALIFIER);
      d.addColumns(FAMILIES[1], QUALIFIER);
      table.delete(d);
    }
    for (byte[] QUALIFIER : QUALIFIERS_TWO) {
      Delete d = new Delete(ROWS_TWO[1]);
      d.addColumns(FAMILIES[0], QUALIFIER);
      d.addColumns(FAMILIES[1], QUALIFIER);
      table.delete(d);
    }
    // One row removed from each group.
    numRows -= 2;
  }
}
Example usage of org.apache.hadoop.hbase.rest.client.Cluster in the Apache HBase project, taken from the setUpBeforeClass method of the TestMultiRowResource class.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Configure CSRF protection for the REST server before anything starts.
  conf = TEST_UTIL.getConfiguration();
  conf.setBoolean(RESTServer.REST_CSRF_ENABLED_KEY, csrfEnabled);
  extraHdr = new BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, "");
  // Mini cluster first, then the REST servlet container on top of it.
  TEST_UTIL.startMiniCluster();
  REST_TEST_UTIL.startServletContainer(conf);
  // REST client pointed at the servlet container that was just started locally.
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  // JAXB machinery for (un)marshalling cell/row models over REST.
  context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class);
  marshaller = context.createMarshaller();
  unmarshaller = context.createUnmarshaller();
  // Create the test table with both column families unless it already exists.
  Admin admin = TEST_UTIL.getAdmin();
  if (admin.tableExists(TABLE)) {
    return;
  }
  HTableDescriptor tableDescriptor = new HTableDescriptor(TABLE);
  tableDescriptor.addFamily(new HColumnDescriptor(CFA));
  tableDescriptor.addFamily(new HColumnDescriptor(CFB));
  admin.createTable(tableDescriptor);
}
Example usage of org.apache.hadoop.hbase.rest.client.Cluster in the Apache HBase project, taken from the setUpBeforeClass method of the TestNamespacesInstanceResource class.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Namespace property fixtures; independent of the cluster, so set them up first.
  NAMESPACE1_PROPS.put("key1", "value1");
  NAMESPACE2_PROPS.put("key2a", "value2a");
  NAMESPACE2_PROPS.put("key2b", "value2b");
  NAMESPACE3_PROPS.put("key3", "value3");
  NAMESPACE4_PROPS.put("key4a", "value4a");
  NAMESPACE4_PROPS.put("key4b", "value4b");
  // Mini cluster first, then the REST servlet container on top of it.
  conf = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniCluster();
  REST_TEST_UTIL.startServletContainer(conf);
  // REST client pointed at the servlet container that was just started locally.
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  // Model helper plus JAXB/JSON machinery for (un)marshalling namespace models.
  testNamespacesInstanceModel = new TestNamespacesInstanceModel();
  context = JAXBContext.newInstance(NamespacesInstanceModel.class, TableListModel.class);
  jsonMapper = new JacksonJaxbJsonProvider().locateMapper(NamespacesInstanceModel.class, MediaType.APPLICATION_JSON_TYPE);
}
Example usage of org.apache.hadoop.hbase.rest.client.Cluster in the Apache HBase project, taken from the setUpBeforeClass method of the TestNamespacesResource class.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Model helper and JAXB context are independent of the cluster; build them first.
  testNamespacesModel = new TestNamespacesModel();
  context = JAXBContext.newInstance(NamespacesModel.class);
  // Mini cluster first, then the REST servlet container on top of it.
  conf = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniCluster();
  REST_TEST_UTIL.startServletContainer(conf);
  // REST client pointed at the servlet container that was just started locally.
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
}
Aggregations