Search in sources :

Example 51 with ReadOnlyProps

Use of org.apache.phoenix.util.ReadOnlyProps in the Apache Phoenix project.

the class UpdateCacheAcrossDifferentClientsIT method doSetup.

@BeforeClass
public static void doSetup() throws Exception {
    Map<String, String> clientProps = Maps.newConcurrentMap();
    clientProps.put(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, Integer.toString(3000));
    clientProps.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
    // The full test suite shares one global cluster driver, so the driver must be
    // torn down and re-registered here for DROP_METADATA_ATTRIB to take effect
    // and make DROP statements actually remove the underlying tables.
    destroyDriver();
    setUpTestDriver(new ReadOnlyProps(clientProps.entrySet().iterator()));
    // Also register the real Phoenix driver so each connection gets its own
    // ConnectionQueryServices; that keeps metadata changes from being visible
    // across connections, which is what this IT exercises.
    DriverManager.registerDriver(PhoenixDriver.INSTANCE);
}
Also used : ReadOnlyProps(org.apache.phoenix.util.ReadOnlyProps) BeforeClass(org.junit.BeforeClass)

Example 52 with ReadOnlyProps

Use of org.apache.phoenix.util.ReadOnlyProps in the Apache Phoenix project.

the class QueryDatabaseMetaDataIT method doSetup.

@BeforeClass
@Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
public static void doSetup() throws Exception {
    // Start the test driver with KEEP_DELETED_CELLS enabled on top of the defaults,
    // so deleted cells remain visible to the metadata queries under test.
    Map<String, String> testProps = getDefaultProps();
    testProps.put(QueryServices.DEFAULT_KEEP_DELETED_CELLS_ATTRIB, "true");
    setUpTestDriver(new ReadOnlyProps(testProps.entrySet().iterator()));
}
Also used : ReadOnlyProps(org.apache.phoenix.util.ReadOnlyProps) BeforeClass(org.junit.BeforeClass)

Example 53 with ReadOnlyProps

use of org.apache.phoenix.util.ReadOnlyProps in project phoenix by apache.

the class MetaDataRegionObserver method postOpen.

@Override
public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
    final RegionCoprocessorEnvironment env = e.getEnvironment();
    // One-shot background task: truncate SYSTEM.STATS as part of the 4.7 upgrade,
    // run off the region-open path so opening is not blocked.
    Runnable r = new Runnable() {

        @Override
        public void run() {
            HTableInterface metaTable = null;
            HTableInterface statsTable = null;
            try {
                ReadOnlyProps props = new ReadOnlyProps(env.getConfiguration().iterator());
                // Brief pause so the region server finishes opening before we
                // touch the system tables.
                Thread.sleep(1000);
                metaTable = env.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props));
                statsTable = env.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, props));
                if (UpgradeUtil.truncateStats(metaTable, statsTable)) {
                    LOG.info("Stats are successfully truncated for upgrade 4.7!!");
                }
            } catch (Exception exception) {
                // Pass the Throwable itself so the full stack trace is logged;
                // the previous code passed getStackTrace(), a StackTraceElement[],
                // which only logged the array's toString().
                LOG.warn("Exception while truncate stats..," + " please check and delete stats manually inorder to get proper result with old client!!", exception);
            } finally {
                try {
                    if (metaTable != null) {
                        metaTable.close();
                    }
                    if (statsTable != null) {
                        statsTable.close();
                    }
                } catch (IOException ignored) {
                    // Best-effort close of table handles; nothing further can be
                    // done during region open, but record it for diagnosis.
                    LOG.debug("Failed to close system table handle after stats truncation", ignored);
                }
            }
        }
    };
    Thread t = new Thread(r);
    t.setDaemon(true);
    t.start();
    if (!enableRebuildIndex) {
        LOG.info("Failure Index Rebuild is skipped by configuration.");
        return;
    }
    // turn off verbose deprecation logging
    Logger deprecationLogger = Logger.getLogger("org.apache.hadoop.conf.Configuration.deprecation");
    if (deprecationLogger != null) {
        deprecationLogger.setLevel(Level.WARN);
    }
    try {
        // Force PhoenixDriver class initialization (driver registration) before
        // scheduling the index rebuild work that depends on it.
        Class.forName(PhoenixDriver.class.getName());
        // starts index rebuild schedule work
        BuildIndexScheduleTask task = new BuildIndexScheduleTask(e.getEnvironment());
        // run scheduled task every 10 secs
        executor.scheduleAtFixedRate(task, 10000, rebuildIndexTimeInterval, TimeUnit.MILLISECONDS);
    } catch (ClassNotFoundException ex) {
        LOG.error("BuildIndexScheduleTask cannot start!", ex);
    }
}
Also used : ReadOnlyProps(org.apache.phoenix.util.ReadOnlyProps) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) PhoenixDriver(org.apache.phoenix.jdbc.PhoenixDriver) IOException(java.io.IOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Logger(org.apache.log4j.Logger) ServiceException(com.google.protobuf.ServiceException) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) SQLException(java.sql.SQLException) IOException(java.io.IOException)

Example 54 with ReadOnlyProps

use of org.apache.phoenix.util.ReadOnlyProps in project phoenix by apache.

the class PMetaDataImplTest method shouldAlwaysKeepAtLeastOneEntryEvenIfTooLarge.

@Test
public void shouldAlwaysKeepAtLeastOneEntryEvenIfTooLarge() throws Exception {
    TestTimeKeeper clock = new TestTimeKeeper();
    Map<String, String> config = Maps.newHashMapWithExpectedSize(2);
    config.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "5");
    config.put(QueryServices.CLIENT_CACHE_ENCODING, "object");
    PMetaData cache = new PMetaDataImpl(5, clock, new ReadOnlyProps(config));
    // Two small tables fit side by side under the size-5 cap.
    addToTable(cache, "a", 1, clock);
    assertEquals(1, cache.size());
    addToTable(cache, "b", 1, clock);
    assertEquals(2, cache.size());
    // A table that exactly fills the cache evicts everything else.
    addToTable(cache, "c", 5, clock);
    assertEquals(1, cache.size());
    // Even an entry larger than the whole cache must be retained on its own.
    addToTable(cache, "d", 20, clock);
    assertEquals(1, cache.size());
    assertNames(cache, "d");
    // Inserting a small entry evicts the oversized one.
    addToTable(cache, "e", 1, clock);
    assertEquals(1, cache.size());
    addToTable(cache, "f", 2, clock);
    assertEquals(2, cache.size());
    assertNames(cache, "e", "f");
}
Also used : ReadOnlyProps(org.apache.phoenix.util.ReadOnlyProps) Test(org.junit.Test)

Example 55 with ReadOnlyProps

use of org.apache.phoenix.util.ReadOnlyProps in project phoenix by apache.

the class PMetaDataImplTest method testEviction.

@Test
public void testEviction() throws Exception {
    TestTimeKeeper clock = new TestTimeKeeper();
    Map<String, String> config = Maps.newHashMapWithExpectedSize(2);
    config.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "10");
    config.put(QueryServices.CLIENT_CACHE_ENCODING, "object");
    PMetaData cache = new PMetaDataImpl(5, clock, new ReadOnlyProps(config));
    // Fill toward the size cap; the oldest entry is evicted on overflow.
    addToTable(cache, "a", 5, clock);
    assertEquals(1, cache.size());
    addToTable(cache, "b", 4, clock);
    assertEquals(2, cache.size());
    addToTable(cache, "c", 3, clock);
    assertEquals(2, cache.size());
    assertNames(cache, "b", "c");
    // Re-adding "b" at a bigger size forces further eviction.
    addToTable(cache, "b", 8, clock);
    assertEquals(1, cache.size());
    assertNames(cache, "b");
    // An entry bigger than the cache still replaces everything but is kept.
    addToTable(cache, "d", 11, clock);
    assertEquals(1, cache.size());
    assertNames(cache, "d");
    removeFromTable(cache, "d", clock);
    assertNames(cache);
    // Refill with three entries that fit together.
    addToTable(cache, "a", 4, clock);
    assertEquals(1, cache.size());
    addToTable(cache, "b", 3, clock);
    assertEquals(2, cache.size());
    addToTable(cache, "c", 2, clock);
    assertEquals(3, cache.size());
    assertNames(cache, "a", "b", "c");
    // Touching "a" refreshes it, so "b" becomes the LRU victim when "d" arrives.
    getFromTable(cache, "a", clock);
    addToTable(cache, "d", 3, clock);
    assertEquals(3, cache.size());
    assertNames(cache, "c", "a", "d");
    // Clone maintains insert order
    cache = cache.clone();
    addToTable(cache, "e", 6, clock);
    assertEquals(2, cache.size());
    assertNames(cache, "d", "e");
}
Also used : ReadOnlyProps(org.apache.phoenix.util.ReadOnlyProps) Test(org.junit.Test)

Aggregations

ReadOnlyProps (org.apache.phoenix.util.ReadOnlyProps)56 BeforeClass (org.junit.BeforeClass)32 Test (org.junit.Test)12 IOException (java.io.IOException)6 PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException)6 Properties (java.util.Properties)4 Configuration (org.apache.hadoop.conf.Configuration)4 HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin)4 ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder)3 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)3 HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility)3 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)3 Mutation (org.apache.hadoop.hbase.client.Mutation)3 Batch (org.apache.hadoop.hbase.client.coprocessor.Batch)3 BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback)3 ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController)3 MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto)3 MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)3 MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode)3 MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse)3