Example 1 with MemoryChunk

use of org.apache.phoenix.memory.MemoryManager.MemoryChunk in project phoenix by apache.

the class ServerCacheClient, method addServerCache.

public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState, final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
    List<Closeable> closeables = new ArrayList<Closeable>();
    closeables.add(chunk);
    ServerCache hashCacheSpec = null;
    SQLException firstException = null;
    final byte[] cacheId = generateId();
    /**
     * Execute EndPoint in parallel on each server to send compressed hash cache.
     */
    // TODO: generalize and package as a per region server EndPoint caller
    // (ideally this would be functionality provided by the coprocessor framework)
    boolean success = false;
    ExecutorService executor = services.getExecutor();
    List<Future<Boolean>> futures = Collections.emptyList();
    try {
        final PTable cacheUsingTable = cacheUsingTableRef.getTable();
        List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
        int nRegions = locations.size();
        // Size these based on worst case
        futures = new ArrayList<Future<Boolean>>(nRegions);
        Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
        for (HRegionLocation entry : locations) {
            // Keep track of servers we've sent to and only send once
            byte[] regionStartKey = entry.getRegionInfo().getStartKey();
            byte[] regionEndKey = entry.getRegionInfo().getEndKey();
            if (!servers.contains(entry) && keyRanges.intersectRegion(regionStartKey, regionEndKey, cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
                // Call RPC once per server
                servers.add(entry);
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
                }
                final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
                closeables.add(htable);
                futures.add(executor.submit(new JobCallable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
                        final Map<byte[], AddServerCacheResponse> results;
                        try {
                            results = htable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, AddServerCacheResponse>() {

                                @Override
                                public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
                                    ServerRpcController controller = new ServerRpcController();
                                    BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
                                    AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
                                    final byte[] tenantIdBytes;
                                    if (cacheUsingTable.isMultiTenant()) {
                                        try {
                                            tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                                        } catch (SQLException e) {
                                            throw new IOException(e);
                                        }
                                    } else {
                                        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                                    }
                                    if (tenantIdBytes != null) {
                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                    }
                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                    builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                    builder.setHasProtoBufIndexMaintainer(true);
                                    ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuider = ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                                    svrCacheFactoryBuider.setClassName(cacheFactory.getClass().getName());
                                    builder.setCacheFactory(svrCacheFactoryBuider.build());
                                    builder.setTxState(ByteStringer.wrap(txState));
                                    instance.addServerCache(controller, builder.build(), rpcCallback);
                                    if (controller.getFailedOn() != null) {
                                        throw controller.getFailedOn();
                                    }
                                    return rpcCallback.get();
                                }
                            });
                        } catch (Throwable t) {
                            throw new Exception(t);
                        }
                        if (results != null && results.size() == 1) {
                            return results.values().iterator().next().getReturn();
                        }
                        return false;
                    }

                    /**
                     * Defines the grouping for round robin behavior. All threads spawned to process
                     * this scan will be grouped together and time sliced with other simultaneously
                     * executing parallel scans.
                     */
                    @Override
                    public Object getJobId() {
                        return ServerCacheClient.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                        return NO_OP_INSTANCE;
                    }
                }));
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));
                }
            }
        }
        hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
        // Execute in parallel
        int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
        for (Future<Boolean> future : futures) {
            future.get(timeoutMs, TimeUnit.MILLISECONDS);
        }
        cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
        success = true;
    } catch (SQLException e) {
        firstException = e;
    } catch (Exception e) {
        firstException = new SQLException(e);
    } finally {
        try {
            if (!success) {
                SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
                for (Future<Boolean> future : futures) {
                    future.cancel(true);
                }
            }
        } finally {
            try {
                Closeables.closeAll(closeables);
            } catch (IOException e) {
                if (firstException == null) {
                    firstException = new SQLException(e);
                }
            } finally {
                if (firstException != null) {
                    throw firstException;
                }
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
    }
    return hashCacheSpec;
}
Also used : SQLException(java.sql.SQLException) SQLCloseable(org.apache.phoenix.util.SQLCloseable) Closeable(java.io.Closeable) ArrayList(java.util.ArrayList) JobCallable(org.apache.phoenix.job.JobManager.JobCallable) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) PTable(org.apache.phoenix.schema.PTable) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) HashSet(java.util.HashSet) ServerCachingService(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService) MemoryChunk(org.apache.phoenix.memory.MemoryManager.MemoryChunk) AddServerCacheRequest(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest) IOException(java.io.IOException) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) ServerCacheFactory(org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) AddServerCacheResponse(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse)
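
The detail worth noting in this example is the chunk's lifecycle: the MemoryChunk is sized to the serialized cache payload and added to the same closeables list as the HTable handles, so the finally block releases the tracked memory whether or not the RPCs succeed. A minimal sketch of that lifecycle, assuming Phoenix is on the classpath and using made-up sizes:

import java.io.Closeable;
import java.util.ArrayList;
import java.util.List;

import org.apache.phoenix.memory.GlobalMemoryManager;
import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
import org.apache.phoenix.util.Closeables;

public class ChunkLifecycleSketch {

    public static void main(String[] args) throws Exception {
        // Hypothetical budget: a 1024-byte global pool (second argument assumed
        // to be the max wait in ms, matching the two-arg calls in these examples)
        GlobalMemoryManager mm = new GlobalMemoryManager(1024, 100);
        byte[] payload = new byte[256];
        List<Closeable> closeables = new ArrayList<Closeable>();
        // Reserve memory sized to the payload and track the chunk with the other resources
        MemoryChunk chunk = mm.allocate(payload.length);
        closeables.add(chunk);
        try {
            // ... ship the payload to each region server, as addServerCache does ...
        } finally {
            // Releases the reservation whether or not the sends succeeded
            Closeables.closeAll(closeables);
        }
    }
}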

Example 2 with MemoryChunk

use of org.apache.phoenix.memory.MemoryManager.MemoryChunk in project phoenix by apache.

the class TenantCacheImpl, method addServerCache.

@Override
public Closeable addServerCache(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr, byte[] txState, ServerCacheFactory cacheFactory, boolean useProtoForIndexMaintainer) throws SQLException {
    MemoryChunk chunk = this.getMemoryManager().allocate(cachePtr.getLength() + txState.length);
    boolean success = false;
    try {
        Closeable element = cacheFactory.newCache(cachePtr, txState, chunk, useProtoForIndexMaintainer);
        getServerCaches().put(cacheId, element);
        success = true;
        return element;
    } finally {
        if (!success) {
            Closeables.closeAllQuietly(Collections.singletonList(chunk));
        }
    }
}
Also used : MemoryChunk(org.apache.phoenix.memory.MemoryManager.MemoryChunk) Closeable(java.io.Closeable)
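
Two details carry the weight here: the chunk is sized for both the cache bytes and the transaction state, and ownership of the chunk transfers to the cache element, so this method only closes the chunk itself when construction or registration fails. A minimal sketch of that success-flag idiom, with hypothetical buildEntry/register helpers standing in for cacheFactory.newCache and the server cache map:

import java.io.Closeable;
import java.util.Collections;

import org.apache.phoenix.memory.MemoryManager;
import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
import org.apache.phoenix.util.Closeables;

public class OwnershipTransferSketch {

    // Hypothetical stand-in for cacheFactory.newCache(...): the entry takes ownership of the chunk
    static Closeable buildEntry(MemoryChunk chunk) {
        return chunk;
    }

    // Hypothetical stand-in for getServerCaches().put(...): a real registry
    // would close the element (and with it the chunk) on eviction
    static void register(Closeable element) {
    }

    static Closeable addEntry(MemoryManager mm, int cacheBytes, int txStateBytes) {
        // Reserve enough for both buffers, as TenantCacheImpl does
        MemoryChunk chunk = mm.allocate(cacheBytes + txStateBytes);
        boolean success = false;
        try {
            Closeable element = buildEntry(chunk);
            register(element);
            success = true;
            return element;
        } finally {
            if (!success) {
                // Nothing took ownership, so give the reservation back here
                Closeables.closeAllQuietly(Collections.singletonList(chunk));
            }
        }
    }
}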

Example 3 with MemoryChunk

use of org.apache.phoenix.memory.MemoryManager.MemoryChunk in project phoenix by apache.

the class MemoryManagerTest, method testWaitForMemoryAvailable.

@Ignore("See PHOENIX-2840")
@Test
public void testWaitForMemoryAvailable() throws Exception {
    final GlobalMemoryManager gmm = spy(new GlobalMemoryManager(100, 80));
    final ChildMemoryManager rmm1 = new ChildMemoryManager(gmm, 100);
    final ChildMemoryManager rmm2 = new ChildMemoryManager(gmm, 100);
    final CountDownLatch latch = new CountDownLatch(2);
    Thread t1 = new Thread() {

        @Override
        public void run() {
            MemoryChunk c1 = rmm1.allocate(50);
            MemoryChunk c2 = rmm1.allocate(50);
            sleepFor(40);
            c1.close();
            sleepFor(20);
            c2.close();
            latch.countDown();
        }
    };
    Thread t2 = new Thread() {

        @Override
        public void run() {
            sleepFor(20);
            // Will require waiting for a bit of time before t1 frees the requested memory
            MemoryChunk c3 = rmm2.allocate(50);
            Mockito.verify(gmm, atLeastOnce()).waitForBytesToFree(anyLong(), anyLong());
            c3.close();
            latch.countDown();
        }
    };
    t2.start();
    t1.start();
    latch.await(1, TimeUnit.SECONDS);
    // Main thread competes with the others to get all the memory, but should wait
    // until both threads are complete (since that's when the memory will
    // again be all available).
    ChildMemoryManager rmm = new ChildMemoryManager(gmm, 100);
    MemoryChunk c = rmm.allocate(100);
    c.close();
    assertTrue(rmm.getAvailableMemory() == rmm.getMaxMemory());
    assertTrue(rmm1.getAvailableMemory() == rmm1.getMaxMemory());
    assertTrue(rmm2.getAvailableMemory() == rmm2.getMaxMemory());
}
Also used : MemoryChunk(org.apache.phoenix.memory.MemoryManager.MemoryChunk) CountDownLatch(java.util.concurrent.CountDownLatch) Ignore(org.junit.Ignore) Test(org.junit.Test)
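
Note that gmm is a Mockito spy, which is what lets the second thread verify that waitForBytesToFree was actually hit while t1 still held the memory. The sleepFor helper the threads call is not part of this excerpt; presumably it is a small wrapper around Thread.sleep that converts interruption into an unchecked failure, along these lines:

// Assumed shape of the test's sleepFor helper (not shown in the excerpt above)
private static void sleepFor(long ms) {
    try {
        Thread.sleep(ms);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IllegalStateException("unexpected interrupt", e);
    }
}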

Example 4 with MemoryChunk

use of org.apache.phoenix.memory.MemoryManager.MemoryChunk in project phoenix by apache.

the class MemoryManagerTest, method testOverChildMemoryLimit.

@Test
public void testOverChildMemoryLimit() throws Exception {
    MemoryManager gmm = new GlobalMemoryManager(100, 1);
    ChildMemoryManager rmm1 = new ChildMemoryManager(gmm, 25);
    ChildMemoryManager rmm2 = new ChildMemoryManager(gmm, 25);
    ChildMemoryManager rmm3 = new ChildMemoryManager(gmm, 25);
    ChildMemoryManager rmm4 = new ChildMemoryManager(gmm, 35);
    MemoryChunk c1 = rmm1.allocate(20);
    MemoryChunk c2 = rmm2.allocate(20);
    try {
        rmm1.allocate(10);
        fail();
    } catch (InsufficientMemoryException e) {
    // expected
    }
    MemoryChunk c3 = rmm3.allocate(25);
    c1.close();
    // Ensure that you can get back to max for rmm1 after failure
    MemoryChunk c4 = rmm1.allocate(10);
    MemoryChunk c5 = rmm1.allocate(15);
    MemoryChunk c6 = rmm4.allocate(25);
    try {
        // This passes the % test, but fails the next total memory usage test
        rmm4.allocate(10);
        fail();
    } catch (InsufficientMemoryException e) {
    // expected
    }
    c2.close();
    // Tests that the % check passes (confirming that the 10 requested above was
    // subtracted back from the tracked memory usage), since we'd be at the max of 35% now
    MemoryChunk c7 = rmm4.allocate(10);
    try {
        rmm4.allocate(1);
        fail();
    } catch (InsufficientMemoryException e) {
    // expected
    }
    try {
        rmm2.allocate(25);
        fail();
    } catch (InsufficientMemoryException e) {
    // expected
    }
    c3.close();
    c4.close();
    c5.close();
    c6.close();
    c7.close();
    assertTrue(rmm1.getAvailableMemory() == rmm1.getMaxMemory());
    assertTrue(rmm2.getAvailableMemory() == rmm2.getMaxMemory());
    assertTrue(rmm3.getAvailableMemory() == rmm3.getMaxMemory());
    assertTrue(rmm4.getAvailableMemory() == rmm4.getMaxMemory());
}
Also used : MemoryChunk(org.apache.phoenix.memory.MemoryManager.MemoryChunk) Test(org.junit.Test)
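
The arithmetic under test: the pool is 100 bytes, and the second ChildMemoryManager argument reads as a percentage of it, so a child capped at 25 may hold at most 25 bytes no matter how much of the pool is free. A minimal sketch of the first failure above under that assumption:

import org.apache.phoenix.memory.ChildMemoryManager;
import org.apache.phoenix.memory.GlobalMemoryManager;
import org.apache.phoenix.memory.InsufficientMemoryException;
import org.apache.phoenix.memory.MemoryManager;
import org.apache.phoenix.memory.MemoryManager.MemoryChunk;

public class ChildLimitSketch {

    public static void main(String[] args) {
        MemoryManager gmm = new GlobalMemoryManager(100, 1); // 100-byte global pool
        ChildMemoryManager rmm = new ChildMemoryManager(gmm, 25); // capped at 25% => 25 bytes
        MemoryChunk c = rmm.allocate(20); // 20 of the child's 25 bytes in use
        try {
            rmm.allocate(10); // would put the child at 30 > 25
        } catch (InsufficientMemoryException e) {
            // expected: the child is over its share even though the pool has 80 bytes free
        }
        MemoryChunk c2 = rmm.allocate(5); // still fits: the failed request was rolled back
        c2.close();
        c.close();
    }
}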

Example 5 with MemoryChunk

use of org.apache.phoenix.memory.MemoryManager.MemoryChunk in project phoenix by apache.

the class MemoryManagerTest, method testOverGlobalMemoryLimit.

@Test
public void testOverGlobalMemoryLimit() throws Exception {
    GlobalMemoryManager gmm = new GlobalMemoryManager(250, 1);
    try {
        gmm.allocate(300);
        fail();
    } catch (InsufficientMemoryException e) {
    // expected
    }
    ChildMemoryManager rmm1 = new ChildMemoryManager(gmm, 100);
    ChildMemoryManager rmm2 = new ChildMemoryManager(gmm, 100);
    MemoryChunk c1 = rmm1.allocate(100);
    MemoryChunk c2 = rmm2.allocate(100);
    try {
        rmm2.allocate(100);
        fail();
    } catch (InsufficientMemoryException e) {
    // expected
    }
    c1.close();
    c2.close();
    assertTrue(rmm1.getAvailableMemory() == rmm1.getMaxMemory());
}
Also used : MemoryChunk(org.apache.phoenix.memory.MemoryManager.MemoryChunk) Test(org.junit.Test)
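
Both MemoryManagerTest cases end on the same invariant: once every chunk is closed, each manager reports its full budget available again. A minimal standalone round trip of that invariant, using the same two-arg constructor as the test:

import org.apache.phoenix.memory.GlobalMemoryManager;
import org.apache.phoenix.memory.MemoryManager.MemoryChunk;

public class PoolRoundTripSketch {

    public static void main(String[] args) {
        GlobalMemoryManager gmm = new GlobalMemoryManager(250, 1);
        MemoryChunk c = gmm.allocate(200); // 50 bytes left in the pool
        c.close();                         // reservation returned
        if (gmm.getAvailableMemory() != gmm.getMaxMemory()) {
            throw new AssertionError("pool did not return to its full 250 bytes");
        }
    }
}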

Aggregations

MemoryChunk (org.apache.phoenix.memory.MemoryManager.MemoryChunk)8 Test (org.junit.Test)6 CountDownLatch (java.util.concurrent.CountDownLatch)3 Ignore (org.junit.Ignore)3 Closeable (java.io.Closeable)2 IOException (java.io.IOException)1 SQLException (java.sql.SQLException)1 ArrayList (java.util.ArrayList)1 HashSet (java.util.HashSet)1 ExecutorService (java.util.concurrent.ExecutorService)1 Future (java.util.concurrent.Future)1 HRegionLocation (org.apache.hadoop.hbase.HRegionLocation)1 HTableInterface (org.apache.hadoop.hbase.client.HTableInterface)1 BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback)1 ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController)1 ServerCacheFactory (org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory)1 AddServerCacheRequest (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest)1 AddServerCacheResponse (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse)1 ServerCachingService (org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService)1 JobCallable (org.apache.phoenix.job.JobManager.JobCallable)1