
Example 1 with ScanCursor

Use of io.lettuce.core.ScanCursor in project aws-athena-query-federation by awslabs.

From class RedisMetadataHandler, method loadKeys.

/**
 * For the given zset prefix, find all values and treat each of those values as a key to scan before returning
 * the scan continuation token.
 *
 * @param syncCommands The Lettuce Client
 * @param prefix The zset key prefix to scan.
 * @param redisCursor The previous Redis cursor (aka continuation token).
 * @param keys The collection of keys collected so far. Any new keys we find are added to it.
 * @return The Redis cursor to use when continuing the scan.
 */
private ScanCursor loadKeys(RedisCommandsWrapper<String, String> syncCommands, String prefix, ScanCursor redisCursor, Set<String> keys) {
    ScanCursor cursor = (redisCursor == null) ? INITIAL : redisCursor;
    ScanArgs scanArgs = new ScanArgs();
    scanArgs.limit(SCAN_COUNT_SIZE);
    scanArgs.match(prefix);
    KeyScanCursor<String> newCursor = syncCommands.scan(cursor, scanArgs);
    keys.addAll(newCursor.getKeys());
    return newCursor;
}
Also used: ScanCursor(io.lettuce.core.ScanCursor) KeyScanCursor(io.lettuce.core.KeyScanCursor) ScanArgs(io.lettuce.core.ScanArgs)
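
A minimal sketch of how the returned cursor is intended to be driven, assuming the same syncCommands wrapper as above (zsetPrefixes and the surrounding variables are hypothetical; the loop mirrors doGetSplits in Example 5):

Set<String> keys = new HashSet<>();
for (String prefix : zsetPrefixes) {
    // null tells loadKeys to start from ScanCursor.INITIAL
    ScanCursor cursor = null;
    do {
        cursor = loadKeys(syncCommands, prefix, cursor, keys);
    } while (!cursor.isFinished()); // finished once Redis returns cursor "0"
}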

Example 2 with ScanCursor

Use of io.lettuce.core.ScanCursor in project aws-athena-query-federation by awslabs.

From class RedisMetadataHandlerTest, method doGetSplitsZset.

@Test
public void doGetSplitsZset() {
    // 3 prefixes for this table
    String prefixes = "prefix1-*,prefix2-*, prefix3-*";
    // 4 zsets per prefix
    when(mockSyncCommands.scan(any(ScanCursor.class), any(ScanArgs.class))).then((InvocationOnMock invocationOnMock) -> {
        ScanCursor cursor = (ScanCursor) invocationOnMock.getArguments()[0];
        if (cursor == null || cursor.getCursor().equals("0")) {
            List<String> result = new ArrayList<>();
            result.add(UUID.randomUUID().toString());
            result.add(UUID.randomUUID().toString());
            result.add(UUID.randomUUID().toString());
            MockKeyScanCursor<String> scanCursor = new MockKeyScanCursor<>();
            scanCursor.setCursor("1");
            scanCursor.setKeys(result);
            return scanCursor;
        } else {
            List<String> result = new ArrayList<>();
            result.add(UUID.randomUUID().toString());
            MockKeyScanCursor<String> scanCursor = new MockKeyScanCursor<>();
            scanCursor.setCursor("0");
            scanCursor.setKeys(result);
            scanCursor.setFinished(true);
            return scanCursor;
        }
    });
    // 200 members per zset
    when(mockSyncCommands.zcount(anyString(), any(Range.class))).thenReturn(200L);
    List<String> partitionCols = new ArrayList<>();
    Schema schema = SchemaBuilder.newBuilder().addField("partitionId", Types.MinorType.INT.getType()).addStringField(REDIS_ENDPOINT_PROP).addStringField(VALUE_TYPE_TABLE_PROP).addStringField(KEY_PREFIX_TABLE_PROP).addStringField(ZSET_KEYS_TABLE_PROP).addStringField(REDIS_SSL_FLAG).addStringField(REDIS_CLUSTER_FLAG).addStringField(REDIS_DB_NUMBER).build();
    Block partitions = allocator.createBlock(schema);
    partitions.setValue(REDIS_ENDPOINT_PROP, 0, endpoint);
    partitions.setValue(VALUE_TYPE_TABLE_PROP, 0, "literal");
    partitions.setValue(KEY_PREFIX_TABLE_PROP, 0, null);
    partitions.setValue(ZSET_KEYS_TABLE_PROP, 0, prefixes);
    partitions.setValue(REDIS_SSL_FLAG, 0, null);
    partitions.setValue(REDIS_CLUSTER_FLAG, 0, null);
    partitions.setValue(REDIS_DB_NUMBER, 0, null);
    partitions.setRowCount(1);
    String continuationToken = null;
    GetSplitsRequest originalReq = new GetSplitsRequest(IDENTITY, QUERY_ID, DEFAULT_CATALOG, TABLE_NAME, partitions, partitionCols, new Constraints(new HashMap<>()), null);
    GetSplitsRequest req = new GetSplitsRequest(originalReq, continuationToken);
    logger.info("doGetSplitsPrefix: req[{}]", req);
    MetadataResponse rawResponse = handler.doGetSplits(allocator, req);
    assertEquals(MetadataRequestType.GET_SPLITS, rawResponse.getRequestType());
    GetSplitsResponse response = (GetSplitsResponse) rawResponse;
    continuationToken = response.getContinuationToken();
    logger.info("doGetSplitsPrefix: continuationToken[{}] - numSplits[{}]", new Object[] { continuationToken, response.getSplits().size() });
    assertEquals("Continuation criteria violated", 120, response.getSplits().size());
    assertTrue("Continuation criteria violated", response.getContinuationToken() == null);
    verify(mockSyncCommands, times(6)).scan(any(ScanCursor.class), any(ScanArgs.class));
}
Also used: GetSplitsRequest(com.amazonaws.athena.connector.lambda.metadata.GetSplitsRequest) HashMap(java.util.HashMap) ScanArgs(io.lettuce.core.ScanArgs) Schema(org.apache.arrow.vector.types.pojo.Schema) ArrayList(java.util.ArrayList) Matchers.anyString(org.mockito.Matchers.anyString) Range(io.lettuce.core.Range) ScanCursor(io.lettuce.core.ScanCursor) MockKeyScanCursor(com.amazonaws.athena.connectors.redis.util.MockKeyScanCursor) Constraints(com.amazonaws.athena.connector.lambda.domain.predicate.Constraints) InvocationOnMock(org.mockito.invocation.InvocationOnMock) GetSplitsResponse(com.amazonaws.athena.connector.lambda.metadata.GetSplitsResponse) MetadataResponse(com.amazonaws.athena.connector.lambda.metadata.MetadataResponse) Block(com.amazonaws.athena.connector.lambda.data.Block) Test(org.junit.Test)
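
For reference, the expected counts follow from the mock setup: each prefix needs two SCAN pages (3 keys, then 1 key), so the 3 configured prefixes produce the 6 verified scan calls and 12 zset keys in total. With zcount stubbed at 200 members per zset and, assuming the handler's default split sizing yields 10 splits per zset, the response holds 12 × 10 = 120 splits and no continuation token.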

Example 3 with ScanCursor

Use of io.lettuce.core.ScanCursor in project aws-athena-query-federation by awslabs.

From class RedisRecordHandlerTest, method doReadRecordsZset.

@Test
public void doReadRecordsZset() throws Exception {
    // 4 keys per prefix
    when(mockSyncCommands.scan(any(ScanCursor.class), any(ScanArgs.class))).then((InvocationOnMock invocationOnMock) -> {
        ScanCursor cursor = (ScanCursor) invocationOnMock.getArguments()[0];
        if (cursor == null || cursor.getCursor().equals("0")) {
            List<String> result = new ArrayList<>();
            result.add(UUID.randomUUID().toString());
            result.add(UUID.randomUUID().toString());
            result.add(UUID.randomUUID().toString());
            MockKeyScanCursor<String> scanCursor = new MockKeyScanCursor<>();
            scanCursor.setCursor("1");
            scanCursor.setKeys(result);
            return scanCursor;
        } else {
            List<String> result = new ArrayList<>();
            result.add(UUID.randomUUID().toString());
            MockKeyScanCursor<String> scanCursor = new MockKeyScanCursor<>();
            scanCursor.setCursor("0");
            scanCursor.setKeys(result);
            scanCursor.setFinished(true);
            return scanCursor;
        }
    });
    // 4 rows per key
    when(mockSyncCommands.zscan(anyString(), any(ScanCursor.class))).then((InvocationOnMock invocationOnMock) -> {
        ScanCursor cursor = (ScanCursor) invocationOnMock.getArguments()[1];
        if (cursor == null || cursor.getCursor().equals("0")) {
            List<ScoredValue<String>> result = new ArrayList<>();
            result.add(ScoredValue.just(0.0D, "1"));
            result.add(ScoredValue.just(0.0D, "2"));
            result.add(ScoredValue.just(0.0D, "3"));
            MockScoredValueScanCursor<String> scanCursor = new MockScoredValueScanCursor<>();
            scanCursor.setCursor("1");
            scanCursor.setValues(result);
            return scanCursor;
        } else {
            List<ScoredValue<String>> result = new ArrayList<>();
            result.add(ScoredValue.just(0.0D, "4"));
            MockScoredValueScanCursor<String> scanCursor = new MockScoredValueScanCursor<>();
            scanCursor.setCursor("0");
            scanCursor.setValues(result);
            scanCursor.setFinished(true);
            return scanCursor;
        }
    });
    AtomicLong value = new AtomicLong(0);
    when(mockSyncCommands.get(anyString())).thenAnswer((InvocationOnMock invocationOnMock) -> String.valueOf(value.getAndIncrement()));
    S3SpillLocation splitLoc = S3SpillLocation.newBuilder().withBucket(UUID.randomUUID().toString()).withSplitId(UUID.randomUUID().toString()).withQueryId(UUID.randomUUID().toString()).withIsDirectory(true).build();
    Split split = Split.newBuilder(splitLoc, keyFactory.create()).add(REDIS_ENDPOINT_PROP, endpoint).add(KEY_TYPE, KeyType.PREFIX.getId()).add(KEY_PREFIX_TABLE_PROP, "key-*").add(VALUE_TYPE_TABLE_PROP, ValueType.ZSET.getId()).build();
    Schema schemaForRead = SchemaBuilder.newBuilder().addField("_key_", Types.MinorType.VARCHAR.getType()).addField("intcol", Types.MinorType.INT.getType()).build();
    Map<String, ValueSet> constraintsMap = new HashMap<>();
    constraintsMap.put("intcol", SortedRangeSet.copyOf(Types.MinorType.INT.getType(), ImmutableList.of(Range.greaterThan(allocator, Types.MinorType.INT.getType(), 1)), false));
    ReadRecordsRequest request = new ReadRecordsRequest(IDENTITY, DEFAULT_CATALOG, "queryId-" + System.currentTimeMillis(), TABLE_NAME, schemaForRead, split, new Constraints(constraintsMap), // 100GB don't expect this to spill
    100_000_000_000L, 100_000_000_000L);
    RecordResponse rawResponse = handler.doReadRecords(allocator, request);
    assertTrue(rawResponse instanceof ReadRecordsResponse);
    ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
    logger.info("doReadRecordsZset: rows[{}]", response.getRecordCount());
    logger.info("doReadRecordsZset: {}", BlockUtils.rowToString(response.getRecords(), 0));
    assertTrue(response.getRecords().getRowCount() == 12);
    FieldReader keyReader = response.getRecords().getFieldReader(KEY_COLUMN_NAME);
    keyReader.setPosition(0);
    assertNotNull(keyReader.readText());
    FieldReader intCol = response.getRecords().getFieldReader("intcol");
    intCol.setPosition(0);
    assertNotNull(intCol.readInteger());
}
Also used: HashMap(java.util.HashMap) ReadRecordsResponse(com.amazonaws.athena.connector.lambda.records.ReadRecordsResponse) ScanArgs(io.lettuce.core.ScanArgs) Schema(org.apache.arrow.vector.types.pojo.Schema) ArrayList(java.util.ArrayList) Matchers.anyString(org.mockito.Matchers.anyString) RecordResponse(com.amazonaws.athena.connector.lambda.records.RecordResponse) ScanCursor(io.lettuce.core.ScanCursor) MockScoredValueScanCursor(com.amazonaws.athena.connectors.redis.util.MockScoredValueScanCursor) MockKeyScanCursor(com.amazonaws.athena.connectors.redis.util.MockKeyScanCursor) AtomicLong(java.util.concurrent.atomic.AtomicLong) ReadRecordsRequest(com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest) Constraints(com.amazonaws.athena.connector.lambda.domain.predicate.Constraints) InvocationOnMock(org.mockito.invocation.InvocationOnMock) S3SpillLocation(com.amazonaws.athena.connector.lambda.domain.spill.S3SpillLocation) Split(com.amazonaws.athena.connector.lambda.domain.Split) ScoredValue(io.lettuce.core.ScoredValue) ValueSet(com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet) FieldReader(org.apache.arrow.vector.complex.reader.FieldReader) Test(org.junit.Test)
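
As a rough sketch of the read path this test exercises (illustrative only, not the connector's actual code; zsetKeyName and the row-projection step are hypothetical), the handler pages through each matched zset with ZSCAN and then fetches every member as its own key:

ScanCursor cursor = ScanCursor.INITIAL;
do {
    // zscan pages through the sorted set; each member value is itself a Redis key
    ScoredValueScanCursor<String> page = syncCommands.zscan(zsetKeyName, cursor);
    for (ScoredValue<String> member : page.getValues()) {
        String row = syncCommands.get(member.getValue()); // GET the literal row value
        // ... project the row into the response block, applying the constraints ...
    }
    cursor = page;
} while (!cursor.isFinished());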

Example 4 with ScanCursor

Use of io.lettuce.core.ScanCursor in project aws-athena-query-federation by awslabs.

From class RedisRecordHandlerTest, method doReadRecordsLiteral.

@Test
public void doReadRecordsLiteral() throws Exception {
    // 4 keys per prefix
    when(mockSyncCommands.scan(any(ScanCursor.class), any(ScanArgs.class))).then((InvocationOnMock invocationOnMock) -> {
        ScanCursor cursor = (ScanCursor) invocationOnMock.getArguments()[0];
        if (cursor == null || cursor.getCursor().equals("0")) {
            List<String> result = new ArrayList<>();
            result.add(UUID.randomUUID().toString());
            result.add(UUID.randomUUID().toString());
            result.add(UUID.randomUUID().toString());
            MockKeyScanCursor<String> scanCursor = new MockKeyScanCursor<>();
            scanCursor.setCursor("1");
            scanCursor.setKeys(result);
            return scanCursor;
        } else {
            List<String> result = new ArrayList<>();
            result.add(UUID.randomUUID().toString());
            MockKeyScanCursor<String> scanCursor = new MockKeyScanCursor<>();
            scanCursor.setCursor("0");
            scanCursor.setKeys(result);
            scanCursor.setFinished(true);
            return scanCursor;
        }
    });
    AtomicLong value = new AtomicLong(0);
    when(mockSyncCommands.get(anyString())).thenAnswer((InvocationOnMock invocationOnMock) -> String.valueOf(value.getAndIncrement()));
    S3SpillLocation splitLoc = S3SpillLocation.newBuilder().withBucket(UUID.randomUUID().toString()).withSplitId(UUID.randomUUID().toString()).withQueryId(UUID.randomUUID().toString()).withIsDirectory(true).build();
    Split split = Split.newBuilder(splitLoc, keyFactory.create()).add(REDIS_ENDPOINT_PROP, endpoint).add(KEY_TYPE, KeyType.PREFIX.getId()).add(KEY_PREFIX_TABLE_PROP, "key-*").add(VALUE_TYPE_TABLE_PROP, ValueType.LITERAL.getId()).build();
    Schema schemaForRead = SchemaBuilder.newBuilder().addField("_key_", Types.MinorType.VARCHAR.getType()).addField("intcol", Types.MinorType.INT.getType()).build();
    Map<String, ValueSet> constraintsMap = new HashMap<>();
    constraintsMap.put("intcol", SortedRangeSet.copyOf(Types.MinorType.INT.getType(), ImmutableList.of(Range.greaterThan(allocator, Types.MinorType.INT.getType(), 1)), false));
    ReadRecordsRequest request = new ReadRecordsRequest(IDENTITY, DEFAULT_CATALOG, "queryId-" + System.currentTimeMillis(), TABLE_NAME, schemaForRead, split, new Constraints(constraintsMap), // 100GB don't expect this to spill
    100_000_000_000L, 100_000_000_000L);
    RecordResponse rawResponse = handler.doReadRecords(allocator, request);
    assertTrue(rawResponse instanceof ReadRecordsResponse);
    ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
    logger.info("doReadRecordsLiteral: rows[{}]", response.getRecordCount());
    logger.info("doReadRecordsLiteral: {}", BlockUtils.rowToString(response.getRecords(), 0));
    assertTrue(response.getRecords().getRowCount() == 2);
    FieldReader keyReader = response.getRecords().getFieldReader(KEY_COLUMN_NAME);
    keyReader.setPosition(0);
    assertNotNull(keyReader.readText().toString());
    FieldReader intCol = response.getRecords().getFieldReader("intcol");
    intCol.setPosition(0);
    assertNotNull(intCol.readInteger());
}
Also used: HashMap(java.util.HashMap) ReadRecordsResponse(com.amazonaws.athena.connector.lambda.records.ReadRecordsResponse) ScanArgs(io.lettuce.core.ScanArgs) Schema(org.apache.arrow.vector.types.pojo.Schema) ArrayList(java.util.ArrayList) Matchers.anyString(org.mockito.Matchers.anyString) RecordResponse(com.amazonaws.athena.connector.lambda.records.RecordResponse) ScanCursor(io.lettuce.core.ScanCursor) MockScoredValueScanCursor(com.amazonaws.athena.connectors.redis.util.MockScoredValueScanCursor) MockKeyScanCursor(com.amazonaws.athena.connectors.redis.util.MockKeyScanCursor) AtomicLong(java.util.concurrent.atomic.AtomicLong) ReadRecordsRequest(com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest) Constraints(com.amazonaws.athena.connector.lambda.domain.predicate.Constraints) InvocationOnMock(org.mockito.invocation.InvocationOnMock) S3SpillLocation(com.amazonaws.athena.connector.lambda.domain.spill.S3SpillLocation) Split(com.amazonaws.athena.connector.lambda.domain.Split) ValueSet(com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet) FieldReader(org.apache.arrow.vector.complex.reader.FieldReader) Test(org.junit.Test)
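
For the LITERAL value type the sketch is even simpler (again illustrative, assuming the wrapper exposes Lettuce's SCAN and GET as mocked above): scan for keys matching the split's prefix, then GET each key's value as a single row.

ScanCursor cursor = ScanCursor.INITIAL;
do {
    KeyScanCursor<String> page = syncCommands.scan(cursor, ScanArgs.Builder.matches("key-*"));
    for (String key : page.getKeys()) {
        // one row per key: _key_ column = key, remaining columns parsed from the value
        String literal = syncCommands.get(key);
    }
    cursor = page;
} while (!cursor.isFinished());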

Example 5 with ScanCursor

Use of io.lettuce.core.ScanCursor in project aws-athena-query-federation by awslabs.

From class RedisMetadataHandler, method doGetSplits.

/**
 * If the table comprises multiple key prefixes, we parallelize them by making each prefix its own split.
 *
 * @note This function essentially turns each key prefix into a split. For zset keys, it breaks each zset
 * into at most N splits, where N is the configured maximum defined by REDIS_MAX_SPLITS.
 */
@Override
public GetSplitsResponse doGetSplits(BlockAllocator blockAllocator, GetSplitsRequest request) {
    if (request.getPartitions().getRowCount() != 1) {
        throw new RuntimeException("Unexpected number of partitions encountered.");
    }
    Block partitions = request.getPartitions();
    String redisEndpoint = getValue(partitions, 0, REDIS_ENDPOINT_PROP);
    String redisValueType = getValue(partitions, 0, VALUE_TYPE_TABLE_PROP);
    boolean sslEnabled = Boolean.parseBoolean(getValue(partitions, 0, REDIS_SSL_FLAG));
    boolean isCluster = Boolean.parseBoolean(getValue(partitions, 0, REDIS_CLUSTER_FLAG));
    String dbNumber = getValue(partitions, 0, REDIS_DB_NUMBER);
    if (redisEndpoint == null) {
        throw new RuntimeException("Table is missing " + REDIS_ENDPOINT_PROP + " table property");
    }
    if (redisValueType == null) {
        throw new RuntimeException("Table is missing " + VALUE_TYPE_TABLE_PROP + " table property");
    }
    if (dbNumber == null) {
        // default redis logical database
        dbNumber = DEFAULT_REDIS_DB_NUMBER;
    }
    logger.info("doGetSplits: Preparing splits for {}", BlockUtils.rowToString(partitions, 0));
    KeyType keyType;
    Set<String> splitInputs = new HashSet<>();
    RedisConnectionWrapper<String, String> connection = getOrCreateClient(redisEndpoint, sslEnabled, isCluster, dbNumber);
    RedisCommandsWrapper<String, String> syncCommands = connection.sync();
    String keyPrefix = getValue(partitions, 0, KEY_PREFIX_TABLE_PROP);
    if (keyPrefix != null) {
        // Add the prefixes to the list and set the key type.
        splitInputs.addAll(Arrays.asList(keyPrefix.split(KEY_PREFIX_SEPERATOR)));
        keyType = KeyType.PREFIX;
    } else {
        String prop = getValue(partitions, 0, ZSET_KEYS_TABLE_PROP);
        if (prop == null) {
            throw new RuntimeException("Table is missing " + ZSET_KEYS_TABLE_PROP + " table property, it must have this or " + KEY_PREFIX_TABLE_PROP);
        }
        String[] partitionPrefixes = prop.split(KEY_PREFIX_SEPERATOR);
        ScanCursor keyCursor = null;
        // Add all the values in the ZSETs as keys to scan
        for (String next : partitionPrefixes) {
            do {
                keyCursor = loadKeys(syncCommands, next, keyCursor, splitInputs);
            } while (!keyCursor.isFinished());
        }
        keyType = KeyType.ZSET;
    }
    Set<Split> splits = new HashSet<>();
    for (String next : splitInputs) {
        splits.addAll(makeSplits(request, syncCommands, redisEndpoint, next, keyType, redisValueType, sslEnabled, isCluster, dbNumber));
    }
    return new GetSplitsResponse(request.getCatalogName(), splits, null);
}
Also used: ScanCursor(io.lettuce.core.ScanCursor) KeyScanCursor(io.lettuce.core.KeyScanCursor) GetSplitsResponse(com.amazonaws.athena.connector.lambda.metadata.GetSplitsResponse) Block(com.amazonaws.athena.connector.lambda.data.Block) Split(com.amazonaws.athena.connector.lambda.domain.Split) HashSet(java.util.HashSet)
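
Here the response carries a null continuation token because every split fits in a single batch. In general a caller keeps requesting splits until the token is exhausted; a minimal sketch (handler, allocator and originalRequest stand in for whatever the caller already holds, following the pattern in Example 2's test):

String token = null;
Set<Split> allSplits = new HashSet<>();
do {
    // same constructor pattern the test uses to carry the continuation token forward
    GetSplitsRequest req = new GetSplitsRequest(originalRequest, token);
    GetSplitsResponse resp = handler.doGetSplits(allocator, req);
    allSplits.addAll(resp.getSplits());
    token = resp.getContinuationToken();
} while (token != null);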

Aggregations

ScanCursor (io.lettuce.core.ScanCursor): 8 uses
ScanArgs (io.lettuce.core.ScanArgs): 6 uses
Split (com.amazonaws.athena.connector.lambda.domain.Split): 5 uses
Constraints (com.amazonaws.athena.connector.lambda.domain.predicate.Constraints): 4 uses
MockKeyScanCursor (com.amazonaws.athena.connectors.redis.util.MockKeyScanCursor): 4 uses
KeyScanCursor (io.lettuce.core.KeyScanCursor): 4 uses
ArrayList (java.util.ArrayList): 4 uses
HashMap (java.util.HashMap): 4 uses
Schema (org.apache.arrow.vector.types.pojo.Schema): 4 uses
Test (org.junit.Test): 4 uses
Matchers.anyString (org.mockito.Matchers.anyString): 4 uses
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 4 uses
ValueSet (com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet): 3 uses
S3SpillLocation (com.amazonaws.athena.connector.lambda.domain.spill.S3SpillLocation): 3 uses
ReadRecordsRequest (com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest): 3 uses
ReadRecordsResponse (com.amazonaws.athena.connector.lambda.records.ReadRecordsResponse): 3 uses
RecordResponse (com.amazonaws.athena.connector.lambda.records.RecordResponse): 3 uses
MockScoredValueScanCursor (com.amazonaws.athena.connectors.redis.util.MockScoredValueScanCursor): 3 uses
AtomicLong (java.util.concurrent.atomic.AtomicLong): 3 uses
FieldReader (org.apache.arrow.vector.complex.reader.FieldReader): 3 uses