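All of the examples below program against the io.prestosql.spi.Node SPI. For orientation, the interface looks roughly like the sketch below; this is reconstructed from memory of the prestosql SPI for context only, so the exact method set may differ between releases.

import java.net.URI;

// Approximate shape of io.prestosql.spi.Node as consumed by the examples below.
// Reconstructed for orientation only; consult the SPI jar for the authoritative definition.
public interface Node {
    // Host name or IP address of the node.
    String getHost();

    // Host and port the node listens on.
    HostAddress getHostAndPort();

    // HTTP(S) URI of the node's server.
    URI getHttpUri();

    // Stable identifier assigned to this node.
    String getNodeIdentifier();

    // Server version string.
    String getVersion();

    // Whether this node is the coordinator.
    boolean isCoordinator();
}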

Example 1 with Node

use of io.prestosql.spi.Node in project rubix by qubole.

the class StandaloneNodeManager method getWorkerNodes.

@Override
public Set<Node> getWorkerNodes() {
    try {
        URL allNodesRequest = getNodeUrl();
        URL failedNodesRequest = getFailedNodeUrl();
        HttpURLConnection allHttpCon = getHttpURLConnection(allNodesRequest);
        int allNodesResponseCode = allHttpCon.getResponseCode();
        StringBuilder allResponse = new StringBuilder();
        StringBuilder failedResponse = new StringBuilder();
        try {
            if (allNodesResponseCode == HttpURLConnection.HTTP_OK) {
                BufferedReader in = new BufferedReader(new InputStreamReader(allHttpCon.getInputStream()));
                String inputLine = "";
                try {
                    while ((inputLine = in.readLine()) != null) {
                        allResponse.append(inputLine);
                    }
                } catch (IOException e) {
                    throw new IOException(e);
                } finally {
                    in.close();
                }
            } else {
                LOG.warn("v1/node failed with code: " + allNodesResponseCode);
                return null;
            }
        } catch (IOException e) {
            throw new IOException(e);
        } finally {
            allHttpCon.disconnect();
        }
        HttpURLConnection failHttpConn = getHttpURLConnection(failedNodesRequest);
        int failedNodesResponseCode = failHttpConn.getResponseCode();
        // check on failed nodes
        try {
            if (failedNodesResponseCode == HttpURLConnection.HTTP_OK) {
                BufferedReader in = new BufferedReader(new InputStreamReader(failHttpConn.getInputStream()));
                String inputLine;
                try {
                    while ((inputLine = in.readLine()) != null) {
                        failedResponse.append(inputLine);
                    }
                } catch (IOException e) {
                    throw new IOException(e);
                } finally {
                    in.close();
                }
            }
        } catch (IOException e) {
            throw new IOException(e);
        } finally {
            failHttpConn.disconnect();
        }
        Gson gson = new Gson();
        Type type = new TypeToken<List<Stats>>() {
        }.getType();
        List<Stats> allNodes = gson.fromJson(allResponse.toString(), type);
        List<Stats> failedNodes = gson.fromJson(failedResponse.toString(), type);
        if (failedNodes == null) {
            // Gson returns null for an empty response body, so guard against it here
            failedNodes = ImmutableList.of();
        }
        // keep only the healthy nodes
        allNodes.removeAll(failedNodes);
        Set<Node> hosts = new HashSet<Node>();
        for (Stats node : allNodes) {
            hosts.add(new StandaloneNode(node.getUri()));
        }
        return hosts;
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
Also used : InputStreamReader(java.io.InputStreamReader) Node(io.prestosql.spi.Node) Gson(com.google.gson.Gson) IOException(java.io.IOException) URL(java.net.URL) Type(java.lang.reflect.Type) HttpURLConnection(java.net.HttpURLConnection) BufferedReader(java.io.BufferedReader) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) HashSet(java.util.HashSet)
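The healthy hosts are wrapped in StandaloneNode instances that the snippet does not show. A minimal sketch of such a wrapper, assuming it only needs to surface the node's URI through the Node SPI (the actual rubix class may carry more state), could look like:

import static java.util.Objects.requireNonNull;

import io.prestosql.spi.HostAddress;
import io.prestosql.spi.Node;
import java.net.URI;

// Hypothetical URI-backed Node implementation, for illustration only.
public class StandaloneNode implements Node {
    private final URI uri;

    public StandaloneNode(URI uri) {
        this.uri = requireNonNull(uri, "uri is null");
    }

    @Override
    public String getHost() {
        return uri.getHost();
    }

    @Override
    public HostAddress getHostAndPort() {
        return HostAddress.fromParts(uri.getHost(), uri.getPort());
    }

    @Override
    public URI getHttpUri() {
        return uri;
    }

    @Override
    public String getNodeIdentifier() {
        // Using the URI string as the identifier is an assumption made for this sketch.
        return uri.toString();
    }

    @Override
    public String getVersion() {
        return "unknown";
    }

    @Override
    public boolean isCoordinator() {
        return false;
    }
}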

Example 2 with Node

use of io.prestosql.spi.Node in project hetu-core by openlookeng.

the class TpchSplitManager method getSplits.

@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle tableHandle, SplitSchedulingStrategy splitSchedulingStrategy) {
    Set<Node> nodes = nodeManager.getRequiredWorkerNodes();
    int totalParts = nodes.size() * splitsPerNode;
    int partNumber = 0;
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    if (session.isSnapshotEnabled()) {
        // Snapshot: Modify splits as needed to allow them to be scheduled on any node.
        // This allows them to be processed by a different worker after resume.
        List<HostAddress> addresses = nodes.stream().map(Node::getHostAndPort).collect(Collectors.toList());
        for (int i = 0; i < totalParts; i++) {
            splits.add(new TpchSplit(partNumber, totalParts, addresses));
            partNumber++;
        }
    } else {
        // Divide the data into splits, assigning splitsPerNode splits to each available worker node.
        for (Node node : nodes) {
            for (int i = 0; i < splitsPerNode; i++) {
                splits.add(new TpchSplit(partNumber, totalParts, ImmutableList.of(node.getHostAndPort())));
                partNumber++;
            }
        }
    }
    return new FixedSplitSource(splits.build());
}
Also used : ImmutableList(com.google.common.collect.ImmutableList) FixedSplitSource(io.prestosql.spi.connector.FixedSplitSource) Node(io.prestosql.spi.Node) HostAddress(io.prestosql.spi.HostAddress) ConnectorSplit(io.prestosql.spi.connector.ConnectorSplit)
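A split manager like this normally receives its NodeManager when the connector is created. The sketch below shows that wiring under assumptions: ExampleConnectorFactory, ExampleHandleResolver and ExampleConnector are hypothetical names, the splits-per-node property is illustrative, and the TpchSplitManager constructor is assumed to take the node manager and a splits-per-node count.

import io.prestosql.spi.NodeManager;
import io.prestosql.spi.connector.Connector;
import io.prestosql.spi.connector.ConnectorContext;
import io.prestosql.spi.connector.ConnectorFactory;
import io.prestosql.spi.connector.ConnectorHandleResolver;
import java.util.Map;

// Hypothetical factory demonstrating how a NodeManager typically reaches a split manager.
public class ExampleConnectorFactory implements ConnectorFactory {
    @Override
    public String getName() {
        return "example";
    }

    @Override
    public ConnectorHandleResolver getHandleResolver() {
        return new ExampleHandleResolver(); // hypothetical resolver
    }

    @Override
    public Connector create(String catalogName, Map<String, String> config, ConnectorContext context) {
        // The engine exposes node topology to plugins through ConnectorContext.
        NodeManager nodeManager = context.getNodeManager();
        int splitsPerNode = Integer.parseInt(config.getOrDefault("splits-per-node", "4"));
        return new ExampleConnector(new TpchSplitManager(nodeManager, splitsPerNode)); // hypothetical connector wrapper
    }
}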

Example 3 with Node

use of io.prestosql.spi.Node in project hetu-core by openlookeng.

the class AtopSplitManager method getSplits.

@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableHandle table, SplitSchedulingStrategy splitSchedulingStrategy) {
    AtopTableHandle tableHandle = (AtopTableHandle) table;
    List<ConnectorSplit> splits = new ArrayList<>();
    ZonedDateTime end = ZonedDateTime.now(timeZone);
    for (Node node : nodeManager.getWorkerNodes()) {
        ZonedDateTime start = end.minusDays(maxHistoryDays - 1).withHour(0).withMinute(0).withSecond(0).withNano(0);
        while (start.isBefore(end)) {
            ZonedDateTime splitEnd = start.withHour(23).withMinute(59).withSecond(59).withNano(0);
            Domain splitDomain = Domain.create(ValueSet.ofRanges(Range.range(TIMESTAMP_WITH_TIME_ZONE, 1000 * start.toEpochSecond(), true, 1000 * splitEnd.toEpochSecond(), true)), false);
            if (tableHandle.getStartTimeConstraint().overlaps(splitDomain) && tableHandle.getEndTimeConstraint().overlaps(splitDomain)) {
                splits.add(new AtopSplit(node.getHostAndPort(), start.toEpochSecond(), start.getZone()));
            }
            start = start.plusDays(1).withHour(0).withMinute(0).withSecond(0).withNano(0);
        }
    }
    return new FixedSplitSource(splits);
}
Also used : ZonedDateTime(java.time.ZonedDateTime) FixedSplitSource(io.prestosql.spi.connector.FixedSplitSource) Node(io.prestosql.spi.Node) ArrayList(java.util.ArrayList) Domain(io.prestosql.spi.predicate.Domain) ConnectorSplit(io.prestosql.spi.connector.ConnectorSplit)
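The pruning step above hinges on Domain.overlaps: a day-sized split is emitted only when its time range intersects both pushed-down constraints. The standalone sketch below illustrates that check using the same epoch-millisecond convention as the snippet; the date and the constraint are made-up values for illustration only.

import static io.prestosql.spi.type.TimestampWithTimeZoneType.TIMESTAMP_WITH_TIME_ZONE;

import io.prestosql.spi.predicate.Domain;
import io.prestosql.spi.predicate.Range;
import io.prestosql.spi.predicate.ValueSet;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;

public final class OverlapCheckSketch {
    private OverlapCheckSketch() {}

    public static void main(String[] args) {
        // One day's split range, [start, splitEnd], in epoch milliseconds as in the snippet above.
        ZonedDateTime start = ZonedDateTime.of(2021, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
        ZonedDateTime splitEnd = start.withHour(23).withMinute(59).withSecond(59);
        Domain splitDomain = Domain.create(
                ValueSet.ofRanges(Range.range(
                        TIMESTAMP_WITH_TIME_ZONE,
                        1000 * start.toEpochSecond(), true,
                        1000 * splitEnd.toEpochSecond(), true)),
                false);

        // A pushed-down constraint asking for rows from noon onwards on the same day.
        Domain constraint = Domain.create(
                ValueSet.ofRanges(Range.greaterThanOrEqual(
                        TIMESTAMP_WITH_TIME_ZONE,
                        1000 * start.withHour(12).toEpochSecond())),
                false);

        // Prints true: the day intersects the constraint, so the split would be kept.
        System.out.println(splitDomain.overlaps(constraint));
    }
}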

Example 4 with Node

use of io.prestosql.spi.Node in project hetu-core by openlookeng.

the class TestJmxSplitManager method testPredicatePushdown.

@Test
public void testPredicatePushdown() throws Exception {
    for (Node node : nodes) {
        String nodeIdentifier = node.getNodeIdentifier();
        TupleDomain<ColumnHandle> nodeTupleDomain = TupleDomain.fromFixedValues(ImmutableMap.of(columnHandle, NullableValue.of(createUnboundedVarcharType(), utf8Slice(nodeIdentifier))));
        JmxTableHandle tableHandle = new JmxTableHandle(new SchemaTableName("schema", "tableName"), ImmutableList.of("objectName"), ImmutableList.of(columnHandle), true, nodeTupleDomain);
        ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, tableHandle, UNGROUPED_SCHEDULING);
        List<ConnectorSplit> allSplits = getAllSplits(splitSource);
        assertEquals(allSplits.size(), 1);
        assertEquals(allSplits.get(0).getAddresses().size(), 1);
        assertEquals(allSplits.get(0).getAddresses().get(0).getHostText(), nodeIdentifier);
    }
}
Also used : ColumnHandle(io.prestosql.spi.connector.ColumnHandle) Node(io.prestosql.spi.Node) InternalNode(io.prestosql.metadata.InternalNode) ConnectorSplitSource(io.prestosql.spi.connector.ConnectorSplitSource) SchemaTableName(io.prestosql.spi.connector.SchemaTableName) ConnectorSplit(io.prestosql.spi.connector.ConnectorSplit) Test(org.testng.annotations.Test)
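The test drains the split source through a getAllSplits helper that is not shown. A plausible sketch of it, assuming the usual ConnectorSplitSource batch API and an arbitrary batch size of 1000 (the test's real helper may differ):

import static io.prestosql.spi.connector.NotPartitionedPartitionHandle.NOT_PARTITIONED;

import com.google.common.collect.ImmutableList;
import io.prestosql.spi.connector.ConnectorSplit;
import io.prestosql.spi.connector.ConnectorSplitSource;
import java.util.List;
import java.util.concurrent.ExecutionException;

// Sketch of a helper that pulls every split out of a ConnectorSplitSource.
private static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource)
        throws InterruptedException, ExecutionException {
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        // Request up to 1000 splits; the future completes when the batch is ready.
        splits.addAll(splitSource.getNextBatch(NOT_PARTITIONED, 1000).get().getSplits());
    }
    return splits.build();
}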

Example 5 with Node

use of io.prestosql.spi.Node in project hetu-core by openlookeng.

the class MemoryMetadata method beginCreateTable.

@Override
public synchronized MemoryWriteTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout) {
    checkSchemaExists(tableMetadata.getTable().getSchemaName(), true);
    checkTableNotExists(tableMetadata.getTable(), false);
    List<SortingColumn> sortedBy = MemoryTableProperties.getSortedBy(tableMetadata.getProperties());
    if (sortedBy == null) {
        sortedBy = Collections.emptyList();
    }
    if (sortedBy.size() > 1) {
        throw new PrestoException(INVALID_TABLE_PROPERTY, "sorted_by property currently only supports one column");
    }
    Set<String> sortedByColumnNames = new HashSet<>();
    for (SortingColumn s : sortedBy) {
        if (!sortedByColumnNames.add(s.getColumnName())) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, "duplicate column(s) in sorted_by property");
        }
    }
    List<String> partitionBy = MemoryTableProperties.getPartitionedBy(tableMetadata.getProperties());
    if (partitionBy == null) {
        partitionBy = Collections.emptyList();
    }
    if (partitionBy.size() > 1) {
        throw new PrestoException(INVALID_TABLE_PROPERTY, "partition_by property currently only supports one column");
    }
    Set<String> partitionByColumnNames = new HashSet<>();
    for (String p : partitionBy) {
        if (!partitionByColumnNames.add(p)) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, "duplicate column(s) in partition_by property");
        }
    }
    List<String> indexColumns = MemoryTableProperties.getIndexedColumns(tableMetadata.getProperties());
    if (indexColumns == null) {
        indexColumns = Collections.emptyList();
    }
    Set<String> indexColumnNames = new HashSet<>();
    for (String c : indexColumns) {
        if (!indexColumnNames.add(c)) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, "duplicate column(s) in index_columns property");
        }
        if (sortedByColumnNames.contains(c)) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, "duplicate column(s) in sorted_by and index_columns, sorted_by columns are automatically indexed");
        }
    }
    ImmutableList.Builder<MemoryColumnHandle> columns = ImmutableList.builder();
    Map<String, ColumnMetadata> columnNames = new HashMap<>();
    for (int i = 0; i < tableMetadata.getColumns().size(); i++) {
        ColumnMetadata column = tableMetadata.getColumns().get(i);
        boolean isPartitionKey = partitionBy.contains(column.getName());
        columns.add(new MemoryColumnHandle(column.getName(), i, column.getType().getTypeSignature(), isPartitionKey));
        columnNames.put(column.getName(), column);
    }
    for (String sortedByColumnName : sortedByColumnNames) {
        if (!columnNames.containsKey(sortedByColumnName)) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, "column " + sortedByColumnName + " in sorted_by does not exist");
        }
        if (!columnNames.get(sortedByColumnName).getType().isComparable()) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, "column " + sortedByColumnName + " in sorted_by is not comparable");
        }
    }
    for (String partitionByColumnName : partitionByColumnNames) {
        if (!columnNames.containsKey(partitionByColumnName)) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, "column " + partitionByColumnName + " in partition_column does not exist");
        }
    }
    for (String indexColumnName : indexColumnNames) {
        if (!columnNames.containsKey(indexColumnName)) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, "column " + indexColumnName + " in index_columns does not exist");
        }
    }
    long nextId = nextTableId.getAndIncrement();
    metastore.alterCatalogParameter(MEM_KEY, NEXT_ID_KEY, String.valueOf(nextTableId.get()));
    Set<Node> nodes = nodeManager.getRequiredWorkerNodes();
    checkState(!nodes.isEmpty(), "No Memory nodes available");
    long tableId = nextId;
    List<MemoryColumnHandle> columnHandles = columns.build();
    metastore.createTable(TableEntity.builder().setCatalogName(MEM_KEY).setDatabaseName(tableMetadata.getTable().getSchemaName()).setTableName(tableMetadata.getTable().getTableName()).setTableType(TableEntityType.TABLE.toString()).setParameter(TABLE_ID_KEY, String.valueOf(tableId)).build());
    updateTableInfo(tableId, new TableInfo(tableId, tableMetadata.getTable().getSchemaName(), tableMetadata.getTable().getTableName(), columnHandles, new HashMap<>(), System.currentTimeMillis()));
    boolean spillCompressionEnabled = MemoryTableProperties.getSpillCompressionEnabled(tableMetadata.getProperties());
    boolean asyncProcessingEnabled = MemoryTableProperties.getAsyncProcessingEnabled(tableMetadata.getProperties());
    return new MemoryWriteTableHandle(nextId, tableMetadata.getTable().getSchemaName(), tableMetadata.getTable().getTableName(), spillCompressionEnabled, asyncProcessingEnabled, getTableIdSet(nextId), columnHandles, sortedBy, partitionBy, indexColumns);
}
Also used : ColumnMetadata(io.prestosql.spi.connector.ColumnMetadata) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) ImmutableList(com.google.common.collect.ImmutableList) Node(io.prestosql.spi.Node) PrestoException(io.prestosql.spi.PrestoException) Constraint(io.prestosql.spi.connector.Constraint) HashSet(java.util.HashSet)
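beginCreateTable leans on nodeManager.getRequiredWorkerNodes() to fail fast when no workers are registered. In the prestosql SPI this is, as far as I recall, a default method on NodeManager roughly like the sketch below; check the SPI source for the exact error code and message. If it behaves as sketched, the subsequent checkState(!nodes.isEmpty(), ...) is effectively a second guard.

// Approximate shape of the default method; reconstructed for illustration, not copied from the SPI.
default Set<Node> getRequiredWorkerNodes() {
    Set<Node> nodes = getWorkerNodes();
    if (nodes.isEmpty()) {
        throw new PrestoException(StandardErrorCode.NO_NODES_AVAILABLE, "No nodes available to run query");
    }
    return nodes;
}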

Aggregations

Node (io.prestosql.spi.Node): 6
ImmutableList (com.google.common.collect.ImmutableList): 4
ConnectorSplit (io.prestosql.spi.connector.ConnectorSplit): 4
FixedSplitSource (io.prestosql.spi.connector.FixedSplitSource): 3
HostAddress (io.prestosql.spi.HostAddress): 2
HashSet (java.util.HashSet): 2
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 1
Gson (com.google.gson.Gson): 1
InternalNode (io.prestosql.metadata.InternalNode): 1
PrestoException (io.prestosql.spi.PrestoException): 1
ColumnHandle (io.prestosql.spi.connector.ColumnHandle): 1
ColumnMetadata (io.prestosql.spi.connector.ColumnMetadata): 1
ConnectorSplitSource (io.prestosql.spi.connector.ConnectorSplitSource): 1
Constraint (io.prestosql.spi.connector.Constraint): 1
SchemaTableName (io.prestosql.spi.connector.SchemaTableName): 1
Domain (io.prestosql.spi.predicate.Domain): 1
BufferedReader (java.io.BufferedReader): 1
IOException (java.io.IOException): 1
InputStreamReader (java.io.InputStreamReader): 1
Type (java.lang.reflect.Type): 1