Example 51 with InvalidProtocolBufferException

use of com.google.protobuf.InvalidProtocolBufferException in project drill by axbaretto.

In class ServerAuthenticationHandler, the handle method:

@Override
public void handle(S connection, int rpcType, ByteBuf pBody, ByteBuf dBody, ResponseSender sender) throws RpcException {
    final String remoteAddress = connection.getRemoteAddress().toString();
    // exchange involves server "challenges" and client "responses" (initiated by client)
    if (saslRequestTypeValue == rpcType) {
        final SaslMessage saslResponse;
        try {
            saslResponse = SaslMessage.PARSER.parseFrom(new ByteBufInputStream(pBody));
        } catch (final InvalidProtocolBufferException e) {
            handleAuthFailure(connection, sender, e, saslResponseType);
            return;
        }
        logger.trace("Received SASL message {} from {}", saslResponse.getStatus(), remoteAddress);
        final SaslResponseProcessor processor = RESPONSE_PROCESSORS.get(saslResponse.getStatus());
        if (processor == null) {
            logger.info("Unknown message type from client from {}. Will stop authentication.", remoteAddress);
            handleAuthFailure(connection, sender, new SaslException("Received unexpected message"), saslResponseType);
            return;
        }
        final SaslResponseContext<S, T> context = new SaslResponseContext<>(saslResponse, connection, sender, requestHandler, saslResponseType);
        try {
            processor.process(context);
        } catch (final Exception e) {
            handleAuthFailure(connection, sender, e, saslResponseType);
        }
    } else {
        // drop connection
        throw new RpcException(String.format("Request of type %d is not allowed without authentication. Client on %s must authenticate before making requests. Connection dropped. [Details: %s]", rpcType, remoteAddress, connection.getEncryptionCtxtString()));
    }
}
Also used: RpcException(org.apache.drill.exec.rpc.RpcException) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) SaslMessage(org.apache.drill.exec.proto.UserBitShared.SaslMessage) ByteString(com.google.protobuf.ByteString) ByteBufInputStream(io.netty.buffer.ByteBufInputStream) SaslException(javax.security.sasl.SaslException) IOException(java.io.IOException) UndeclaredThrowableException(java.lang.reflect.UndeclaredThrowableException)
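
The pattern above, parsing a protobuf message straight off a Netty ByteBuf and converting InvalidProtocolBufferException into an authentication failure, can be reduced to a small self-contained sketch. This is not the Drill code: the bundled well-known Timestamp type stands in for SaslMessage, and the hypothetical parseOrNull stands in for the handleAuthFailure path.

import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Timestamp;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.Unpooled;

public class ByteBufParseSketch {

    // Parse a protobuf message directly from a Netty ByteBuf. A malformed
    // body yields null here, where the Drill handler would instead call
    // handleAuthFailure(...) and stop processing the request.
    static Timestamp parseOrNull(ByteBuf pBody) {
        try (ByteBufInputStream in = new ByteBufInputStream(pBody)) {
            return Timestamp.parser().parseFrom(in);
        } catch (InvalidProtocolBufferException e) {
            return null; // malformed or truncated protobuf body
        } catch (java.io.IOException e) {
            return null; // from closing the stream; unlikely for a ByteBuf
        }
    }

    public static void main(String[] args) {
        ByteBuf good = Unpooled.wrappedBuffer(
                Timestamp.newBuilder().setSeconds(42).build().toByteArray());
        ByteBuf bad = Unpooled.wrappedBuffer(new byte[] { 0x08 }); // tag with no value
        System.out.println("good -> " + parseOrNull(good)); // prints the message
        System.out.println("bad  -> " + parseOrNull(bad));  // prints null
    }
}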

Example 52 with InvalidProtocolBufferException

use of com.google.protobuf.InvalidProtocolBufferException in project dcos-commons by mesosphere.

In class TaskPackingUtils, the unpack method:

/**
 * This method reverses the work done in {@link TaskPackingUtils#pack(TaskInfo)} such that the original TaskInfo is
 * regenerated. If the provided {@link TaskInfo} doesn't appear to have packed data, this operation does
 * nothing.
 *
 * @see #pack(TaskInfo)
 */
public static TaskInfo unpack(TaskInfo taskInfo) {
    if (!taskInfo.hasData() || !taskInfo.hasExecutor()) {
        return taskInfo;
    } else {
        TaskInfo.Builder taskBuilder = TaskInfo.newBuilder(taskInfo);
        ExecutorInfo pkgExecutorInfo;
        try {
            pkgExecutorInfo = ExecutorInfo.parseFrom(taskInfo.getData());
        } catch (InvalidProtocolBufferException e) {
            // The data field doesn't parse as an ExecutorInfo. Assume the TaskInfo isn't packed and return it as-is.
            return taskInfo;
        }
        if (pkgExecutorInfo.hasCommand()) {
            taskBuilder.setCommand(pkgExecutorInfo.getCommand());
        }
        if (pkgExecutorInfo.hasData()) {
            taskBuilder.setData(pkgExecutorInfo.getData());
        } else {
            taskBuilder.clearData();
        }
        return taskBuilder.build();
    }
}
Also used: TaskInfo(org.apache.mesos.Protos.TaskInfo) ExecutorInfo(org.apache.mesos.Protos.ExecutorInfo) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException)
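
A minimal sketch of the same "parse or pass through" idea, with the well-known Timestamp type standing in for ExecutorInfo and a hypothetical tryUnpack helper: a parse failure is read as "this data was never packed" rather than as an error. Note that this is a best-effort heuristic; arbitrary bytes can occasionally decode as a structurally valid message.

import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Timestamp;
import java.util.Optional;

public class ParseOrFallback {

    // A parse failure signals "this data was never packed", so the caller
    // keeps its original object instead of treating the failure as fatal.
    static Optional<Timestamp> tryUnpack(ByteString data) {
        try {
            return Optional.of(Timestamp.parseFrom(data));
        } catch (InvalidProtocolBufferException e) {
            return Optional.empty(); // not packed; caller returns its input as-is
        }
    }

    public static void main(String[] args) {
        ByteString packed = Timestamp.newBuilder().setSeconds(7).build().toByteString();
        ByteString garbage = ByteString.copyFromUtf8("not a protobuf");
        System.out.println(tryUnpack(packed).isPresent());  // true
        System.out.println(tryUnpack(garbage).isPresent()); // false
    }
}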

Example 53 with InvalidProtocolBufferException

use of com.google.protobuf.InvalidProtocolBufferException in project accumulo by apache.

In class DistributedWorkQueueWorkAssigner, the createWork method:

/**
 * Scan over the {@link WorkSection} of the replication table, adding work for entries that have data to replicate and have not already been queued.
 */
protected void createWork() {
    // Create a scanner over the replication table's order entries
    Scanner s;
    try {
        s = ReplicationTable.getScanner(conn);
    } catch (ReplicationTableOfflineException e) {
        // no work to do; replication is off
        return;
    }
    OrderSection.limit(s);
    Text buffer = new Text();
    for (Entry<Key, Value> orderEntry : s) {
        // Don't try to add more work entries if the queue is already over the configured maximum
        if (getQueueSize() > maxQueueSize) {
            log.warn("Queued replication work exceeds configured maximum ({}), sleeping to allow work to occur", maxQueueSize);
            return;
        }
        String file = OrderSection.getFile(orderEntry.getKey(), buffer);
        OrderSection.getTableId(orderEntry.getKey(), buffer);
        String sourceTableId = buffer.toString();
        log.info("Determining if {} from {} needs to be replicated", file, sourceTableId);
        Scanner workScanner;
        try {
            workScanner = ReplicationTable.getScanner(conn);
        } catch (ReplicationTableOfflineException e) {
            log.warn("Replication table is offline. Will retry...");
            sleepUninterruptibly(5, TimeUnit.SECONDS);
            return;
        }
        WorkSection.limit(workScanner);
        workScanner.setRange(Range.exact(file));
        int newReplicationTasksSubmitted = 0, workEntriesRead = 0;
        // For a file, we can concurrently replicate it to multiple targets
        for (Entry<Key, Value> workEntry : workScanner) {
            workEntriesRead++;
            Status status;
            try {
                status = StatusUtil.fromValue(workEntry.getValue());
            } catch (InvalidProtocolBufferException e) {
                log.warn("Could not deserialize protobuf from work entry for {} to {}, will retry", file, ReplicationTarget.from(workEntry.getKey().getColumnQualifier()), e);
                continue;
            }
            // Get the ReplicationTarget for this Work record
            ReplicationTarget target = WorkSection.getTarget(workEntry.getKey(), buffer);
            // Get the file (if any) currently being replicated to the given peer for the given source table
            Collection<String> keysBeingReplicated = getQueuedWork(target);
            Path p = new Path(file);
            String filename = p.getName();
            String key = DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename, target);
            if (!shouldQueueWork(target)) {
                if (!isWorkRequired(status) && keysBeingReplicated.contains(key)) {
                    log.debug("Removing {} from replication state to {} because replication is complete", key, target.getPeerName());
                    this.removeQueuedWork(target, key);
                }
                continue;
            }
            // If there is work to do
            if (isWorkRequired(status)) {
                if (queueWork(p, target)) {
                    newReplicationTasksSubmitted++;
                }
            } else {
                log.debug("Not queueing work for {} to {} because {} doesn't need replication", file, target, ProtobufUtil.toString(status));
                if (keysBeingReplicated.contains(key)) {
                    log.debug("Removing {} from replication state to {} because replication is complete", key, target.getPeerName());
                    this.removeQueuedWork(target, key);
                }
            }
        }
        log.debug("Read {} replication entries from the WorkSection of the replication table", workEntriesRead);
        log.info("Assigned {} replication work entries for {}", newReplicationTasksSubmitted, file);
    }
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status) Path(org.apache.hadoop.fs.Path) Scanner(org.apache.accumulo.core.client.Scanner) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) Text(org.apache.hadoop.io.Text) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) Value(org.apache.accumulo.core.data.Value) ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException) Key(org.apache.accumulo.core.data.Key)
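
The error handling in this loop is "skip and continue": a corrupt work entry is logged and skipped so that one bad record cannot stall replication assignment. A self-contained sketch of that pattern, with Timestamp standing in for the Status protobuf and a plain Map standing in for the scanner:

import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Timestamp;
import java.util.LinkedHashMap;
import java.util.Map;

public class SkipCorruptEntries {
    public static void main(String[] args) {
        // Three "work entries": the corrupt one must not stall the other two.
        Map<String, byte[]> entries = new LinkedHashMap<>();
        entries.put("file-1", Timestamp.newBuilder().setSeconds(1).build().toByteArray());
        entries.put("file-2", new byte[] { 0x08 }); // truncated: tag with no value
        entries.put("file-3", Timestamp.newBuilder().setSeconds(3).build().toByteArray());

        for (Map.Entry<String, byte[]> e : entries.entrySet()) {
            Timestamp status;
            try {
                status = Timestamp.parseFrom(e.getValue());
            } catch (InvalidProtocolBufferException ex) {
                // Mirror the Accumulo loop: log the bad entry and move on.
                System.err.println("Could not deserialize " + e.getKey() + ", will retry");
                continue;
            }
            System.out.println(e.getKey() + " -> seconds=" + status.getSeconds());
        }
    }
}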

Example 54 with InvalidProtocolBufferException

use of com.google.protobuf.InvalidProtocolBufferException in project accumulo by apache.

In class RemoveCompleteReplicationRecords, the removeRowIfNecessary method:

protected long removeRowIfNecessary(BatchWriter bw, SortedMap<Key, Value> columns, Text row, Text colf, Text colq) {
    long recordsRemoved = 0;
    if (columns.isEmpty()) {
        return recordsRemoved;
    }
    Mutation m = new Mutation(row);
    Map<Table.ID, Long> tableToTimeCreated = new HashMap<>();
    for (Entry<Key, Value> entry : columns.entrySet()) {
        Status status = null;
        try {
            status = Status.parseFrom(entry.getValue().get());
        } catch (InvalidProtocolBufferException e) {
            log.error("Encountered unparsable protobuf for key: {}", entry.getKey().toStringNoTruncate());
            continue;
        }
        // If a column in the row isn't ready for removal, we keep the whole row
        if (!StatusUtil.isSafeForRemoval(status)) {
            return 0L;
        }
        Key k = entry.getKey();
        k.getColumnFamily(colf);
        k.getColumnQualifier(colq);
        log.debug("Removing {} {}:{} from replication table", row, colf, colq);
        m.putDelete(colf, colq);
        Table.ID tableId;
        if (StatusSection.NAME.equals(colf)) {
            tableId = Table.ID.of(colq.toString());
        } else if (WorkSection.NAME.equals(colf)) {
            ReplicationTarget target = ReplicationTarget.from(colq);
            tableId = target.getSourceTableId();
        } else {
            throw new RuntimeException("Got unexpected column");
        }
        if (status.hasCreatedTime()) {
            Long timeClosed = tableToTimeCreated.get(tableId);
            if (null == timeClosed) {
                tableToTimeCreated.put(tableId, status.getCreatedTime());
            } else if (timeClosed != status.getCreatedTime()) {
                log.warn("Found multiple values for timeClosed for {}: {} and {}", row, timeClosed, status.getCreatedTime());
            }
        }
        recordsRemoved++;
    }
    List<Mutation> mutations = new ArrayList<>();
    mutations.add(m);
    for (Entry<Table.ID, Long> entry : tableToTimeCreated.entrySet()) {
        log.info("Removing order mutation for table {} at {} for {}", entry.getKey(), entry.getValue(), row.toString());
        Mutation orderMutation = OrderSection.createMutation(row.toString(), entry.getValue());
        orderMutation.putDelete(OrderSection.NAME, new Text(entry.getKey().getUtf8()));
        mutations.add(orderMutation);
    }
    // Submit the mutations as one batch so the row's columns are removed together or not at all.
    try {
        bw.addMutations(mutations);
        bw.flush();
    } catch (MutationsRejectedException e) {
        log.error("Could not submit mutation to remove columns for {} in replication table", row, e);
        return 0L;
    }
    return recordsRemoved;
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) HashMap(java.util.HashMap) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) ArrayList(java.util.ArrayList) Text(org.apache.hadoop.io.Text) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) Value(org.apache.accumulo.core.data.Value) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
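
The control flow worth noting is all-or-nothing removal: every column must parse and pass the safety check before any deletion is staged, and the staged mutations are then submitted as a single batch. A minimal sketch of that flow, with Timestamp standing in for Status and a seconds > 0 test as a hypothetical stand-in for StatusUtil.isSafeForRemoval:

import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Timestamp;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class AllOrNothingRemoval {

    // Returns the number of columns staged for deletion, or 0 if any column
    // is not yet safe to remove, in which case nothing is deleted at all.
    static long removeRowIfSafe(Map<String, byte[]> columns, List<String> deletions) {
        long recordsRemoved = 0;
        for (Map.Entry<String, byte[]> entry : columns.entrySet()) {
            Timestamp status;
            try {
                status = Timestamp.parseFrom(entry.getValue());
            } catch (InvalidProtocolBufferException e) {
                continue; // unparsable column: skip it, don't stage a delete
            }
            if (status.getSeconds() <= 0) { // stand-in for StatusUtil.isSafeForRemoval
                return 0L; // one unsafe column keeps the entire row
            }
            deletions.add(entry.getKey());
            recordsRemoved++;
        }
        // The caller submits `deletions` as one batch, so the row is
        // removed in full or not at all.
        return recordsRemoved;
    }

    public static void main(String[] args) {
        Map<String, byte[]> row = new LinkedHashMap<>();
        row.put("col-a", Timestamp.newBuilder().setSeconds(5).build().toByteArray());
        row.put("col-b", Timestamp.newBuilder().setSeconds(9).build().toByteArray());
        List<String> deletions = new ArrayList<>();
        System.out.println(removeRowIfSafe(row, deletions) + " " + deletions); // 2 [col-a, col-b]
    }
}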

Example 55 with InvalidProtocolBufferException

use of com.google.protobuf.InvalidProtocolBufferException in project accumulo by apache.

In class StatusMaker, the run method:

public void run() {
    Span span = Trace.start("replicationStatusMaker");
    try {
        // Read from a source table (typically accumulo.metadata)
        final Scanner s;
        try {
            s = conn.createScanner(sourceTableName, Authorizations.EMPTY);
        } catch (TableNotFoundException e) {
            throw new RuntimeException(e);
        }
        // Only pull replication records
        s.fetchColumnFamily(ReplicationSection.COLF);
        s.setRange(ReplicationSection.getRange());
        Text file = new Text();
        for (Entry<Key, Value> entry : s) {
            // Get a writer to the replication table
            if (null == replicationWriter) {
                // Ensures table is online
                try {
                    ReplicationTable.setOnline(conn);
                    replicationWriter = ReplicationTable.getBatchWriter(conn);
                } catch (ReplicationTableOfflineException | AccumuloSecurityException | AccumuloException e) {
                    log.warn("Replication table did not come online");
                    replicationWriter = null;
                    return;
                }
            }
            // Extract the useful bits from the status key
            MetadataSchema.ReplicationSection.getFile(entry.getKey(), file);
            Table.ID tableId = MetadataSchema.ReplicationSection.getTableId(entry.getKey());
            Status status;
            try {
                status = Status.parseFrom(entry.getValue().get());
            } catch (InvalidProtocolBufferException e) {
                log.warn("Could not deserialize protobuf for {}", file);
                continue;
            }
            log.debug("Creating replication status record for {} on table {} with {}.", file, tableId, ProtobufUtil.toString(status));
            Span workSpan = Trace.start("createStatusMutations");
            try {
                // Create entries in the replication table from the metadata table
                if (!addStatusRecord(file, tableId, entry.getValue())) {
                    continue;
                }
            } finally {
                workSpan.stop();
            }
            if (status.getClosed()) {
                Span orderSpan = Trace.start("recordStatusOrder");
                try {
                    if (!addOrderRecord(file, tableId, status, entry.getValue())) {
                        continue;
                    }
                } finally {
                    orderSpan.stop();
                }
                Span deleteSpan = Trace.start("deleteClosedStatus");
                try {
                    deleteStatusRecord(entry.getKey());
                } finally {
                    deleteSpan.stop();
                }
            }
        }
    } finally {
        span.stop();
    }
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status) Scanner(org.apache.accumulo.core.client.Scanner) AccumuloException(org.apache.accumulo.core.client.AccumuloException) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) Text(org.apache.hadoop.io.Text) Span(org.apache.accumulo.core.trace.Span) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Value(org.apache.accumulo.core.data.Value) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException) Key(org.apache.accumulo.core.data.Key)
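
All five examples ultimately guard the same failure mode: feeding truncated or foreign bytes to parseFrom. A small runnable demonstration, using the bundled well-known Timestamp type, that a message cut short by even one byte surfaces as an InvalidProtocolBufferException rather than a partially populated message:

import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Timestamp;
import java.util.Arrays;

public class TruncatedParseDemo {
    public static void main(String[] args) {
        byte[] full = Timestamp.newBuilder()
                .setSeconds(1234567890L)
                .setNanos(42)
                .build()
                .toByteArray();
        byte[] truncated = Arrays.copyOf(full, full.length - 1); // drop one byte
        try {
            Timestamp t = Timestamp.parseFrom(truncated);
            System.out.println("Parsed (unexpected): " + t);
        } catch (InvalidProtocolBufferException e) {
            // e.g. "While parsing a protocol message, the input ended unexpectedly ..."
            System.out.println("Parse failed as expected: " + e.getMessage());
        }
    }
}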

Aggregations

InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException): 334
IOException (java.io.IOException): 69
ByteString (com.google.protobuf.ByteString): 46
ServerRequest (com.pokegoapi.main.ServerRequest): 46
RequestFailedException (com.pokegoapi.exceptions.request.RequestFailedException): 39
GeneralSecurityException (java.security.GeneralSecurityException): 32
CleartextKeysetHandle (com.google.crypto.tink.CleartextKeysetHandle): 25
KeysetHandle (com.google.crypto.tink.KeysetHandle): 25
HashMap (java.util.HashMap): 25
ArrayList (java.util.ArrayList): 22
InvalidProtocolBufferException (org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.InvalidProtocolBufferException): 22
List (java.util.List): 19
Any (com.google.protobuf.Any): 18
Map (java.util.Map): 18
Key (org.apache.accumulo.core.data.Key): 17
Value (org.apache.accumulo.core.data.Value): 17
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 17
Text (org.apache.hadoop.io.Text): 17
HashSet (java.util.HashSet): 12
RunnerApi (org.apache.beam.model.pipeline.v1.RunnerApi): 11