Use of org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.InvalidProtocolBufferException in project hadoop by apache.
The class FSDirEncryptionZoneOp, method getFileEncryptionInfo:
/**
 * Combines the per-file encryption info (obtained from the inode's
 * XAttrs) with the encryption info from its zone and returns a
 * consolidated FileEncryptionInfo instance. Null is returned for
 * non-encrypted or raw files.
 *
 * @param fsd the FSDirectory
 * @param iip inodes in the path containing the file, passed in to
 *            avoid obtaining the list of inodes again
 * @return consolidated file encryption info; null for non-encrypted files
 */
static FileEncryptionInfo getFileEncryptionInfo(final FSDirectory fsd, final INodesInPath iip)
        throws IOException {
    if (iip.isRaw() || !fsd.ezManager.hasCreatedEncryptionZone() || !iip.getLastINode().isFile()) {
        return null;
    }
    fsd.readLock();
    try {
        EncryptionZone encryptionZone = getEZForPath(fsd, iip);
        if (encryptionZone == null) {
            // not an encrypted file
            return null;
        } else if (encryptionZone.getPath() == null || encryptionZone.getPath().isEmpty()) {
            if (NameNode.LOG.isDebugEnabled()) {
                NameNode.LOG.debug("Encryption zone " + encryptionZone.getPath()
                        + " does not have a valid path.");
            }
        }
        final CryptoProtocolVersion version = encryptionZone.getVersion();
        final CipherSuite suite = encryptionZone.getSuite();
        final String keyName = encryptionZone.getKeyName();
        XAttr fileXAttr = FSDirXAttrOp.unprotectedGetXAttrByPrefixedName(iip, CRYPTO_XATTR_FILE_ENCRYPTION_INFO);
        if (fileXAttr == null) {
            NameNode.LOG.warn("Could not find encryption XAttr for file " + iip.getPath()
                    + " in encryption zone " + encryptionZone.getPath());
            return null;
        }
        try {
            HdfsProtos.PerFileEncryptionInfoProto fileProto =
                    HdfsProtos.PerFileEncryptionInfoProto.parseFrom(fileXAttr.getValue());
            return PBHelperClient.convert(fileProto, suite, version, keyName);
        } catch (InvalidProtocolBufferException e) {
            throw new IOException("Could not parse file encryption info for inode " + iip.getPath(), e);
        }
    } finally {
        fsd.readUnlock();
    }
}
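
A note on the pattern: the unchecked InvalidProtocolBufferException thrown by parseFrom is converted into a checked IOException that carries the inode path, so callers see an ordinary I/O failure with context. Below is a minimal sketch of just that wrap-and-rethrow step, assuming the plain com.google.protobuf classes (some Hadoop builds relocate protobuf under a shaded package); the helper class and method are hypothetical, not part of HDFS.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

import com.google.protobuf.InvalidProtocolBufferException;

// Hypothetical helper, not part of HDFS: isolates the parse-and-wrap step used above.
final class EncryptionXAttrParser {

    static HdfsProtos.PerFileEncryptionInfoProto parse(byte[] xattrValue, String path) throws IOException {
        try {
            // parseFrom throws InvalidProtocolBufferException when the XAttr bytes are malformed
            return HdfsProtos.PerFileEncryptionInfoProto.parseFrom(xattrValue);
        } catch (InvalidProtocolBufferException e) {
            // surface the failure as a checked IOException that carries the file path
            throw new IOException("Could not parse file encryption info for inode " + path, e);
        }
    }

    private EncryptionXAttrParser() {
    }
}

Keeping the parse in one place like this makes it easy to attach consistent context (here, the path) to every failure.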
Use of org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.InvalidProtocolBufferException in project hadoop by apache.
The class DFSZKFailoverController, method dataToTarget:
@Override
protected HAServiceTarget dataToTarget(byte[] data) {
    ActiveNodeInfo proto;
    try {
        proto = ActiveNodeInfo.parseFrom(data);
    } catch (InvalidProtocolBufferException e) {
        throw new RuntimeException("Invalid data in ZK: " + StringUtils.byteToHexString(data));
    }
    NNHAServiceTarget ret = new NNHAServiceTarget(conf, proto.getNameserviceId(), proto.getNamenodeId());
    InetSocketAddress addressFromProtobuf = new InetSocketAddress(proto.getHostname(), proto.getPort());
    if (!addressFromProtobuf.equals(ret.getAddress())) {
        throw new RuntimeException("Mismatched address stored in ZK for " + ret + ": Stored protobuf was "
                + proto + ", address from our own configuration for this NameNode was " + ret.getAddress());
    }
    ret.setZkfcPort(proto.getZkfcPort());
    return ret;
}
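
For illustration, a minimal sketch of how well-formed and corrupted ZooKeeper payloads behave with this message type. The builder setter names are assumptions derived from the getters used above (setNameserviceId, setPort, and so on), the import path for ActiveNodeInfo is taken from the Hadoop sources of this era and may differ by version, and the plain com.google.protobuf exception class is assumed rather than a vendored one.

import java.util.Arrays;

import org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo;

import com.google.protobuf.InvalidProtocolBufferException;

// Illustrative round trip: well-formed bytes parse back; truncated bytes surface as
// InvalidProtocolBufferException, which dataToTarget() above turns into a RuntimeException.
public class ActiveNodeInfoRoundTrip {

    public static void main(String[] args) throws InvalidProtocolBufferException {
        ActiveNodeInfo info = ActiveNodeInfo.newBuilder()
                .setNameserviceId("ns1")
                .setNamenodeId("nn1")
                .setHostname("nn1.example.com")
                .setPort(8020)
                .setZkfcPort(8019)
                .build();
        byte[] zkData = info.toByteArray();

        // Well-formed data parses back into an equivalent message.
        ActiveNodeInfo parsed = ActiveNodeInfo.parseFrom(zkData);
        System.out.println("parsed namenode id: " + parsed.getNamenodeId());

        // Truncated data loses required fields (or cuts a field mid-value), so parseFrom throws.
        try {
            ActiveNodeInfo.parseFrom(Arrays.copyOf(zkData, zkData.length / 2));
        } catch (InvalidProtocolBufferException e) {
            System.err.println("Invalid data in ZK: " + e.getMessage());
        }
    }
}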
Use of org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.InvalidProtocolBufferException in project canal by alibaba.
The class AbstractCanalClientTest, method printEntry:
protected void printEntry(List<Entry> entrys) {
    for (Entry entry : entrys) {
        long executeTime = entry.getHeader().getExecuteTime();
        long delayTime = new Date().getTime() - executeTime;
        if (entry.getEntryType() == EntryType.TRANSACTIONBEGIN || entry.getEntryType() == EntryType.TRANSACTIONEND) {
            if (entry.getEntryType() == EntryType.TRANSACTIONBEGIN) {
                TransactionBegin begin = null;
                try {
                    begin = TransactionBegin.parseFrom(entry.getStoreValue());
                } catch (InvalidProtocolBufferException e) {
                    throw new RuntimeException("parse event has an error , data:" + entry.toString(), e);
                }
                // print the transaction header: executing thread id and transaction elapsed time
                logger.info(transaction_format,
                        new Object[] { entry.getHeader().getLogfileName(),
                                String.valueOf(entry.getHeader().getLogfileOffset()),
                                String.valueOf(entry.getHeader().getExecuteTime()), String.valueOf(delayTime) });
                logger.info(" BEGIN ----> Thread id: {}", begin.getThreadId());
            } else if (entry.getEntryType() == EntryType.TRANSACTIONEND) {
                TransactionEnd end = null;
                try {
                    end = TransactionEnd.parseFrom(entry.getStoreValue());
                } catch (InvalidProtocolBufferException e) {
                    throw new RuntimeException("parse event has an error , data:" + entry.toString(), e);
                }
                // print the transaction commit info: transaction id
                logger.info("----------------\n");
                logger.info(" END ----> transaction id: {}", end.getTransactionId());
                logger.info(transaction_format,
                        new Object[] { entry.getHeader().getLogfileName(),
                                String.valueOf(entry.getHeader().getLogfileOffset()),
                                String.valueOf(entry.getHeader().getExecuteTime()), String.valueOf(delayTime) });
            }
            continue;
        }
        if (entry.getEntryType() == EntryType.ROWDATA) {
            RowChange rowChage = null;
            try {
                rowChage = RowChange.parseFrom(entry.getStoreValue());
            } catch (Exception e) {
                throw new RuntimeException("parse event has an error , data:" + entry.toString(), e);
            }
            EventType eventType = rowChage.getEventType();
            logger.info(row_format,
                    new Object[] { entry.getHeader().getLogfileName(),
                            String.valueOf(entry.getHeader().getLogfileOffset()), entry.getHeader().getSchemaName(),
                            entry.getHeader().getTableName(), eventType,
                            String.valueOf(entry.getHeader().getExecuteTime()), String.valueOf(delayTime) });
            if (eventType == EventType.QUERY || rowChage.getIsDdl()) {
                logger.info(" sql ----> " + rowChage.getSql() + SEP);
                continue;
            }
            for (RowData rowData : rowChage.getRowDatasList()) {
                if (eventType == EventType.DELETE) {
                    printColumn(rowData.getBeforeColumnsList());
                } else if (eventType == EventType.INSERT) {
                    printColumn(rowData.getAfterColumnsList());
                } else {
                    printColumn(rowData.getAfterColumnsList());
                }
            }
        }
    }
}
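
The client above throws a RuntimeException and aborts the whole batch as soon as one entry fails to parse. As an alternative sketch, and not part of the canal client API, a tolerant variant could log the failure and skip only the corrupt entry; the helper below assumes the CanalEntry inner types used above and the plain com.google.protobuf exception class.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
import com.alibaba.otter.canal.protocol.CanalEntry.RowChange;
import com.google.protobuf.InvalidProtocolBufferException;

// Hypothetical helper: parse a RowChange but tolerate corrupt entries instead of failing the batch.
public final class RowChangeParser {

    private static final Logger logger = LoggerFactory.getLogger(RowChangeParser.class);

    public static RowChange parseOrNull(Entry entry) {
        try {
            return RowChange.parseFrom(entry.getStoreValue());
        } catch (InvalidProtocolBufferException e) {
            // log and skip this entry; the caller continues with the rest of the batch
            logger.warn("skip unparsable entry, header={}", entry.getHeader(), e);
            return null;
        }
    }

    private RowChangeParser() {
    }
}

Whether to fail fast or skip depends on the consumer: a test client can afford to crash, while a long-running sync pipeline may prefer to quarantine the bad entry and keep its position moving.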
Use of org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.InvalidProtocolBufferException in project pulsar by yahoo.
The class ManagedLedgerOfflineBacklog, method calculateCursorBacklogs:
private void calculateCursorBacklogs(final ManagedLedgerFactoryImpl factory, final DestinationName dn,
        final NavigableMap<Long, MLDataFormats.ManagedLedgerInfo.LedgerInfo> ledgers,
        final PersistentOfflineTopicStats offlineTopicStats) throws Exception {
    if (ledgers.size() == 0) {
        return;
    }
    String managedLedgerName = dn.getPersistenceNamingEncoding();
    MetaStore store = factory.getMetaStore();
    BookKeeper bk = factory.getBookKeeper();
    final CountDownLatch allCursorsCounter = new CountDownLatch(1);
    final long errorInReadingCursor = (long) -1;
    ConcurrentOpenHashMap<String, Long> ledgerRetryMap = new ConcurrentOpenHashMap<>();
    final MLDataFormats.ManagedLedgerInfo.LedgerInfo ledgerInfo = ledgers.lastEntry().getValue();
    final PositionImpl lastLedgerPosition = new PositionImpl(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1);
    if (log.isDebugEnabled()) {
        log.debug("[{}] Last ledger position {}", managedLedgerName, lastLedgerPosition);
    }
    store.getCursors(managedLedgerName, new MetaStore.MetaStoreCallback<List<String>>() {
        @Override
        public void operationComplete(List<String> cursors, MetaStore.Stat v) {
            // Load existing cursors
            if (log.isDebugEnabled()) {
                log.debug("[{}] Found {} cursors", managedLedgerName, cursors.size());
            }
            if (cursors.isEmpty()) {
                allCursorsCounter.countDown();
                return;
            }
            final CountDownLatch cursorCounter = new CountDownLatch(cursors.size());
            for (final String cursorName : cursors) {
                // determine subscription position from cursor ledger
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Loading cursor {}", managedLedgerName, cursorName);
                }
                AsyncCallback.OpenCallback cursorLedgerOpenCb = (rc, lh, ctx1) -> {
                    long ledgerId = lh.getId();
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Opened cursor ledger {} for cursor {}. rc={}", managedLedgerName, ledgerId,
                                cursorName, rc);
                    }
                    if (rc != BKException.Code.OK) {
                        log.warn("[{}] Error opening metadata ledger {} for cursor {}: {}", managedLedgerName,
                                ledgerId, cursorName, BKException.getMessage(rc));
                        cursorCounter.countDown();
                        return;
                    }
                    long lac = lh.getLastAddConfirmed();
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Cursor {} LAC {} read from ledger {}", managedLedgerName, cursorName, lac,
                                ledgerId);
                    }
                    if (lac == LedgerHandle.INVALID_ENTRY_ID) {
                        // save the ledger id and cursor to retry outside of this callback: an attempt
                        // to read the entry from the same cursor ledger here would block behind this
                        // current operation until it completes
                        ledgerRetryMap.put(cursorName, ledgerId);
                        log.info("[{}] Cursor {} LAC {} read from ledger {}", managedLedgerName, cursorName, lac,
                                ledgerId);
                        cursorCounter.countDown();
                        return;
                    }
                    final long entryId = lac;
                    // read last acked message position for subscription
                    lh.asyncReadEntries(entryId, entryId, new AsyncCallback.ReadCallback() {
                        @Override
                        public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx) {
                            try {
                                if (log.isDebugEnabled()) {
                                    log.debug("readComplete rc={} entryId={}", rc, entryId);
                                }
                                if (rc != BKException.Code.OK) {
                                    log.warn("[{}] Error reading from metadata ledger {} for cursor {}: {}",
                                            managedLedgerName, ledgerId, cursorName, BKException.getMessage(rc));
                                    // indicate that this cursor should be excluded
                                    offlineTopicStats.addCursorDetails(cursorName, errorInReadingCursor, lh.getId());
                                } else {
                                    LedgerEntry entry = seq.nextElement();
                                    MLDataFormats.PositionInfo positionInfo;
                                    try {
                                        positionInfo = MLDataFormats.PositionInfo.parseFrom(entry.getEntry());
                                    } catch (InvalidProtocolBufferException e) {
                                        log.warn("[{}] Error reading position from metadata ledger {} for cursor {}: {}",
                                                managedLedgerName, ledgerId, cursorName, e);
                                        offlineTopicStats.addCursorDetails(cursorName, errorInReadingCursor,
                                                lh.getId());
                                        return;
                                    }
                                    final PositionImpl lastAckedMessagePosition = new PositionImpl(positionInfo);
                                    if (log.isDebugEnabled()) {
                                        log.debug("[{}] Cursor {} MD {} read last ledger position {}",
                                                managedLedgerName, cursorName, lastAckedMessagePosition,
                                                lastLedgerPosition);
                                    }
                                    // calculate cursor backlog
                                    Range<PositionImpl> range = Range.openClosed(lastAckedMessagePosition,
                                            lastLedgerPosition);
                                    if (log.isDebugEnabled()) {
                                        log.debug("[{}] Calculating backlog for cursor {} using range {}",
                                                managedLedgerName, cursorName, range);
                                    }
                                    long cursorBacklog = getNumberOfEntries(range, ledgers);
                                    offlineTopicStats.messageBacklog += cursorBacklog;
                                    offlineTopicStats.addCursorDetails(cursorName, cursorBacklog, lh.getId());
                                }
                            } finally {
                                cursorCounter.countDown();
                            }
                        }
                    }, null);
                };
                // end of cursor meta read callback
                store.asyncGetCursorInfo(managedLedgerName, cursorName,
                        new MetaStore.MetaStoreCallback<MLDataFormats.ManagedCursorInfo>() {
                            @Override
                            public void operationComplete(MLDataFormats.ManagedCursorInfo info, MetaStore.Stat version) {
                                long cursorLedgerId = info.getCursorsLedgerId();
                                if (log.isDebugEnabled()) {
                                    log.debug("[{}] Cursor {} meta-data read ledger id {}", managedLedgerName,
                                            cursorName, cursorLedgerId);
                                }
                                if (cursorLedgerId != -1) {
                                    bk.asyncOpenLedgerNoRecovery(cursorLedgerId, digestType, password,
                                            cursorLedgerOpenCb, null);
                                } else {
                                    PositionImpl lastAckedMessagePosition = new PositionImpl(
                                            info.getMarkDeleteLedgerId(), info.getMarkDeleteEntryId());
                                    Range<PositionImpl> range = Range.openClosed(lastAckedMessagePosition,
                                            lastLedgerPosition);
                                    if (log.isDebugEnabled()) {
                                        log.debug("[{}] Calculating backlog for cursor {} using range {}",
                                                managedLedgerName, cursorName, range);
                                    }
                                    long cursorBacklog = getNumberOfEntries(range, ledgers);
                                    offlineTopicStats.messageBacklog += cursorBacklog;
                                    offlineTopicStats.addCursorDetails(cursorName, cursorBacklog, cursorLedgerId);
                                    cursorCounter.countDown();
                                }
                            }

                            @Override
                            public void operationFailed(ManagedLedgerException.MetaStoreException e) {
                                log.warn("[{}] Unable to obtain cursor ledger for cursor {}: {}", managedLedgerName,
                                        cursorName, e);
                                cursorCounter.countDown();
                            }
                        });
            }
            // for every cursor find backlog
            try {
                if (accurate) {
                    cursorCounter.await();
                } else {
                    cursorCounter.await(META_READ_TIMEOUT_SECONDS, TimeUnit.SECONDS);
                }
            } catch (Exception e) {
                log.warn("[{}] Error reading subscription positions{}", managedLedgerName, e);
            } finally {
                allCursorsCounter.countDown();
            }
        }
        @Override
        public void operationFailed(ManagedLedgerException.MetaStoreException e) {
            log.warn("[{}] Failed to get the cursors list", managedLedgerName, e);
            allCursorsCounter.countDown();
        }
    });
    if (accurate) {
        allCursorsCounter.await();
    } else {
        allCursorsCounter.await(META_READ_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    }
    // go through ledgers where LAC was -1
    if (accurate && ledgerRetryMap.size() > 0) {
        ledgerRetryMap.forEach((cursorName, ledgerId) -> {
            if (log.isDebugEnabled()) {
                log.debug("Cursor {} Ledger {} Trying to obtain MD from BkAdmin", cursorName, ledgerId);
            }
            PositionImpl lastAckedMessagePosition = tryGetMDPosition(bk, ledgerId, cursorName);
            if (lastAckedMessagePosition == null) {
                log.warn("[{}] Cursor {} read from ledger {}. Unable to determine cursor position",
                        managedLedgerName, cursorName, ledgerId);
            } else {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Cursor {} read from ledger using bk admin {}. position {}", managedLedgerName,
                            cursorName, ledgerId, lastAckedMessagePosition);
                }
                // calculate cursor backlog
                Range<PositionImpl> range = Range.openClosed(lastAckedMessagePosition, lastLedgerPosition);
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Calculating backlog for cursor {} using range {}", managedLedgerName, cursorName,
                            range);
                }
                long cursorBacklog = getNumberOfEntries(range, ledgers);
                offlineTopicStats.messageBacklog += cursorBacklog;
                offlineTopicStats.addCursorDetails(cursorName, cursorBacklog, ledgerId);
            }
        });
    }
}
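
One detail worth isolating from the method above is its treatment of parse failures: a cursor whose PositionInfo cannot be decoded is recorded with the errorInReadingCursor sentinel (-1) and the scan continues, rather than failing the whole backlog calculation. A minimal hedged sketch of that step follows; the helper is hypothetical, and the import paths for the managed-ledger and BookKeeper classes are assumed from the Pulsar sources of this era.

import java.util.Optional;

import org.apache.bookkeeper.client.LedgerEntry;
import org.apache.bookkeeper.mledger.impl.PositionImpl;
import org.apache.bookkeeper.mledger.proto.MLDataFormats;

import com.google.protobuf.InvalidProtocolBufferException;

// Hypothetical helper: decode a cursor position, returning empty when the bytes are malformed
// so the caller can record the errorInReadingCursor sentinel and move on to the next cursor.
final class CursorPositionReader {

    static Optional<PositionImpl> readPosition(LedgerEntry entry) {
        try {
            MLDataFormats.PositionInfo positionInfo = MLDataFormats.PositionInfo.parseFrom(entry.getEntry());
            return Optional.of(new PositionImpl(positionInfo));
        } catch (InvalidProtocolBufferException e) {
            return Optional.empty();
        }
    }

    private CursorPositionReader() {
    }
}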
Use of org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.InvalidProtocolBufferException in project pulsar by yahoo.
The class ManagedCursorImpl, method recoverFromLedger:
protected void recoverFromLedger(final ManagedCursorInfo info, final VoidCallback callback) {
    // Read the acknowledged position from the metadata ledger, then create
    // a new ledger and write the position into it
    ledger.mbean.startCursorLedgerOpenOp();
    long ledgerId = info.getCursorsLedgerId();
    bookkeeper.asyncOpenLedger(ledgerId, config.getDigestType(), config.getPassword(), (rc, lh, ctx) -> {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Opened ledger {} for consumer {}. rc={}", ledger.getName(), ledgerId, name, rc);
        }
        if (isBkErrorNotRecoverable(rc)) {
            log.error("[{}] Error opening metadata ledger {} for consumer {}: {}", ledger.getName(), ledgerId, name,
                    BKException.getMessage(rc));
            // Rewind to oldest entry available
            initialize(getRollbackPosition(info), callback);
            return;
        } else if (rc != BKException.Code.OK) {
            log.warn("[{}] Error opening metadata ledger {} for consumer {}: {}", ledger.getName(), ledgerId, name,
                    BKException.getMessage(rc));
            callback.operationFailed(new ManagedLedgerException(BKException.getMessage(rc)));
            return;
        }
        // Read the last entry in the ledger
        cursorLedger = lh;
        lh.asyncReadLastEntry((rc1, lh1, seq, ctx1) -> {
            if (log.isDebugEnabled()) {
                log.debug("readComplete rc={} entryId={}", rc1, lh1.getLastAddConfirmed());
            }
            if (isBkErrorNotRecoverable(rc1)) {
                log.error("[{}] Error reading from metadata ledger {} for consumer {}: {}", ledger.getName(),
                        ledgerId, name, BKException.getMessage(rc1));
                // Rewind to oldest entry available
                initialize(getRollbackPosition(info), callback);
                return;
            } else if (rc1 != BKException.Code.OK) {
                log.warn("[{}] Error reading from metadata ledger {} for consumer {}: {}", ledger.getName(), ledgerId,
                        name, BKException.getMessage(rc1));
                callback.operationFailed(new ManagedLedgerException(BKException.getMessage(rc1)));
                return;
            }
            LedgerEntry entry = seq.nextElement();
            PositionInfo positionInfo;
            try {
                positionInfo = PositionInfo.parseFrom(entry.getEntry());
            } catch (InvalidProtocolBufferException e) {
                callback.operationFailed(new ManagedLedgerException(e));
                return;
            }
            PositionImpl position = new PositionImpl(positionInfo);
            if (positionInfo.getIndividualDeletedMessagesCount() > 0) {
                recoverIndividualDeletedMessages(positionInfo.getIndividualDeletedMessagesList());
            }
            recoveredCursor(position);
            callback.operationComplete();
        }, null);
    }, null);
}
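
In contrast to the offline-backlog scan, a parse failure here is fatal to cursor recovery: the InvalidProtocolBufferException is wrapped in a ManagedLedgerException and delivered through the callback's failure path. A minimal hedged sketch of that wrapping step, with a hypothetical class and method name and import paths assumed from the sources above:

import org.apache.bookkeeper.mledger.ManagedLedgerException;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.PositionInfo;

import com.google.protobuf.InvalidProtocolBufferException;

// Hypothetical helper mirroring the recovery path above: malformed cursor-ledger bytes become a
// ManagedLedgerException, so the caller can take the callback's operationFailed(...) branch.
final class PositionRecovery {

    static PositionInfo parseOrFail(byte[] data) throws ManagedLedgerException {
        try {
            return PositionInfo.parseFrom(data);
        } catch (InvalidProtocolBufferException e) {
            throw new ManagedLedgerException(e);
        }
    }

    private PositionRecovery() {
    }
}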