Use of com.mapr.db.impl.BaseJsonTable in project drill by apache.
The next() method of the RestrictedJsonRecordReader class: it reads one batch of documents, fetched by row key through a MultiGet on the BaseJsonTable handle obtained from the plugin's table cache.
@Override
public int next() {
  Stopwatch watch = Stopwatch.createUnstarted();
  watch.start();
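  // the restricted sub-scan spec is the source of the row key ids this reader
  // is allowed to fetch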
  RestrictedMapRDBSubScanSpec rss = (RestrictedMapRDBSubScanSpec) this.subScanSpec;
  vectorWriter.allocate();
  vectorWriter.reset();
  if (!rss.readyToGetRowKey()) {
    // no row keys are available yet; load a record only to initialize the
    // schema, and only when we are in the build-schema phase
    if (rss.isBuildSchemaPhase()) {
      readToInitSchema();
    }
    return 0;
  }
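  // look up the JSON table through the format plugin's table cache and prepare
  // a multi-get using the scan's condition and projections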
  Table table = super.formatPlugin.getJsonTableCache()
      .getTable(subScanSpec.getTableName(), subScanSpec.getUserName());
  final MultiGet multiGet = new MultiGet((BaseJsonTable) table, condition, false, projections);
  int recordCount = 0;
  DBDocumentReaderBase reader = null;
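  // maxRecordsToRead > 0 means a limit was pushed down, so read at most that
  // many rows; -1 means no limit; any other value means the limit is already
  // exhausted and nothing should be read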
  int maxRecordsForThisBatch = this.maxRecordsToRead > 0
      ? Math.min(rss.getMaxRowKeysToBeRead(), this.maxRecordsToRead)
      : this.maxRecordsToRead == -1 ? rss.getMaxRowKeysToBeRead() : 0;
  Stopwatch timer = Stopwatch.createUnstarted();
  while (recordCount < maxRecordsForThisBatch) {
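    // pull the next slice of row key ids, at most batchSize at a time; null
    // means no more row keys are available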
    ByteBuffer[] rowKeyIds = rss.getRowKeyIdsToRead(batchSize);
    if (rowKeyIds == null) {
      break;
    }
    try {
      timer.start();
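      // fetch all documents for this slice of row keys in a single multi-get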
      final List<Document> docList = multiGet.doGet(rowKeyIds);
      int index = 0;
      long docsToRead = docList.size();
      // if a limit was pushed down, stop once we have `limit` rows from the
      // multi-get, i.e. maxRecordsForThisBatch
      if (this.maxRecordsToRead != -1) {
        docsToRead = Math.min(docsToRead, maxRecordsForThisBatch);
      }
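      // copy each fetched document into the output vectors through the
      // document writer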
      while (index < docsToRead) {
        vectorWriter.setPosition(recordCount);
        reader = (DBDocumentReaderBase) docList.get(index).asReader();
        documentWriter.writeDBDocument(vectorWriter, reader);
        recordCount++;
        index++;
      }
    } catch (UserException e) {
      throw UserException.unsupportedError(e)
          .addContext(String.format("Table: %s, document id: '%s'",
              getTable().getPath(), reader == null ? null : IdCodec.asString(reader.getId())))
          .build(logger);
    } catch (SchemaChangeException e) {
      if (getIgnoreSchemaChange()) {
        logger.warn("{}. Dropping the row from result.", e.getMessage());
        logger.debug("Stack trace:", e);
      } else {
        throw dataReadError(logger, e);
      }
    } finally {
      // stop the timer here rather than at the end of the try block: if a
      // SchemaChangeException is swallowed above, the next iteration's
      // timer.start() would otherwise throw IllegalStateException
      if (timer.isRunning()) {
        timer.stop();
      }
    }
  }
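  // finalize the batch and decrement the remaining pushed-down limit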
  vectorWriter.setValueCount(recordCount);
  if (maxRecordsToRead > 0) {
    if (maxRecordsToRead - recordCount >= 0) {
      maxRecordsToRead -= recordCount;
    } else {
      maxRecordsToRead = 0;
    }
  }
  logger.debug("Took {} ms to get {} records, multi-get took {} ms",
      watch.elapsed(TimeUnit.MILLISECONDS), recordCount, timer.elapsed(TimeUnit.MILLISECONDS));
  return recordCount;
}
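The two-stopwatch pattern above (one Guava Stopwatch for the whole call, one accumulating only the multi-get time across loop iterations) is worth noting. Below is a minimal, self-contained sketch of the same pattern; the class name is illustrative and Thread.sleep merely stands in for the expensive multiGet.doGet call.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class TwoStopwatchSketch {
  public static void main(String[] args) throws InterruptedException {
    Stopwatch watch = Stopwatch.createUnstarted();  // times the whole call
    watch.start();
    Stopwatch timer = Stopwatch.createUnstarted();  // accumulates inner time only
    for (int i = 0; i < 3; i++) {
      timer.start();
      Thread.sleep(10);  // stands in for multiGet.doGet(rowKeyIds)
      timer.stop();      // start()/stop() pairs accumulate across iterations
    }
    System.out.printf("total %d ms, inner %d ms%n",
        watch.elapsed(TimeUnit.MILLISECONDS), timer.elapsed(TimeUnit.MILLISECONDS));
  }
}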