Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
The class ZookeeperClient, method put.
/**
 * Puts the given byte sequence at the given path.
 *
 * If the path does not exist, this call creates it.
 *
 * If the version holder is not null and the path already exists, the given version is passed to ZooKeeper for comparison.
 * ZooKeeper maintains a stat structure for each znode; it holds a version number that is incremented each time the znode's data changes.
 * If we pass a version that does not match the actual version of the data,
 * the update fails with {@link org.apache.zookeeper.KeeperException.BadVersionException}.
 * We catch that exception and re-throw it as a {@link VersionMismatchException}.
 * See the ZooKeeper documentation: https://zookeeper.apache.org/doc/r3.2.2/zookeeperProgrammers.html#sc_zkDataModel_znodes
 *
 * @param path target path
 * @param data data to store
 * @param version version holder
 */
public void put(final String path, final byte[] data, DataChangeVersion version) {
  Preconditions.checkNotNull(path, "path is required");
  Preconditions.checkNotNull(data, "data is required");
  final String target = PathUtils.join(root, path);
  try {
    // we make a consistent read to ensure this call won't fail upon
    // consecutive calls on the same path before the cache is updated
    boolean hasNode = hasPath(path, true);
    if (!hasNode) {
      try {
        curator.create().withMode(mode).forPath(target, data);
      } catch (NodeExistsException e) {
        // Handle race conditions since Drill is distributed and other
        // drillbits may have just created the node. This assumes that we do want to
        // override the new node. Makes sense here, because if the node had existed,
        // we'd have updated it.
        hasNode = true;
      }
    }
    if (hasNode) {
      if (version != null) {
        try {
          curator.setData().withVersion(version.getVersion()).forPath(target, data);
        } catch (final KeeperException.BadVersionException e) {
          throw new VersionMismatchException("Unable to put data. Version mismatch is detected.", version.getVersion(), e);
        }
      } else {
        curator.setData().forPath(target, data);
      }
    }
    getCache().rebuildNode(target);
  } catch (final VersionMismatchException e) {
    throw e;
  } catch (final Exception e) {
    throw new DrillRuntimeException("Unable to put data at path " + path, e);
  }
}
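Together with the matching read call, this put overload supports an optimistic compare-and-set loop. The sketch below is illustrative only: it assumes ZookeeperClient exposes a get(path, version) overload that fills the DataChangeVersion holder (Drill provides one, though treat the exact signature here as an assumption), and the helper name updateWithRetry is hypothetical.

// Minimal sketch of an optimistic-update retry loop built on put():
// read the current version, attempt a versioned write, and retry from
// a fresh read if another drillbit won the race.
void updateWithRetry(ZookeeperClient client, String path, byte[] newData) {
  while (true) {
    DataChangeVersion version = new DataChangeVersion(); // holder filled by get()
    client.get(path, version);                           // assumed overload: reads data and version
    try {
      client.put(path, newData, version);                // fails on a concurrent modification
      return;
    } catch (VersionMismatchException e) {
      // another writer bumped the znode version between our get and put; retry
    }
  }
}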
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
The class HBaseRecordReader, method next.
@Override
public int next() {
  Stopwatch watch = Stopwatch.createStarted();
  if (rowKeyVector != null) {
    rowKeyVector.clear();
    rowKeyVector.allocateNew();
  }
  for (ValueVector v : familyVectorMap.values()) {
    v.clear();
    v.allocateNew();
  }
  int rowCount = 0;
  // Even if the memory allocated for the first row exceeds the allowed batch
  // maximum, that row is still added: the limit is checked only after a row is written.
  do {
    Result result = null;
    final OperatorStats operatorStats = operatorContext == null ? null : operatorContext.getStats();
    try {
      if (operatorStats != null) {
        operatorStats.startWait();
      }
      try {
        result = resultScanner.next();
      } finally {
        if (operatorStats != null) {
          operatorStats.stopWait();
        }
      }
    } catch (IOException e) {
      throw new DrillRuntimeException(e);
    }
    if (result == null) {
      break;
    }
    // parse the result and populate the value vectors
    Cell[] cells = result.rawCells();
    if (rowKeyVector != null) {
      rowKeyVector.getMutator().setSafe(rowCount, cells[0].getRowArray(), cells[0].getRowOffset(), cells[0].getRowLength());
    }
    if (!rowKeyOnly) {
      for (final Cell cell : cells) {
        final int familyOffset = cell.getFamilyOffset();
        final int familyLength = cell.getFamilyLength();
        final byte[] familyArray = cell.getFamilyArray();
        final MapVector mv = getOrCreateFamilyVector(new String(familyArray, familyOffset, familyLength), true);
        final int qualifierOffset = cell.getQualifierOffset();
        final int qualifierLength = cell.getQualifierLength();
        final byte[] qualifierArray = cell.getQualifierArray();
        final NullableVarBinaryVector v = getOrCreateColumnVector(mv, new String(qualifierArray, qualifierOffset, qualifierLength));
        final int valueOffset = cell.getValueOffset();
        final int valueLength = cell.getValueLength();
        final byte[] valueArray = cell.getValueArray();
        v.getMutator().setSafe(rowCount, valueArray, valueOffset, valueLength);
      }
    }
    rowCount++;
  } while (canAddNewRow(rowCount));
  setOutputRowCount(rowCount);
  logger.debug("Took {} ms to get {} records", watch.elapsed(TimeUnit.MILLISECONDS), rowCount);
  return rowCount;
}
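The canAddNewRow helper is not part of this excerpt. A plausible sketch of the limits it enforces follows; the constant names and the allocator accessor are assumptions for illustration, not the verbatim Drill source.

// Hypothetical sketch: cap the batch both by row count and by the memory
// allocated for the vectors so far. TARGET_RECORD_COUNT and
// MAX_ALLOCATED_SIZE are assumed constants.
private boolean canAddNewRow(int rowCount) {
  return rowCount < TARGET_RECORD_COUNT
      && operatorContext.getAllocator().getAllocatedMemory() < MAX_ALLOCATED_SIZE;
}

Note that because the loop body is a do/while, the first row is written before this check ever runs, which is what the comment about an oversized first row refers to.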
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
The class HBaseRecordReader, method getOrCreateFamilyVector.
private MapVector getOrCreateFamilyVector(String familyName, boolean allocateOnCreate) {
  try {
    MapVector v = familyVectorMap.get(familyName);
    if (v == null) {
      SchemaPath column = SchemaPath.getSimplePath(familyName);
      MaterializedField field = MaterializedField.create(column.getAsNamePart().getName(), COLUMN_FAMILY_TYPE);
      v = outputMutator.addField(field, MapVector.class);
      if (allocateOnCreate) {
        v.allocateNew();
      }
      getColumns().add(column);
      familyVectorMap.put(familyName, v);
    }
    return v;
  } catch (SchemaChangeException e) {
    throw new DrillRuntimeException(e);
  }
}
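next() also calls a getOrCreateColumnVector counterpart that lazily adds a nullable VARBINARY child under the family's MapVector. The sketch below is an illustrative reconstruction built on Drill's addOrGet and Types helpers, not the verbatim source.

// Illustrative counterpart: lazily create a VARBINARY child vector for a
// column qualifier inside its family map. If addOrGet grew the map, a new
// child was created and needs buffers allocated before use.
private NullableVarBinaryVector getOrCreateColumnVector(MapVector mv, String qualifier) {
  int oldSize = mv.size();
  NullableVarBinaryVector v = mv.addOrGet(qualifier,
      Types.optional(TypeProtos.MinorType.VARBINARY), NullableVarBinaryVector.class);
  if (oldSize != mv.size()) {
    v.allocateNew();
  }
  return v;
}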
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
The class JsonTableGroupScan, method init.
private void init() {
  logger.debug("Getting tablet locations");
  try {
    Configuration conf = new Configuration();
    // Fetch the table and tablet infos only once and cache them.
    table = MapRDB.getTable(scanSpec.getTableName());
    tabletInfos = table.getTabletInfos(scanSpec.getCondition());
    // Sum the per-tablet row-count estimates from the cached tabletInfos;
    // this avoids duplicate work and RPCs to the MapR DB server.
    for (TabletInfo tabletInfo : tabletInfos) {
      totalRowCount += tabletInfo.getEstimatedNumRows();
    }
    computeRegionsToScan();
  } catch (Exception e) {
    throw new DrillRuntimeException("Error getting region info for table: " + scanSpec.getTableName(), e);
  }
}
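The cached totalRowCount typically feeds the planner's cost estimate for the scan. The sketch below shows the general shape of such a use; the ScanStats constructor arguments and the ESTIMATED_ROW_SIZE constant are assumptions for illustration, not the verbatim Drill source.

// Hypothetical use of the cached estimate: report scan statistics to the
// planner without issuing further RPCs. ESTIMATED_ROW_SIZE is an assumed
// per-row byte estimate, not a real Drill constant.
@Override
public ScanStats getScanStats() {
  return new ScanStats(ScanStats.GroupScanProperty.NO_EXACT_ROW_COUNT,
      totalRowCount,                       // estimated record count
      1,                                   // nominal cpu cost
      totalRowCount * ESTIMATED_ROW_SIZE); // rough disk cost
}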
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
The class HBaseGroupScan, method init.
private void init() {
  logger.debug("Getting region locations");
  TableName tableName = TableName.valueOf(hbaseScanSpec.getTableName());
  Connection conn = storagePlugin.getConnection();
  try (Admin admin = conn.getAdmin();
       RegionLocator locator = conn.getRegionLocator(tableName)) {
    this.hTableDesc = admin.getTableDescriptor(tableName);
    List<HRegionLocation> regionLocations = locator.getAllRegionLocations();
    statsCalculator = new TableStatsCalculator(conn, hbaseScanSpec, storagePlugin.getContext().getConfig(), storagePluginConfig);
    boolean foundStartRegion = false;
    regionsToScan = new TreeMap<HRegionInfo, ServerName>();
    for (HRegionLocation regionLocation : regionLocations) {
      HRegionInfo regionInfo = regionLocation.getRegionInfo();
      if (!foundStartRegion
          && hbaseScanSpec.getStartRow() != null
          && hbaseScanSpec.getStartRow().length != 0
          && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
        continue;
      }
      foundStartRegion = true;
      regionsToScan.put(regionInfo, regionLocation.getServerName());
      scanSizeInBytes += statsCalculator.getRegionSizeInBytes(regionInfo.getRegionName());
      if (hbaseScanSpec.getStopRow() != null
          && hbaseScanSpec.getStopRow().length != 0
          && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
        break;
      }
    }
  } catch (IOException e) {
    throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
  }
  verifyColumns();
}
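verifyColumns() runs after the regions are resolved but is not shown here. A plausible sketch based on its name and the hTableDesc captured above follows; ROW_KEY_PATH and the exact message are assumptions, and this is an illustrative reconstruction rather than the verbatim Drill source.

// Hypothetical sketch: reject queries that reference a column family the
// HBase table does not contain. ROW_KEY_PATH is assumed to be the
// SchemaPath of the synthetic row_key column.
private void verifyColumns() {
  for (SchemaPath column : columns) {
    if (column.equals(ROW_KEY_PATH)) {
      continue; // the row key is always available
    }
    byte[] family = Bytes.toBytes(column.getRootSegment().getPath());
    if (!hTableDesc.hasFamily(family)) {
      throw new DrillRuntimeException(String.format(
          "The column family '%s' does not exist in HBase table: %s.",
          column.getRootSegment().getPath(), hTableDesc.getNameAsString()));
    }
  }
}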