use of com.netflix.astyanax.util.RangeBuilder in project coprhd-controller by CoprHD.
the class UserToOrdersMigration method process.
@Override
public void process() throws MigrationCallbackException {
    log.info("Adding new index records for class: {} field: {} annotation: {} name={}",
            new Object[] { Order.class, Order.SUBMITTED_BY_USER_ID, ClassNameTimeSeries.class.getName(), name });
    DataObjectType doType = TypeMap.getDoType(Order.class);
    ColumnField field = doType.getColumnField(Order.SUBMITTED_BY_USER_ID);
    newIndexCF = field.getIndexCF();
    ColumnFamily<String, IndexColumnName> userToOrders =
            new ColumnFamily<>(SOURCE_INDEX_CF_NAME, StringSerializer.get(), IndexColumnNameSerializer.get());
    DbClientImpl client = (DbClientImpl) dbClient;
    ks = client.getKeyspace(Order.class);
    MutationBatch mutationBatch = ks.prepareMutationBatch();
    long m = 0;
    try {
        // Enumerate all row keys of the old index; setLimit(0) fetches keys without columns.
        OperationResult<Rows<String, IndexColumnName>> result = ks.prepareQuery(userToOrders)
                .getAllRows()
                .setRowLimit(1000)
                .withColumnRange(new RangeBuilder().setLimit(0).build())
                .execute();
        ColumnList<IndexColumnName> cols;
        for (Row<String, IndexColumnName> row : result.getResult()) {
            // Page through each row's columns five at a time.
            RowQuery<String, IndexColumnName> rowQuery = ks.prepareQuery(userToOrders)
                    .getKey(row.getKey())
                    .autoPaginate(true)
                    .withColumnRange(new RangeBuilder().setLimit(5).build());
            while (!(cols = rowQuery.execute().getResult()).isEmpty()) {
                m++;
                for (Column<IndexColumnName> col : cols) {
                    String indexKey = row.getKey();
                    String orderId = col.getName().getTwo();
                    ClassNameTimeSeriesIndexColumnName newCol = new ClassNameTimeSeriesIndexColumnName(
                            col.getName().getOne(), orderId, col.getName().getTimeUUID());
                    mutationBatch.withRow(newIndexCF, indexKey).putEmptyColumn(newCol, null);
                    // Flush the batch periodically so it does not grow unbounded.
                    if (m % 10000 == 0) {
                        mutationBatch.execute();
                    }
                }
            }
        }
        mutationBatch.execute();
    } catch (Exception e) {
        log.error("Migration to {} failed", newIndexCF.getName(), e);
    }
}
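The two RangeBuilder calls above form a reusable full-scan idiom: setLimit(0) on getAllRows streams row keys without fetching any columns, and autoPaginate(true) with a small column limit then pages through each row. A minimal, self-contained sketch of that idiom, assuming a generic ColumnFamily<String, String>; the class name, keyspace, and page sizes are illustrative stand-ins, not CoprHD code:

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.query.RowQuery;
import com.netflix.astyanax.util.RangeBuilder;

class FullScanSketch {
    // Visit every column of every row without holding the whole CF in memory.
    static void scan(Keyspace ks, ColumnFamily<String, String> cf) throws ConnectionException {
        OperationResult<Rows<String, String>> rows = ks.prepareQuery(cf)
                .getAllRows()
                .setRowLimit(1000)                                        // stream row keys in pages of 1000
                .withColumnRange(new RangeBuilder().setLimit(0).build())  // limit 0: row keys only, no columns
                .execute();
        for (Row<String, String> row : rows.getResult()) {
            RowQuery<String, String> pager = ks.prepareQuery(cf)
                    .getKey(row.getKey())
                    .autoPaginate(true)                                   // each execute() returns the next page
                    .withColumnRange(new RangeBuilder().setLimit(100).build());
            ColumnList<String> page;
            while (!(page = pager.execute().getResult()).isEmpty()) {
                // process the page of columns here
            }
        }
    }
}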
use of com.netflix.astyanax.util.RangeBuilder in project coprhd-controller by CoprHD.
the class TimeSeriesType method getColumnRange.
/**
 * Returns the column range for the given time and bucket granularity.
 *
 * @param time target query time (must be in UTC)
 * @param granularity granularity of the requested bucket
 * @param pageSize page size
 * @return column range covering the requested time bucket
 */
public ByteBufferRange getColumnRange(DateTime time, TimeBucket granularity, int pageSize) {
    if (time.getZone() != DateTimeZone.UTC) {
        throw new IllegalArgumentException("Invalid timezone");
    }
    if (granularity.ordinal() > _bucketGranularity.ordinal()) {
        throw new IllegalArgumentException("Invalid granularity");
    }
    RangeBuilder builder = new RangeBuilder();
    builder.setLimit(pageSize);
    if (granularity.ordinal() < _bucketGranularity.ordinal()) {
        // Requested granularity is finer than the row bucket, so narrow the
        // column slice to the interval of the requested bucket.
        DateTime start = DateTime.now();
        DateTime end = DateTime.now();
        switch (granularity) {
            case MONTH:
                start = new DateTime(time.getYear(), time.getMonthOfYear(), 1, 0, 0, DateTimeZone.UTC);
                end = start.plusMonths(1);
                break;
            case DAY:
                start = new DateTime(time.getYear(), time.getMonthOfYear(), time.getDayOfMonth(), 0, 0, DateTimeZone.UTC);
                end = start.plusDays(1);
                break;
            case HOUR:
                start = new DateTime(time.getYear(), time.getMonthOfYear(), time.getDayOfMonth(), time.getHourOfDay(), 0, DateTimeZone.UTC);
                end = start.plusHours(1);
                break;
            case MINUTE:
                start = new DateTime(time.getYear(), time.getMonthOfYear(), time.getDayOfMonth(), time.getHourOfDay(), time.getMinuteOfHour(), DateTimeZone.UTC);
                end = start.plusMinutes(1);
                break;
            case SECOND:
                start = new DateTime(time.getYear(), time.getMonthOfYear(), time.getDayOfMonth(), time.getHourOfDay(), time.getMinuteOfHour(), time.getSecondOfMinute(), DateTimeZone.UTC);
                end = start.plusSeconds(1);
                break;
        }
        // TimeUUID bounds: smallest UUID at the interval start, largest UUID
        // within the interval's final millisecond.
        builder.setStart(TimeUUIDUtils.getTimeUUID(start.getMillis()));
        builder.setEnd(createMaxTimeUUID(end.minusMillis(1).getMillis()));
    }
    return builder.build();
}
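The TimeUUID bounds are the part worth noting: with a time-ordered comparator, a column slice can be built from two UUIDs derived from millisecond timestamps. A minimal sketch for a one-hour slice; createMaxTimeUUID above is a class-local helper, so TimeUUIDUtils.getTimeUUID stands in for it here and may exclude columns written within the interval's final millisecond:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.util.RangeBuilder;
import com.netflix.astyanax.util.TimeUUIDUtils;

class HourSliceSketch {
    // Column slice covering one UTC hour of TimeUUID-named columns.
    static ByteBufferRange hourOf(DateTime time, int pageSize) {
        DateTime start = new DateTime(time.getYear(), time.getMonthOfYear(),
                time.getDayOfMonth(), time.getHourOfDay(), 0, DateTimeZone.UTC);
        DateTime end = start.plusHours(1);
        return new RangeBuilder()
                .setLimit(pageSize)
                .setStart(TimeUUIDUtils.getTimeUUID(start.getMillis()))
                // Stand-in for createMaxTimeUUID(...): one specific UUID for the
                // last millisecond rather than the largest possible one.
                .setEnd(TimeUUIDUtils.getTimeUUID(end.minusMillis(1).getMillis()))
                .build();
    }
}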
use of com.netflix.astyanax.util.RangeBuilder in project coprhd-controller by CoprHD.
the class CustomizedDistributedRowLock method readLockColumns.
/**
 * Read all the lock columns. Will also read data columns if withDataColumns(true) was called.
 *
 * @param readDataColumns whether to read data columns in addition to lock columns
 * @throws Exception
 */
private Map<String, Long> readLockColumns(boolean readDataColumns) throws Exception {
    Map<String, Long> result = Maps.newLinkedHashMap();
    ConsistencyLevel readConsistencyLevel = consistencyLevel;
    // Cassandra does not support EACH_QUORUM for reads (it would be meaningless
    // anyway), so fall back to LOCAL_QUORUM.
    if (consistencyLevel == ConsistencyLevel.CL_EACH_QUORUM) {
        readConsistencyLevel = ConsistencyLevel.CL_LOCAL_QUORUM;
    }
    if (readDataColumns) {
        // Read the whole row and split lock columns from data columns.
        columns = new OrderedColumnMap<String>();
        ColumnList<String> lockResult = keyspace.prepareQuery(columnFamily)
                .setConsistencyLevel(readConsistencyLevel)
                .getKey(key)
                .execute()
                .getResult();
        for (Column<String> c : lockResult) {
            if (c.getName().startsWith(prefix)) {
                result.put(c.getName(), readTimeoutValue(c));
            } else {
                columns.add(c);
            }
        }
    } else {
        // Read only the lock columns: a prefix slice over string column names.
        ColumnList<String> lockResult = keyspace.prepareQuery(columnFamily)
                .setConsistencyLevel(readConsistencyLevel)
                .getKey(key)
                .withColumnRange(new RangeBuilder()
                        .setStart(prefix + "\u0000")
                        .setEnd(prefix + "\uFFFF")
                        .build())
                .execute()
                .getResult();
        for (Column<String> c : lockResult) {
            result.put(c.getName(), readTimeoutValue(c));
        }
    }
    return result;
}
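The "\u0000"/"\uFFFF" pair above is a general trick: under a UTF8/string comparator it turns an exact-prefix match into a column slice, since prefix + "\u0000" sorts just after the bare prefix and prefix + "\uFFFF" sorts after anything else starting with it. A minimal sketch; keyspace, cf, key, and the lock prefix are assumed stand-ins:

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.util.RangeBuilder;

class PrefixSliceSketch {
    // Fetch only the columns whose names start with `prefix` from a single row.
    static ColumnList<String> columnsWithPrefix(Keyspace keyspace, ColumnFamily<String, String> cf,
            String key, String prefix) throws ConnectionException {
        return keyspace.prepareQuery(cf)
                .getKey(key)
                .withColumnRange(new RangeBuilder()
                        .setStart(prefix + "\u0000") // smallest name strictly after the bare prefix
                        .setEnd(prefix + "\uFFFF")   // beyond any realistic name carrying the prefix
                        .build())
                .execute()
                .getResult();
    }
}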
use of com.netflix.astyanax.util.RangeBuilder in project coprhd-controller by CoprHD.
the class TimeSeriesIndexMigration method process.
@Override
public void process() throws MigrationCallbackException {
    log.info("Adding new index records for class: {} field: {} annotation: {}",
            new Object[] { Order.class.getName(), Order.SUBMITTED, TimeSeriesAlternateId.class.getName() });
    ColumnFamily<String, IndexColumnName> tenantToOrder =
            new ColumnFamily<>(SOURCE_INDEX_CF_NAME, StringSerializer.get(), IndexColumnNameSerializer.get());
    ColumnFamily<String, IndexColumnName> timeseriesIndex =
            new ColumnFamily<>(SOURCE_INDEX_CF_NAME2, StringSerializer.get(), IndexColumnNameSerializer.get());
    DataObjectType doType = TypeMap.getDoType(Order.class);
    ColumnField field = doType.getColumnField(Order.SUBMITTED);
    ColumnFamily<String, TimeSeriesIndexColumnName> newIndexCF = field.getIndexCF();
    DbClientImpl client = (DbClientImpl) dbClient;
    Keyspace ks = client.getKeyspace(Order.class);
    List<CompletableFuture<Void>> tasks = new ArrayList<>(TASK_SIZE);
    try {
        // Enumerate all row keys of the old index; setLimit(0) fetches keys without columns.
        OperationResult<Rows<String, IndexColumnName>> result = ks.prepareQuery(tenantToOrder)
                .getAllRows()
                .setRowLimit(1000)
                .withColumnRange(new RangeBuilder().setLimit(0).build())
                .execute();
        for (Row<String, IndexColumnName> row : result.getResult()) {
            RowQuery<String, IndexColumnName> rowQuery = ks.prepareQuery(tenantToOrder)
                    .getKey(row.getKey())
                    .autoPaginate(true)
                    .withColumnRange(new RangeBuilder().setLimit(5).build());
            ColumnList<IndexColumnName> cols = rowQuery.execute().getResult();
            while (!cols.isEmpty()) {
                if (tasks.size() >= TASK_SIZE) {
                    // Drain the in-flight batch before queueing more tasks.
                    CompletableFuture.allOf(tasks.toArray(new CompletableFuture[0])).join();
                    tasks = new ArrayList<>(TASK_SIZE);
                }
                // Submit a migration task for the current page of columns.
                tasks.add(CompletableFuture.runAsync(
                        new MigrationTask(ks, row.getKey(), cols, tenantToOrder, timeseriesIndex, newIndexCF)));
                cols = rowQuery.execute().getResult();
            }
        }
        if (!tasks.isEmpty()) {
            CompletableFuture.allOf(tasks.toArray(new CompletableFuture[0])).join();
        }
    } catch (Exception e) {
        log.error("Migration to {} failed", newIndexCF.getName(), e);
    }
}
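The task-list handling above is a simple way to bound concurrency without an explicit executor: fill a fixed-size batch of CompletableFutures, join them all, then start a new batch. The same idiom in isolation; TASK_SIZE and the Runnable source are assumed stand-ins:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

class BoundedBatchSketch {
    static final int TASK_SIZE = 20; // assumed batch width

    static void runBounded(Iterable<Runnable> pendingWork) {
        List<CompletableFuture<Void>> batch = new ArrayList<>(TASK_SIZE);
        for (Runnable work : pendingWork) {
            if (batch.size() >= TASK_SIZE) {
                // Drain the whole batch before queueing more work.
                CompletableFuture.allOf(batch.toArray(new CompletableFuture[0])).join();
                batch.clear();
            }
            batch.add(CompletableFuture.runAsync(work));
        }
        if (!batch.isEmpty()) {
            CompletableFuture.allOf(batch.toArray(new CompletableFuture[0])).join();
        }
    }
}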
use of com.netflix.astyanax.util.RangeBuilder in project coprhd-controller by CoprHD.
the class DbConsistencyCheckerHelper method checkIndexingCF.
/**
 * Scan all index records and their related data object records to find
 * index entries whose data object record is missing. Corrupted-row counts
 * are accumulated in the given CheckResult rather than returned.
 *
 * @throws ConnectionException
 */
public void checkIndexingCF(IndexAndCf indexAndCf, boolean toConsole, CheckResult checkResult,
        boolean isParallel) throws ConnectionException {
    initSchemaVersions();
    String indexCFName = indexAndCf.cf.getName();
    Map<String, ColumnFamily<String, CompositeColumnName>> objCfs = getDataObjectCFs();
    _log.info("Start checking the index CF {} with double confirmed option: {}", indexCFName, doubleConfirmed);
    Map<ColumnFamily<String, CompositeColumnName>, Map<String, List<IndexEntry>>> objsToCheck = new HashMap<>();
    ColumnFamilyQuery<String, IndexColumnName> query = indexAndCf.keyspace.prepareQuery(indexAndCf.cf);
    // Enumerate all row keys of the index CF; setLimit(0) fetches keys without columns.
    OperationResult<Rows<String, IndexColumnName>> result = query.getAllRows()
            .setRowLimit(dbClient.DEFAULT_PAGE_SIZE)
            .withColumnRange(new RangeBuilder().setLimit(0).build())
            .execute();
    int scannedRows = 0;
    long beginTime = System.currentTimeMillis();
    for (Row<String, IndexColumnName> row : result.getResult()) {
        RowQuery<String, IndexColumnName> rowQuery = indexAndCf.keyspace.prepareQuery(indexAndCf.cf)
                .getKey(row.getKey())
                .autoPaginate(true)
                .withColumnRange(new RangeBuilder().setLimit(dbClient.DEFAULT_PAGE_SIZE).build());
        ColumnList<IndexColumnName> columns;
        while (!(columns = rowQuery.execute().getResult()).isEmpty()) {
            for (Column<IndexColumnName> column : columns) {
                scannedRows++;
                ObjectEntry objEntry = extractObjectEntryFromIndex(row.getKey(), column.getName(),
                        indexAndCf.indexType, toConsole);
                if (objEntry == null) {
                    continue;
                }
                ColumnFamily<String, CompositeColumnName> objCf = objCfs.get(objEntry.getClassName());
                if (objCf == null) {
                    logMessage(String.format("DataObject does not exist for %s", row.getKey()), true, toConsole);
                    continue;
                }
                if (skipCheckCFs.contains(objCf.getName())) {
                    _log.debug("Skip checking CF {} for index CF {}", objCf.getName(), indexAndCf.cf.getName());
                    continue;
                }
                // Group the index entries by target CF and object id for batch verification.
                Map<String, List<IndexEntry>> objKeysIdxEntryMap = objsToCheck.get(objCf);
                if (objKeysIdxEntryMap == null) {
                    objKeysIdxEntryMap = new HashMap<>();
                    objsToCheck.put(objCf, objKeysIdxEntryMap);
                }
                List<IndexEntry> idxEntries = objKeysIdxEntryMap.get(objEntry.getObjectId());
                if (idxEntries == null) {
                    idxEntries = new ArrayList<>();
                    objKeysIdxEntryMap.put(objEntry.getObjectId(), idxEntries);
                }
                idxEntries.add(new IndexEntry(row.getKey(), column.getName()));
            }
            // Flush a batch of collected entries once it grows large enough.
            int size = getObjsSize(objsToCheck);
            if (size >= INDEX_OBJECTS_BATCH_SIZE) {
                if (isParallel) {
                    processBatchIndexObjectsWithMultipleThreads(indexAndCf, toConsole, objsToCheck, checkResult);
                } else {
                    processBatchIndexObjects(indexAndCf, toConsole, objsToCheck, checkResult);
                }
                objsToCheck = new HashMap<>();
            }
            if (scannedRows >= THRESHHOLD_FOR_OUTPUT_DEBUG) {
                _log.info("{} index entries have been checked in {}", scannedRows,
                        DurationFormatUtils.formatDurationHMS(System.currentTimeMillis() - beginTime));
                scannedRows = 0;
                beginTime = System.currentTimeMillis();
            }
        }
    }
    // Check the remaining entries against the DataObject CFs.
    if (isParallel) {
        processBatchIndexObjectsWithMultipleThreads(indexAndCf, toConsole, objsToCheck, checkResult);
    } else {
        processBatchIndexObjects(indexAndCf, toConsole, objsToCheck, checkResult);
    }
}
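The objsToCheck map illustrates a threshold-flush pattern used throughout this helper: accumulate work keyed by target, flush once a size limit is reached, and replace the container rather than clearing it. A stripped-down generic sketch; BATCH_SIZE and the flush hook are assumed stand-ins:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

class BatchFlushSketch<K, V> {
    static final int BATCH_SIZE = 1000; // assumed threshold

    private Map<K, V> pending = new HashMap<>();
    private final Consumer<Map<K, V>> flush;

    BatchFlushSketch(Consumer<Map<K, V>> flush) {
        this.flush = flush;
    }

    void add(K key, V value) {
        pending.put(key, value);
        if (pending.size() >= BATCH_SIZE) {
            flush.accept(pending);      // verify/process the accumulated batch
            pending = new HashMap<>();  // fresh container, as checkIndexingCF does
        }
    }

    void finish() {
        if (!pending.isEmpty()) {
            flush.accept(pending);      // flush the final partial batch
        }
    }
}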