Use of java.util.OptionalInt in project fastjson by alibaba.
The class OptionalCodec, method write.
public void write(JSONSerializer serializer, Object object, Object fieldName, Type fieldType, int features) throws IOException {
    if (object == null) {
        serializer.writeNull();
        return;
    }
    if (object instanceof Optional) {
        Optional<?> optional = (Optional<?>) object;
        Object value = optional.isPresent() ? optional.get() : null;
        serializer.write(value);
        return;
    }
    if (object instanceof OptionalDouble) {
        OptionalDouble optional = (OptionalDouble) object;
        if (optional.isPresent()) {
            double value = optional.getAsDouble();
            serializer.write(value);
        } else {
            serializer.writeNull();
        }
        return;
    }
    if (object instanceof OptionalInt) {
        OptionalInt optional = (OptionalInt) object;
        if (optional.isPresent()) {
            int value = optional.getAsInt();
            serializer.out.writeInt(value);
        } else {
            serializer.writeNull();
        }
        return;
    }
    if (object instanceof OptionalLong) {
        OptionalLong optional = (OptionalLong) object;
        if (optional.isPresent()) {
            long value = optional.getAsLong();
            serializer.out.writeLong(value);
        } else {
            serializer.writeNull();
        }
        return;
    }
    throw new JSONException("not support optional : " + object.getClass());
}
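The write path above maps a present OptionalInt to a bare integer literal and an absent one to a JSON null. A minimal sketch of exercising both branches through the public fastjson API, assuming the codec is registered for OptionalInt by default; the "3" output is confirmed by the test below, and the "null" output is the expected result of the writeNull() branch shown above.

import java.util.OptionalInt;
import com.alibaba.fastjson.JSON;

public class OptionalIntWriteSketch {
    public static void main(String[] args) {
        // Present value: the codec writes the raw int, so the JSON is just the literal.
        System.out.println(JSON.toJSONString(OptionalInt.of(3)));    // expected: 3
        // Absent value: the codec falls through to writeNull().
        System.out.println(JSON.toJSONString(OptionalInt.empty()));  // expected: null
    }
}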
Use of java.util.OptionalInt in project fastjson by alibaba.
The class OptionalTest, method test_optionalInt.
public void test_optionalInt() throws Exception {
    OptionalInt val = OptionalInt.of(3);

    String text = JSON.toJSONString(val);
    Assert.assertEquals("3", text);

    OptionalInt val2 = JSON.parseObject(text, OptionalInt.class);
    Assert.assertEquals(val.getAsInt(), val2.getAsInt());
}
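The same round-trip pattern should extend to the other primitive optionals handled by OptionalCodec.write. The following sketch is an assumption, not part of the original test class: it presumes fastjson's read path handles OptionalLong and OptionalDouble symmetrically to the write path shown above (the deserializer is not part of these snippets).

import java.util.OptionalDouble;
import java.util.OptionalLong;
import com.alibaba.fastjson.JSON;
import org.junit.Assert;

public class OptionalRoundTripSketch {
    public void test_optionalLongAndDouble() {
        // Mirrors test_optionalInt for the other primitive optionals.
        String longText = JSON.toJSONString(OptionalLong.of(3L));
        Assert.assertEquals("3", longText);
        Assert.assertEquals(3L, JSON.parseObject(longText, OptionalLong.class).getAsLong());

        String doubleText = JSON.toJSONString(OptionalDouble.of(1.5));
        Assert.assertEquals(1.5, JSON.parseObject(doubleText, OptionalDouble.class).getAsDouble(), 0.0);
    }
}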
Use of java.util.OptionalInt in project presto by prestodb.
The class HivePageSink, method getWriterIndexes.
private int[] getWriterIndexes(Page page) {
    Page partitionColumns = extractColumns(page, partitionColumnsInputIndex);
    Block bucketBlock = buildBucketBlock(page);
    int[] writerIndexes = pagePartitioner.partitionPage(partitionColumns, bucketBlock);
    if (pagePartitioner.getMaxIndex() >= maxOpenWriters) {
        throw new PrestoException(HIVE_TOO_MANY_OPEN_PARTITIONS, "Too many open partitions");
    }

    // expand writers list to new size
    while (writers.size() <= pagePartitioner.getMaxIndex()) {
        writers.add(null);
        WriterPositions newWriterPositions = new WriterPositions();
        systemMemoryUsage += sizeOf(newWriterPositions.getPositionsArray());
        writerPositions.add(newWriterPositions);
    }

    // create missing writers
    for (int position = 0; position < page.getPositionCount(); position++) {
        int writerIndex = writerIndexes[position];
        if (writers.get(writerIndex) != null) {
            continue;
        }

        OptionalInt bucketNumber = OptionalInt.empty();
        if (bucketBlock != null) {
            bucketNumber = OptionalInt.of(bucketBlock.getInt(position, 0));
        }
        HiveWriter writer = writerFactory.createWriter(partitionColumns, position, bucketNumber);
        writers.set(writerIndex, writer);
    }

    verify(writers.size() == pagePartitioner.getMaxIndex() + 1);
    verify(!writers.contains(null));

    return writerIndexes;
}
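The bucketNumber handling above is the usual null-to-OptionalInt bridge: an unbucketed table produces no bucket block, so the writer is created with OptionalInt.empty(), while a bucketed table supplies OptionalInt.of(bucket). A standalone sketch of that pattern follows; toBucketNumber is a hypothetical helper for illustration, not part of HivePageSink.

import java.util.OptionalInt;

final class BucketNumberSketch {
    // Hypothetical helper: translate a possibly-absent bucket id into OptionalInt,
    // mirroring the bucketBlock == null check in getWriterIndexes above.
    static OptionalInt toBucketNumber(Integer bucket) {
        return (bucket == null) ? OptionalInt.empty() : OptionalInt.of(bucket);
    }

    public static void main(String[] args) {
        System.out.println(toBucketNumber(null));  // prints OptionalInt.empty
        System.out.println(toBucketNumber(7));     // prints OptionalInt[7]
    }
}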
Use of java.util.OptionalInt in project presto by prestodb.
The class RaptorMetadata, method getOrCreateDistribution.
private Optional<DistributionInfo> getOrCreateDistribution(Map<String, RaptorColumnHandle> columnHandleMap, Map<String, Object> properties) {
    OptionalInt bucketCount = getBucketCount(properties);
    List<RaptorColumnHandle> bucketColumnHandles = getBucketColumnHandles(getBucketColumns(properties), columnHandleMap);
    if (bucketCount.isPresent() && bucketColumnHandles.isEmpty()) {
        throw new PrestoException(INVALID_TABLE_PROPERTY, format("Must specify '%s' along with '%s'", BUCKETED_ON_PROPERTY, BUCKET_COUNT_PROPERTY));
    }
    if (!bucketCount.isPresent() && !bucketColumnHandles.isEmpty()) {
        throw new PrestoException(INVALID_TABLE_PROPERTY, format("Must specify '%s' along with '%s'", BUCKET_COUNT_PROPERTY, BUCKETED_ON_PROPERTY));
    }

    ImmutableList.Builder<Type> bucketColumnTypes = ImmutableList.builder();
    for (RaptorColumnHandle column : bucketColumnHandles) {
        validateBucketType(column.getColumnType());
        bucketColumnTypes.add(column.getColumnType());
    }

    long distributionId;
    String distributionName = getDistributionName(properties);
    if (distributionName != null) {
        if (bucketColumnHandles.isEmpty()) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, format("Must specify '%s' along with '%s'", BUCKETED_ON_PROPERTY, DISTRIBUTION_NAME_PROPERTY));
        }
        Distribution distribution = dao.getDistribution(distributionName);
        if (distribution == null) {
            if (!bucketCount.isPresent()) {
                throw new PrestoException(INVALID_TABLE_PROPERTY, "Distribution does not exist and bucket count is not specified");
            }
            distribution = getOrCreateDistribution(distributionName, bucketColumnTypes.build(), bucketCount.getAsInt());
        }
        distributionId = distribution.getId();
        if (bucketCount.isPresent() && (distribution.getBucketCount() != bucketCount.getAsInt())) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, "Bucket count must match distribution");
        }
        if (!distribution.getColumnTypes().equals(bucketColumnTypes.build())) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, "Bucket column types must match distribution");
        }
    } else if (bucketCount.isPresent()) {
        String types = Distribution.serializeColumnTypes(bucketColumnTypes.build());
        distributionId = dao.insertDistribution(null, types, bucketCount.getAsInt());
    } else {
        return Optional.empty();
    }

    shardManager.createBuckets(distributionId, bucketCount.getAsInt());
    return Optional.of(new DistributionInfo(distributionId, bucketCount.getAsInt(), bucketColumnHandles));
}
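The property validation at the top of the method is a common OptionalInt idiom: presence of the bucket count and presence of the bucket columns must agree before either is unwrapped with getAsInt(). A simplified, standalone sketch of that cross-check follows; checkBucketProperties and the exception type are illustrative stand-ins, not the Presto-specific code.

import java.util.List;
import java.util.OptionalInt;

final class BucketPropertySketch {
    // Hypothetical stand-in for the INVALID_TABLE_PROPERTY checks above:
    // bucket count and bucket columns must be specified together.
    static void checkBucketProperties(OptionalInt bucketCount, List<String> bucketColumns) {
        if (bucketCount.isPresent() && bucketColumns.isEmpty()) {
            throw new IllegalArgumentException("Must specify bucketed_on along with bucket_count");
        }
        if (!bucketCount.isPresent() && !bucketColumns.isEmpty()) {
            throw new IllegalArgumentException("Must specify bucket_count along with bucketed_on");
        }
    }
}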
Use of java.util.OptionalInt in project presto by prestodb.
The class RaptorPageSourceProvider, method createPageSource.
@Override
public ConnectorPageSource createPageSource(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<ColumnHandle> columns) {
    RaptorSplit raptorSplit = (RaptorSplit) split;

    OptionalInt bucketNumber = raptorSplit.getBucketNumber();
    TupleDomain<RaptorColumnHandle> predicate = raptorSplit.getEffectivePredicate();
    ReaderAttributes attributes = ReaderAttributes.from(session);
    OptionalLong transactionId = raptorSplit.getTransactionId();

    if (raptorSplit.getShardUuids().size() == 1) {
        UUID shardUuid = raptorSplit.getShardUuids().iterator().next();
        return createPageSource(shardUuid, bucketNumber, columns, predicate, attributes, transactionId);
    }

    Iterator<ConnectorPageSource> iterator = raptorSplit.getShardUuids().stream()
            .map(shardUuid -> createPageSource(shardUuid, bucketNumber, columns, predicate, attributes, transactionId))
            .iterator();

    return new ConcatPageSource(iterator);
}
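Every shard page source for the split shares the same bucketNumber, so the effectively-final OptionalInt is read once from the split and captured inside the stream's lambda. A minimal sketch of that capture pattern outside of Presto; SplitSketch and openShard are illustrative placeholders, not Presto APIs.

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.OptionalInt;
import java.util.UUID;

final class SplitSketch {
    // Hypothetical placeholder for the per-shard factory call in createPageSource.
    static String openShard(UUID shardUuid, OptionalInt bucketNumber) {
        return shardUuid + " (bucket " + (bucketNumber.isPresent() ? bucketNumber.getAsInt() : "none") + ")";
    }

    public static void main(String[] args) {
        List<UUID> shardUuids = Arrays.asList(UUID.randomUUID(), UUID.randomUUID());
        OptionalInt bucketNumber = OptionalInt.of(2);
        // The effectively-final OptionalInt is captured by the lambda, as in createPageSource.
        Iterator<String> sources = shardUuids.stream()
                .map(uuid -> openShard(uuid, bucketNumber))
                .iterator();
        sources.forEachRemaining(System.out::println);
    }
}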