Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The getOffsetAndLengthForSplit method of the class OrcShimV200.
@VisibleForTesting
public static Tuple2<Long, Long> getOffsetAndLengthForSplit(
        long splitStart, long splitLength, List<StripeInformation> stripes) {
    long splitEnd = splitStart + splitLength;
    long readStart = Long.MAX_VALUE;
    long readEnd = Long.MIN_VALUE;
    for (StripeInformation s : stripes) {
        if (splitStart <= s.getOffset() && s.getOffset() < splitEnd) {
            // stripe starts in the split, so it is included
            readStart = Math.min(readStart, s.getOffset());
            readEnd = Math.max(readEnd, s.getOffset() + s.getLength());
        }
    }
    if (readStart < Long.MAX_VALUE) {
        // at least one stripe is included
        return Tuple2.of(readStart, readEnd - readStart);
    } else {
        return Tuple2.of(0L, 0L);
    }
}
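The selection rule is easiest to see on concrete numbers: a stripe is read if and only if it starts inside the split, and the returned range covers all selected stripes end to end. Below is a minimal, self-contained sketch of the same logic; the Stripe class is a hypothetical stand-in for ORC's StripeInformation, modeling only the two accessors the method reads.

import java.util.Arrays;
import java.util.List;

public class SplitSelectionSketch {

    // Hypothetical stand-in for org.apache.orc.StripeInformation.
    static final class Stripe {
        final long offset;
        final long length;
        Stripe(long offset, long length) {
            this.offset = offset;
            this.length = length;
        }
    }

    public static void main(String[] args) {
        // Three stripes of 100 bytes each, starting at offset 3 (after the file header).
        List<Stripe> stripes = Arrays.asList(
                new Stripe(3, 100), new Stripe(103, 100), new Stripe(203, 100));

        // A split covering [50, 150) contains the start of the second stripe only,
        // so the expected read range is [103, 203): offset 103, length 100.
        long splitStart = 50, splitEnd = 150;
        long readStart = Long.MAX_VALUE, readEnd = Long.MIN_VALUE;
        for (Stripe s : stripes) {
            if (splitStart <= s.offset && s.offset < splitEnd) {
                readStart = Math.min(readStart, s.offset);
                readEnd = Math.max(readEnd, s.offset + s.length);
            }
        }
        System.out.println(readStart < Long.MAX_VALUE
                ? "read offset=" + readStart + ", length=" + (readEnd - readStart)
                : "nothing to read");
    }
}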
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The serializeKeyedStateHandle method of the class MetadataV2V3SerializerBase.
// ------------------------------------------------------------------------
// keyed state
// ------------------------------------------------------------------------
@VisibleForTesting
static void serializeKeyedStateHandle(KeyedStateHandle stateHandle, DataOutputStream dos)
        throws IOException {
    if (stateHandle == null) {
        dos.writeByte(NULL_HANDLE);
    } else if (stateHandle instanceof KeyGroupsStateHandle) {
        KeyGroupsStateHandle keyGroupsStateHandle = (KeyGroupsStateHandle) stateHandle;
        if (stateHandle instanceof KeyGroupsSavepointStateHandle) {
            dos.writeByte(SAVEPOINT_KEY_GROUPS_HANDLE);
        } else {
            dos.writeByte(KEY_GROUPS_HANDLE_V2);
        }
        dos.writeInt(keyGroupsStateHandle.getKeyGroupRange().getStartKeyGroup());
        dos.writeInt(keyGroupsStateHandle.getKeyGroupRange().getNumberOfKeyGroups());
        for (int keyGroup : keyGroupsStateHandle.getKeyGroupRange()) {
            dos.writeLong(keyGroupsStateHandle.getOffsetForKeyGroup(keyGroup));
        }
        serializeStreamStateHandle(keyGroupsStateHandle.getDelegateStateHandle(), dos);
        // savepoint state handles do not need to persist the state handle id
        if (!(stateHandle instanceof KeyGroupsSavepointStateHandle)) {
            writeStateHandleId(stateHandle, dos);
        }
    } else if (stateHandle instanceof IncrementalRemoteKeyedStateHandle) {
        IncrementalRemoteKeyedStateHandle incrementalKeyedStateHandle =
                (IncrementalRemoteKeyedStateHandle) stateHandle;
        dos.writeByte(INCREMENTAL_KEY_GROUPS_HANDLE_V2);
        dos.writeLong(incrementalKeyedStateHandle.getCheckpointId());
        dos.writeUTF(String.valueOf(incrementalKeyedStateHandle.getBackendIdentifier()));
        dos.writeInt(incrementalKeyedStateHandle.getKeyGroupRange().getStartKeyGroup());
        dos.writeInt(incrementalKeyedStateHandle.getKeyGroupRange().getNumberOfKeyGroups());
        dos.writeLong(incrementalKeyedStateHandle.getCheckpointedSize());
        serializeStreamStateHandle(incrementalKeyedStateHandle.getMetaStateHandle(), dos);
        serializeStreamStateHandleMap(incrementalKeyedStateHandle.getSharedState(), dos);
        serializeStreamStateHandleMap(incrementalKeyedStateHandle.getPrivateState(), dos);
        writeStateHandleId(incrementalKeyedStateHandle, dos);
    } else if (stateHandle instanceof ChangelogStateBackendHandle) {
        ChangelogStateBackendHandle handle = (ChangelogStateBackendHandle) stateHandle;
        dos.writeByte(CHANGELOG_HANDLE);
        dos.writeInt(handle.getKeyGroupRange().getStartKeyGroup());
        dos.writeInt(handle.getKeyGroupRange().getNumberOfKeyGroups());
        dos.writeLong(handle.getCheckpointedSize());
        dos.writeInt(handle.getMaterializedStateHandles().size());
        for (KeyedStateHandle keyedStateHandle : handle.getMaterializedStateHandles()) {
            serializeKeyedStateHandle(keyedStateHandle, dos);
        }
        dos.writeInt(handle.getNonMaterializedStateHandles().size());
        for (KeyedStateHandle k : handle.getNonMaterializedStateHandles()) {
            serializeKeyedStateHandle(k, dos);
        }
        dos.writeLong(handle.getMaterializationID());
        writeStateHandleId(handle, dos);
    } else if (stateHandle instanceof InMemoryChangelogStateHandle) {
        InMemoryChangelogStateHandle handle = (InMemoryChangelogStateHandle) stateHandle;
        dos.writeByte(CHANGELOG_BYTE_INCREMENT_HANDLE);
        dos.writeInt(handle.getKeyGroupRange().getStartKeyGroup());
        dos.writeInt(handle.getKeyGroupRange().getNumberOfKeyGroups());
        dos.writeLong(handle.getFrom());
        dos.writeLong(handle.getTo());
        dos.writeInt(handle.getChanges().size());
        for (StateChange change : handle.getChanges()) {
            dos.writeInt(change.getKeyGroup());
            dos.writeInt(change.getChange().length);
            dos.write(change.getChange());
        }
        writeStateHandleId(handle, dos);
    } else if (stateHandle instanceof ChangelogStateHandleStreamImpl) {
        ChangelogStateHandleStreamImpl handle = (ChangelogStateHandleStreamImpl) stateHandle;
        dos.writeByte(CHANGELOG_FILE_INCREMENT_HANDLE);
        dos.writeInt(handle.getKeyGroupRange().getStartKeyGroup());
        dos.writeInt(handle.getKeyGroupRange().getNumberOfKeyGroups());
        dos.writeInt(handle.getHandlesAndOffsets().size());
        for (Tuple2<StreamStateHandle, Long> streamHandleAndOffset : handle.getHandlesAndOffsets()) {
            dos.writeLong(streamHandleAndOffset.f1);
            serializeStreamStateHandle(streamHandleAndOffset.f0, dos);
        }
        dos.writeLong(handle.getStateSize());
        dos.writeLong(handle.getCheckpointedSize());
        writeStateHandleId(handle, dos);
    } else {
        throw new IllegalStateException("Unknown KeyedStateHandle type: " + stateHandle.getClass());
    }
}
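Every branch follows the same framing pattern: one type-tag byte, then a type-specific payload, with NULL_HANDLE standing alone for null. The sketch below illustrates that pattern, and its round trip, on a hypothetical, stripped-down handle type; the tag values and the KeyGroups class are inventions for illustration, not Flink's real constants or classes.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class TagFramingSketch {

    private static final byte NULL_HANDLE = 0;
    private static final byte KEY_GROUPS_HANDLE = 1; // hypothetical tag value

    // Hypothetical, stripped-down stand-in for a key-groups state handle.
    static final class KeyGroups {
        final int startKeyGroup;
        final long[] offsets; // one offset per key group
        KeyGroups(int startKeyGroup, long[] offsets) {
            this.startKeyGroup = startKeyGroup;
            this.offsets = offsets;
        }
    }

    static void serialize(KeyGroups handle, DataOutputStream dos) throws IOException {
        if (handle == null) {
            dos.writeByte(NULL_HANDLE); // null is just the tag, no payload
            return;
        }
        dos.writeByte(KEY_GROUPS_HANDLE); // type tag first ...
        dos.writeInt(handle.startKeyGroup); // ... then the type-specific payload
        dos.writeInt(handle.offsets.length);
        for (long offset : handle.offsets) {
            dos.writeLong(offset);
        }
    }

    static KeyGroups deserialize(DataInputStream dis) throws IOException {
        byte tag = dis.readByte(); // dispatch on the tag, mirroring serialize()
        if (tag == NULL_HANDLE) {
            return null;
        }
        int start = dis.readInt();
        long[] offsets = new long[dis.readInt()];
        for (int i = 0; i < offsets.length; i++) {
            offsets[i] = dis.readLong();
        }
        return new KeyGroups(start, offsets);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        serialize(new KeyGroups(0, new long[] {16L, 64L}), new DataOutputStream(bytes));
        KeyGroups back =
                deserialize(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println("start=" + back.startKeyGroup + ", groups=" + back.offsets.length);
    }
}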
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The resolveEntropy method of the class EntropyInjector.
@VisibleForTesting
static Path resolveEntropy(Path path, EntropyInjectingFileSystem efs, boolean injectEntropy)
        throws IOException {
    final String entropyInjectionKey = efs.getEntropyInjectionKey();
    if (entropyInjectionKey == null) {
        return path;
    } else {
        final URI originalUri = path.toUri();
        final String checkpointPath = originalUri.getPath();
        final int indexOfKey = checkpointPath.indexOf(entropyInjectionKey);
        if (indexOfKey == -1) {
            return path;
        } else {
            final StringBuilder buffer = new StringBuilder(checkpointPath.length());
            buffer.append(checkpointPath, 0, indexOfKey);
            if (injectEntropy) {
                buffer.append(efs.generateEntropy());
            }
            buffer.append(
                    checkpointPath,
                    indexOfKey + entropyInjectionKey.length(),
                    checkpointPath.length());
            final String rewrittenPath = buffer.toString();
            try {
                return new Path(
                        new URI(
                                        originalUri.getScheme(),
                                        originalUri.getAuthority(),
                                        rewrittenPath,
                                        originalUri.getQuery(),
                                        originalUri.getFragment())
                                .normalize());
            } catch (URISyntaxException e) {
                // this could only happen if the entropy segment contains invalid characters
                throw new IOException(
                        "URI format error while processing path for entropy injection", e);
            }
        }
    }
}
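A quick sketch of the substitution itself: with the same index-and-splice logic, the entropy key in the path is either replaced by generated entropy (when writing checkpoint data) or dropped entirely (when producing the human-readable pointer path). The key, entropy value, and URI below are made-up examples; the real method additionally rebuilds a Path from the rewritten string and the original scheme and authority.

import java.net.URI;
import java.net.URISyntaxException;

public class EntropySketch {

    public static void main(String[] args) throws URISyntaxException {
        // Hypothetical values for efs.getEntropyInjectionKey() and efs.generateEntropy().
        String entropyKey = "_entropy_";
        String entropy = "a1b2c3";
        URI original = new URI("s3://bucket/checkpoints/_entropy_/chk-17/metadata");

        String path = original.getPath();
        int idx = path.indexOf(entropyKey);
        if (idx >= 0) {
            // injectEntropy == true: the key is replaced with fresh entropy.
            String injected =
                    path.substring(0, idx) + entropy + path.substring(idx + entropyKey.length());
            // injectEntropy == false: the key is simply dropped.
            String stripped = path.substring(0, idx) + path.substring(idx + entropyKey.length());
            System.out.println(injected); // /checkpoints/a1b2c3/chk-17/metadata
            System.out.println(stripped); // /checkpoints//chk-17/metadata
        }
    }
}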
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The externalResourceDriversFromConfig method of the class ExternalResourceUtils.
/**
 * Instantiates the {@link ExternalResourceDriver ExternalResourceDrivers} for all enabled
 * external resources. The drivers in the returned map are keyed by resource name.
 */
@VisibleForTesting
static Map<String, ExternalResourceDriver> externalResourceDriversFromConfig(
        Configuration config, PluginManager pluginManager) {
    final Set<String> resourceSet = getExternalResourceSet(config);
    if (resourceSet.isEmpty()) {
        return Collections.emptyMap();
    }
    final Iterator<ExternalResourceDriverFactory> factoryIterator =
            pluginManager.load(ExternalResourceDriverFactory.class);
    final Map<String, ExternalResourceDriverFactory> externalResourceFactories = new HashMap<>();
    factoryIterator.forEachRemaining(
            externalResourceDriverFactory ->
                    externalResourceFactories.put(
                            externalResourceDriverFactory.getClass().getName(),
                            externalResourceDriverFactory));
    final Map<String, ExternalResourceDriver> externalResourceDrivers = new HashMap<>();
    for (String resourceName : resourceSet) {
        final ConfigOption<String> driverClassOption =
                key(ExternalResourceOptions.getExternalResourceDriverFactoryConfigOptionForResource(
                                resourceName))
                        .stringType()
                        .noDefaultValue();
        final String driverFactoryClassName = config.getString(driverClassOption);
        if (StringUtils.isNullOrWhitespaceOnly(driverFactoryClassName)) {
            LOG.warn(
                    "Could not find driver class name for {}. Please make sure {} is configured.",
                    resourceName,
                    driverClassOption.key());
            continue;
        }
        ExternalResourceDriverFactory externalResourceDriverFactory =
                externalResourceFactories.get(driverFactoryClassName);
        if (externalResourceDriverFactory != null) {
            DelegatingConfiguration delegatingConfiguration =
                    new DelegatingConfiguration(
                            config,
                            ExternalResourceOptions.getExternalResourceParamConfigPrefixForResource(
                                    resourceName));
            try {
                externalResourceDrivers.put(
                        resourceName,
                        externalResourceDriverFactory.createExternalResourceDriver(
                                delegatingConfiguration));
                LOG.info("Add external resources driver for {}.", resourceName);
            } catch (Exception e) {
                LOG.warn(
                        "Could not instantiate driver with factory {} for {}. {}",
                        driverFactoryClassName,
                        resourceName,
                        e);
            }
        } else {
            LOG.warn(
                    "Could not find factory class {} for {}.",
                    driverFactoryClassName,
                    resourceName);
        }
    }
    return externalResourceDrivers;
}
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The instantiateCatalogTable method of the class HiveCatalog.
@VisibleForTesting
CatalogBaseTable instantiateCatalogTable(Table hiveTable) {
    boolean isView = TableType.valueOf(hiveTable.getTableType()) == TableType.VIRTUAL_VIEW;
    // Table properties
    Map<String, String> properties = new HashMap<>(hiveTable.getParameters());
    boolean isHiveTable = isHiveTable(properties);
    TableSchema tableSchema;
    // Partition keys
    List<String> partitionKeys = new ArrayList<>();
    if (isHiveTable) {
        // Table schema
        tableSchema = HiveTableUtil.createTableSchema(hiveConf, hiveTable, client, hiveShim);
        if (!hiveTable.getPartitionKeys().isEmpty()) {
            partitionKeys = getFieldNames(hiveTable.getPartitionKeys());
        }
    } else {
        properties = retrieveFlinkProperties(properties);
        if (ManagedTableFactory.DEFAULT_IDENTIFIER.equalsIgnoreCase(
                properties.get(CONNECTOR.key()))) {
            // for Flink's managed table, we remove the connector option
            properties.remove(CONNECTOR.key());
        }
        DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
        tableSchemaProps.putProperties(properties);
        // try to get the table schema with both the new and the old (1.10) key,
        // to support tables created by older versions
        tableSchema =
                tableSchemaProps
                        .getOptionalTableSchema(Schema.SCHEMA)
                        .orElseGet(
                                () ->
                                        tableSchemaProps
                                                .getOptionalTableSchema("generic.table.schema")
                                                .orElseGet(() -> TableSchema.builder().build()));
        partitionKeys = tableSchemaProps.getPartitionKeys();
        // remove the schema from the properties
        properties = CatalogTableImpl.removeRedundant(properties, tableSchema, partitionKeys);
    }
    String comment = properties.remove(HiveCatalogConfig.COMMENT);
    if (isView) {
        return new CatalogViewImpl(
                hiveTable.getViewOriginalText(),
                hiveTable.getViewExpandedText(),
                tableSchema,
                properties,
                comment);
    } else {
        return new CatalogTableImpl(tableSchema, partitionKeys, properties, comment);
    }
}
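For non-Hive tables, the subtle part is the interplay between Hive's parameter map and Flink's properties: Flink metadata is stored in the Hive table's parameters under a prefix and stripped back out here via retrieveFlinkProperties. The sketch below mimics that stripping with a "flink." prefix, assumed to match HiveCatalog's convention, and made-up map contents.

import java.util.HashMap;
import java.util.Map;

public class FlinkPropertySketch {

    // Assumed prefix under which HiveCatalog stores Flink options in Hive parameters.
    private static final String FLINK_PROPERTY_PREFIX = "flink.";

    static Map<String, String> retrieveFlinkProperties(Map<String, String> hiveParams) {
        Map<String, String> flinkProps = new HashMap<>();
        for (Map.Entry<String, String> e : hiveParams.entrySet()) {
            if (e.getKey().startsWith(FLINK_PROPERTY_PREFIX)) {
                // Strip the prefix to recover the original Flink option key.
                flinkProps.put(
                        e.getKey().substring(FLINK_PROPERTY_PREFIX.length()), e.getValue());
            }
        }
        return flinkProps;
    }

    public static void main(String[] args) {
        Map<String, String> hiveParams = new HashMap<>();
        hiveParams.put("flink.connector", "kafka");            // Flink option
        hiveParams.put("transient_lastDdlTime", "1700000000"); // Hive bookkeeping, ignored
        System.out.println(retrieveFlinkProperties(hiveParams)); // {connector=kafka}
    }
}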