Use of org.apache.nifi.distributed.cache.client.DistributedMapCacheClient in project kylo by Teradata.
From the class DistributedSavepointController, method onConfigured:
@OnEnabled
public void onConfigured(final ConfigurationContext context) throws InitializationException {
    getLogger().info("Configuring Savepoint controller.");
    final DistributedMapCacheClient cacheClient = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);
    this.provider = new DistributedSavepointProviderImpl(cacheClient);
    this.provider.subscribeDistributedSavepointChanges(this.cache);
    this.springService = context.getProperty(SPRING_SERVICE).asControllerService(SpringContextService.class);
    addJmsListeners();
}
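The DISTRIBUTED_CACHE_SERVICE property read above is a controller-service reference. A minimal sketch of how such a descriptor is typically declared in NiFi follows; the property key "distributed-cache-service" matches the one used in the test setup below, while the display name and description wording are assumptions:

// Sketch of the property descriptor backing context.getProperty(DISTRIBUTED_CACHE_SERVICE).
// The description text is assumed, not taken from the kylo source.
public static final PropertyDescriptor DISTRIBUTED_CACHE_SERVICE = new PropertyDescriptor.Builder()
    .name("distributed-cache-service")
    .displayName("Distributed Cache Service")
    .description("The Controller Service used to store and look up savepoints.")
    .required(true)
    .identifiesControllerService(DistributedMapCacheClient.class)
    .build();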
Use of org.apache.nifi.distributed.cache.client.DistributedMapCacheClient in project kylo by Teradata.
From the class TriggerSavepointTest, method setup:
@Before
public void setup() throws InitializationException {
    runner = TestRunners.newTestRunner(TriggerSavepoint.class);

    final SpringContextService springService = new MockSpringContextService();
    DistributedMapCacheClient client = new MockDistributedMapCacheClient();
    final Map<String, String> clientProperties = new HashMap<>();
    runner.addControllerService("client", client, clientProperties);
    runner.enableControllerService(client);

    DistributedSavepointController service = new DistributedSavepointController();
    final Map<String, String> serviceProperties = new HashMap<>();
    serviceProperties.put("distributed-cache-service", "client");
    runner.addControllerService("service", service, serviceProperties);
    runner.addControllerService(SPRING_SERVICE_IDENTIFIER, springService);
    runner.setProperty(service, DistributedSavepointController.SPRING_SERVICE, SPRING_SERVICE_IDENTIFIER);
    runner.enableControllerService(springService);
    runner.enableControllerService(service);

    runner.setProperty(SetSavepoint.SAVEPOINT_SERVICE, "service");
    runner.setProperty(TriggerSavepoint.SAVEPOINT_ID, "${savepointid}");
    runner.setProperty(TriggerSavepoint.BEHAVIOR, TriggerSavepoint.RETRY);

    this.provider = service.getProvider();
}
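A hypothetical test built on this setup might enqueue a flow file whose "savepointid" attribute feeds the ${savepointid} expression and run the processor once. The test name and the assertion are illustrative only; TriggerSavepoint's output relationships are project-specific and not shown here:

@Test
public void testTriggerWithSavepointId() {
    final Map<String, String> attributes = new HashMap<>();
    attributes.put("savepointid", "sp1");
    runner.enqueue(new byte[0], attributes);
    runner.run();
    // At minimum, the flow file should have been pulled from the input queue;
    // which relationship it lands on depends on TriggerSavepoint's implementation.
    runner.assertQueueEmpty();
}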
Use of org.apache.nifi.distributed.cache.client.DistributedMapCacheClient in project nifi by apache.
From the class AbstractListProcessor, method updateState:
@OnScheduled
public final void updateState(final ProcessContext context) throws IOException {
    final String path = getPath(context);
    final DistributedMapCacheClient client = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);

    // Check if state already exists for this path. If so, we have already migrated the state.
    final StateMap stateMap = context.getStateManager().getState(getStateScope(context));
    if (stateMap.getVersion() == -1L) {
        try {
            // Migrate state from the old way of managing state (distributed cache service and local file)
            // to the new mechanism (State Manager).
            migrateState(path, client, context.getStateManager(), getStateScope(context));
        } catch (final IOException ioe) {
            throw new IOException("Failed to properly migrate state to State Manager", ioe);
        }
    }

    // When scheduled to run, check whether the stored timestamp is null, which signifies
    // that state was cleared; if so, reset the internal timestamp.
    if (lastListedLatestEntryTimestampMillis != null && stateMap.get(LATEST_LISTED_ENTRY_TIMESTAMP_KEY) == null) {
        getLogger().info("Detected that state was cleared for this component. Resetting internal values.");
        resetTimeStates();
    }

    if (resetState) {
        context.getStateManager().clear(getStateScope(context));
        resetState = false;
    }
}
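For contrast with the migration path above, writing new state goes through the same State Manager API. A minimal sketch follows; the helper method persistLatestTimestamp is hypothetical, but setState is the real StateManager call:

// Hypothetical helper: persist the latest listed timestamp via the State Manager.
private void persistLatestTimestamp(final ProcessContext context, final long timestampMillis) throws IOException {
    final Map<String, String> state = new HashMap<>();
    state.put(LATEST_LISTED_ENTRY_TIMESTAMP_KEY, String.valueOf(timestampMillis));
    context.getStateManager().setState(state, getStateScope(context));
}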
Use of org.apache.nifi.distributed.cache.client.DistributedMapCacheClient in project nifi by apache.
From the class PutDistributedMapCache, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();

    // The cache key is computed from the 'Cache Entry Identifier' property, with Expression Language support.
    final String cacheKey = context.getProperty(CACHE_ENTRY_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();

    // If the computed value is null or empty, transfer the flow file to the failure relationship.
    if (StringUtils.isBlank(cacheKey)) {
        logger.error("FlowFile {} has no attribute for given Cache Entry Identifier", new Object[] { flowFile });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    // The cache client used to interact with the distributed cache.
    final DistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);

    try {
        final long maxCacheEntrySize = context.getProperty(CACHE_ENTRY_MAX_BYTES).asDataSize(DataUnit.B).longValue();
        long flowFileSize = flowFile.getSize();

        // Flow file too large to cache.
        if (flowFileSize > maxCacheEntrySize) {
            logger.warn("Flow file {} size {} exceeds the max cache entry size ({} B).", new Object[] { flowFile, flowFileSize, maxCacheEntrySize });
            session.transfer(flowFile, REL_FAILURE);
            return;
        }

        if (flowFileSize == 0) {
            logger.warn("Flow file {} is empty, there is nothing to cache.", new Object[] { flowFile });
            session.transfer(flowFile, REL_FAILURE);
            return;
        }

        // Read the flow file content.
        final ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
        session.exportTo(flowFile, byteStream);
        byte[] cacheValue = byteStream.toByteArray();

        final String updateStrategy = context.getProperty(CACHE_UPDATE_STRATEGY).getValue();
        boolean cached = false;

        if (updateStrategy.equals(CACHE_UPDATE_REPLACE.getValue())) {
            cache.put(cacheKey, cacheValue, keySerializer, valueSerializer);
            cached = true;
        } else if (updateStrategy.equals(CACHE_UPDATE_KEEP_ORIGINAL.getValue())) {
            final byte[] oldValue = cache.getAndPutIfAbsent(cacheKey, cacheValue, keySerializer, valueSerializer, valueDeserializer);
            if (oldValue == null) {
                cached = true;
            }
        }

        // Set the 'cached' attribute and route accordingly.
        flowFile = session.putAttribute(flowFile, CACHED_ATTRIBUTE_NAME, String.valueOf(cached));
        if (cached) {
            session.transfer(flowFile, REL_SUCCESS);
        } else {
            session.transfer(flowFile, REL_FAILURE);
        }
    } catch (final IOException e) {
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        logger.error("Unable to communicate with cache when processing {} due to {}", new Object[] { flowFile, e });
    }
}
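The keySerializer and valueSerializer fields used above implement NiFi's org.apache.nifi.distributed.cache.client.Serializer interface. A minimal sketch of plausible implementations, with class names assumed:

// Serializes the String cache key as UTF-8 bytes (sketch).
public static class StringSerializer implements Serializer<String> {
    @Override
    public void serialize(final String value, final OutputStream output) throws SerializationException, IOException {
        output.write(value.getBytes(StandardCharsets.UTF_8));
    }
}

// Serializes the flow file content, already a byte array, as-is (sketch).
public static class CacheValueSerializer implements Serializer<byte[]> {
    @Override
    public void serialize(final byte[] bytes, final OutputStream output) throws SerializationException, IOException {
        output.write(bytes);
    }
}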
Use of org.apache.nifi.distributed.cache.client.DistributedMapCacheClient in project nifi by apache.
From the class TestHBase_1_1_2_ClientMapCacheService, method testPut:
@Test
public void testPut() throws InitializationException, IOException {
    final String row = "row1";
    final String content = "content1";
    final TestRunner runner = TestRunners.newTestRunner(TestProcessor.class);

    // Mock an HBase Table so we can verify the put operations later.
    final Table table = Mockito.mock(Table.class);
    when(table.getName()).thenReturn(TableName.valueOf(tableName));

    // Create the controller service and link it to the test processor.
    final MockHBaseClientService service = configureHBaseClientService(runner, table);
    runner.assertValid(service);

    final HBaseClientService hBaseClientService = runner.getProcessContext().getProperty(TestProcessor.HBASE_CLIENT_SERVICE).asControllerService(HBaseClientService.class);
    final DistributedMapCacheClient cacheService = configureHBaseCacheService(runner, hBaseClientService);
    runner.assertValid(cacheService);

    // Try to put a single cell.
    final DistributedMapCacheClient hBaseCacheService = runner.getProcessContext().getProperty(TestProcessor.HBASE_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);
    hBaseCacheService.put(row, content, stringSerializer, stringSerializer);

    // Verify only one call to put was made.
    ArgumentCaptor<Put> capture = ArgumentCaptor.forClass(Put.class);
    verify(table, times(1)).put(capture.capture());
    verifyPut(row, columnFamily, columnQualifier, content, capture.getValue());
}
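Since Serializer declares a single abstract method, the stringSerializer passed to put(...) can be as small as a lambda in a test. A sketch; the actual field in the test class may be defined differently:

// Sketch: a Serializer<String> as a lambda, writing the value as UTF-8 bytes.
private final Serializer<String> stringSerializer =
        (value, output) -> output.write(value.getBytes(StandardCharsets.UTF_8));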