Example 21 with NamespaceId

use of co.cask.cdap.proto.id.NamespaceId in project cdap by caskdata.

the class ConcurrentStreamWriterTestBase method testConcurrentAppendFile.

@Test
public void testConcurrentAppendFile() throws Exception {
    final String streamName = "testConcurrentFile";
    NamespaceId namespace = new NamespaceId("namespace");
    StreamId streamId = namespace.stream(streamName);
    StreamAdmin streamAdmin = new TestStreamAdmin(getNamespacedLocationFactory(), Long.MAX_VALUE, 1000);
    int threads = Runtime.getRuntime().availableProcessors() * 4;
    StreamFileWriterFactory fileWriterFactory = createStreamFileWriterFactory();
    final ConcurrentStreamWriter streamWriter = createStreamWriter(streamId, streamAdmin, threads, fileWriterFactory);
    int msgCount = 10000;
    NamespacedLocationFactory locationFactory = getNamespacedLocationFactory();
    // Half of the threads will call appendFile, while the other half appends events one by one.
    // Prepare the files first; each file has 10000 events.
    final List<FileInfo> fileInfos = Lists.newArrayList();
    for (int i = 0; i < threads / 2; i++) {
        fileInfos.add(generateFile(locationFactory, i, msgCount));
    }
    // Append file and write events
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch completion = new CountDownLatch(threads);
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    for (int i = 0; i < threads / 2; i++) {
        executor.execute(createAppendFileTask(streamId, streamWriter, fileInfos.get(i), startLatch, completion));
    }
    for (int i = threads / 2; i < threads; i++) {
        executor.execute(createWriterTask(streamId, streamWriter, i, msgCount, 50, startLatch, completion));
    }
    startLatch.countDown();
    Assert.assertTrue(completion.await(4, TimeUnit.MINUTES));
    // Verify all events are written.
    // There should be only one partition
    Location partitionLocation = streamAdmin.getConfig(streamId).getLocation().list().get(0);
    List<Location> files = partitionLocation.list();
    List<StreamEvent> events = Lists.newArrayListWithCapacity(threads * msgCount);
    for (Location location : files) {
        // Only create a reader for event files
        if (StreamFileType.getType(location.getName()) != StreamFileType.EVENT) {
            continue;
        }
        StreamDataFileReader reader = StreamDataFileReader.create(Locations.newInputSupplier(location));
        reader.read(events, Integer.MAX_VALUE, 0, TimeUnit.SECONDS);
    }
    Assert.assertTrue(verifyEvents(threads, msgCount, events));
}
Also used : StreamId(co.cask.cdap.proto.id.StreamId) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) NamespacedLocationFactory(co.cask.cdap.common.namespace.NamespacedLocationFactory) CountDownLatch(java.util.concurrent.CountDownLatch) StreamAdmin(co.cask.cdap.data2.transaction.stream.StreamAdmin) NoopStreamAdmin(co.cask.cdap.data.stream.NoopStreamAdmin) StreamFileWriterFactory(co.cask.cdap.data.stream.StreamFileWriterFactory) LocationStreamFileWriterFactory(co.cask.cdap.data.runtime.LocationStreamFileWriterFactory) ExecutorService(java.util.concurrent.ExecutorService) NamespaceId(co.cask.cdap.proto.id.NamespaceId) StreamDataFileReader(co.cask.cdap.data.stream.StreamDataFileReader) Location(org.apache.twill.filesystem.Location) Test(org.junit.Test)
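
The createAppendFileTask, createWriterTask and verifyEvents helpers belong to the test base and are not shown in this snippet. Purely as a hedged sketch of what such a verification could look like, the check below assumes every event body follows the "Message <i> from <id>" format used by generateFile in Example 22; the method name, the UTF-8 decoding, and the extra java.util.HashMap/Map and java.nio.charset.StandardCharsets imports are assumptions, not the actual helper:

private boolean verifyEventsSketch(int writers, int msgCount, List<StreamEvent> events) {
    // Every writer, whether file-based or event-by-event, should have contributed exactly msgCount events.
    if (events.size() != writers * msgCount) {
        return false;
    }
    Map<String, Integer> countsByWriter = new HashMap<>();
    for (StreamEvent event : events) {
        // Decode a duplicate so the original buffer position stays untouched; the last token is the writer id.
        String body = StandardCharsets.UTF_8.decode(event.getBody().duplicate()).toString();
        String writerId = body.substring(body.lastIndexOf(' ') + 1);
        countsByWriter.merge(writerId, 1, Integer::sum);
    }
    for (int count : countsByWriter.values()) {
        if (count != msgCount) {
            return false;
        }
    }
    return countsByWriter.size() == writers;
}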

Example 22 with NamespaceId

use of co.cask.cdap.proto.id.NamespaceId in project cdap by caskdata.

the class ConcurrentStreamWriterTestBase method generateFile.

private FileInfo generateFile(NamespacedLocationFactory locationFactory, int id, int events) throws IOException {
    NamespaceId dummyNs = new NamespaceId("dummy");
    Location eventLocation = locationFactory.get(dummyNs).append(UUID.randomUUID().toString());
    Location indexLocation = locationFactory.get(dummyNs).append(UUID.randomUUID().toString());
    StreamDataFileWriter writer = new StreamDataFileWriter(Locations.newOutputSupplier(eventLocation), Locations.newOutputSupplier(indexLocation), 1000L);
    for (int i = 0; i < events; i++) {
        writer.append(StreamFileTestUtils.createEvent(System.currentTimeMillis(), "Message " + i + " from " + id));
        if (i % 50 == 0) {
            writer.flush();
        }
    }
    writer.flush();
    return new FileInfo(eventLocation, indexLocation, writer, events);
}
Also used : NamespaceId(co.cask.cdap.proto.id.NamespaceId) StreamDataFileWriter(co.cask.cdap.data.stream.StreamDataFileWriter) Location(org.apache.twill.filesystem.Location)
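
A file produced by generateFile can be read back with the same StreamDataFileReader calls that appear in Example 21. A minimal sketch (checked exceptions omitted), assuming eventLocation refers to the event Location held by the returned FileInfo:

// Read every event currently in the generated file without waiting for new data to arrive.
List<StreamEvent> events = Lists.newArrayList();
StreamDataFileReader reader = StreamDataFileReader.create(Locations.newInputSupplier(eventLocation));
reader.read(events, Integer.MAX_VALUE, 0, TimeUnit.SECONDS);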

Example 23 with NamespaceId

use of co.cask.cdap.proto.id.NamespaceId in project cdap by caskdata.

the class EntityIdKeyHelper method getTargetIdIdFromKey.

public static NamespacedEntityId getTargetIdIdFromKey(MDSKey.Splitter keySplitter, String type) {
    if (type.equals(TYPE_MAP.get(NamespaceId.class))) {
        String namespaceId = keySplitter.getString();
        return new NamespaceId(namespaceId);
    } else if (type.equals(TYPE_MAP.get(ProgramId.class))) {
        String namespaceId = keySplitter.getString();
        String appId = keySplitter.getString();
        String programType = keySplitter.getString();
        String programId = keySplitter.getString();
        return new ProgramId(namespaceId, appId, programType, programId);
    } else if (type.equals(TYPE_MAP.get(ApplicationId.class))) {
        String namespaceId = keySplitter.getString();
        String appId = keySplitter.getString();
        return new ApplicationId(namespaceId, appId);
    } else if (type.equals(TYPE_MAP.get(ArtifactId.class))) {
        String namespaceId = keySplitter.getString();
        String name = keySplitter.getString();
        String version = keySplitter.getString();
        return new ArtifactId(namespaceId, name, version);
    } else if (type.equals(TYPE_MAP.get(DatasetId.class))) {
        String namespaceId = keySplitter.getString();
        String instanceId = keySplitter.getString();
        return new DatasetId(namespaceId, instanceId);
    } else if (type.equals(TYPE_MAP.get(StreamId.class))) {
        String namespaceId = keySplitter.getString();
        String instanceId = keySplitter.getString();
        return new StreamId(namespaceId, instanceId);
    } else if (type.equals(TYPE_MAP.get(StreamViewId.class))) {
        String namespaceId = keySplitter.getString();
        String streamId = keySplitter.getString();
        String viewId = keySplitter.getString();
        return new StreamViewId(namespaceId, streamId, viewId);
    }
    throw new IllegalArgumentException("Illegal Type " + type + " of metadata source.");
}
Also used : StreamId(co.cask.cdap.proto.id.StreamId) ArtifactId(co.cask.cdap.proto.id.ArtifactId) NamespaceId(co.cask.cdap.proto.id.NamespaceId) ProgramId(co.cask.cdap.proto.id.ProgramId) ApplicationId(co.cask.cdap.proto.id.ApplicationId) DatasetId(co.cask.cdap.proto.id.DatasetId) StreamViewId(co.cask.cdap.proto.id.StreamViewId)
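
getTargetIdIdFromKey rebuilds entity ids from raw key parts. When those parts are already available as values, the same ids can be derived from a NamespaceId directly; the stream() helper appears in Example 21, while the parallel app(), dataset() and artifact() helpers are assumed here purely for illustration:

NamespaceId ns = new NamespaceId("demo");
// Child ids are namespaced under "demo"; the names below are illustrative.
StreamId stream = ns.stream("events");
ApplicationId app = ns.app("PurchaseApp");
DatasetId dataset = ns.dataset("purchases");
ArtifactId artifact = ns.artifact("purchase-app", "1.0.0");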

Example 24 with NamespaceId

use of co.cask.cdap.proto.id.NamespaceId in project cdap by caskdata.

the class FileStreamAdmin method create.

@Override
@Nullable
public StreamConfig create(final StreamId streamId, @Nullable final Properties props) throws Exception {
    // User should have write access to the namespace
    NamespaceId streamNamespace = streamId.getParent();
    ensureAccess(streamNamespace, Action.WRITE);
    final Properties properties = (props == null) ? new Properties() : props;
    String specifiedOwnerPrincipal = properties.containsKey(Constants.Security.PRINCIPAL) ? properties.getProperty(Constants.Security.PRINCIPAL) : null;
    if (exists(streamId)) {
        // If the stream already exists, make sure the owner in this create request is the same.
        SecurityUtil.verifyOwnerPrincipal(streamId, specifiedOwnerPrincipal, ownerAdmin);
        // Stream creation is currently an idempotent operation, so just return null and don't do anything.
        return null;
    }
    // Revoke privileges to make sure there are no orphaned privileges; since the stream doesn't exist, it is safe to do so.
    try {
        privilegesManager.revoke(streamId);
    } catch (Exception e) {
        // If revoking privileges fails for some reason, there might be leftover privileges which will get enforced now;
        // just warn but do not fail the stream creation itself.
        LOG.warn("Failed to delete privileges for a new {} being created. If the authorization store had some " + "orphaned privileges for {} they will be enforced.", streamId.getEntityType(), streamId, e);
    }
    // if the stream didn't exist then add the owner information
    if (specifiedOwnerPrincipal != null) {
        ownerAdmin.add(streamId, new KerberosPrincipalId(specifiedOwnerPrincipal));
    }
    try {
        // Grant the user all actions on the newly created stream.
        privilegesManager.grant(streamId, authenticationContext.getPrincipal(), EnumSet.allOf(Action.class));
        try {
            final Location streamLocation = impersonator.doAs(streamId, new Callable<Location>() {

                @Override
                public Location call() throws Exception {
                    assertNamespaceHomeExists(streamId.getParent());
                    Location streamLocation = getStreamLocation(streamId);
                    Locations.mkdirsIfNotExists(streamLocation);
                    return streamLocation;
                }
            });
            return createStream(streamId, properties, streamLocation);
        } catch (Exception e) {
            // clean up privileges
            privilegesManager.revoke(streamId);
            throw e;
        }
    } catch (Exception e) {
        // There was a problem creating the stream, so delete the owner information.
        // This is safe to call even if the entry doesn't exist.
        ownerAdmin.delete(streamId);
        throw e;
    }
}
Also used : Action(co.cask.cdap.proto.security.Action) NamespaceId(co.cask.cdap.proto.id.NamespaceId) StreamProperties(co.cask.cdap.proto.StreamProperties) CoordinatorStreamProperties(co.cask.cdap.data.stream.CoordinatorStreamProperties) Properties(java.util.Properties) KerberosPrincipalId(co.cask.cdap.proto.id.KerberosPrincipalId) NotificationFeedException(co.cask.cdap.notifications.feeds.NotificationFeedException) FileNotFoundException(java.io.FileNotFoundException) UnauthorizedException(co.cask.cdap.security.spi.authorization.UnauthorizedException) IOException(java.io.IOException) NotFoundException(co.cask.cdap.common.NotFoundException) StreamNotFoundException(co.cask.cdap.common.StreamNotFoundException) Location(org.apache.twill.filesystem.Location) Nullable(javax.annotation.Nullable)
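
From the caller's side, create only needs a StreamId under the target namespace plus, optionally, an owner principal passed through the properties. A hypothetical invocation (checked exceptions omitted), assuming an already constructed FileStreamAdmin named streamAdmin:

NamespaceId ns = new NamespaceId("prod");
StreamId streamId = ns.stream("audit");
Properties props = new Properties();
// Request a Kerberos owner for the stream; the principal value here is only an example.
props.setProperty(Constants.Security.PRINCIPAL, "audit_user/host@EXAMPLE.COM");
// Returns the new StreamConfig, or null if the stream already existed (the create is idempotent).
StreamConfig config = streamAdmin.create(streamId, props);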

Example 25 with NamespaceId

use of co.cask.cdap.proto.id.NamespaceId in project cdap by caskdata.

the class HBaseStreamFileConsumerFactory method create.

@Override
protected StreamConsumer create(TableId tableId, StreamConfig streamConfig, ConsumerConfig consumerConfig, StreamConsumerStateStore stateStore, StreamConsumerState beginConsumerState, FileReader<StreamEventOffset, Iterable<StreamFileOffset>> reader, @Nullable ReadFilter extraFilter) throws IOException {
    int splits = cConf.getInt(Constants.Stream.CONSUMER_TABLE_PRESPLITS);
    AbstractRowKeyDistributor distributor = new RowKeyDistributorByHashPrefix(new RowKeyDistributorByHashPrefix.OneByteSimpleHash(splits));
    byte[][] splitKeys = HBaseTableUtil.getSplitKeys(splits, splits, distributor);
    TableId hBaseTableId = tableUtil.createHTableId(new NamespaceId(tableId.getNamespace()), tableId.getTableName());
    TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(hBaseTableId, cConf);
    ColumnFamilyDescriptorBuilder cfdBuilder = HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(QueueEntryRow.COLUMN_FAMILY), hConf);
    tdBuilder.addColumnFamily(cfdBuilder.build());
    tdBuilder.addProperty(QueueConstants.DISTRIBUTOR_BUCKETS, Integer.toString(splits));
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
        ddlExecutor.createTableIfNotExists(tdBuilder.build(), splitKeys);
    }
    HTable hTable = tableUtil.createHTable(hConf, hBaseTableId);
    hTable.setWriteBufferSize(Constants.Stream.HBASE_WRITE_BUFFER_SIZE);
    hTable.setAutoFlushTo(false);
    return new HBaseStreamFileConsumer(cConf, streamConfig, consumerConfig, tableUtil, hTable, reader, stateStore, beginConsumerState, extraFilter, createKeyDistributor(hTable.getTableDescriptor()));
}
Also used : TableId(co.cask.cdap.data2.util.TableId) HBaseDDLExecutor(co.cask.cdap.spi.hbase.HBaseDDLExecutor) RowKeyDistributorByHashPrefix(co.cask.cdap.hbase.wd.RowKeyDistributorByHashPrefix) AbstractRowKeyDistributor(co.cask.cdap.hbase.wd.AbstractRowKeyDistributor) ColumnFamilyDescriptorBuilder(co.cask.cdap.data2.util.hbase.ColumnFamilyDescriptorBuilder) TableDescriptorBuilder(co.cask.cdap.data2.util.hbase.TableDescriptorBuilder) NamespaceId(co.cask.cdap.proto.id.NamespaceId) HTable(org.apache.hadoop.hbase.client.HTable)
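
The factory never receives a NamespaceId of its own; it wraps the namespace string carried by the data-fabric TableId so that HBaseTableUtil can resolve the HBase-level table id. A small sketch of that mapping step in isolation, assuming tableUtil is an HBaseTableUtil instance and that a TableId.from(namespace, table) factory method is available:

// Wrap the TableId's namespace string back into a NamespaceId before resolving the HBase table id.
TableId tableId = TableId.from("myspace", "stream.consumer.state");
TableId hBaseTableId = tableUtil.createHTableId(new NamespaceId(tableId.getNamespace()), tableId.getTableName());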

Aggregations

NamespaceId (co.cask.cdap.proto.id.NamespaceId): 234
Test (org.junit.Test): 99
Path (javax.ws.rs.Path): 47
NamespaceMeta (co.cask.cdap.proto.NamespaceMeta): 43
ApplicationId (co.cask.cdap.proto.id.ApplicationId): 35
IOException (java.io.IOException): 34
StreamId (co.cask.cdap.proto.id.StreamId): 30
DatasetId (co.cask.cdap.proto.id.DatasetId): 27
TableId (co.cask.cdap.data2.util.TableId): 26
Id (co.cask.cdap.proto.Id): 24
ProgramId (co.cask.cdap.proto.id.ProgramId): 24
NotFoundException (co.cask.cdap.common.NotFoundException): 22
ArtifactId (co.cask.cdap.proto.id.ArtifactId): 21
BadRequestException (co.cask.cdap.common.BadRequestException): 20
TopicId (co.cask.cdap.proto.id.TopicId): 19
GET (javax.ws.rs.GET): 18
Location (org.apache.twill.filesystem.Location): 18
ArrayList (java.util.ArrayList): 15
TopicMetadata (co.cask.cdap.messaging.TopicMetadata): 13
NamespaceNotFoundException (co.cask.cdap.common.NamespaceNotFoundException): 12