Use of org.opendaylight.yang.gen.v1.urn.opendaylight.neutron.provider.ext.rev150712.neutron.networks.network.Segments in project netvirt by opendaylight.
The class NeutronUtils, method getSegmentationIdFromNeutronNetwork:
public static <T extends NetworkTypeBase> String getSegmentationIdFromNeutronNetwork(Network network, Class<T> networkType) {
    String segmentationId = null;
    NetworkProviderExtension providerExtension = network.getAugmentation(NetworkProviderExtension.class);
    if (providerExtension != null) {
        segmentationId = providerExtension.getSegmentationId();
        if (segmentationId == null) {
            List<Segments> providerSegments = providerExtension.getSegments();
            if (providerSegments != null && providerSegments.size() > 0) {
                for (Segments providerSegment : providerSegments) {
                    if (isNetworkSegmentType(providerSegment, networkType)) {
                        segmentationId = providerSegment.getSegmentationId();
                        break;
                    }
                }
            }
        }
    }
    return segmentationId;
}
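For context, a minimal usage sketch (not taken from netvirt itself; NetworkTypeVxlan is assumed to be the YANG-generated network-type marker class, and the surrounding helper is hypothetical):

// Illustrative sketch only: resolve the VXLAN segmentation id (VNI) of a Neutron network,
// or null when the network carries no VXLAN provider segment.
private static String getVxlanVni(Network network) {
    return NeutronUtils.getSegmentationIdFromNeutronNetwork(network, NetworkTypeVxlan.class);
}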
Use of org.opendaylight.yang.gen.v1.urn.opendaylight.neutron.provider.ext.rev150712.neutron.networks.network.Segments in project netvirt by opendaylight.
The class NeutronUtils, method getSegmentationIdFromNeutronNetworkSegment:
public static String getSegmentationIdFromNeutronNetworkSegment(Network network, Long index) {
    String segmentationId = null;
    NetworkProviderExtension providerExtension = network.getAugmentation(NetworkProviderExtension.class);
    if (providerExtension != null) {
        List<Segments> providerSegments = providerExtension.getSegments();
        if (providerSegments != null && providerSegments.size() > 0) {
            for (Segments providerSegment : providerSegments) {
                if (Objects.equals(providerSegment.getSegmentationIndex(), index)) {
                    segmentationId = providerSegment.getSegmentationId();
                    break;
                }
            }
        }
    }
    return segmentationId;
}
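A similarly hedged sketch for the index-based lookup, assuming provider segments are indexed from zero (the helper below is hypothetical, not netvirt code):

// Illustrative sketch only: collect the segmentation ids of the first 'count' provider
// segments; an index with no matching segment simply contributes null.
private static List<String> getSegmentIds(Network network, int count) {
    List<String> ids = new ArrayList<>();
    for (long index = 0; index < count; index++) {
        ids.add(NeutronUtils.getSegmentationIdFromNeutronNetworkSegment(network, index));
    }
    return ids;
}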
Use of org.opendaylight.yang.gen.v1.urn.opendaylight.neutron.provider.ext.rev150712.neutron.networks.network.Segments in project openflowplugin by opendaylight.
The class StackedOutboundQueueNoBarrier, method writeEntries:
@Override
int writeEntries(@Nonnull final Channel channel, final long now) {
    // Local cache
    StackedSegment segment = firstSegment;
    int entries = 0;
    while (channel.isWritable()) {
        final OutboundQueueEntry entry = segment.getEntry(flushOffset);
        if (!entry.isCommitted()) {
            LOG.debug("Queue {} XID {} segment {} offset {} not committed yet", this,
                segment.getBaseXid() + flushOffset, segment, flushOffset);
            break;
        }
        LOG.trace("Queue {} flushing entry at offset {}", this, flushOffset);
        final OfHeader message = entry.takeMessage();
        flushOffset++;
        entries++;
        if (message != null) {
            manager.writeMessage(message, now);
        } else {
            entry.complete(null);
        }
        if (flushOffset >= StackedSegment.SEGMENT_SIZE) {
            /*
             * Slow path: purge the current segment unless it's the last one.
             * If it is, we leave it for replacement when a new reservation
             * is run on it.
             * This costs us two slow paths, but hey, this should be very rare,
             * so let's keep things simple.
             */
            synchronized (unflushedSegments) {
                LOG.debug("Flush offset {} unflushed segments {}", flushOffset, unflushedSegments.size());
                // We may have raced ahead of reservation code and need to allocate a segment
                ensureSegment(segment, flushOffset);
                // Remove the segment, update the firstSegment and reset flushOffset
                final StackedSegment oldSegment = unflushedSegments.remove(0);
                oldSegment.completeAll();
                uncompletedSegments.remove(oldSegment);
                oldSegment.recycle();
                // Reset the first segment and add it to the uncompleted list
                segment = unflushedSegments.get(0);
                uncompletedSegments.add(segment);
                // Update the shutdown offset
                if (shutdownOffset != null) {
                    shutdownOffset -= StackedSegment.SEGMENT_SIZE;
                }
                // Allow reservations back on the fast path by publishing the new first segment
                firstSegment = segment;
                flushOffset = 0;
                LOG.debug("Queue {} flush moved to segment {}", this, segment);
            }
        }
    }
    return entries;
}
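Callers outside the channel's event loop cannot invoke writeEntries directly. A hedged sketch of how a flush might be scheduled, assuming a 'queue' variable referencing this outbound queue and System.nanoTime() as the timestamp (both assumptions, not openflowplugin API):

// Illustrative only: hop onto the channel's event loop so writeEntries() runs on the
// thread it expects; the return value is the number of committed entries flushed.
channel.eventLoop().execute(() -> {
    final int flushed = queue.writeEntries(channel, System.nanoTime());
    LOG.trace("Flushed {} committed entries", flushed);
});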
Use of org.opendaylight.yang.gen.v1.urn.opendaylight.neutron.provider.ext.rev150712.neutron.networks.network.Segments in project openflowplugin by opendaylight.
The class AbstractStackedOutboundQueue, method writeEntries:
/**
 * Write some entries from the queue to the channel. Guaranteed to run
 * in the corresponding EventLoop.
 *
 * @param channel Channel onto which we are writing
 * @param now time stamp
 * @return Number of entries written out
 */
int writeEntries(@Nonnull final Channel channel, final long now) {
    // Local cache
    StackedSegment segment = firstSegment;
    int entries = 0;
    while (channel.isWritable()) {
        final OutboundQueueEntry entry = segment.getEntry(flushOffset);
        if (!entry.isCommitted()) {
            LOG.debug("Queue {} XID {} segment {} offset {} not committed yet", this,
                segment.getBaseXid() + flushOffset, segment, flushOffset);
            break;
        }
        LOG.trace("Queue {} flushing entry at offset {}", this, flushOffset);
        final OfHeader message = entry.takeMessage();
        flushOffset++;
        entries++;
        if (message != null) {
            manager.writeMessage(message, now);
        } else {
            entry.complete(null);
        }
        if (flushOffset >= StackedSegment.SEGMENT_SIZE) {
            /*
             * Slow path: purge the current segment unless it's the last one.
             * If it is, we leave it for replacement when a new reservation
             * is run on it.
             *
             * This costs us two slow paths, but hey, this should be very rare,
             * so let's keep things simple.
             */
            synchronized (unflushedSegments) {
                LOG.debug("Flush offset {} unflushed segments {}", flushOffset, unflushedSegments.size());
                // We may have raced ahead of reservation code and need to allocate a segment
                ensureSegment(segment, flushOffset);
                // Remove the segment, update the firstSegment and reset flushOffset
                final StackedSegment oldSegment = unflushedSegments.remove(0);
                if (oldSegment.isComplete()) {
                    uncompletedSegments.remove(oldSegment);
                    oldSegment.recycle();
                }
                // Reset the first segment and add it to the uncompleted list
                segment = unflushedSegments.get(0);
                uncompletedSegments.add(segment);
                // Update the shutdown offset
                if (shutdownOffset != null) {
                    shutdownOffset -= StackedSegment.SEGMENT_SIZE;
                }
                // Allow reservations back on the fast path by publishing the new first segment
                firstSegment = segment;
                flushOffset = 0;
                LOG.debug("Queue {} flush moved to segment {}", this, segment);
            }
        }
    }
    return entries;
}
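This base-class variant differs from StackedOutboundQueueNoBarrier only in the slow path: it recycles the old segment only when it is already complete, while the no-barrier queue force-completes it first. Because the flush loop stops as soon as channel.isWritable() turns false, something must trigger another pass once the socket buffer drains; a hedged sketch of such a trigger, written as a Netty handler method inside a hypothetical ChannelInboundHandlerAdapter subclass holding a 'queue' field (illustrative, not openflowplugin's actual pipeline code):

// Illustrative only: resume flushing committed entries when the channel becomes writable again.
@Override
public void channelWritabilityChanged(final ChannelHandlerContext ctx) {
    if (ctx.channel().isWritable()) {
        queue.writeEntries(ctx.channel(), System.nanoTime());
    }
    ctx.fireChannelWritabilityChanged();
}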