Imported Upstream version 4.6.0.125
Former-commit-id: a2155e9bd80020e49e72e86c44da02a8ac0e57a4
parent a569aebcfd
commit e79aa3c0ed
@@ -0,0 +1 @@
f062ca5e6394c3e981aa5bf8417cf029954e40a0
@@ -0,0 +1,343 @@
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Reflection;

// Include Silverlight's managed resources
#if SILVERLIGHT
using System.Core;
#endif //SILVERLIGHT

namespace System.Linq.Expressions {

    //
    internal abstract class OldExpressionVisitor {
        internal OldExpressionVisitor() {
        }

        internal virtual Expression Visit(Expression exp) {
            if (exp == null)
                return exp;
            switch (exp.NodeType) {
                case ExpressionType.UnaryPlus:
                case ExpressionType.Negate:
                case ExpressionType.NegateChecked:
                case ExpressionType.Not:
                case ExpressionType.Convert:
                case ExpressionType.ConvertChecked:
                case ExpressionType.ArrayLength:
                case ExpressionType.Quote:
                case ExpressionType.TypeAs:
                    return this.VisitUnary((UnaryExpression)exp);
                case ExpressionType.Add:
                case ExpressionType.AddChecked:
                case ExpressionType.Subtract:
                case ExpressionType.SubtractChecked:
                case ExpressionType.Multiply:
                case ExpressionType.MultiplyChecked:
                case ExpressionType.Divide:
                case ExpressionType.Modulo:
                case ExpressionType.Power:
                case ExpressionType.And:
                case ExpressionType.AndAlso:
                case ExpressionType.Or:
                case ExpressionType.OrElse:
                case ExpressionType.LessThan:
                case ExpressionType.LessThanOrEqual:
                case ExpressionType.GreaterThan:
                case ExpressionType.GreaterThanOrEqual:
                case ExpressionType.Equal:
                case ExpressionType.NotEqual:
                case ExpressionType.Coalesce:
                case ExpressionType.ArrayIndex:
                case ExpressionType.RightShift:
                case ExpressionType.LeftShift:
                case ExpressionType.ExclusiveOr:
                    return this.VisitBinary((BinaryExpression)exp);
                case ExpressionType.TypeIs:
                    return this.VisitTypeIs((TypeBinaryExpression)exp);
                case ExpressionType.Conditional:
                    return this.VisitConditional((ConditionalExpression)exp);
                case ExpressionType.Constant:
                    return this.VisitConstant((ConstantExpression)exp);
                case ExpressionType.Parameter:
                    return this.VisitParameter((ParameterExpression)exp);
                case ExpressionType.MemberAccess:
                    return this.VisitMemberAccess((MemberExpression)exp);
                case ExpressionType.Call:
                    return this.VisitMethodCall((MethodCallExpression)exp);
                case ExpressionType.Lambda:
                    return this.VisitLambda((LambdaExpression)exp);
                case ExpressionType.New:
                    return this.VisitNew((NewExpression)exp);
                case ExpressionType.NewArrayInit:
                case ExpressionType.NewArrayBounds:
                    return this.VisitNewArray((NewArrayExpression)exp);
                case ExpressionType.Invoke:
                    return this.VisitInvocation((InvocationExpression)exp);
                case ExpressionType.MemberInit:
                    return this.VisitMemberInit((MemberInitExpression)exp);
                case ExpressionType.ListInit:
                    return this.VisitListInit((ListInitExpression)exp);
                default:
                    throw Error.UnhandledExpressionType(exp.NodeType);
            }
        }

        internal virtual MemberBinding VisitBinding(MemberBinding binding) {
            switch (binding.BindingType) {
                case MemberBindingType.Assignment:
                    return this.VisitMemberAssignment((MemberAssignment)binding);
                case MemberBindingType.MemberBinding:
                    return this.VisitMemberMemberBinding((MemberMemberBinding)binding);
                case MemberBindingType.ListBinding:
                    return this.VisitMemberListBinding((MemberListBinding)binding);
                default:
                    throw Error.UnhandledBindingType(binding.BindingType);
            }
        }

        internal virtual ElementInit VisitElementInitializer(ElementInit initializer) {
            ReadOnlyCollection<Expression> arguments = this.VisitExpressionList(initializer.Arguments);
            if (arguments != initializer.Arguments) {
                return Expression.ElementInit(initializer.AddMethod, arguments);
            }
            return initializer;
        }

        internal virtual Expression VisitUnary(UnaryExpression u) {
            Expression operand = this.Visit(u.Operand);
            if (operand != u.Operand) {
                return Expression.MakeUnary(u.NodeType, operand, u.Type, u.Method);
            }
            return u;
        }

        internal virtual Expression VisitBinary(BinaryExpression b) {
            Expression left = this.Visit(b.Left);
            Expression right = this.Visit(b.Right);
            Expression conversion = this.Visit(b.Conversion);
            if (left != b.Left || right != b.Right || conversion != b.Conversion) {
                if (b.NodeType == ExpressionType.Coalesce && b.Conversion != null)
                    return Expression.Coalesce(left, right, conversion as LambdaExpression);
                else
                    return Expression.MakeBinary(b.NodeType, left, right, b.IsLiftedToNull, b.Method);
            }
            return b;
        }

        internal virtual Expression VisitTypeIs(TypeBinaryExpression b) {
            Expression expr = this.Visit(b.Expression);
            if (expr != b.Expression) {
                return Expression.TypeIs(expr, b.TypeOperand);
            }
            return b;
        }

        internal virtual Expression VisitConstant(ConstantExpression c) {
            return c;
        }

        internal virtual Expression VisitConditional(ConditionalExpression c) {
            Expression test = this.Visit(c.Test);
            Expression ifTrue = this.Visit(c.IfTrue);
            Expression ifFalse = this.Visit(c.IfFalse);
            if (test != c.Test || ifTrue != c.IfTrue || ifFalse != c.IfFalse) {
                return Expression.Condition(test, ifTrue, ifFalse);
            }
            return c;
        }

        internal virtual Expression VisitParameter(ParameterExpression p) {
            return p;
        }

        internal virtual Expression VisitMemberAccess(MemberExpression m) {
            Expression exp = this.Visit(m.Expression);
            if (exp != m.Expression) {
                return Expression.MakeMemberAccess(exp, m.Member);
            }
            return m;
        }

        internal virtual Expression VisitMethodCall(MethodCallExpression m) {
            Expression obj = this.Visit(m.Object);
            IEnumerable<Expression> args = this.VisitExpressionList(m.Arguments);
            if (obj != m.Object || args != m.Arguments) {
                return Expression.Call(obj, m.Method, args);
            }
            return m;
        }

        internal virtual ReadOnlyCollection<Expression> VisitExpressionList(ReadOnlyCollection<Expression> original) {
            List<Expression> list = null;
            for (int i = 0, n = original.Count; i < n; i++) {
                Expression p = this.Visit(original[i]);
                if (list != null) {
                    list.Add(p);
                }
                else if (p != original[i]) {
                    list = new List<Expression>(n);
                    for (int j = 0; j < i; j++) {
                        list.Add(original[j]);
                    }
                    list.Add(p);
                }
            }
            if (list != null)
                return list.ToReadOnlyCollection();
            return original;
        }

        internal virtual MemberAssignment VisitMemberAssignment(MemberAssignment assignment) {
            Expression e = this.Visit(assignment.Expression);
            if (e != assignment.Expression) {
                return Expression.Bind(assignment.Member, e);
            }
            return assignment;
        }

        internal virtual MemberMemberBinding VisitMemberMemberBinding(MemberMemberBinding binding) {
            IEnumerable<MemberBinding> bindings = this.VisitBindingList(binding.Bindings);
            if (bindings != binding.Bindings) {
                return Expression.MemberBind(binding.Member, bindings);
            }
            return binding;
        }

        internal virtual MemberListBinding VisitMemberListBinding(MemberListBinding binding) {
            IEnumerable<ElementInit> initializers = this.VisitElementInitializerList(binding.Initializers);
            if (initializers != binding.Initializers) {
                return Expression.ListBind(binding.Member, initializers);
            }
            return binding;
        }

        internal virtual IEnumerable<MemberBinding> VisitBindingList(ReadOnlyCollection<MemberBinding> original) {
            List<MemberBinding> list = null;
            for (int i = 0, n = original.Count; i < n; i++) {
                MemberBinding b = this.VisitBinding(original[i]);
                if (list != null) {
                    list.Add(b);
                }
                else if (b != original[i]) {
                    list = new List<MemberBinding>(n);
                    for (int j = 0; j < i; j++) {
                        list.Add(original[j]);
                    }
                    list.Add(b);
                }
            }
            if (list != null)
                return list;
            return original;
        }

        internal virtual IEnumerable<ElementInit> VisitElementInitializerList(ReadOnlyCollection<ElementInit> original)
        {
            List<ElementInit> list = null;
            for (int i = 0, n = original.Count; i < n; i++)
            {
                ElementInit init = this.VisitElementInitializer(original[i]);
                if (list != null)
                {
                    list.Add(init);
                }
                else if (init != original[i])
                {
                    list = new List<ElementInit>(n);
                    for (int j = 0; j < i; j++)
                    {
                        list.Add(original[j]);
                    }
                    list.Add(init);
                }
            }
            if (list != null)
                return list;
            return original;
        }

        internal virtual Expression VisitLambda(LambdaExpression lambda)
        {
            Expression body = this.Visit(lambda.Body);
            if (body != lambda.Body) {
                return Expression.Lambda(lambda.Type, body, lambda.Parameters);
            }
            return lambda;
        }

        internal virtual NewExpression VisitNew(NewExpression nex) {
            IEnumerable<Expression> args = this.VisitExpressionList(nex.Arguments);
            if (args != nex.Arguments) {
                if (nex.Members != null)
                    return Expression.New(nex.Constructor, args, nex.Members);
                else
                    return Expression.New(nex.Constructor, args);
            }
            return nex;
        }

        internal virtual Expression VisitMemberInit(MemberInitExpression init) {
            NewExpression n = this.VisitNew(init.NewExpression);
            IEnumerable<MemberBinding> bindings = this.VisitBindingList(init.Bindings);
            if (n != init.NewExpression || bindings != init.Bindings) {
                return Expression.MemberInit(n, bindings);
            }
            return init;
        }

        internal virtual Expression VisitListInit(ListInitExpression init) {
            NewExpression n = this.VisitNew(init.NewExpression);
            IEnumerable<ElementInit> initializers = this.VisitElementInitializerList(init.Initializers);
            if (n != init.NewExpression || initializers != init.Initializers) {
                return Expression.ListInit(n, initializers);
            }
            return init;
        }

        internal virtual Expression VisitNewArray(NewArrayExpression na) {
            IEnumerable<Expression> exprs = this.VisitExpressionList(na.Expressions);
            if (exprs != na.Expressions) {
                if (na.NodeType == ExpressionType.NewArrayInit) {
                    return Expression.NewArrayInit(na.Type.GetElementType(), exprs);
                }
                else {
                    return Expression.NewArrayBounds(na.Type.GetElementType(), exprs);
                }
            }
            return na;
        }

        internal virtual Expression VisitInvocation(InvocationExpression iv) {
            IEnumerable<Expression> args = this.VisitExpressionList(iv.Arguments);
            Expression expr = this.Visit(iv.Expression);
            if (args != iv.Arguments || expr != iv.Expression) {
                return Expression.Invoke(expr, args);
            }
            return iv;
        }
    }

    //
    internal static class ReadOnlyCollectionExtensions {
        internal static ReadOnlyCollection<T> ToReadOnlyCollection<T>(this IEnumerable<T> sequence) {
            if (sequence == null)
                return DefaultReadOnlyCollection<T>.Empty;
            ReadOnlyCollection<T> col = sequence as ReadOnlyCollection<T>;
            if (col != null)
                return col;
            return new ReadOnlyCollection<T>(sequence.ToArray());
        }
        private static class DefaultReadOnlyCollection<T> {
            private static volatile ReadOnlyCollection<T> _defaultCollection;
            internal static ReadOnlyCollection<T> Empty {
                get {
                    if (_defaultCollection == null)
                        _defaultCollection = new ReadOnlyCollection<T>(new T[] { });
                    return _defaultCollection;
                }
            }
        }
    }
}
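A visitor in this style is consumed by deriving from it and overriding one of the Visit* methods; untouched subtrees are returned by reference, so new nodes are only allocated along the rewritten path. A minimal sketch of that usage pattern, written for illustration against the public System.Linq.Expressions.ExpressionVisitor (the internal type above is not accessible outside its assembly; the hypothetical DoubleConstantsVisitor name is not part of the source):

    using System;
    using System.Linq.Expressions;

    // Rewrites every integer constant in an expression tree to twice its value.
    internal sealed class DoubleConstantsVisitor : ExpressionVisitor
    {
        protected override Expression VisitConstant(ConstantExpression node)
        {
            if (node.Type == typeof(int))
            {
                return Expression.Constant((int)node.Value * 2, typeof(int));
            }
            return node; // leave all other constants (and the rest of the tree) untouched
        }
    }

    // Usage: turns x => x + 1 into x => x + 2.
    // Expression<Func<int, int>> expr = x => x + 1;
    // var rewritten = (Expression<Func<int, int>>)new DoubleConstantsVisitor().Visit(expr);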
1680  mcs/class/referencesource/System.Core/System/Linq/IQueryable.cs  Normal file
File diff suppressed because it is too large
@@ -0,0 +1,139 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// SynchronousChannel.cs
//
// <OWNER>[....]</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections.Generic;
using System.Diagnostics.Contracts;

namespace System.Linq.Parallel
{
    /// <summary>
    /// The simplest channel is one that has no synchronization. This is used for stop-
    /// and-go productions where we are guaranteed the consumer is not running
    /// concurrently. It just wraps a FIFO queue internally.
    ///
    /// Assumptions:
    ///     Producers and consumers never try to enqueue/dequeue concurrently.
    /// </summary>
    /// <typeparam name="T"></typeparam>
    internal sealed class SynchronousChannel<T>
    {
        // We currently use the BCL FIFO queue internally, although any would do.
        private Queue<T> m_queue;

#if DEBUG
        // In debug builds, we keep track of when the producer is done (for asserts).
        private bool m_done;
#endif

        //-----------------------------------------------------------------------------------
        // Instantiates a new queue.
        //

        internal SynchronousChannel()
        {
        }

        //-----------------------------------------------------------------------------------
        // Initializes the queue for this channel.
        //

        internal void Init()
        {
            m_queue = new Queue<T>();
        }

        //-----------------------------------------------------------------------------------
        // Enqueue a new item.
        //
        // Arguments:
        //     item - the item to place into the queue
        //     timeoutMilliseconds - synchronous channels never wait, so this is unused
        //
        // Assumptions:
        //     The producer has not signaled that it's done yet.
        //
        // Return Value:
        //     Synchronous channels always return true for this function. It can't timeout.
        //

        internal void Enqueue(T item)
        {
            Contract.Assert(m_queue != null);
#if DEBUG
            Contract.Assert(!m_done, "trying to enqueue into the queue after production is done");
#endif

            m_queue.Enqueue(item);
        }

        //-----------------------------------------------------------------------------------
        // Dequeue the next item in the queue.
        //
        // Return Value:
        //     The item removed from the queue.
        //
        // Assumptions:
        //     The producer must be done producing. This queue is meant for synchronous
        //     production/consumption, therefore it's unsafe for the consumer to try and
        //     dequeue an item while a producer might be enqueueing one.
        //

        internal T Dequeue()
        {
            Contract.Assert(m_queue != null);
#if DEBUG
            Contract.Assert(m_done, "trying to dequeue before production is done -- this is not safe");
#endif
            return m_queue.Dequeue();
        }

        //-----------------------------------------------------------------------------------
        // Signals that a producer will no longer be enqueueing items.
        //

        internal void SetDone()
        {
#if DEBUG
            // We only track this in DEBUG builds to aid in debugging. This ensures we
            // can assert dequeue-before-done and enqueue-after-done invariants above.
            m_done = true;
#endif
        }

        //-----------------------------------------------------------------------------------
        // Copies the internal contents of this channel to an array.
        //

        internal void CopyTo(T[] array, int arrayIndex)
        {
            Contract.Assert(array != null);
#if DEBUG
            Contract.Assert(m_done, "Can only copy from the channel after it's done being added to");
#endif
            m_queue.CopyTo(array, arrayIndex);
        }

        //-----------------------------------------------------------------------------------
        // Retrieves the current count of items in the queue.
        //

        internal int Count
        {
            get
            {
                Contract.Assert(m_queue != null);
                return m_queue.Count;
            }
        }
    }
}
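For context, the intended producer/consumer handshake here is strictly phased: the producer calls Init and Enqueue, signals completion with SetDone, and only afterwards does the consumer call Count and Dequeue. A minimal usage sketch of that sequence (hypothetical; the type is internal, so this only compiles inside the same assembly):

    var channel = new SynchronousChannel<int>();
    channel.Init();

    // Production phase -- no consumer is running yet.
    for (int i = 0; i < 3; i++)
    {
        channel.Enqueue(i);
    }
    channel.SetDone();

    // Consumption phase -- the producer has finished.
    while (channel.Count > 0)
    {
        Console.WriteLine(channel.Dequeue());
    }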
@@ -0,0 +1,128 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// AggregationMinMaxHelpers.cs
//
// <OWNER>[....]</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections.Generic;
using System.Linq.Parallel;
using System.Diagnostics.Contracts;

namespace System.Linq
{
    internal static class AggregationMinMaxHelpers<T>
    {
        //-----------------------------------------------------------------------------------
        // Helper method to find the minimum or maximum element in the source.
        //

        private static T Reduce(IEnumerable<T> source, int sign)
        {
            Contract.Assert(source != null);
            Contract.Assert(sign == -1 || sign == 1);

            Func<Pair<bool, T>, T, Pair<bool, T>> intermediateReduce = MakeIntermediateReduceFunction(sign);
            Func<Pair<bool, T>, Pair<bool, T>, Pair<bool, T>> finalReduce = MakeFinalReduceFunction(sign);
            Func<Pair<bool, T>, T> resultSelector = MakeResultSelectorFunction();

            AssociativeAggregationOperator<T, Pair<bool, T>, T> aggregation =
                new AssociativeAggregationOperator<T, Pair<bool, T>, T>(source, new Pair<bool, T>(false, default(T)), null,
                    true, intermediateReduce, finalReduce, resultSelector, default(T) != null, QueryAggregationOptions.AssociativeCommutative);

            return aggregation.Aggregate();
        }

        //-----------------------------------------------------------------------------------
        // Helper method to find the minimum element in the source.
        //

        internal static T ReduceMin(IEnumerable<T> source)
        {
            return Reduce(source, -1);
        }

        //-----------------------------------------------------------------------------------
        // Helper method to find the maximum element in the source.
        //

        internal static T ReduceMax(IEnumerable<T> source)
        {
            return Reduce(source, 1);
        }

        //-----------------------------------------------------------------------------------
        // These methods are used to generate delegates to perform the comparisons.
        //

        private static Func<Pair<bool, T>, T, Pair<bool, T>> MakeIntermediateReduceFunction(int sign)
        {
            Comparer<T> comparer = Util.GetDefaultComparer<T>();

            // Note that we capture the 'sign' argument and 'comparer' local, and therefore the C#
            // compiler will transform this into an instance-based delegate, incurring an extra (hidden)
            // object allocation.
            return delegate(Pair<bool, T> accumulator, T element)
            {
                // If this is the first element, or the sign of the result of comparing the element with
                // the existing accumulated result is equal to the sign requested by the function factory,
                // we will return a new pair that contains the current element as the best item. We will
                // ignore null elements (for reference and nullable types) in the input stream.
                if ((default(T) != null || element != null) &&
                    (!accumulator.First || Util.Sign(comparer.Compare(element, accumulator.Second)) == sign))
                {
                    return new Pair<bool, T>(true, element);
                }

                // Otherwise, just return the current accumulator result.
                return accumulator;
            };
        }

        private static Func<Pair<bool, T>, Pair<bool, T>, Pair<bool, T>> MakeFinalReduceFunction(int sign)
        {
            Comparer<T> comparer = Util.GetDefaultComparer<T>();

            // Note that we capture the 'sign' argument and 'comparer' local, and therefore the C#
            // compiler will transform this into an instance-based delegate, incurring an extra (hidden)
            // object allocation.
            return delegate(Pair<bool, T> accumulator, Pair<bool, T> element)
            {
                // If the intermediate reduction is empty, we will ignore it. Otherwise, if this is the
                // first element, or the sign of the result of comparing the element with the existing
                // accumulated result is equal to the sign requested by the function factory, we will
                // return a new pair that contains the current element as the best item.
                if (element.First &&
                    (!accumulator.First || Util.Sign(comparer.Compare(element.Second, accumulator.Second)) == sign))
                {
                    Contract.Assert(default(T) != null || element.Second != null, "nulls unexpected in final reduce");
                    return new Pair<bool, T>(true, element.Second);
                }

                // Otherwise, just return the current accumulator result.
                return accumulator;
            };
        }

        private static Func<Pair<bool, T>, T> MakeResultSelectorFunction()
        {
            // If we saw at least one element in the source stream, the right pair element will contain
            // the element we're looking for -- so we return that. In the case of non-nullable value
            // types, the aggregation API will have thrown an exception before calling us for
            // empty sequences. Else, we will just return the element, which may be null for other types.
            return delegate(Pair<bool, T> accumulator)
            {
                Contract.Assert(accumulator.First || default(T) == null,
                    "for non-null types we expect an exception to be thrown before getting here");
                return accumulator.Second;
            };
        }
    }
}
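The (bool, value) pair threaded through these delegates is simply an explicit "have we seen an element yet" flag, and the single 'sign' parameter selects min (-1) or max (+1) from one piece of comparison code. For illustration only, the same folding logic written sequentially against plain LINQ, with Tuple standing in for the internal Pair type (this sketch is not part of the imported source):

    using System;
    using System.Collections.Generic;
    using System.Linq;

    static class MinMaxSketch
    {
        // sign = -1 computes the minimum, sign = +1 the maximum.
        internal static T Reduce<T>(IEnumerable<T> source, int sign)
        {
            Comparer<T> comparer = Comparer<T>.Default;
            Tuple<bool, T> acc = source.Aggregate(
                Tuple.Create(false, default(T)),
                (a, element) =>
                    (default(T) != null || element != null) &&
                    (!a.Item1 || Math.Sign(comparer.Compare(element, a.Item2)) == sign)
                        ? Tuple.Create(true, element)
                        : a);
            return acc.Item2; // default(T) if the sequence held no usable elements
        }
    }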
@@ -0,0 +1,75 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// EmptyEnumerable.cs
//
// <OWNER>[....]</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections;
using System.Collections.Generic;

namespace System.Linq.Parallel
{
    /// <summary>
    /// We occasionally need a no-op enumerator to stand-in when we don't have data left
    /// within a partition's data stream. These are simple enumerable and enumerator
    /// implementations that always and consistently yield no elements.
    /// </summary>
    /// <typeparam name="T"></typeparam>
    internal class EmptyEnumerable<T> : ParallelQuery<T>
    {
        private EmptyEnumerable()
            : base(QuerySettings.Empty)
        {
        }

        // A singleton cached and shared among callers.
        private static volatile EmptyEnumerable<T> s_instance;
        private static volatile EmptyEnumerator<T> s_enumeratorInstance;

        internal static EmptyEnumerable<T> Instance
        {
            get
            {
                if (s_instance == null)
                {
                    // There is no need for thread safety here.
                    s_instance = new EmptyEnumerable<T>();
                }

                return s_instance;
            }
        }

        public override IEnumerator<T> GetEnumerator()
        {
            if (s_enumeratorInstance == null)
            {
                // There is no need for thread safety here.
                s_enumeratorInstance = new EmptyEnumerator<T>();
            }

            return s_enumeratorInstance;
        }
    }

    internal class EmptyEnumerator<T> : QueryOperatorEnumerator<T, int>, IEnumerator<T>
    {
        internal override bool MoveNext(ref T currentElement, ref int currentKey)
        {
            return false;
        }

        // IEnumerator<T> methods.
        public T Current { get { return default(T); } }
        object IEnumerator.Current { get { return null; } }
        public bool MoveNext() { return false; }
        void Collections.IEnumerator.Reset() { }
    }
}
@@ -0,0 +1,106 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// EnumerableWrapperWeakToStrong.cs
//
// <OWNER>[....]</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections;
using System.Collections.Generic;
using System.Diagnostics.Contracts;

namespace System.Linq.Parallel
{
    /// <summary>
    /// A simple implementation of the IEnumerable{object} interface which wraps
    /// a weakly typed IEnumerable object, allowing it to be accessed as a strongly typed
    /// IEnumerable{object}.
    /// </summary>
    internal class EnumerableWrapperWeakToStrong : IEnumerable<object>
    {
        private readonly IEnumerable m_wrappedEnumerable; // The wrapped enumerable object.

        //-----------------------------------------------------------------------------------
        // Instantiates a new wrapper object.
        //

        internal EnumerableWrapperWeakToStrong(IEnumerable wrappedEnumerable)
        {
            Contract.Assert(wrappedEnumerable != null);
            m_wrappedEnumerable = wrappedEnumerable;
        }

        IEnumerator IEnumerable.GetEnumerator()
        {
            return ((IEnumerable<object>)this).GetEnumerator();
        }

        public IEnumerator<object> GetEnumerator()
        {
            return new WrapperEnumeratorWeakToStrong(m_wrappedEnumerable.GetEnumerator());
        }

        //-----------------------------------------------------------------------------------
        // A wrapper over IEnumerator that provides IEnumerator<object> interface
        //

        class WrapperEnumeratorWeakToStrong : IEnumerator<object>
        {
            private IEnumerator m_wrappedEnumerator; // The weakly typed enumerator we've wrapped.

            //-----------------------------------------------------------------------------------
            // Wrap the specified enumerator in a new weak-to-strong converter.
            //

            internal WrapperEnumeratorWeakToStrong(IEnumerator wrappedEnumerator)
            {
                Contract.Assert(wrappedEnumerator != null);
                m_wrappedEnumerator = wrappedEnumerator;
            }

            //-----------------------------------------------------------------------------------
            // These are all really simple IEnumerator<object> implementations that simply
            // forward to the corresponding weakly typed IEnumerator methods.
            //

            object IEnumerator.Current
            {
                get { return m_wrappedEnumerator.Current; }
            }

            object IEnumerator<object>.Current
            {
                get { return m_wrappedEnumerator.Current; }
            }

            void IDisposable.Dispose()
            {
                IDisposable disposable = m_wrappedEnumerator as IDisposable;
                if (disposable != null)
                {
                    disposable.Dispose();
                }
            }

            bool IEnumerator.MoveNext()
            {
                return m_wrappedEnumerator.MoveNext();
            }

            void IEnumerator.Reset()
            {
                m_wrappedEnumerator.Reset();
            }
        }
    }
}
@@ -0,0 +1,26 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// IParallelPartitionable.cs
//
// <OWNER>igoro</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

namespace System.Linq.Parallel
{
    /// <summary>
    ///
    /// An interface that allows developers to specify their own partitioning routines.
    ///
    /// </summary>
    /// <typeparam name="T"></typeparam>
    internal interface IParallelPartitionable<T>
    {
        QueryOperatorEnumerator<T, int>[] GetPartitions(int partitionCount);
    }
}
@@ -0,0 +1,56 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// OrderedParallelQuery.cs
//
// <OWNER>[....]</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System;
using System.Collections.Generic;
using System.Text;
using System.Linq.Parallel;
using System.Diagnostics.Contracts;

namespace System.Linq
{
    /// <summary>
    /// Represents a sorted, parallel sequence.
    /// </summary>
    public class OrderedParallelQuery<TSource> : ParallelQuery<TSource>
    {
        private QueryOperator<TSource> m_sortOp;

        internal OrderedParallelQuery(QueryOperator<TSource> sortOp)
            : base(sortOp.SpecifiedQuerySettings)
        {
            m_sortOp = sortOp;
            Contract.Assert(sortOp is IOrderedEnumerable<TSource>);
        }

        internal QueryOperator<TSource> SortOperator
        {
            get { return m_sortOp; }
        }

        internal IOrderedEnumerable<TSource> OrderedEnumerable
        {
            get { return (IOrderedEnumerable<TSource>)m_sortOp; }
        }

        /// <summary>
        /// Returns an enumerator that iterates through the sequence.
        /// </summary>
        /// <returns>An enumerator that iterates through the sequence.</returns>
        public override IEnumerator<TSource> GetEnumerator()
        {
            return m_sortOp.GetEnumerator();
        }
    }
}
@@ -0,0 +1,99 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// ParallelEnumerableWrapper.cs
//
// <OWNER>[....]</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections;
using System.Collections.Generic;
using System.Diagnostics.Contracts;

namespace System.Linq.Parallel
{
    /// <summary>
    /// A simple implementation of the ParallelQuery{object} interface which wraps an
    /// underlying IEnumerable, such that it can be used in parallel queries.
    /// </summary>
    internal class ParallelEnumerableWrapper : ParallelQuery<object>
    {
        private readonly IEnumerable m_source; // The wrapped enumerable object.

        //-----------------------------------------------------------------------------------
        // Instantiates a new wrapper object.
        //

        internal ParallelEnumerableWrapper(Collections.IEnumerable source)
            : base(QuerySettings.Empty)
        {
            Contract.Assert(source != null);
            m_source = source;
        }

        internal override IEnumerator GetEnumeratorUntyped()
        {
            return m_source.GetEnumerator();
        }

        public override IEnumerator<object> GetEnumerator()
        {
            return new EnumerableWrapperWeakToStrong(m_source).GetEnumerator();
        }
    }

    /// <summary>
    /// A simple implementation of the ParallelQuery{T} interface which wraps an
    /// underlying IEnumerable{T}, such that it can be used in parallel queries.
    /// </summary>
    /// <typeparam name="T"></typeparam>
    internal class ParallelEnumerableWrapper<T> : ParallelQuery<T>
    {
        private readonly IEnumerable<T> m_wrappedEnumerable; // The wrapped enumerable object.

        //-----------------------------------------------------------------------------------
        // Instantiates a new wrapper object.
        //
        // Arguments:
        //     wrappedEnumerable - the underlying enumerable object being wrapped
        //
        // Notes:
        //     The analysisOptions and degreeOfParallelism settings are optional. Passing null
        //     indicates that the system defaults should be used instead.
        //

        internal ParallelEnumerableWrapper(IEnumerable<T> wrappedEnumerable)
            : base(QuerySettings.Empty)
        {
            Contract.Assert(wrappedEnumerable != null, "wrappedEnumerable must not be null.");

            m_wrappedEnumerable = wrappedEnumerable;
        }

        //-----------------------------------------------------------------------------------
        // Retrieves the wrapped enumerable object.
        //

        internal IEnumerable<T> WrappedEnumerable
        {
            get { return m_wrappedEnumerable; }
        }

        //-----------------------------------------------------------------------------------
        // Implementations of GetEnumerator that just delegate to the wrapped enumerable.
        //

        public override IEnumerator<T> GetEnumerator()
        {
            Contract.Assert(m_wrappedEnumerable != null);
            return m_wrappedEnumerable.GetEnumerator();
        }
    }
}
@@ -0,0 +1,131 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// ParallelQuery.cs
//
// <OWNER>[....]</OWNER>
//
// ParallelQuery is an abstract class that represents a PLINQ query.
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections;
using System.Collections.Generic;
using System.Linq.Parallel;
using System.Diagnostics.Contracts;

namespace System.Linq
{
    /// <summary>
    /// Represents a parallel sequence.
    /// </summary>
    public class ParallelQuery : IEnumerable
    {
        // Settings that have been specified on the query so far.
        private QuerySettings m_specifiedSettings;

        internal ParallelQuery(QuerySettings specifiedSettings)
        {
            m_specifiedSettings = specifiedSettings;
        }

        //-----------------------------------------------------------------------------------
        // Settings that have been specified on the query so far. Some settings may still
        // be unspecified and will be replaced either by operators further in the query,
        // or filled in with defaults at query opening time.
        //

        internal QuerySettings SpecifiedQuerySettings
        {
            get { return m_specifiedSettings; }
        }

        //-----------------------------------------------------------------------------------
        // Returns a parallel enumerable that represents 'this' enumerable, with each element
        // casted to TCastTo. If some element is not of type TCastTo, InvalidCastException
        // is thrown.
        //

        internal virtual ParallelQuery<TCastTo> Cast<TCastTo>()
        {
            Contract.Assert(false, "The derived class must override this method.");
            throw new NotSupportedException();
        }

        //-----------------------------------------------------------------------------------
        // Returns a parallel enumerable that represents 'this' enumerable, with each element
        // casted to TCastTo. Elements that are not of type TCastTo will be left out from
        // the results.
        //

        internal virtual ParallelQuery<TCastTo> OfType<TCastTo>()
        {
            Contract.Assert(false, "The derived class must override this method.");
            throw new NotSupportedException();
        }

        //-----------------------------------------------------------------------------------
        // Derived classes implement GetEnumeratorUntyped() instead of IEnumerable.GetEnumerator()
        // This is to avoid the method name conflict if the derived classes also implement
        // IEnumerable<T>.
        //

        internal virtual IEnumerator GetEnumeratorUntyped()
        {
            Contract.Assert(false, "The derived class must override this method.");
            throw new NotSupportedException();
        }

        /// <summary>
        /// Returns an enumerator that iterates through the sequence.
        /// </summary>
        /// <returns>An enumerator that iterates through the sequence.</returns>
        IEnumerator IEnumerable.GetEnumerator()
        {
            return GetEnumeratorUntyped();
        }
    }

    /// <summary>
    /// Represents a parallel sequence.
    /// </summary>
    public class ParallelQuery<TSource> : ParallelQuery, IEnumerable<TSource>
    {
        internal ParallelQuery(QuerySettings settings)
            : base(settings)
        {
        }

        internal sealed override ParallelQuery<TCastTo> Cast<TCastTo>()
        {
            return ParallelEnumerable.Select<TSource, TCastTo>(this, elem => (TCastTo)(object)elem);
        }

        internal sealed override ParallelQuery<TCastTo> OfType<TCastTo>()
        {
            // @PERF: Currently defined in terms of other operators. This isn't the most performant
            // solution (because it results in two operators) but is simple to implement.
            return this
                .Where<TSource>(elem => elem is TCastTo)
                .Select<TSource, TCastTo>(elem => (TCastTo)(object)elem);
        }

        internal override IEnumerator GetEnumeratorUntyped()
        {
            return ((IEnumerable<TSource>)this).GetEnumerator();
        }

        /// <summary>
        /// Returns an enumerator that iterates through the sequence.
        /// </summary>
        /// <returns>An enumerator that iterates through the sequence.</returns>
        public virtual IEnumerator<TSource> GetEnumerator()
        {
            Contract.Assert(false, "The derived class must override this method.");
            throw new NotSupportedException();
        }
    }
}
@@ -0,0 +1,30 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// QueryAggregationOptions.cs
//
// <OWNER>igoro</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

namespace System.Linq.Parallel
{
    /// <summary>
    /// An enum to specify whether an aggregate operator is associative, commutative,
    /// neither, or both. This influences query analysis and execution: associative
    /// aggregations can run in parallel, whereas non-associative cannot; non-commutative
    /// aggregations must be run over data in input-order.
    /// </summary>
    [Flags]
    internal enum QueryAggregationOptions
    {
        None = 0,
        Associative = 1,
        Commutative = 2,
        AssociativeCommutative = (Associative | Commutative) // For convenience.
    }
}
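Because the enum is marked [Flags], callers test these properties with bitwise masks rather than equality, and AssociativeCommutative satisfies both checks. A small hypothetical check, for illustration only:

    QueryAggregationOptions options = QueryAggregationOptions.AssociativeCommutative;

    // An aggregation may be reduced in parallel only if it is associative.
    bool canRunInParallel = (options & QueryAggregationOptions.Associative) != 0;

    // Non-commutative aggregations must consume the data in input order.
    bool requiresInputOrder = (options & QueryAggregationOptions.Commutative) == 0;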
@@ -0,0 +1,129 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// RangeEnumerable.cs
//
// <OWNER>[....]</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections.Generic;

namespace System.Linq.Parallel
{
    /// <summary>
    /// A simple enumerable type that implements the range algorithm. It also supports
    /// partitioning of the indices by implementing an interface that PLINQ recognizes.
    /// </summary>
    internal class RangeEnumerable : ParallelQuery<int>, IParallelPartitionable<int>
    {
        private int m_from;  // Lowest index to include.
        private int m_count; // Number of indices to include.

        //-----------------------------------------------------------------------------------
        // Constructs a new range enumerable object for the specified range.
        //

        internal RangeEnumerable(int from, int count)
            : base(QuerySettings.Empty)
        {
            // Transform the from and to indices into low and highs.
            m_from = from;
            m_count = count;
        }

        //-----------------------------------------------------------------------------------
        // Retrieves 'count' partitions, each of which uses a non-overlapping set of indices.
        //

        public QueryOperatorEnumerator<int, int>[] GetPartitions(int partitionCount)
        {
            // Calculate a stride size, avoiding overflow if m_count is large
            int stride = m_count / partitionCount;
            int biggerPartitionCount = m_count % partitionCount;

            // Create individual partitions, carefully avoiding overflow
            int doneCount = 0;
            QueryOperatorEnumerator<int, int>[] partitions = new QueryOperatorEnumerator<int, int>[partitionCount];
            for (int i = 0; i < partitionCount; i++)
            {
                int partitionSize = (i < biggerPartitionCount) ? stride + 1 : stride;
                partitions[i] = new RangeEnumerator(
                    m_from + doneCount,
                    partitionSize,
                    doneCount);
                doneCount += partitionSize;
            }

            return partitions;
        }

        //-----------------------------------------------------------------------------------
        // Basic IEnumerator<T> method implementations.
        //

        public override IEnumerator<int> GetEnumerator()
        {
            return new RangeEnumerator(m_from, m_count, 0).AsClassicEnumerator();
        }

        //-----------------------------------------------------------------------------------
        // The actual enumerator that walks over the specified range.
        //

        class RangeEnumerator : QueryOperatorEnumerator<int, int>
        {
            private readonly int m_from;         // The initial value.
            private readonly int m_count;        // How many values to yield.
            private readonly int m_initialIndex; // The ordinal index of the first value in the range.
            private Shared<int> m_currentCount;  // The 0-based index of the current value. [allocate in moveNext to avoid false-sharing]

            //-----------------------------------------------------------------------------------
            // Creates a new enumerator.
            //

            internal RangeEnumerator(int from, int count, int initialIndex)
            {
                m_from = from;
                m_count = count;
                m_initialIndex = initialIndex;
            }

            //-----------------------------------------------------------------------------------
            // Basic enumeration method. This implements the logic to walk the desired
            // range, using the step specified at construction time.
            //

            internal override bool MoveNext(ref int currentElement, ref int currentKey)
            {
                if (m_currentCount == null)
                    m_currentCount = new Shared<int>(-1);

                // Calculate the next index and ensure it falls within our range.
                int nextCount = m_currentCount.Value + 1;
                if (nextCount < m_count)
                {
                    m_currentCount.Value = nextCount;
                    currentElement = nextCount + m_from;
                    currentKey = nextCount + m_initialIndex;
                    return true;
                }

                return false;
            }

            internal override void Reset()
            {
                // We set the current value such that the next addition of step
                // results in the 1st real value in the range.
                m_currentCount = null;
            }
        }
    }
}
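The partitioning arithmetic above hands out count / partitionCount indices per partition and gives the first count % partitionCount partitions one extra index, so partition sizes differ by at most one and never overlap. A small standalone illustration of the same calculation (not part of the imported source):

    // Splitting 10 indices starting at 100 across 4 partitions.
    int from = 100, count = 10, partitionCount = 4;
    int stride = count / partitionCount;               // 2
    int biggerPartitionCount = count % partitionCount; // 2

    int doneCount = 0;
    for (int i = 0; i < partitionCount; i++)
    {
        int partitionSize = (i < biggerPartitionCount) ? stride + 1 : stride;
        Console.WriteLine("partition {0}: start={1}, size={2}", i, from + doneCount, partitionSize);
        doneCount += partitionSize;
    }
    // Prints sizes 3, 3, 2, 2 -- together covering indices 100..109 exactly once.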
@@ -0,0 +1,129 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// RepeatEnumerable.cs
//
// <OWNER>[....]</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections.Generic;
using System.Diagnostics.Contracts;

namespace System.Linq.Parallel
{
    /// <summary>
    /// A simple enumerable type that implements the repeat algorithm. It also supports
    /// partitioning of the count space by implementing an interface that PLINQ recognizes.
    /// </summary>
    /// <typeparam name="TResult"></typeparam>
    internal class RepeatEnumerable<TResult> : ParallelQuery<TResult>, IParallelPartitionable<TResult>
    {
        private TResult m_element; // Element value to repeat.
        private int m_count;       // Count of element values.

        //-----------------------------------------------------------------------------------
        // Constructs a new repeat enumerable object for the repeat operation.
        //

        internal RepeatEnumerable(TResult element, int count)
            : base(QuerySettings.Empty)
        {
            Contract.Assert(count >= 0, "count not within range (must be >= 0)");
            m_element = element;
            m_count = count;
        }

        //-----------------------------------------------------------------------------------
        // Retrieves 'count' partitions, dividing the total count by the partition count,
        // and having each partition produce a certain number of repeated elements.
        //

        public QueryOperatorEnumerator<TResult, int>[] GetPartitions(int partitionCount)
        {
            // Calculate a stride size.
            int stride = (m_count + partitionCount - 1) / partitionCount;

            // Now generate the actual enumerators. Each produces 'stride' elements, except
            // for the last partition which may produce fewer (if 'm_count' isn't evenly
            // divisible by 'partitionCount').
            QueryOperatorEnumerator<TResult, int>[] partitions = new QueryOperatorEnumerator<TResult, int>[partitionCount];
            for (int i = 0, offset = 0; i < partitionCount; i++, offset += stride)
            {
                if ((offset + stride) > m_count)
                {
                    partitions[i] = new RepeatEnumerator(m_element, offset < m_count ? m_count - offset : 0, offset);
                }
                else
                {
                    partitions[i] = new RepeatEnumerator(m_element, stride, offset);
                }
            }

            return partitions;
        }

        //-----------------------------------------------------------------------------------
        // Basic IEnumerator<T> method implementations.
        //

        public override IEnumerator<TResult> GetEnumerator()
        {
            return new RepeatEnumerator(m_element, m_count, 0).AsClassicEnumerator();
        }

        //-----------------------------------------------------------------------------------
        // The actual enumerator that produces a set of repeated elements.
        //

        class RepeatEnumerator : QueryOperatorEnumerator<TResult, int>
        {
            private readonly TResult m_element; // The element to repeat.
            private readonly int m_count;       // The number of times to repeat it.
            private readonly int m_indexOffset; // Our index offset.
            private Shared<int> m_currentIndex; // The number of times we have already repeated it. [allocate in moveNext to avoid false-sharing]

            //-----------------------------------------------------------------------------------
            // Creates a new enumerator.
            //

            internal RepeatEnumerator(TResult element, int count, int indexOffset)
            {
                m_element = element;
                m_count = count;
                m_indexOffset = indexOffset;
            }

            //-----------------------------------------------------------------------------------
            // Basic IEnumerator<T> methods. These produce the repeating sequence..
            //

            internal override bool MoveNext(ref TResult currentElement, ref int currentKey)
            {
                if (m_currentIndex == null)
                    m_currentIndex = new Shared<int>(-1);

                if (m_currentIndex.Value < (m_count - 1))
                {
                    ++m_currentIndex.Value;
                    currentElement = m_element;
                    currentKey = m_currentIndex.Value + m_indexOffset;
                    return true;
                }

                return false;
            }

            internal override void Reset()
            {
                m_currentIndex = null;
            }
        }
    }
}
@@ -0,0 +1,93 @@
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// ArrayMergeHelper.cs
//
// <OWNER>[....]</OWNER>
//
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Linq.Parallel;
using System.Diagnostics;
using System.Threading.Tasks;

namespace System.Linq.Parallel
{
    /// <summary>
    /// A special merge helper for indexible queries. Given an indexible query, we know how many elements
    /// we'll have in the result set, so we can allocate the array ahead of time. Then, as each result element
    /// is produced, we can directly insert it into the appropriate position in the output array, paying
    /// no extra cost for ordering.
    /// </summary>
    /// <typeparam name="TInputOutput"></typeparam>
    internal class ArrayMergeHelper<TInputOutput> : IMergeHelper<TInputOutput>
    {
        private QueryResults<TInputOutput> m_queryResults; // Indexible query results
        private TInputOutput[] m_outputArray;              // The output array.
        private QuerySettings m_settings;                  // Settings for the query.

        /// <summary>
        /// Instantiates the array merge helper.
        /// </summary>
        /// <param name="settings">The query settings</param>
        /// <param name="queryResults">The query results</param>
        public ArrayMergeHelper(QuerySettings settings, QueryResults<TInputOutput> queryResults)
        {
            m_settings = settings;
            m_queryResults = queryResults;

            int count = m_queryResults.Count;
            m_outputArray = new TInputOutput[count];
        }

        /// <summary>
        /// A method used as a delegate passed into the ForAll operator
        /// </summary>
        private void ToArrayElement(int index)
        {
            m_outputArray[index] = m_queryResults[index];
        }

        /// <summary>
        /// Schedules execution of the merge itself.
        /// </summary>
        public void Execute()
        {
            ParallelQuery<int> query = ParallelEnumerable.Range(0, m_queryResults.Count);
            query = new QueryExecutionOption<int>(QueryOperator<int>.AsQueryOperator(query), m_settings);
            query.ForAll(ToArrayElement);
        }

        /// <summary>
        /// Gets the enumerator over the results.
        ///
        /// We never expect this method to be called. ArrayMergeHelper is intended to be used when we want
        /// to consume the results using GetResultsAsArray().
        /// </summary>
        public IEnumerator<TInputOutput> GetEnumerator()
        {
            Debug.Assert(false, "ArrayMergeHelper<>.GetEnumerator() is not intended to be used. Call GetResultsAsArray() instead.");
            return ((IEnumerable<TInputOutput>)GetResultsAsArray()).GetEnumerator();
        }

        /// <summary>
        /// Returns the merged results as an array.
        /// </summary>
        /// <returns></returns>
        public TInputOutput[] GetResultsAsArray()
        {
            Debug.Assert(m_outputArray != null);
            return m_outputArray;
        }
    }
}
@@ -0,0 +1,296 @@
|
||||
// ==++==
|
||||
//
|
||||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
//
|
||||
// ==--==
|
||||
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
|
||||
//
|
||||
// AsynchronousChannelMergeEnumerator.cs
|
||||
//
|
||||
// <OWNER>[....]</OWNER>
|
||||
//
|
||||
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
||||
|
||||
using System.Threading;
|
||||
using System.Diagnostics.Contracts;
|
||||
#if SILVERLIGHT
|
||||
using System.Core; // for System.Core.SR
|
||||
#endif
|
||||
|
||||
namespace System.Linq.Parallel
|
||||
{
|
||||
/// <summary>
|
||||
/// An enumerator that merges multiple one-to-one channels into a single output
|
||||
/// stream, including any necessary blocking and synchronization. This is an
|
||||
/// asynchronous enumerator, i.e. the producers may be inserting items into the
|
||||
/// channels concurrently with the consumer taking items out of them. Therefore,
|
||||
/// enumerating this object can cause the current thread to block.
|
||||
///
|
||||
/// We use a biased choice algorithm to choose from our consumer channels. I.e. we
|
||||
/// will prefer to process elements in a fair round-robin fashion, but will
|
||||
/// occ----ionally bypass this if a channel is empty.
|
||||
    ///
    /// </summary>
    /// <typeparam name="T"></typeparam>
    internal sealed class AsynchronousChannelMergeEnumerator<T> : MergeEnumerator<T>
    {
        private AsynchronousChannel<T>[] m_channels; // The channels being enumerated.
        private IntValueEvent m_consumerEvent;       // The consumer event.
        private bool[] m_done;                       // Tracks which channels are done.
        private int m_channelIndex;                  // The next channel from which we'll dequeue.
        private T m_currentElement;                  // The remembered element from the previous MoveNext.

        //-----------------------------------------------------------------------------------
        // Allocates a new enumerator over a set of one-to-one channels.
        //

        internal AsynchronousChannelMergeEnumerator(
            QueryTaskGroupState taskGroupState, AsynchronousChannel<T>[] channels, IntValueEvent consumerEvent)
            : base(taskGroupState)
        {
            Contract.Assert(channels != null);
#if DEBUG
            foreach (AsynchronousChannel<T> c in channels) Contract.Assert(c != null);
#endif

            m_channels = channels;
            m_channelIndex = -1; // To catch calls to Current before MoveNext.
            m_done = new bool[m_channels.Length]; // Initialized to { false }, i.e. no channels done.
            m_consumerEvent = consumerEvent;
        }

        //-----------------------------------------------------------------------------------
        // Retrieves the current element.
        //
        // Notes:
        //     This throws if we haven't begun enumerating or have gone past the end of the
        //     data source.
        //

        public override T Current
        {
            get
            {
                if (m_channelIndex == -1 || m_channelIndex == m_channels.Length)
                {
                    throw new InvalidOperationException(SR.GetString(SR.PLINQ_CommonEnumerator_Current_NotStarted));
                }

                return m_currentElement;
            }
        }

        //-----------------------------------------------------------------------------------
        // Positions the enumerator over the next element. This includes merging as we
        // enumerate, which may also involve waiting for producers to generate new items.
        //
        // Return Value:
        //     True if there's a current element, false if we've reached the end.
        //

        public override bool MoveNext()
        {
            // On the first call to MoveNext, we advance the position to a real channel.
            int index = m_channelIndex;
            if (index == -1)
            {
                m_channelIndex = index = 0;
            }

            // If we're past the end, enumeration is done.
            if (index == m_channels.Length)
            {
                return false;
            }

            // Else try the fast path.
            if (!m_done[index] && m_channels[index].TryDequeue(ref m_currentElement))
            {
                m_channelIndex = (index + 1) % m_channels.Length;
                return true;
            }

            return MoveNextSlowPath();
        }

        //-----------------------------------------------------------------------------------
        // The slow path used when a quick loop through the channels didn't come up
        // with anything. We may need to block and/or mark channels as done.
        //

        private bool MoveNextSlowPath()
        {
            int doneChannels = 0;

            // Remember the first channel we are looking at. If we pass through all of the
            // channels without finding an element, we will go to sleep.
            int firstChannelIndex = m_channelIndex;

            int currChannelIndex;
            while ((currChannelIndex = m_channelIndex) != m_channels.Length)
            {
                AsynchronousChannel<T> current = m_channels[currChannelIndex];

                bool isDone = m_done[currChannelIndex];
                if (!isDone && current.TryDequeue(ref m_currentElement))
                {
                    // The channel has an item to be processed. We already remembered the current
                    // element (Dequeue stores it as an out-parameter), so we just return true
                    // after advancing to the next channel.
                    m_channelIndex = (currChannelIndex + 1) % m_channels.Length;
                    return true;
                }
                else
                {
                    // There isn't an element in the current channel. Check whether the channel
                    // is done before possibly waiting for an element to arrive.
                    if (!isDone && current.IsDone)
                    {
                        // We must check to ensure an item didn't get enqueued after originally
                        // trying to dequeue above and reading the IsDone flag. If there are still
                        // elements, the producer may have marked the channel as done but of course
                        // we still need to continue processing them.
                        if (!current.IsChunkBufferEmpty)
                        {
                            bool dequeueResult = current.TryDequeue(ref m_currentElement);
                            Contract.Assert(dequeueResult, "channel isn't empty, yet the dequeue failed, hmm");
                            return true;
                        }

                        // Mark this channel as being truly done. We won't consider it any longer.
                        m_done[currChannelIndex] = true;
                        isDone = true;
                        current.Dispose();
                    }

                    if (isDone)
                    {
                        Contract.Assert(m_channels[currChannelIndex].IsDone, "thought this channel was done");
                        Contract.Assert(m_channels[currChannelIndex].IsChunkBufferEmpty, "thought this channel was empty");

                        // Increment the count of done channels that we've seen. If this reaches the
                        // total number of channels, we know we're finally done.
                        if (++doneChannels == m_channels.Length)
                        {
                            // Remember that we are done by setting the index past the end.
                            m_channelIndex = currChannelIndex = m_channels.Length;
                            break;
                        }
                    }

                    // Still no element. Advance to the next channel and continue searching.
                    m_channelIndex = currChannelIndex = (currChannelIndex + 1) % m_channels.Length;

                    // If the channels aren't done, and we've inspected all of the queues and still
                    // haven't found anything, we will go ahead and wait on all the queues.
                    if (currChannelIndex == firstChannelIndex)
                    {
                        // On our first pass through the queues, we didn't have any side-effects
                        // that would let a producer know we are waiting. Now we go through and
                        // accumulate a set of events to wait on.
                        try
                        {
                            // Reset our done channels counter; we need to tally them again during the
                            // second pass through.
                            doneChannels = 0;

                            for (int i = 0; i < m_channels.Length; i++)
                            {
                                bool channelIsDone = false;
                                if (!m_done[i] && m_channels[i].TryDequeue(ref m_currentElement, ref channelIsDone))
                                {
                                    // The channel has received an item since the last time we checked.
                                    // Just return and let the consumer process the element returned.
                                    return true;
                                }
                                else if (channelIsDone)
                                {
                                    if (!m_done[i])
                                    {
                                        m_done[i] = true;
                                    }

                                    if (++doneChannels == m_channels.Length)
                                    {
                                        // No need to wait. All channels are done. Remember this by setting
                                        // the index past the end of the channel list.
                                        m_channelIndex = currChannelIndex = m_channels.Length;
                                        break;
                                    }
                                }
                            }

                            // If all channels are done, we can break out of the loop entirely.
                            if (currChannelIndex == m_channels.Length)
                            {
                                break;
                            }

                            // This Wait() does not require cancellation support as it will wake up when all the producers into the
                            // channel have finished. Hence, if all the producers wake up on cancellation, so will this.
                            m_consumerEvent.Wait();
                            m_channelIndex = currChannelIndex = m_consumerEvent.Value;
                            m_consumerEvent.Reset();

                            //
                            // We have woken up, and the channel that caused this is contained in the
                            // returned index. This could be due to one of two reasons. Either the channel's
                            // producer has notified that it is done, in which case we just have to take it
                            // out of our current wait-list and redo the wait, or a channel actually has an
                            // item which we will go ahead and process.
                            //
                            // We just go back 'round the loop to accomplish this logic. Reset the channel
                            // index and # of done channels. Go back to the beginning, starting with the channel
                            // that caused us to wake up.
                            //

                            firstChannelIndex = currChannelIndex;
                            doneChannels = 0;
                        }
                        finally
                        {
                            // We have to guarantee that any waits we said we would perform are undone.
                            for (int i = 0; i < m_channels.Length; i++)
                            {
                                // If we retrieved an event from a channel, we need to reset the wait.
                                if (!m_done[i])
                                {
                                    // We may be calling DoneWithDequeueWait() unnecessarily here, since some of these
                                    // are not necessarily set as waiting. Unnecessary calls to DoneWithDequeueWait()
                                    // must be accepted by the channel.
                                    m_channels[i].DoneWithDequeueWait();
                                }
                            }
                        }
                    }
                }
            }

            TraceHelpers.TraceInfo("[timing]: {0}: Completed the merge", DateTime.Now.Ticks);

            // If we got this far, it means we've exhausted our channels.
            Contract.Assert(currChannelIndex == m_channels.Length);

            // If any tasks failed, propagate the failure now. We must do it here, because the merge
            // executor returns control back to the caller before the query has completed; contrast
            // this with synchronous enumeration where we can wait before returning.
            m_taskGroupState.QueryEnd(false);

            return false;
        }

        public override void Dispose()
        {
            if (m_consumerEvent != null)
            {
                // MergeEnumerator.Dispose() will wait until all producers complete.
                // So, we can be sure that no producer will attempt to signal the consumer event, and
                // we can dispose it.
                base.Dispose();

                m_consumerEvent.Dispose();
                m_consumerEvent = null;
            }
        }
    }
}
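// Illustrative sketch (not part of the original sources): the round-robin consume loop that the
// class summary above describes, rewritten against plain System.Collections.Concurrent.ConcurrentQueue<T>
// instead of the internal AsynchronousChannel<T> and IntValueEvent types. The "RoundRobinMerge" name,
// the 'producersDone' delegate, and the SpinWait fallback (in place of the event-based wait the real
// enumerator uses) are assumptions made purely for the sake of a small, self-contained example.
namespace MergeSketches
{
    using System;
    using System.Collections.Concurrent;
    using System.Collections.Generic;
    using System.Threading;

    internal static class RoundRobinMerge
    {
        // Yields items from several producer queues in a roughly fair, round-robin order.
        // The 'producersDone' flag stands in for the per-channel IsDone bookkeeping.
        internal static IEnumerable<T> Consume<T>(ConcurrentQueue<T>[] queues, Func<bool> producersDone)
        {
            int index = 0;                       // Next queue to inspect (like m_channelIndex).
            SpinWait spinner = new SpinWait();

            while (true)
            {
                bool yieldedAny = false;

                // One fair pass over all queues, starting from 'index'.
                for (int i = 0; i < queues.Length; i++)
                {
                    int current = (index + i) % queues.Length;
                    T item;
                    if (queues[current].TryDequeue(out item))
                    {
                        index = (current + 1) % queues.Length;   // Advance so the next pass stays fair.
                        yieldedAny = true;
                        yield return item;
                    }
                }

                // If a full pass found nothing and all producers are finished, we are done.
                if (!yieldedAny)
                {
                    if (producersDone())
                        yield break;
                    spinner.SpinOnce();          // The real enumerator blocks on a consumer event here.
                }
            }
        }
    }
}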
@@ -0,0 +1,179 @@
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// DefaultMergeHelper.cs
//
// <OWNER>[....]</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using System.Diagnostics.Contracts;

namespace System.Linq.Parallel
{
    /// <summary>
    /// The default merge helper uses a set of straightforward algorithms for output
    /// merging. Namely, for synchronous merges, the input data is yielded from the
    /// input data streams in "depth first" left-to-right order. For asynchronous merges,
    /// on the other hand, we use a biased choice algorithm to favor input channels in
    /// a "fair" way. No order preservation is carried out by this helper.
    /// </summary>
    /// <typeparam name="TInputOutput"></typeparam>
    /// <typeparam name="TIgnoreKey"></typeparam>
    internal class DefaultMergeHelper<TInputOutput, TIgnoreKey> : IMergeHelper<TInputOutput>
    {
        private QueryTaskGroupState m_taskGroupState;                        // State shared among tasks.
        private PartitionedStream<TInputOutput, TIgnoreKey> m_partitions;    // Source partitions.
        private AsynchronousChannel<TInputOutput>[] m_asyncChannels;         // Destination channels (async).
        private SynchronousChannel<TInputOutput>[] m_syncChannels;           // Destination channels (sync).
        private IEnumerator<TInputOutput> m_channelEnumerator;               // Output enumerator.
        private TaskScheduler m_taskScheduler;                               // The task manager to execute the query.
        private bool m_ignoreOutput;                                         // Whether we're enumerating "for effect".

        //-----------------------------------------------------------------------------------
        // Instantiates a new merge helper.
        //
        // Arguments:
        //     partitions - the source partitions from which to consume data.
        //     ignoreOutput - whether we're enumerating "for effect" or for output.
        //     pipeline - whether to use a pipelined merge.
        //

        internal DefaultMergeHelper(PartitionedStream<TInputOutput, TIgnoreKey> partitions, bool ignoreOutput, ParallelMergeOptions options,
                                    TaskScheduler taskScheduler, CancellationState cancellationState, int queryId)
        {
            Contract.Assert(partitions != null);

            m_taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
            m_partitions = partitions;
            m_taskScheduler = taskScheduler;
            m_ignoreOutput = ignoreOutput;
            IntValueEvent consumerEvent = new IntValueEvent();

            TraceHelpers.TraceInfo("DefaultMergeHelper::.ctor(..): creating a default merge helper");

            // If output won't be ignored, we need to manufacture a set of channels for the consumer.
            // Otherwise, when the merge is executed, we'll just invoke the activities themselves.
            if (!ignoreOutput)
            {
                // Create the asynchronous or synchronous channels, based on whether we're pipelining.
                if (options != ParallelMergeOptions.FullyBuffered)
                {
                    if (partitions.PartitionCount > 1)
                    {
                        m_asyncChannels =
                            MergeExecutor<TInputOutput>.MakeAsynchronousChannels(partitions.PartitionCount, options, consumerEvent, cancellationState.MergedCancellationToken);
                        m_channelEnumerator = new AsynchronousChannelMergeEnumerator<TInputOutput>(m_taskGroupState, m_asyncChannels, consumerEvent);
                    }
                    else
                    {
                        // If there is only one partition, we don't need to create channels. The only producer enumerator
                        // will be used as the result enumerator.
                        m_channelEnumerator = ExceptionAggregator.WrapQueryEnumerator(partitions[0], m_taskGroupState.CancellationState).GetEnumerator();
                    }
                }
                else
                {
                    m_syncChannels =
                        MergeExecutor<TInputOutput>.MakeSynchronousChannels(partitions.PartitionCount);
                    m_channelEnumerator = new SynchronousChannelMergeEnumerator<TInputOutput>(m_taskGroupState, m_syncChannels);
                }

                Contract.Assert(m_asyncChannels == null || m_asyncChannels.Length == partitions.PartitionCount);
                Contract.Assert(m_syncChannels == null || m_syncChannels.Length == partitions.PartitionCount);
                Contract.Assert(m_channelEnumerator != null, "enumerator can't be null if we're not ignoring output");
            }
        }

        //-----------------------------------------------------------------------------------
        // Schedules execution of the merge itself.
        //
        // Arguments:
        //     ordinalIndexState - the state of the ordinal index of the merged partitions
        //

        void IMergeHelper<TInputOutput>.Execute()
        {
            if (m_asyncChannels != null)
            {
                SpoolingTask.SpoolPipeline<TInputOutput, TIgnoreKey>(m_taskGroupState, m_partitions, m_asyncChannels, m_taskScheduler);
            }
            else if (m_syncChannels != null)
            {
                SpoolingTask.SpoolStopAndGo<TInputOutput, TIgnoreKey>(m_taskGroupState, m_partitions, m_syncChannels, m_taskScheduler);
            }
            else if (m_ignoreOutput)
            {
                SpoolingTask.SpoolForAll<TInputOutput, TIgnoreKey>(m_taskGroupState, m_partitions, m_taskScheduler);
            }
            else
            {
                // The last case is a pipelining merge when DOP = 1. In this case, the consumer thread itself will compute the results,
                // so we don't need any tasks to compute the results asynchronously.
                Contract.Assert(m_partitions.PartitionCount == 1);
            }
        }

        //-----------------------------------------------------------------------------------
        // Gets the enumerator from which to enumerate output results.
        //

        IEnumerator<TInputOutput> IMergeHelper<TInputOutput>.GetEnumerator()
        {
            Contract.Assert(m_ignoreOutput || m_channelEnumerator != null);
            return m_channelEnumerator;
        }

        //-----------------------------------------------------------------------------------
        // Returns the results as an array.
        //
        // @

        public TInputOutput[] GetResultsAsArray()
        {
            if (m_syncChannels != null)
            {
                // Right size an array.
                int totalSize = 0;
                for (int i = 0; i < m_syncChannels.Length; i++)
                {
                    totalSize += m_syncChannels[i].Count;
                }
                TInputOutput[] array = new TInputOutput[totalSize];

                // And then blit the elements in.
                int current = 0;
                for (int i = 0; i < m_syncChannels.Length; i++)
                {
                    m_syncChannels[i].CopyTo(array, current);
                    current += m_syncChannels[i].Count;
                }
                return array;
            }
            else
            {
                List<TInputOutput> output = new List<TInputOutput>();
                using (IEnumerator<TInputOutput> enumerator = ((IMergeHelper<TInputOutput>)this).GetEnumerator())
                {
                    while (enumerator.MoveNext())
                    {
                        output.Add(enumerator.Current);
                    }
                }

                return output.ToArray();
            }
        }
    }
}
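// Illustrative sketch (not part of the original sources): the "right-size, then blit" concatenation
// that DefaultMergeHelper.GetResultsAsArray performs over its synchronous channels, shown here with
// plain List<T> buffers standing in for SynchronousChannel<TInputOutput>. The names below are
// assumptions made for the example only; the two-pass shape (count first, copy second) is the point,
// since it lets the output array be allocated exactly once.
namespace MergeSketches
{
    using System.Collections.Generic;

    internal static class ConcatenateSketch
    {
        // Computes the total length first, then copies each partition's buffer into its slice.
        internal static T[] ToSingleArray<T>(IReadOnlyList<List<T>> partitionBuffers)
        {
            int totalSize = 0;
            for (int i = 0; i < partitionBuffers.Count; i++)
            {
                totalSize += partitionBuffers[i].Count;
            }

            T[] result = new T[totalSize];

            int offset = 0;
            for (int i = 0; i < partitionBuffers.Count; i++)
            {
                // List<T>.CopyTo(T[], int) plays the role of SynchronousChannel<T>.CopyTo here.
                partitionBuffers[i].CopyTo(result, offset);
                offset += partitionBuffers[i].Count;
            }
            return result;
        }
    }
}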
@@ -0,0 +1,35 @@
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// IMergeHelper.cs
//
// <OWNER>igoro</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections.Generic;

namespace System.Linq.Parallel
{
    /// <summary>
    /// Used as a stand-in for replaceable merge algorithms. Alternative implementations
    /// are chosen based on the style of merge required.
    /// </summary>
    /// <typeparam name="TInputOutput"></typeparam>
    internal interface IMergeHelper<TInputOutput>
    {
        // Begins execution of the merge.
        void Execute();

        // Return an enumerator that yields the merged output.
        IEnumerator<TInputOutput> GetEnumerator();

        // Returns the merged output as an array.
        TInputOutput[] GetResultsAsArray();
    }
}
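// Illustrative sketch (not part of the original sources): the simplest possible shape an
// IMergeHelper<TInputOutput> implementation can take -- results that are already materialized, so
// Execute() has nothing to schedule. Because the real interface is internal to System.Linq.Parallel,
// this hypothetical class mirrors its three members rather than implementing the interface itself.
namespace MergeSketches
{
    using System.Collections.Generic;

    internal sealed class PrecomputedMergeHelper<TInputOutput>
    {
        private readonly TInputOutput[] m_results;

        internal PrecomputedMergeHelper(TInputOutput[] results)
        {
            m_results = results;
        }

        // Begins execution of the merge -- a no-op, since the results already exist.
        public void Execute()
        {
        }

        // Returns an enumerator that yields the merged output.
        public IEnumerator<TInputOutput> GetEnumerator()
        {
            return ((IEnumerable<TInputOutput>)m_results).GetEnumerator();
        }

        // Returns the merged output as an array.
        public TInputOutput[] GetResultsAsArray()
        {
            return m_results;
        }
    }
}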
@@ -0,0 +1,77 @@
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// MergeEnumerator.cs
//
// <OWNER>[....]</OWNER>
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

using System.Collections;
using System.Collections.Generic;
using System.Diagnostics.Contracts;

namespace System.Linq.Parallel
{
    /// <summary>
    /// Convenience class used by enumerators that merge many partitions into one.
    /// </summary>
    /// <typeparam name="TInputOutput"></typeparam>
    internal abstract class MergeEnumerator<TInputOutput> : IEnumerator<TInputOutput>
    {
        protected QueryTaskGroupState m_taskGroupState;

        //-----------------------------------------------------------------------------------
        // Initializes a new enumerator with the specified group state.
        //

        protected MergeEnumerator(QueryTaskGroupState taskGroupState)
        {
            Contract.Assert(taskGroupState != null);
            m_taskGroupState = taskGroupState;
        }

        //-----------------------------------------------------------------------------------
        // Abstract members of IEnumerator<T> that must be implemented by concrete subclasses.
        //

        public abstract TInputOutput Current { get; }

        public abstract bool MoveNext();

        //-----------------------------------------------------------------------------------
        // Straightforward IEnumerator<T> methods. So subclasses needn't bother.
        //

        object IEnumerator.Current
        {
            get { return ((IEnumerator<TInputOutput>)this).Current; }
        }

        public virtual void Reset()
        {
            // (intentionally left blank)
        }

        //-----------------------------------------------------------------------------------
        // If the enumerator is disposed of before the query finishes, we need to ensure
        // we properly tear down the query such that exceptions are not lost.
        //

        public virtual void Dispose()
        {
            // If the enumerator is being disposed of before the query has finished,
            // we will wait for the query to finish. Cancellation should have already
            // been initiated, so we just need to ensure exceptions are propagated.
            if (!m_taskGroupState.IsAlreadyEnded)
            {
                Contract.Assert(m_taskGroupState.CancellationState.TopLevelDisposedFlag.Value);
                m_taskGroupState.QueryEnd(true);
            }
        }
    }
}
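// Illustrative sketch (not part of the original sources): a minimal concrete enumerator written in the
// same style MergeEnumerator<TInputOutput> prescribes for its subclasses -- a typed Current, a MoveNext
// that does the real work, and the non-generic IEnumerator.Current simply forwarding to the typed one.
// It enumerates a plain array, since QueryTaskGroupState is internal and not available to examples;
// the class and field names are assumptions for the example only.
namespace MergeSketches
{
    using System.Collections;
    using System.Collections.Generic;

    internal sealed class ArrayEnumeratorSketch<T> : IEnumerator<T>
    {
        private readonly T[] m_items;
        private int m_index = -1;                 // -1 catches calls to Current before MoveNext.

        internal ArrayEnumeratorSketch(T[] items)
        {
            m_items = items;
        }

        public T Current
        {
            get
            {
                if (m_index == -1 || m_index == m_items.Length)
                {
                    throw new System.InvalidOperationException("Enumeration has not started or has finished.");
                }
                return m_items[m_index];
            }
        }

        public bool MoveNext()
        {
            if (m_index < m_items.Length)
            {
                m_index++;
            }
            return m_index < m_items.Length;
        }

        // Straightforward IEnumerator members, mirroring the forwarding pattern in the base class above.
        object IEnumerator.Current
        {
            get { return ((IEnumerator<T>)this).Current; }
        }

        public void Reset()
        {
            m_index = -1;
        }

        public void Dispose()
        {
            // Nothing to release for an array-backed enumerator.
        }
    }
}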
Some files were not shown because too many files have changed in this diff