Imported Upstream version 4.0.0~alpha1

Former-commit-id: 806294f5ded97629b74c85c09952f2a74fe182d9
Jo Shields
2015-04-07 09:35:12 +01:00
parent 283343f570
commit 3c1f479b9d
22469 changed files with 2931443 additions and 869343 deletions


@@ -0,0 +1,126 @@
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// CdsSyncEtwBCLProvider.cs
//
// <OWNER>[....]</OWNER>
//
// A helper class for firing ETW events related to the Coordination Data Structure
// synchronization primitives. This provider is used by CDS synchronization primitives in both mscorlib.dll
// and system.dll. The purpose of sharing the provider class is to be able to enable
// ETW tracing on all CDS synchronization types with a single ETW provider GUID, and to minimize
// the number of providers in use.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
using System;
using System.Collections.Generic;
using System.Text;
using System.Security;
namespace System.Threading
{
#if !FEATURE_PAL // PAL doesn't support eventing
using System.Diagnostics.Tracing;
/// <summary>Provides an event source for tracing CDS synchronization information.</summary>
[System.Runtime.CompilerServices.FriendAccessAllowed]
[EventSource(
Name = "System.Threading.SynchronizationEventSource",
Guid = "EC631D38-466B-4290-9306-834971BA0217",
LocalizationResources = "mscorlib")]
internal sealed class CdsSyncEtwBCLProvider : EventSource
{
/// <summary>
/// Defines the singleton instance for the CDS synchronization ETW provider.
/// The CDS synchronization event provider GUID is {EC631D38-466B-4290-9306-834971BA0217}.
/// </summary>
public static CdsSyncEtwBCLProvider Log = new CdsSyncEtwBCLProvider();
/// <summary>Prevent external instantiation. All logging should go through the Log instance.</summary>
private CdsSyncEtwBCLProvider() { }
/// <summary>Enabled for all keywords.</summary>
private const EventKeywords ALL_KEYWORDS = (EventKeywords)(-1);
//-----------------------------------------------------------------------------------
//
// CDS Synchronization Event IDs (must be unique)
//
private const int SPINLOCK_FASTPATHFAILED_ID = 1;
private const int SPINWAIT_NEXTSPINWILLYIELD_ID = 2;
private const int BARRIER_PHASEFINISHED_ID = 3;
/////////////////////////////////////////////////////////////////////////////////////
//
// SpinLock Events
//
[Event(SPINLOCK_FASTPATHFAILED_ID, Level = EventLevel.Warning)]
public void SpinLock_FastPathFailed(int ownerID)
{
if (IsEnabled(EventLevel.Warning, ALL_KEYWORDS))
{
WriteEvent(SPINLOCK_FASTPATHFAILED_ID, ownerID);
}
}
/////////////////////////////////////////////////////////////////////////////////////
//
// SpinWait Events
//
[Event(SPINWAIT_NEXTSPINWILLYIELD_ID, Level = EventLevel.Informational)]
public void SpinWait_NextSpinWillYield()
{
if (IsEnabled(EventLevel.Informational, ALL_KEYWORDS))
{
WriteEvent(SPINWAIT_NEXTSPINWILLYIELD_ID);
}
}
//
// Events below this point are used by the CDS types in System.dll
//
/////////////////////////////////////////////////////////////////////////////////////
//
// Barrier Events
//
[SecuritySafeCritical]
[Event(BARRIER_PHASEFINISHED_ID, Level = EventLevel.Verbose, Version=1)]
public void Barrier_PhaseFinished(bool currentSense, long phaseNum)
{
if (IsEnabled(EventLevel.Verbose, ALL_KEYWORDS))
{
// WriteEvent(BARRIER_PHASEFINISHED_ID, currentSense, phaseNum);
// There is no explicit WriteEvent() overload matching this event's bool+long fields.
// Therefore calling WriteEvent() would hit the "params" overload, which leads to an
// object allocation every time this event is fired. To prevent that problem we will
// call WriteEventCore(), which works with a stack based EventData array populated with
// the event fields.
unsafe
{
EventData* eventPayload = stackalloc EventData[2];
Int32 senseAsInt32 = currentSense ? 1 : 0; // write out Boolean as Int32
eventPayload[0].Size = sizeof(int);
eventPayload[0].DataPointer = ((IntPtr)(&senseAsInt32));
eventPayload[1].Size = sizeof(long);
eventPayload[1].DataPointer = ((IntPtr)(&phaseNum));
WriteEventCore(BARRIER_PHASEFINISHED_ID, 2, eventPayload);
}
}
}
}
#endif // !FEATURE_PAL
}
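
The provider above only produces data when something enables it. Below is a minimal, illustrative sketch (not part of this commit) of an in-process listener built on System.Diagnostics.Tracing.EventListener; it enables the provider by the Name declared in the EventSource attribute above. The CdsSyncListener class name and the console output are invented for the example.

using System;
using System.Diagnostics.Tracing;

internal sealed class CdsSyncListener : EventListener
{
    protected override void OnEventSourceCreated(EventSource eventSource)
    {
        // Enable only the CDS synchronization provider declared above.
        if (eventSource.Name == "System.Threading.SynchronizationEventSource")
        {
            EnableEvents(eventSource, EventLevel.Verbose, (EventKeywords)(-1));
        }
    }

    protected override void OnEventWritten(EventWrittenEventArgs eventData)
    {
        // Event IDs match the constants above:
        // 1 = SpinLock_FastPathFailed, 2 = SpinWait_NextSpinWillYield, 3 = Barrier_PhaseFinished.
        Console.WriteLine("CDS sync event id {0}, payload items: {1}",
                          eventData.EventId,
                          eventData.Payload == null ? 0 : eventData.Payload.Count);
    }
}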

File diff suppressed because it is too large


@@ -0,0 +1,163 @@
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
//
// <OWNER>[....]</OWNER>
////////////////////////////////////////////////////////////////////////////////
using System.Diagnostics.Contracts;
using System.Security.Permissions;
using System.Runtime.CompilerServices;
namespace System.Threading
{
/// <summary>
/// Represents a callback delegate that has been registered with a <see cref="T:System.Threading.CancellationToken">CancellationToken</see>.
/// </summary>
/// <remarks>
/// To unregister a callback, dispose the corresponding Registration instance.
/// </remarks>
[HostProtection(Synchronization = true, ExternalThreading = true)]
public struct CancellationTokenRegistration : IEquatable<CancellationTokenRegistration>, IDisposable
{
private readonly CancellationCallbackInfo m_callbackInfo;
private readonly SparselyPopulatedArrayAddInfo<CancellationCallbackInfo> m_registrationInfo;
internal CancellationTokenRegistration(
CancellationCallbackInfo callbackInfo,
SparselyPopulatedArrayAddInfo<CancellationCallbackInfo> registrationInfo)
{
m_callbackInfo = callbackInfo;
m_registrationInfo = registrationInfo;
}
/// <summary>
/// Attempts to deregister the item. If it's already being run, this may fail.
/// Entails a full memory fence.
/// </summary>
/// <returns>True if the callback was found and deregistered, false otherwise.</returns>
[FriendAccessAllowed]
internal bool TryDeregister()
{
if (m_registrationInfo.Source == null) //can be null for dummy registrations.
return false;
// Try to remove the callback info from the array.
// It is possible the callback info is missing (removed for run, or removed by someone else)
// It is also possible there is info in the array but it doesn't match our current registration's callback info.
CancellationCallbackInfo prevailingCallbackInfoInSlot = m_registrationInfo.Source.SafeAtomicRemove(m_registrationInfo.Index, m_callbackInfo);
if (prevailingCallbackInfoInSlot != m_callbackInfo)
return false; //the callback in the slot wasn't us.
return true;
}
/// <summary>
/// Disposes of the registration and unregisters the target callback from the associated
/// <see cref="T:System.Threading.CancellationToken">CancellationToken</see>.
/// If the target callback is currently executing this method will wait until it completes, except
/// in the degenerate cases where a callback method deregisters itself.
/// </summary>
public void Dispose()
{
// Remove the entry from the array.
// This call includes a full memory fence which prevents potential reorderings of the reads below
bool deregisterOccured = TryDeregister();
// We guarantee that we will not return if the callback is being executed (assuming we are not currently called by the callback itself)
// We achieve this by the following rules:
// 1. if we are called in the context of an executing callback, no need to wait (determined by tracking callback-executor threadID)
// - if the currently executing callback is this CTR, then waiting would deadlock. (We choose to return rather than deadlock)
// - if not, then this CTR cannot be the one executing, hence no need to wait
//
// 2. if deregistration failed, and we are on a different thread, then the callback may be running under control of cts.Cancel()
// => poll until cts.ExecutingCallback is not the one we are trying to deregister.
var callbackInfo = m_callbackInfo;
if (callbackInfo != null)
{
var tokenSource = callbackInfo.CancellationTokenSource;
if (tokenSource.IsCancellationRequested && //running callbacks has commenced.
!tokenSource.IsCancellationCompleted && //running callbacks hasn't finished
!deregisterOccured && //deregistration failed (ie the callback is missing from the list)
tokenSource.ThreadIDExecutingCallbacks != Thread.CurrentThread.ManagedThreadId) //the executingThreadID is not this threadID.
{
// Callback execution is in progress, the executing thread is different to us and has taken the callback for execution
// so observe and wait until this target callback is no longer the executing callback.
tokenSource.WaitForCallbackToComplete(m_callbackInfo);
}
}
}
/// <summary>
/// Determines whether two <see
/// cref="T:System.Threading.CancellationTokenRegistration">CancellationTokenRegistration</see>
/// instances are equal.
/// </summary>
/// <param name="left">The first instance.</param>
/// <param name="right">The second instance.</param>
/// <returns>True if the instances are equal; otherwise, false.</returns>
public static bool operator ==(CancellationTokenRegistration left, CancellationTokenRegistration right)
{
return left.Equals(right);
}
/// <summary>
/// Determines whether two <see cref="T:System.Threading.CancellationTokenRegistration">CancellationTokenRegistration</see> instances are not equal.
/// </summary>
/// <param name="left">The first instance.</param>
/// <param name="right">The second instance.</param>
/// <returns>True if the instances are not equal; otherwise, false.</returns>
public static bool operator !=(CancellationTokenRegistration left, CancellationTokenRegistration right)
{
return !left.Equals(right);
}
/// <summary>
/// Determines whether the current <see cref="T:System.Threading.CancellationTokenRegistration">CancellationTokenRegistration</see> instance is equal to the
/// specified <see cref="T:System.Object"/>.
/// </summary>
/// <param name="obj">The other object to which to compare this instance.</param>
/// <returns>True, if both this and <paramref name="obj"/> are equal. False, otherwise.
/// Two <see cref="T:System.Threading.CancellationTokenRegistration">CancellationTokenRegistration</see> instances are equal if
/// they both refer to the output of a single call to the same Register method of a
/// <see cref="T:System.Threading.CancellationToken">CancellationToken</see>.
/// </returns>
public override bool Equals(object obj)
{
return ((obj is CancellationTokenRegistration) && Equals((CancellationTokenRegistration) obj));
}
/// <summary>
/// Determines whether the current <see cref="T:System.Threading.CancellationToken">CancellationToken</see> instance is equal to the
/// specified <see cref="T:System.Object"/>.
/// </summary>
/// <param name="other">The other <see cref="T:System.Threading.CancellationTokenRegistration">CancellationTokenRegistration</see> to which to compare this instance.</param>
/// <returns>True, if both this and <paramref name="other"/> are equal. False, otherwise.
/// Two <see cref="T:System.Threading.CancellationTokenRegistration">CancellationTokenRegistration</see> instances are equal if
/// they both refer to the output of a single call to the same Register method of a
/// <see cref="T:System.Threading.CancellationToken">CancellationToken</see>.
/// </returns>
public bool Equals(CancellationTokenRegistration other)
{
return m_callbackInfo == other.m_callbackInfo &&
m_registrationInfo.Source == other.m_registrationInfo.Source &&
m_registrationInfo.Index == other.m_registrationInfo.Index;
}
/// <summary>
/// Serves as a hash function for a <see cref="T:System.Threading.CancellationTokenRegistration">CancellationTokenRegistration</see>.
/// </summary>
/// <returns>A hash code for the current <see cref="T:System.Threading.CancellationTokenRegistration">CancellationTokenRegistration</see> instance.</returns>
public override int GetHashCode()
{
if (m_registrationInfo.Source != null)
return m_registrationInfo.Source.GetHashCode() ^ m_registrationInfo.Index.GetHashCode();
return m_registrationInfo.Index.GetHashCode();
}
}
}
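
For orientation, here is a small usage sketch (not part of this commit) showing where a CancellationTokenRegistration comes from and how disposing it unregisters the callback, per the Dispose remarks above. The class and method names in the sketch are invented for the example.

using System;
using System.Threading;

internal static class RegistrationSketch
{
    internal static void Demo()
    {
        var cts = new CancellationTokenSource();

        // CancellationToken.Register returns the CancellationTokenRegistration defined above.
        CancellationTokenRegistration registration =
            cts.Token.Register(() => Console.WriteLine("cancellation observed"));

        // Disposing the registration removes the callback from the token's callback list.
        // Per the remarks above, if the callback is already running on another thread,
        // Dispose waits for it to finish before returning.
        registration.Dispose();

        cts.Cancel();    // the callback does not run; it was deregistered above.
        cts.Dispose();
    }
}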

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,268 @@
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// LazyInitializer.cs
//
// <OWNER>[....]</OWNER>
//
// a set of lightweight static helpers for lazy initialization.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
using System.Security.Permissions;
using System.Diagnostics.Contracts;
namespace System.Threading
{
/// <summary>
/// Specifies how a <see cref="T:System.Threading.Lazy{T}"/> instance should synchronize access among multiple threads.
/// </summary>
public enum LazyThreadSafetyMode
{
/// <summary>
/// This mode makes no guarantees around the thread-safety of the <see cref="T:System.Threading.Lazy{T}"/> instance. If used from multiple threads, the behavior of the <see cref="T:System.Threading.Lazy{T}"/> is undefined.
/// This mode should be used when a <see cref="T:System.Threading.Lazy{T}"/> is guaranteed to never be initialized from more than one thread simultaneously and high performance is crucial.
/// If valueFactory throws an exception when the <see cref="T:System.Threading.Lazy{T}"/> is initialized, the exception will be cached and returned on subsequent accesses to Value. Also, if valueFactory recursively
/// accesses Value on this <see cref="T:System.Threading.Lazy{T}"/> instance, a <see cref="T:System.InvalidOperationException"/> will be thrown.
/// </summary>
None,
/// <summary>
/// When multiple threads attempt to simultaneously initialize a <see cref="T:System.Threading.Lazy{T}"/> instance, this mode allows each thread to execute the
/// valueFactory but only the first thread to complete initialization will be allowed to set the final value of the <see cref="T:System.Threading.Lazy{T}"/>.
/// Once initialized successfully, any future calls to Value will return the cached result. If valueFactory throws an exception on any thread, that exception will be
/// propagated out of Value. If any thread executes valueFactory without throwing an exception and, therefore, successfully sets the value, that value will be returned on
/// subsequent accesses to Value from any thread. If no thread succeeds in setting the value, IsValueCreated will remain false and subsequent accesses to Value will result in
/// the valueFactory delegate re-executing. Also, if valueFactory recursively accesses Value on this <see cref="T:System.Threading.Lazy{T}"/> instance, an exception will NOT be thrown.
/// </summary>
PublicationOnly,
/// <summary>
/// This mode uses locks to ensure that only a single thread can initialize a <see cref="T:System.Threading.Lazy{T}"/> instance in a thread-safe manner. In general,
/// if this mode is used in conjunction with a <see cref="T:System.Threading.Lazy{T}"/> valueFactory delegate that uses locks internally, a deadlock can occur if not
/// handled carefully. If valueFactory throws an exception when the <see cref="T:System.Threading.Lazy{T}"/> is initialized, the exception will be cached and returned on
/// subsequent accesses to Value. Also, if valueFactory recursively accesses Value on this <see cref="T:System.Threading.Lazy{T}"/> instance, a <see cref="T:System.InvalidOperationException"/> will be thrown.
/// </summary>
ExecutionAndPublication
}
/// <summary>
/// Provides lazy initialization routines.
/// </summary>
/// <remarks>
/// These routines avoid needing to allocate a dedicated, lazy-initialization instance, instead using
/// references to ensure targets have been initialized as they are accessed.
/// </remarks>
[HostProtection(Synchronization = true, ExternalThreading = true)]
public static class LazyInitializer
{
/// <summary>
/// Initializes a target reference type with the type's default constructor if the target has not
/// already been initialized.
/// </summary>
/// <typeparam name="T">The refence type of the reference to be initialized.</typeparam>
/// <param name="target">A reference of type <typeparamref name="T"/> to initialize if it has not
/// already been initialized.</param>
/// <returns>The initialized reference of type <typeparamref name="T"/>.</returns>
/// <exception cref="T:System.MissingMemberException">Type <typeparamref name="T"/> does not have a default
/// constructor.</exception>
/// <exception cref="T:System.MemberAccessException">
/// Permissions to access the constructor of type <typeparamref name="T"/> were missing.
/// </exception>
/// <remarks>
/// <para>
/// This method may only be used on reference types. To ensure initialization of value
/// types, see other overloads of EnsureInitialized.
/// </para>
/// <para>
/// This method may be used concurrently by multiple threads to initialize <paramref name="target"/>.
/// In the event that multiple threads access this method concurrently, multiple instances of <typeparamref name="T"/>
/// may be created, but only one will be stored into <paramref name="target"/>. In such an occurrence, this method will not dispose of the
/// objects that were not stored. If such objects must be disposed, it is up to the caller to determine
/// if an object was not used and to then dispose of the object appropriately.
/// </para>
/// </remarks>
public static T EnsureInitialized<T>(ref T target) where T : class
{
// Fast path.
if (Volatile.Read<T>(ref target) != null)
{
return target;
}
return EnsureInitializedCore<T>(ref target, LazyHelpers<T>.s_activatorFactorySelector);
}
/// <summary>
/// Initializes a target reference type using the specified function if it has not already been
/// initialized.
/// </summary>
/// <typeparam name="T">The reference type of the reference to be initialized.</typeparam>
/// <param name="target">The reference of type <typeparamref name="T"/> to initialize if it has not
/// already been initialized.</param>
/// <param name="valueFactory">The <see cref="T:System.Func{T}"/> invoked to initialize the
/// reference.</param>
/// <returns>The initialized reference of type <typeparamref name="T"/>.</returns>
/// <exception cref="T:System.MissingMemberException">Type <typeparamref name="T"/> does not have a
/// default constructor.</exception>
/// <exception cref="T:System.InvalidOperationException"><paramref name="valueFactory"/> returned
/// null.</exception>
/// <remarks>
/// <para>
/// This method may only be used on reference types, and <paramref name="valueFactory"/> may
/// not return a null reference (Nothing in Visual Basic). To ensure initialization of value types or
/// to allow null reference types, see other overloads of EnsureInitialized.
/// </para>
/// <para>
/// This method may be used concurrently by multiple threads to initialize <paramref name="target"/>.
/// In the event that multiple threads access this method concurrently, multiple instances of <typeparamref name="T"/>
/// may be created, but only one will be stored into <paramref name="target"/>. In such an occurrence, this method will not dispose of the
/// objects that were not stored. If such objects must be disposed, it is up to the caller to determine
/// if an object was not used and to then dispose of the object appropriately.
/// </para>
/// </remarks>
public static T EnsureInitialized<T>(ref T target, Func<T> valueFactory) where T : class
{
// Fast path.
if (Volatile.Read<T>(ref target) != null)
{
return target;
}
return EnsureInitializedCore<T>(ref target, valueFactory);
}
/// <summary>
/// Initialize the target using the given delegate (slow path).
/// </summary>
/// <typeparam name="T">The reference type of the reference to be initialized.</typeparam>
/// <param name="target">The variable that need to be initialized</param>
/// <param name="valueFactory">The delegate that will be executed to initialize the target</param>
/// <returns>The initialized variable</returns>
private static T EnsureInitializedCore<T>(ref T target, Func<T> valueFactory) where T : class
{
T value = valueFactory();
if (value == null)
{
throw new InvalidOperationException(Environment.GetResourceString("Lazy_StaticInit_InvalidOperation"));
}
Interlocked.CompareExchange(ref target, value, null);
Contract.Assert(target != null);
return target;
}
/// <summary>
/// Initializes a target reference or value type with its default constructor if it has not already
/// been initialized.
/// </summary>
/// <typeparam name="T">The type of the reference to be initialized.</typeparam>
/// <param name="target">A reference or value of type <typeparamref name="T"/> to initialize if it
/// has not already been initialized.</param>
/// <param name="initialized">A reference to a boolean that determines whether the target has already
/// been initialized.</param>
/// <param name="syncLock">A reference to an object used as the mutually exclusive lock for initializing
/// <paramref name="target"/>. If <paramref name="syncLock"/> is null, a new object will be instantiated.</param>
/// <returns>The initialized value of type <typeparamref name="T"/>.</returns>
public static T EnsureInitialized<T>(ref T target, ref bool initialized, ref object syncLock)
{
// Fast path.
if (Volatile.Read(ref initialized))
{
return target;
}
return EnsureInitializedCore<T>(ref target, ref initialized, ref syncLock, LazyHelpers<T>.s_activatorFactorySelector);
}
/// <summary>
/// Initializes a target reference or value type with a specified function if it has not already been
/// initialized.
/// </summary>
/// <typeparam name="T">The type of the reference to be initialized.</typeparam>
/// <param name="target">A reference or value of type <typeparamref name="T"/> to initialize if it
/// has not already been initialized.</param>
/// <param name="initialized">A reference to a boolean that determines whether the target has already
/// been initialized.</param>
/// <param name="syncLock">A reference to an object used as the mutually exclusive lock for initializing
/// <paramref name="target"/>. If <paramref name="syncLock"/> is null, a new object will be instantiated.</param>
/// <param name="valueFactory">The <see cref="T:System.Func{T}"/> invoked to initialize the
/// reference or value.</param>
/// <returns>The initialized value of type <typeparamref name="T"/>.</returns>
public static T EnsureInitialized<T>(ref T target, ref bool initialized, ref object syncLock, Func<T> valueFactory)
{
// Fast path.
if (Volatile.Read(ref initialized))
{
return target;
}
return EnsureInitializedCore<T>(ref target, ref initialized, ref syncLock, valueFactory);
}
/// <summary>
/// Ensure the target is initialized and return the value (slow path). This overload permits nulls
/// and also works for value type targets. Uses the supplied function to create the value.
/// </summary>
/// <typeparam name="T">The type of target.</typeparam>
/// <param name="target">A reference to the target to be initialized.</param>
/// <param name="initialized">A reference to a location tracking whether the target has been initialized.</param>
/// <param name="syncLock">A reference to a location containing a mutual exclusive lock. If <paramref name="syncLock"/> is null,
/// a new object will be instantiated.</param>
/// <param name="valueFactory">
/// The <see cref="T:System.Func{T}"/> to invoke in order to produce the lazily-initialized value.
/// </param>
/// <returns>The initialized object.</returns>
private static T EnsureInitializedCore<T>(ref T target, ref bool initialized, ref object syncLock, Func<T> valueFactory)
{
// Lazily initialize the lock if necessary.
object slock = syncLock;
if (slock == null)
{
object newLock = new object();
slock = Interlocked.CompareExchange(ref syncLock, newLock, null);
if (slock == null)
{
slock = newLock;
}
}
// Now double check that initialization is still required.
lock (slock)
{
if (!Volatile.Read(ref initialized))
{
target = valueFactory();
Volatile.Write(ref initialized, true);
}
}
return target;
}
}
// Caches the activation selector function to avoid delegate allocations.
static class LazyHelpers<T>
{
internal static Func<T> s_activatorFactorySelector = new Func<T>(ActivatorFactorySelector);
private static T ActivatorFactorySelector()
{
try
{
return (T)Activator.CreateInstance(typeof(T));
}
catch (MissingMethodException)
{
throw new MissingMemberException(Environment.GetResourceString("Lazy_CreateValue_NoParameterlessCtorForT"));
}
}
}
}
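
A short usage sketch (not part of this commit) of the EnsureInitialized overloads defined above; the holder class, field names, and factory values are invented for the example.

using System;
using System.Threading;

internal sealed class LazyFieldsSketch
{
    private string[] _cache;              // reference type initialized via a factory
    private TimeSpan _interval;           // value type: needs the bool/lock overload
    private bool _intervalInitialized;
    private object _intervalLock;

    public string[] Cache
    {
        // Reference-type overload: the factory may run on several threads,
        // but only one result is published to the field.
        get { return LazyInitializer.EnsureInitialized(ref _cache, () => new string[16]); }
    }

    public TimeSpan Interval
    {
        // Value-type-friendly overload: a bool flag plus a lazily created lock object
        // guarantee the factory runs exactly once.
        get
        {
            return LazyInitializer.EnsureInitialized(ref _interval, ref _intervalInitialized,
                                                     ref _intervalLock, () => TimeSpan.FromSeconds(30));
        }
    }
}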

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,376 @@
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// SpinWait.cs
//
// <OWNER>[....]</OWNER>
//
// Central spin logic used across the entire code-base.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
using System;
using System.Runtime.ConstrainedExecution;
using System.Security.Permissions;
using System.Threading;
using System.Diagnostics.Contracts;
using System.Diagnostics.CodeAnalysis;
namespace System.Threading
{
// SpinWait is just a little value type that encapsulates some common spinning
// logic. It ensures we always yield on single-proc machines (instead of using busy
// waits), and that we work well on HT. It encapsulates a good mixture of spinning
// and real yielding. It's a value type so that various areas of the engine can use
// one by allocating it on the stack w/out unnecessary GC allocation overhead, e.g.:
//
// void f() {
// SpinWait wait = new SpinWait();
// while (!p) { wait.SpinOnce(); }
// ...
// }
//
// Internally it just maintains a counter that is used to decide when to yield, etc.
//
// A common usage is to spin before blocking. In those cases, the NextSpinWillYield
// property allows a user to decide to fall back to waiting once it returns true:
//
// void f() {
// SpinWait wait = new SpinWait();
// while (!p) {
// if (wait.NextSpinWillYield) { /* block! */ }
// else { wait.SpinOnce(); }
// }
// ...
// }
/// <summary>
/// Provides support for spin-based waiting.
/// </summary>
/// <remarks>
/// <para>
/// <see cref="SpinWait"/> encapsulates common spinning logic. On single-processor machines, yields are
/// always used instead of busy waits, and on computers with Intel® processors employing Hyper-Threading™
/// technology, it helps to prevent hardware thread starvation. SpinWait encapsulates a good mixture of
/// spinning and true yielding.
/// </para>
/// <para>
/// <see cref="SpinWait"/> is a value type, which means that low-level code can utilize SpinWait without
/// fear of unnecessary allocation overheads. SpinWait is not generally useful for ordinary applications.
/// In most cases, you should use the synchronization classes provided by the .NET Framework, such as
/// <see cref="System.Threading.Monitor"/>. For most purposes where spin waiting is required, however,
/// the <see cref="SpinWait"/> type should be preferred over the <see
/// cref="System.Threading.Thread.SpinWait"/> method.
/// </para>
/// <para>
/// While SpinWait is designed to be used in concurrent applications, it is not designed to be
/// used from multiple threads concurrently. SpinWait's members are not thread-safe. If multiple
/// threads must spin, each should use its own instance of SpinWait.
/// </para>
/// </remarks>
[HostProtection(Synchronization = true, ExternalThreading = true)]
public struct SpinWait
{
// These constants determine the frequency of yields versus spinning. The
// numbers may seem fairly arbitrary, but were derived with at least some
// thought in the design document. I fully expect they will need to change
// over time as we gain more experience with performance.
internal const int YIELD_THRESHOLD = 10; // When to switch over to a true yield.
internal const int SLEEP_0_EVERY_HOW_MANY_TIMES = 5; // After how many yields should we Sleep(0)?
internal const int SLEEP_1_EVERY_HOW_MANY_TIMES = 20; // After how many yields should we Sleep(1)?
// The number of times we've spun already.
private int m_count;
/// <summary>
/// Gets the number of times <see cref="SpinOnce"/> has been called on this instance.
/// </summary>
public int Count
{
get { return m_count; }
}
/// <summary>
/// Gets whether the next call to <see cref="SpinOnce"/> will yield the processor, triggering a
/// forced context switch.
/// </summary>
/// <value>Whether the next call to <see cref="SpinOnce"/> will yield the processor, triggering a
/// forced context switch.</value>
/// <remarks>
/// On a single-CPU machine, <see cref="SpinOnce"/> always yields the processor. On machines with
/// multiple CPUs, <see cref="SpinOnce"/> may yield after an unspecified number of calls.
/// </remarks>
public bool NextSpinWillYield
{
get { return m_count > YIELD_THRESHOLD || PlatformHelper.IsSingleProcessor; }
}
/// <summary>
/// Performs a single spin.
/// </summary>
/// <remarks>
/// This is typically called in a loop, and may change in behavior based on the number of times a
/// <see cref="SpinOnce"/> has been called thus far on this instance.
/// </remarks>
public void SpinOnce()
{
if (NextSpinWillYield)
{
//
// We must yield.
//
// We prefer to call Thread.Yield first, triggering a SwitchToThread. This
// unfortunately doesn't consider all runnable threads on all OS SKUs. In
// some cases, it may only consult the runnable threads whose ideal processor
// is the one currently executing code. Thus we occasionally issue a call to
// Sleep(0), which considers all runnable threads at equal priority. Even this
// is insufficient since we may be spin waiting for lower priority threads to
// execute; we therefore must call Sleep(1) once in a while too, which considers
// all runnable threads, regardless of ideal processor and priority, but may
// remove the thread from the scheduler's queue for 10+ms, if the system is
// configured to use the (default) coarse-grained system timer.
//
#if !FEATURE_PAL && !FEATURE_CORECLR // PAL doesn't support eventing, and we don't compile CDS providers for Coreclr
CdsSyncEtwBCLProvider.Log.SpinWait_NextSpinWillYield();
#endif
int yieldsSoFar = (m_count >= YIELD_THRESHOLD ? m_count - YIELD_THRESHOLD : m_count);
if ((yieldsSoFar % SLEEP_1_EVERY_HOW_MANY_TIMES) == (SLEEP_1_EVERY_HOW_MANY_TIMES - 1))
{
Thread.Sleep(1);
}
else if ((yieldsSoFar % SLEEP_0_EVERY_HOW_MANY_TIMES) == (SLEEP_0_EVERY_HOW_MANY_TIMES - 1))
{
Thread.Sleep(0);
}
else
{
#if PFX_LEGACY_3_5
Platform.Yield();
#else
Thread.Yield();
#endif
}
}
else
{
//
// Otherwise, we will spin.
//
// We do this using the CLR's SpinWait API, which is just a busy loop that
// issues YIELD/PAUSE instructions to ensure multi-threaded CPUs can react
// intelligently to avoid starving. (These are NOOPs on other CPUs.) We
// choose a number for the loop iteration count such that each successive
// call spins for longer, to reduce cache contention. We cap the total
// number of spins we are willing to tolerate to reduce delay to the caller,
// since we expect most callers will eventually block anyway.
//
Thread.SpinWait(4 << m_count);
}
// Finally, increment our spin counter.
m_count = (m_count == int.MaxValue ? YIELD_THRESHOLD : m_count + 1);
}
/// <summary>
/// Resets the spin counter.
/// </summary>
/// <remarks>
/// This makes <see cref="SpinOnce"/> and <see cref="NextSpinWillYield"/> behave as though no calls
/// to <see cref="SpinOnce"/> had been issued on this instance. If a <see cref="SpinWait"/> instance
/// is reused many times, it may be useful to reset it to avoid yielding too soon.
/// </remarks>
public void Reset()
{
m_count = 0;
}
#region Static Methods
/// <summary>
/// Spins until the specified condition is satisfied.
/// </summary>
/// <param name="condition">A delegate to be executed over and over until it returns true.</param>
/// <exception cref="ArgumentNullException">The <paramref name="condition"/> argument is null.</exception>
public static void SpinUntil(Func<bool> condition)
{
#if DEBUG
bool result =
#endif
SpinUntil(condition, Timeout.Infinite);
#if DEBUG
Contract.Assert(result);
#endif
}
/// <summary>
/// Spins until the specified condition is satisfied or until the specified timeout is expired.
/// </summary>
/// <param name="condition">A delegate to be executed over and over until it returns true.</param>
/// <param name="timeout">
/// A <see cref="TimeSpan"/> that represents the number of milliseconds to wait,
/// or a TimeSpan that represents -1 milliseconds to wait indefinitely.</param>
/// <returns>True if the condition is satisfied within the timeout; otherwise, false</returns>
/// <exception cref="ArgumentNullException">The <paramref name="condition"/> argument is null.</exception>
/// <exception cref="T:System.ArgumentOutOfRangeException"><paramref name="timeout"/> is a negative number
/// other than -1 milliseconds, which represents an infinite time-out -or- timeout is greater than
/// <see cref="System.Int32.MaxValue"/>.</exception>
public static bool SpinUntil(Func<bool> condition, TimeSpan timeout)
{
// Validate the timeout
Int64 totalMilliseconds = (Int64)timeout.TotalMilliseconds;
if (totalMilliseconds < -1 || totalMilliseconds > Int32.MaxValue)
{
throw new System.ArgumentOutOfRangeException(
"timeout", timeout, Environment.GetResourceString("SpinWait_SpinUntil_TimeoutWrong"));
}
// Call wait with the timeout milliseconds
return SpinUntil(condition, (int)timeout.TotalMilliseconds);
}
/// <summary>
/// Spins until the specified condition is satisfied or until the specified timeout is expired.
/// </summary>
/// <param name="condition">A delegate to be executed over and over until it returns true.</param>
/// <param name="millisecondsTimeout">The number of milliseconds to wait, or <see
/// cref="System.Threading.Timeout.Infinite"/> (-1) to wait indefinitely.</param>
/// <returns>True if the condition is satisfied within the timeout; otherwise, false</returns>
/// <exception cref="ArgumentNullException">The <paramref name="condition"/> argument is null.</exception>
/// <exception cref="T:System.ArgumentOutOfRangeException"><paramref name="millisecondsTimeout"/> is a
/// negative number other than -1, which represents an infinite time-out.</exception>
public static bool SpinUntil(Func<bool> condition, int millisecondsTimeout)
{
if (millisecondsTimeout < Timeout.Infinite)
{
throw new ArgumentOutOfRangeException(
"millisecondsTimeout", millisecondsTimeout, Environment.GetResourceString("SpinWait_SpinUntil_TimeoutWrong"));
}
if (condition == null)
{
throw new ArgumentNullException("condition", Environment.GetResourceString("SpinWait_SpinUntil_ArgumentNull"));
}
uint startTime = 0;
if (millisecondsTimeout != 0 && millisecondsTimeout != Timeout.Infinite)
{
startTime = TimeoutHelper.GetTime();
}
SpinWait spinner = new SpinWait();
while (!condition())
{
if (millisecondsTimeout == 0)
{
return false;
}
spinner.SpinOnce();
if (millisecondsTimeout != Timeout.Infinite && spinner.NextSpinWillYield)
{
if (millisecondsTimeout <= (TimeoutHelper.GetTime() - startTime))
{
return false;
}
}
}
return true;
}
#endregion
}
/// <summary>
/// A helper class to get the number of processors; it refreshes the processor count once per sampling interval.
/// </summary>
internal static class PlatformHelper
{
private const int PROCESSOR_COUNT_REFRESH_INTERVAL_MS = 30000; // How often to refresh the count, in milliseconds.
private static volatile int s_processorCount; // The last count seen.
private static volatile int s_lastProcessorCountRefreshTicks; // The last time we refreshed.
/// <summary>
/// Gets the number of available processors
/// </summary>
[SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
internal static int ProcessorCount
{
get
{
int now = Environment.TickCount;
int procCount = s_processorCount;
if (procCount == 0 || (now - s_lastProcessorCountRefreshTicks) >= PROCESSOR_COUNT_REFRESH_INTERVAL_MS)
{
s_processorCount = procCount = Environment.ProcessorCount;
s_lastProcessorCountRefreshTicks = now;
}
Contract.Assert(procCount > 0 && procCount <= 64,
"Processor count not within the expected range (1 - 64).");
return procCount;
}
}
/// <summary>
/// Gets whether the current machine has only a single processor.
/// </summary>
internal static bool IsSingleProcessor
{
get { return ProcessorCount == 1; }
}
}
/// <summary>
/// A helper class to capture a start time using Environment.TickCount as a time in milliseconds; it also updates a given timeout by subtracting
/// the elapsed time (current time minus start time) from the original timeout.
/// </summary>
internal static class TimeoutHelper
{
/// <summary>
/// Returns Environment.TickCount as a start time in milliseconds, cast to a uint. TickCount rolls over from positive to negative roughly every 25 days,
/// then takes about another 25 days to get back to positive; the uint cast ignores the sign and doubles the usable range to about 50 days.
/// </summary>
/// <returns>The current tick count in milliseconds as a uint.</returns>
public static uint GetTime()
{
return (uint)Environment.TickCount;
}
/// <summary>
/// Helper function to measure and update the elapsed time
/// </summary>
/// <param name="startTime"> The first time (in milliseconds) observed when the wait started</param>
/// <param name="originalWaitMillisecondsTimeout">The orginal wait timeoutout in milliseconds</param>
/// <returns>The new wait time in milliseconds, -1 if the time expired</returns>
public static int UpdateTimeOut(uint startTime, int originalWaitMillisecondsTimeout)
{
// This function must only be called when the timeout is not infinite
Contract.Assert(originalWaitMillisecondsTimeout != Timeout.Infinite);
uint elapsedMilliseconds = (GetTime() - startTime);
// The elapsed time is a uint and can therefore exceed int.MaxValue; treat that as an expired timeout
if (elapsedMilliseconds > int.MaxValue)
{
return 0;
}
// Subtract the elapsed time from the current wait time
int currentWaitTimeout = originalWaitMillisecondsTimeout - (int)elapsedMilliseconds;
if (currentWaitTimeout <= 0)
{
return 0;
}
return currentWaitTimeout;
}
}
}
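
An illustrative sketch (not part of this commit) of the spin-then-block pattern described in the comments above: spin with SpinWait while the condition stays false, and fall back to a kernel wait once NextSpinWillYield becomes true. The type, field, and event names are invented for the example; for the simple cases, SpinWait.SpinUntil above packages the same loop.

using System;
using System.Threading;

internal sealed class SpinThenBlockSketch
{
    private volatile bool _signaled;
    private readonly ManualResetEventSlim _event = new ManualResetEventSlim(false);

    public void Signal()
    {
        _signaled = true;
        _event.Set();
    }

    public void Wait()
    {
        SpinWait spinner = new SpinWait();
        while (!_signaled)
        {
            if (spinner.NextSpinWillYield)
            {
                // Spinning is about to start yielding the processor; block instead.
                _event.Wait();
                return;
            }
            spinner.SpinOnce();
        }
    }
}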


@@ -0,0 +1,188 @@
#if !FEATURE_PAL && !FEATURE_CORECLR
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
//
// <OWNER>AlfreMen</OWNER>
//
using System;
using System.Security;
using System.Diagnostics.Contracts;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.InteropServices.WindowsRuntime;
using WFD = Windows.Foundation.Diagnostics;
namespace System.Threading.Tasks
{
[FriendAccessAllowed]
internal enum CausalityTraceLevel
{
Required = WFD.CausalityTraceLevel.Required,
Important = WFD.CausalityTraceLevel.Important,
Verbose = WFD.CausalityTraceLevel.Verbose
}
[FriendAccessAllowed]
internal enum AsyncCausalityStatus
{
Canceled = WFD.AsyncCausalityStatus.Canceled,
Completed = WFD.AsyncCausalityStatus.Completed,
Error = WFD.AsyncCausalityStatus.Error,
Started = WFD.AsyncCausalityStatus.Started
}
internal enum CausalityRelation
{
AssignDelegate = WFD.CausalityRelation.AssignDelegate,
Join = WFD.CausalityRelation.Join,
Choice = WFD.CausalityRelation.Choice,
Cancel = WFD.CausalityRelation.Cancel,
Error = WFD.CausalityRelation.Error
}
internal enum CausalitySynchronousWork
{
CompletionNotification = WFD.CausalitySynchronousWork.CompletionNotification,
ProgressNotification = WFD.CausalitySynchronousWork.ProgressNotification,
Execution = WFD.CausalitySynchronousWork.Execution
}
[FriendAccessAllowed]
internal static class AsyncCausalityTracer
{
//s_PlatformId = {4B0171A6-F3D0-41A0-9B33-02550652B995}
private static readonly Guid s_PlatformId = new Guid(0x4B0171A6, 0xF3D0, 0x41A0, 0x9B, 0x33, 0x02, 0x55, 0x06, 0x52, 0xB9, 0x95);
//Indicates this information comes from the BCL Library
private const WFD.CausalitySource s_CausalitySource = WFD.CausalitySource.Library;
//Lazy initialize the actual factory
private static WFD.IAsyncCausalityTracerStatics s_TracerFactory;
//We receive the actual value for these as a callback
private static bool f_LoggingOn; //assumes false by default
[FriendAccessAllowed]
internal static bool LoggingOn
{
[FriendAccessAllowed]
get
{
if (!f_FactoryInitialized)
FactoryInitialized();
return f_LoggingOn;
}
}
private static bool f_FactoryInitialized; //assumes false by default
private static object _InitializationLock = new object();
//explicit cache
private static readonly Func<WFD.IAsyncCausalityTracerStatics> s_loadFactoryDelegate = LoadFactory;
[SecuritySafeCritical]
private static WFD.IAsyncCausalityTracerStatics LoadFactory()
{
if (!Environment.IsWinRTSupported) return null;
//COM Class Id
string ClassId = "Windows.Foundation.Diagnostics.AsyncCausalityTracer";
//COM Interface GUID {50850B26-267E-451B-A890-AB6A370245EE}
Guid guid = new Guid(0x50850B26, 0x267E, 0x451B, 0xA8, 0x90, 0XAB, 0x6A, 0x37, 0x02, 0x45, 0xEE);
Object factory = null;
WFD.IAsyncCausalityTracerStatics validFactory = null;
try
{
int hresult = Microsoft.Win32.UnsafeNativeMethods.RoGetActivationFactory(ClassId, ref guid, out factory);
if (hresult < 0 || factory == null) return null; //This prevents having an exception thrown in case IAsyncCausalityTracerStatics isn't registered.
validFactory = (WFD.IAsyncCausalityTracerStatics)factory;
EventRegistrationToken token = validFactory.add_TracingStatusChanged(new EventHandler<WFD.TracingStatusChangedEventArgs>(TracingStatusChangedHandler));
Contract.Assert(token != null, "EventRegistrationToken is null");
}
catch (Exception)
{
// Although catching generic Exception is not recommended, this file is one exception:
// we don't want to propagate any kind of exception to the user, because everything we
// are doing here depends on internal state.
return null;
}
return validFactory;
}
private static bool FactoryInitialized()
{
return (LazyInitializer.EnsureInitialized(ref s_TracerFactory, ref f_FactoryInitialized, ref _InitializationLock, s_loadFactoryDelegate) != null);
}
[SecuritySafeCritical]
private static void TracingStatusChangedHandler(Object sender, WFD.TracingStatusChangedEventArgs args)
{
f_LoggingOn = args.Enabled;
}
[FriendAccessAllowed]
internal static void TraceOperationCreation(CausalityTraceLevel traceLevel, int taskId, string operationName, ulong relatedContext)
{
if (LoggingOn)
{
s_TracerFactory.TraceOperationCreation((WFD.CausalityTraceLevel)traceLevel, s_CausalitySource, s_PlatformId, GetOperationId((uint)taskId), operationName, relatedContext);
}
}
[FriendAccessAllowed]
internal static void TraceOperationCompletion(CausalityTraceLevel traceLevel, int taskId, AsyncCausalityStatus status)
{
if (LoggingOn)
{
s_TracerFactory.TraceOperationCompletion((WFD.CausalityTraceLevel)traceLevel, s_CausalitySource, s_PlatformId, GetOperationId((uint)taskId), (WFD.AsyncCausalityStatus)status);
}
}
internal static void TraceOperationRelation(CausalityTraceLevel traceLevel, int taskId, CausalityRelation relation)
{
if (LoggingOn)
{
s_TracerFactory.TraceOperationRelation((WFD.CausalityTraceLevel)traceLevel, s_CausalitySource, s_PlatformId, GetOperationId((uint)taskId), (WFD.CausalityRelation)relation);
}
}
internal static void TraceSynchronousWorkStart(CausalityTraceLevel traceLevel, int taskId, CausalitySynchronousWork work)
{
if (LoggingOn)
{
s_TracerFactory.TraceSynchronousWorkStart((WFD.CausalityTraceLevel)traceLevel, s_CausalitySource, s_PlatformId, GetOperationId((uint)taskId), (WFD.CausalitySynchronousWork)work);
}
}
internal static void TraceSynchronousWorkCompletion(CausalityTraceLevel traceLevel, CausalitySynchronousWork work)
{
if (LoggingOn)
{
s_TracerFactory.TraceSynchronousWorkCompletion((WFD.CausalityTraceLevel)traceLevel, s_CausalitySource, (WFD.CausalitySynchronousWork)work);
}
}
private static ulong GetOperationId(uint taskId)
{
return (((ulong)AppDomain.CurrentDomain.Id) << 32) + taskId;
}
}
}
#endif
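
A small sketch (not part of this commit) of the 64-bit operation-id layout produced by GetOperationId above: the AppDomain id occupies the high 32 bits and the task id the low 32 bits, which keeps ids unique across AppDomains in one process. The helper names below are invented for the example.

using System;

internal static class OperationIdSketch
{
    // Compose mirrors GetOperationId: high 32 bits = AppDomain id, low 32 bits = task id.
    internal static ulong Compose(uint appDomainId, uint taskId)
    {
        return (((ulong)appDomainId) << 32) + taskId;
    }

    // Split recovers both halves from a composed operation id.
    internal static void Split(ulong operationId, out uint appDomainId, out uint taskId)
    {
        appDomainId = (uint)(operationId >> 32);
        taskId = (uint)(operationId & 0xFFFFFFFF);
    }
}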


@@ -0,0 +1,162 @@
///----------- ----------- ----------- ----------- ----------- -----------
/// <copyright file="BeginEndAwaitableAdapter.cs" company="Microsoft">
/// Copyright (c) Microsoft Corporation. All rights reserved.
/// </copyright>
///
/// <owner>[....]</owner>
/// <owner>gpaperin</owner>
///----------- ----------- ----------- ----------- ----------- -----------
using System;
using System.Diagnostics.Contracts;
using System.IO;
using System.Runtime.CompilerServices;
using System.Security;
using System.Threading;
using System.Threading.Tasks;
namespace System.Threading.Tasks {
/// <summary>
/// Provides an adapter to make Begin/End pairs awaitable.
/// In general, Task.Factory.FromAsync should be used for this purpose.
/// However, for cases where absolute minimal overhead is required, this type
/// may be used to make APM pairs awaitable while minimizing overhead.
/// (APM = Asynchronous Programming Model or the Begin/End pattern.)
/// </summary>
/// <remarks>
/// This instance may be reused repeatedly. However, it must only be used
/// by a single APM invocation at a time. Its state will automatically be reset
/// when the await completes.
/// </remarks>
/// <example>
/// Usage sample:
/// <code>
/// static async Task CopyStreamAsync(Stream source, Stream dest) {
///
/// BeginEndAwaitableAdapter adapter = new BeginEndAwaitableAdapter();
/// Byte[] buffer = new Byte[0x1000];
///
/// while (true) {
///
/// source.BeginRead(buffer, 0, buffer.Length, BeginEndAwaitableAdapter.Callback, adapter);
/// Int32 numRead = source.EndRead(await adapter);
/// if (numRead == 0)
/// break;
///
/// dest.BeginWrite(buffer, 0, numRead, BeginEndAwaitableAdapter.Callback, adapter);
/// dest.EndWrite(await adapter);
/// }
/// }
/// </code>
/// </example>
internal sealed class BeginEndAwaitableAdapter : ICriticalNotifyCompletion {
/// <summary>A sentinel marker used to communicate between OnCompleted and the APM callback
/// that the callback has already run, and thus OnCompleted needs to execute the callback.</summary>
private readonly static Action CALLBACK_RAN = () => { };
/// <summary>The IAsyncResult for the APM operation.</summary>
private IAsyncResult _asyncResult;
/// <summary>The continuation delegate provided to the awaiter.</summary>
private Action _continuation;
/// <summary>A callback to be passed as the AsyncCallback to an APM pair.
/// It expects that a BeginEndAwaitableAdapter instance was supplied to the APM Begin method as the object state.</summary>
public readonly static AsyncCallback Callback = (asyncResult) => {
Contract.Assert(asyncResult != null);
Contract.Assert(asyncResult.IsCompleted);
Contract.Assert(asyncResult.AsyncState is BeginEndAwaitableAdapter);
// Get the adapter object supplied as the "object state" to the Begin method
BeginEndAwaitableAdapter adapter = (BeginEndAwaitableAdapter) asyncResult.AsyncState;
// Store the IAsyncResult into it so that it's available to the awaiter
adapter._asyncResult = asyncResult;
// If the _continuation has already been set to the actual continuation by OnCompleted, then invoke the continuation.
// Set _continuation to the CALLBACK_RAN sentinel so that IsCompleted returns true and OnCompleted sees the sentinel
// and knows to invoke the callback.
// Due to some known incorrect implementations of IAsyncResult in the Framework where CompletedSynchronously is lazily
// set to true if it is first invoked after IsCompleted is true, we cannot rely here on CompletedSynchronously for
// synchronization between the caller and the callback, and thus do not use CompletedSynchronously at all.
Action continuation = Interlocked.Exchange(ref adapter._continuation, CALLBACK_RAN);
if (continuation != null) {
Contract.Assert(continuation != CALLBACK_RAN);
continuation();
}
};
/// <summary>Gets an awaiter.</summary>
/// <returns>Returns itself as the awaiter.</returns>
public BeginEndAwaitableAdapter GetAwaiter() {
return this;
}
/// <summary>Gets whether the awaited APM operation completed.</summary>
public bool IsCompleted {
get {
// We are completed if the callback was called and it set the continuation to the CALLBACK_RAN sentinel.
// If the operation completes asynchronously, there's still a chance we'll see CALLBACK_RAN here, in which
// case we're still good to keep running synchronously.
return (_continuation == CALLBACK_RAN);
}
}
/// <summary>Schedules the continuation to run when the operation completes.</summary>
/// <param name="continuation">The continuation.</param>
[SecurityCritical]
public void UnsafeOnCompleted(Action continuation) {
Contract.Assert(continuation != null);
OnCompleted(continuation);
}
/// <summary>Schedules the continuation to run when the operation completes.</summary>
/// <param name="continuation">The continuation.</param>
public void OnCompleted(Action continuation) {
Contract.Assert(continuation != null);
// If the continuation field is null, then set it to be the target continuation
// so that when the operation completes, it'll invoke the continuation. If it's non-null,
// it was already set to the CALLBACK_RAN sentinel by the Callback, in which case we hit a very rare race
// where the operation didn't complete synchronously but completed asynchronously between our
// calls to IsCompleted and OnCompleted... in that case, just schedule a task to run the continuation.
if (_continuation == CALLBACK_RAN
|| Interlocked.CompareExchange(ref _continuation, continuation, null) == CALLBACK_RAN) {
Task.Run(continuation); // must run async at this point, or else we'd risk stack diving
}
}
/// <summary>Gets the IAsyncResult for the APM operation after the operation completes, and then resets the adapter.</summary>
/// <returns>The IAsyncResult for the operation.</returns>
public IAsyncResult GetResult() {
Contract.Assert(_asyncResult != null && _asyncResult.IsCompleted);
// Get the IAsyncResult
IAsyncResult result = _asyncResult;
// Reset the adapter
_asyncResult = null;
_continuation = null;
// Return the result
return result;
}
} // class BeginEndAwaitableAdapter
} // namespace
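
For comparison, here is a sketch (not part of this commit) of the same copy loop written with Task.Factory.FromAsync, the general-purpose alternative mentioned in the remarks above; it allocates a Task per Begin/End pair, which is the overhead the adapter is designed to avoid. The class name is invented for the example.

using System;
using System.IO;
using System.Threading.Tasks;

internal static class CopyWithFromAsyncSketch
{
    internal static async Task CopyStreamAsync(Stream source, Stream dest)
    {
        byte[] buffer = new byte[0x1000];
        while (true)
        {
            // Wrap BeginRead/EndRead in a Task<int> and await it.
            int numRead = await Task<int>.Factory.FromAsync(source.BeginRead, source.EndRead,
                                                            buffer, 0, buffer.Length, null);
            if (numRead == 0)
                break;

            // Wrap BeginWrite/EndWrite in a Task and await it.
            await Task.Factory.FromAsync(dest.BeginWrite, dest.EndWrite,
                                         buffer, 0, numRead, null);
        }
    }
}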

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1 @@
f17b2462e4086665f52411414a2a86cc9103e256


@@ -0,0 +1,103 @@
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
//
// <OWNER>AlfreMen</OWNER>
//
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.InteropServices.WindowsRuntime;
// Windows.Foundation.Diagnostics cannot be referenced from managed code because
// they're hidden by the metadata adapter. We redeclare the interfaces manually
// to be able to talk to native WinRT objects.
namespace Windows.Foundation.Diagnostics
{
[ComImport]
[Guid("50850B26-267E-451B-A890-AB6A370245EE")]
[WindowsRuntimeImport]
internal interface IAsyncCausalityTracerStatics
{
void TraceOperationCreation(CausalityTraceLevel traceLevel, CausalitySource source, Guid platformId, ulong operationId, string operationName, ulong relatedContext);
void TraceOperationCompletion(CausalityTraceLevel traceLevel, CausalitySource source, Guid platformId, ulong operationId, AsyncCausalityStatus status);
void TraceOperationRelation(CausalityTraceLevel traceLevel, CausalitySource source, Guid platformId, ulong operationId, CausalityRelation relation);
void TraceSynchronousWorkStart(CausalityTraceLevel traceLevel, CausalitySource source, Guid platformId, ulong operationId, CausalitySynchronousWork work);
void TraceSynchronousWorkCompletion(CausalityTraceLevel traceLevel, CausalitySource source, CausalitySynchronousWork work);
//These next 2 functions could've been represented as an event except that the EventRegistrationToken wasn't being propagated to WinRT
EventRegistrationToken add_TracingStatusChanged(System.EventHandler<TracingStatusChangedEventArgs> eventHandler);
void remove_TracingStatusChanged(EventRegistrationToken token);
}
[ComImport]
[Guid("410B7711-FF3B-477F-9C9A-D2EFDA302DC3")]
[WindowsRuntimeImport]
internal interface ITracingStatusChangedEventArgs
{
bool Enabled { get; }
CausalityTraceLevel TraceLevel { get; }
}
// We need this dummy class to satisfy a QI when the TracingStatusChangedHandler is
// called by the WinRT API after being stored in a GIT cookie. This usually
// happens when calling a managed WinMD which accesses this feature.
[ComImport]
[Guid("410B7711-FF3B-477F-9C9A-D2EFDA302DC3")]
[WindowsRuntimeImport]
internal sealed class TracingStatusChangedEventArgs : ITracingStatusChangedEventArgs
{
public extern bool Enabled
{
[MethodImpl(MethodImplOptions.InternalCall)]
get;
}
public extern CausalityTraceLevel TraceLevel
{
[MethodImpl(MethodImplOptions.InternalCall)]
get;
}
}
internal enum CausalityRelation
{
AssignDelegate,
Join,
Choice,
Cancel,
Error
}
internal enum CausalitySource
{
Application,
Library,
System
}
internal enum CausalitySynchronousWork
{
CompletionNotification,
ProgressNotification,
Execution
}
internal enum CausalityTraceLevel
{
Required,
Important,
Verbose
}
internal enum AsyncCausalityStatus
{
Canceled = 2,
Completed = 1,
Error = 3,
Started = 0
}
}


@@ -0,0 +1 @@
612ac932e336859218839634e39d03f00ec318de

File diff suppressed because it is too large

@@ -0,0 +1,281 @@
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// ParallelRangeManager.cs
//
// <OWNER>[....]</OWNER>
//
// Implements the algorithm for distributing loop indices to parallel loop workers
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
using System;
using System.Threading;
using System.Diagnostics.Contracts;
#pragma warning disable 0420
namespace System.Threading.Tasks
{
/// <summary>
/// Represents an index range
/// </summary>
internal struct IndexRange
{
// the From and To values for this range. These do not change.
internal long m_nFromInclusive;
internal long m_nToExclusive;
// The shared index, stored as the offset from nFromInclusive. Using an offset rather than the actual
// value saves us from overflows that can happen due to multiple workers racing to increment this.
// All updates to this field need to be interlocked.
internal volatile Shared<long> m_nSharedCurrentIndexOffset;
// to be set to 1 by the worker that finishes this range. It's OK to do a non-interlocked write here.
internal int m_bRangeFinished;
}
/// <summary>
/// The RangeWorker struct wraps the state needed by a task that services the parallel loop
/// </summary>
internal struct RangeWorker
{
// reference to the IndexRange array allocated by the range manager
internal readonly IndexRange[] m_indexRanges;
// index of the current index range that this worker is grabbing chunks from
internal int m_nCurrentIndexRange;
// the step for this loop. Duplicated here for quick access (rather than jumping to rangemanager)
internal long m_nStep;
// increment value is the current amount that this worker will use
// to increment the shared index of the range it's working on
internal long m_nIncrementValue;
// the increment value is doubled each time this worker finds work, and is capped at this value
internal readonly long m_nMaxIncrementValue;
/// <summary>
/// Initializes a RangeWorker struct
/// </summary>
internal RangeWorker(IndexRange[] ranges, int nInitialRange, long nStep)
{
m_indexRanges = ranges;
m_nCurrentIndexRange = nInitialRange;
m_nStep = nStep;
m_nIncrementValue = nStep;
m_nMaxIncrementValue = Parallel.DEFAULT_LOOP_STRIDE * nStep;
}
/// <summary>
/// Implements the core work search algorithm that will be used for this range worker.
/// </summary>
///
/// Usage pattern is:
/// 1) the thread associated with this rangeworker calls FindNewWork
/// 2) if we return true, the worker uses the nFromInclusiveLocal and nToExclusiveLocal values
/// to execute the sequential loop
/// 3) if we return false it means there is no more work left. It's time to quit.
///
internal bool FindNewWork(out long nFromInclusiveLocal, out long nToExclusiveLocal)
{
// since we iterate over index ranges circularly, we will use the
// count of visited ranges as our exit condition
int numIndexRangesToVisit = m_indexRanges.Length;
do
{
// local snap to save array access bounds checks in places where we only read fields
IndexRange currentRange = m_indexRanges[m_nCurrentIndexRange];
if (currentRange.m_bRangeFinished == 0)
{
if (m_indexRanges[m_nCurrentIndexRange].m_nSharedCurrentIndexOffset == null)
{
Interlocked.CompareExchange(ref m_indexRanges[m_nCurrentIndexRange].m_nSharedCurrentIndexOffset, new Shared<long>(0), null);
}
// this access needs to be on the array slot
long nMyOffset = Interlocked.Add(ref m_indexRanges[m_nCurrentIndexRange].m_nSharedCurrentIndexOffset.Value,
m_nIncrementValue) - m_nIncrementValue;
if (currentRange.m_nToExclusive - currentRange.m_nFromInclusive > nMyOffset)
{
// we found work
nFromInclusiveLocal = currentRange.m_nFromInclusive + nMyOffset;
nToExclusiveLocal = nFromInclusiveLocal + m_nIncrementValue;
// Check for going past end of range, or wrapping
if ( (nToExclusiveLocal > currentRange.m_nToExclusive) || (nToExclusiveLocal < currentRange.m_nFromInclusive) )
{
nToExclusiveLocal = currentRange.m_nToExclusive;
}
// We will double our unit of increment until it reaches the maximum.
if (m_nIncrementValue < m_nMaxIncrementValue)
{
m_nIncrementValue *= 2;
if (m_nIncrementValue > m_nMaxIncrementValue)
{
m_nIncrementValue = m_nMaxIncrementValue;
}
}
return true;
}
else
{
// this index range is completed, mark it so that others can skip it quickly
Interlocked.Exchange(ref m_indexRanges[m_nCurrentIndexRange].m_bRangeFinished, 1);
}
}
// move on to the next index range, in circular order.
m_nCurrentIndexRange = (m_nCurrentIndexRange + 1) % m_indexRanges.Length;
numIndexRangesToVisit--;
} while (numIndexRangesToVisit > 0);
// we've visited all index ranges possible => there's no work remaining
nFromInclusiveLocal = 0;
nToExclusiveLocal = 0;
return false;
}
/// <summary>
/// 32 bit integer version of FindNewWork. Assumes the ranges were initialized with 32 bit values.
/// </summary>
internal bool FindNewWork32(out int nFromInclusiveLocal32, out int nToExclusiveLocal32)
{
long nFromInclusiveLocal;
long nToExclusiveLocal;
bool bRetVal = FindNewWork(out nFromInclusiveLocal, out nToExclusiveLocal);
Contract.Assert((nFromInclusiveLocal <= Int32.MaxValue) && (nFromInclusiveLocal >= Int32.MinValue) &&
(nToExclusiveLocal <= Int32.MaxValue) && (nToExclusiveLocal >= Int32.MinValue));
// convert to 32 bit before returning
nFromInclusiveLocal32 = (int)nFromInclusiveLocal;
nToExclusiveLocal32 = (int)nToExclusiveLocal;
return bRetVal;
}
}
/// <summary>
/// Represents the entire loop operation, keeping track of workers and ranges.
/// </summary>
///
/// The usage pattern is:
/// 1) The Parallel loop entry function (ForWorker) creates an instance of this class
/// 2) Every thread joining to service the parallel loop calls RegisterWorker to grab a
/// RangeWorker struct to wrap the state it will need to find and execute work,
/// and they keep interacting with that struct until the end of the loop
internal class RangeManager
{
internal readonly IndexRange[] m_indexRanges;
internal int m_nCurrentIndexRangeToAssign;
internal long m_nStep;
/// <summary>
/// Initializes a RangeManager with the given loop parameters, and the desired number of outer ranges
/// </summary>
internal RangeManager(long nFromInclusive, long nToExclusive, long nStep, int nNumExpectedWorkers)
{
m_nCurrentIndexRangeToAssign = 0;
m_nStep = nStep;
// Our signed math breaks down w/ nNumExpectedWorkers == 1. So change it to 2.
if (nNumExpectedWorkers == 1)
nNumExpectedWorkers = 2;
//
// calculate the size of each index range
//
ulong uSpan = (ulong)(nToExclusive - nFromInclusive);
ulong uRangeSize = uSpan / (ulong) nNumExpectedWorkers; // rough estimate first
uRangeSize -= uRangeSize % (ulong) nStep; // snap to multiples of nStep
// otherwise index range transitions will derail us from nStep
if (uRangeSize == 0)
{
uRangeSize = (ulong) nStep;
}
//
// find the actual number of index ranges we will need
//
Contract.Assert((uSpan / uRangeSize) < Int32.MaxValue);
int nNumRanges = (int)(uSpan / uRangeSize);
if (uSpan % uRangeSize != 0)
{
nNumRanges++;
}
// Convert to signed so the rest of the logic works.
// Should be fine so long as uRangeSize < Int64.MaxValue, which we guaranteed by setting #workers >= 2.
long nRangeSize = (long)uRangeSize;
// allocate the array of index ranges
m_indexRanges = new IndexRange[nNumRanges];
long nCurrentIndex = nFromInclusive;
for (int i = 0; i < nNumRanges; i++)
{
// the fromInclusive of the new index range is always on nCurrentIndex
m_indexRanges[i].m_nFromInclusive = nCurrentIndex;
m_indexRanges[i].m_nSharedCurrentIndexOffset = null;
m_indexRanges[i].m_bRangeFinished = 0;
// now increment it to find the toExclusive value for our range
nCurrentIndex += nRangeSize;
// detect integer overflow or range overage and snap to nToExclusive
if (nCurrentIndex < nCurrentIndex - nRangeSize ||
nCurrentIndex > nToExclusive)
{
// this should only happen at the last index
Contract.Assert(i == nNumRanges - 1);
nCurrentIndex = nToExclusive;
}
// now that the end point of the new range is calculated, assign it.
m_indexRanges[i].m_nToExclusive = nCurrentIndex;
}
}
/// <summary>
/// The function that needs to be called by each new worker thread servicing the parallel loop
/// in order to get a RangeWorker struct that wraps the state for finding and executing indices
/// </summary>
internal RangeWorker RegisterNewWorker()
{
Contract.Assert(m_indexRanges != null && m_indexRanges.Length != 0);
int nInitialRange = (Interlocked.Increment(ref m_nCurrentIndexRangeToAssign) - 1) % m_indexRanges.Length;
return new RangeWorker(m_indexRanges, nInitialRange, m_nStep);
}
}
}
#pragma warning restore 0420
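
An illustrative sketch (not part of this commit) of the worker loop described in the usage comments above; it assumes internal access to RangeManager/RangeWorker (real callers live inside Parallel's ForWorker) and, for simplicity, a loop step of 1. The RunWorker helper and the body delegate are invented for the example.

using System;

namespace System.Threading.Tasks
{
    internal static class RangeWorkerLoopSketch
    {
        internal static void RunWorker(RangeManager rangeManager, Action<long> body)
        {
            // Each thread servicing the loop registers once to get its own RangeWorker state.
            RangeWorker worker = rangeManager.RegisterNewWorker();

            long nFromInclusiveLocal, nToExclusiveLocal;

            // Keep asking for chunks until FindNewWork reports that every range is finished.
            while (worker.FindNewWork(out nFromInclusiveLocal, out nToExclusiveLocal))
            {
                for (long i = nFromInclusiveLocal; i < nToExclusiveLocal; i++)
                {
                    body(i);    // the sequential loop body for this chunk
                }
            }
        }
    }
}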

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff