Imported Upstream version 5.10.0.47
Former-commit-id: d0813289fa2d35e1f8ed77530acb4fb1df441bc0
parent 88ff76fe28
commit e46a49ecf1
@@ -16,6 +16,7 @@
 using System;
 using System.Threading;
 using System.Diagnostics.Contracts;
+using System.Runtime.InteropServices;
 
 #pragma warning disable 0420
 
@@ -24,6 +25,7 @@ namespace System.Threading.Tasks
     /// <summary>
     /// Represents an index range
     /// </summary>
+    [StructLayout(LayoutKind.Auto)]
     internal struct IndexRange
     {
         // the From and To values for this range. These do not change.
@@ -32,7 +34,11 @@ namespace System.Threading.Tasks
 
         // The shared index, stored as the offset from nFromInclusive. Using an offset rather than the actual
         // value saves us from overflows that can happen due to multiple workers racing to increment this.
-        // All updates to this field need to be interlocked.
+        // All updates to this field need to be interlocked. To avoid split interlockeds across cache lines
+        // in 32-bit processes, when the range fits in a 32-bit value we prefer to use a 32-bit field and
+        // just use the first 32 bits of the long. And to minimize false sharing, each value is stored in
+        // its own heap-allocated object, lazily allocated by the thread using that range, minimizing the
+        // chances it'll be near the objects from other threads.
         internal volatile Shared<long> m_nSharedCurrentIndexOffset;
 
         // to be set to 1 by the worker that finishes this range. It's OK to do a non-interlocked write here.
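The comment above describes two separate tricks: a per-range heap-allocated holder to dodge false sharing, and a 32-bit interlocked on the low word of the long in 32-bit processes. Below is a minimal sketch of both, outside the diff. The Shared<T> shape follows the field type above; OffsetClaim.Add is a hypothetical helper; a little-endian platform is assumed, so the first 32 bits of the long are its low word.

    using System;
    using System.Threading;

    internal sealed class Shared<T>
    {
        // One heap-allocated holder per range, lazily created by the owning
        // thread, so hot counters are unlikely to share a cache line.
        internal T Value;
        internal Shared(T value) { Value = value; }
    }

    internal static class OffsetClaim
    {
        // Claims 'increment' items and returns the previous offset. In a
        // 32-bit process, a 64-bit interlocked on a long that straddles a
        // cache line can be very expensive, so when the range fits in 32 bits
        // we update only the low word with a 32-bit Interlocked.Add.
        internal static unsafe long Add(Shared<long> shared, long increment, bool use32Bit)
        {
            if (IntPtr.Size == 4 && use32Bit)
            {
                fixed (long* indexPtr = &shared.Value)
                {
                    // little-endian assumption: (int*)indexPtr is the low word
                    return Interlocked.Add(ref *(int*)indexPtr, (int)increment) - increment;
                }
            }
            return Interlocked.Add(ref shared.Value, increment) - increment;
        }
    }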
@@ -43,6 +49,7 @@ namespace System.Threading.Tasks
     /// <summary>
     /// The RangeWorker struct wraps the state needed by a task that services the parallel loop
     /// </summary>
+    [StructLayout(LayoutKind.Auto)]
     internal struct RangeWorker
     {
         // reference to the IndexRange array allocated by the range manager
@@ -61,13 +68,17 @@ namespace System.Threading.Tasks
         // the increment value is doubled each time this worker finds work, and is capped at this value
         internal readonly long m_nMaxIncrementValue;
 
+        // whether to use 32-bits or 64-bits of current index in each range
+        internal readonly bool _use32BitCurrentIndex;
+
         /// <summary>
         /// Initializes a RangeWorker struct
         /// </summary>
-        internal RangeWorker(IndexRange[] ranges, int nInitialRange, long nStep)
+        internal RangeWorker(IndexRange[] ranges, int nInitialRange, long nStep, bool use32BitCurrentIndex)
         {
             m_indexRanges = ranges;
             m_nCurrentIndexRange = nInitialRange;
+            _use32BitCurrentIndex = use32BitCurrentIndex;
             m_nStep = nStep;
 
             m_nIncrementValue = nStep;
@@ -104,8 +115,24 @@ namespace System.Threading.Tasks
             }
 
             // this access needs to be on the array slot
-            long nMyOffset = Interlocked.Add(ref m_indexRanges[m_nCurrentIndexRange].m_nSharedCurrentIndexOffset.Value,
-                                             m_nIncrementValue) - m_nIncrementValue;
+            long nMyOffset;
+            if (IntPtr.Size == 4 && _use32BitCurrentIndex)
+            {
+                // In 32-bit processes, we prefer to use 32-bit interlocked operations, to avoid the possibility of doing
+                // a 64-bit interlocked when the target value crosses a cache line, as that can be super expensive.
+                // We use the first 32 bits of the Int64 index in such cases.
+                unsafe
+                {
+                    fixed (long* indexPtr = &m_indexRanges[m_nCurrentIndexRange].m_nSharedCurrentIndexOffset.Value)
+                    {
+                        nMyOffset = Interlocked.Add(ref *(int*)indexPtr, (int)m_nIncrementValue) - m_nIncrementValue;
+                    }
+                }
+            }
+            else
+            {
+                nMyOffset = Interlocked.Add(ref m_indexRanges[m_nCurrentIndexRange].m_nSharedCurrentIndexOffset.Value, m_nIncrementValue) - m_nIncrementValue;
+            }
 
             if (currentRange.m_nToExclusive - currentRange.m_nFromInclusive > nMyOffset)
             {
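The claim step above boils down to: atomically reserve the next block of offsets, then check whether the block still lands inside the range. A standalone toy version follows (all names are illustrative; fixed increment, no range stealing or increment doubling):

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    class ChunkClaimDemo
    {
        const long From = 0, To = 1000, Increment = 64;
        static long s_sharedOffset; // offset from From, like m_nSharedCurrentIndexOffset

        static void Main()
        {
            long processed = 0;
            Parallel.For(0, 4, _ =>
            {
                while (true)
                {
                    // reserve the next block; subtracting recovers "my" old offset
                    long myOffset = Interlocked.Add(ref s_sharedOffset, Increment) - Increment;
                    if (To - From <= myOffset)
                        break; // range exhausted: same test as the if above
                    long start = From + myOffset;
                    long end = Math.Min(To, start + Increment); // final block may be short
                    Interlocked.Add(ref processed, end - start); // stand-in for the loop body
                }
            });
            Console.WriteLine(processed); // prints 1000: every index claimed exactly once
        }
    }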
@@ -187,6 +214,7 @@ namespace System.Threading.Tasks
     internal class RangeManager
     {
         internal readonly IndexRange[] m_indexRanges;
+        internal readonly bool _use32BitCurrentIndex;
 
         internal int m_nCurrentIndexRangeToAssign;
         internal long m_nStep;
@@ -234,6 +262,7 @@ namespace System.Threading.Tasks
             // Convert to signed so the rest of the logic works.
             // Should be fine so long as uRangeSize < Int64.MaxValue, which we guaranteed by setting #workers >= 2.
             long nRangeSize = (long)uRangeSize;
+            _use32BitCurrentIndex = IntPtr.Size == 4 && nRangeSize <= int.MaxValue;
 
             // allocate the array of index ranges
             m_indexRanges = new IndexRange[nNumRanges];
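The new flag assignment reads as two conditions that must both hold: the process is 32-bit (so a 64-bit interlocked may straddle a cache line) and every per-range offset fits in an int (so truncating to 32 bits is safe). A hypothetical restatement:

    using System;

    static class IndexWidth
    {
        internal static bool Use32BitCurrentIndex(long nRangeSize)
        {
            return IntPtr.Size == 4            // 32-bit process: 64-bit interlockeds may split across cache lines
                && nRangeSize <= int.MaxValue; // every offset within one range fits in 32 bits
        }
    }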
@@ -274,7 +303,7 @@ namespace System.Threading.Tasks
 
             int nInitialRange = (Interlocked.Increment(ref m_nCurrentIndexRangeToAssign) - 1) % m_indexRanges.Length;
 
-            return new RangeWorker(m_indexRanges, nInitialRange, m_nStep);
+            return new RangeWorker(m_indexRanges, nInitialRange, m_nStep, _use32BitCurrentIndex);
         }
     }
 }
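The hand-out above is a lock-free round-robin: Interlocked.Increment gives every caller a unique ticket, and the modulo wraps it onto a range slot, so concurrent workers start on different ranges without taking a lock. A standalone sketch (names are illustrative):

    using System.Threading;

    static class RoundRobin
    {
        static int s_next; // plays the role of m_nCurrentIndexRangeToAssign

        internal static int NextSlot(int slotCount)
        {
            // Interlocked.Increment returns the incremented value, hence the -1
            return (Interlocked.Increment(ref s_next) - 1) % slotCount;
        }
    }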
@@ -1 +1 @@
-0ca6568e9fbbe9f32c8048eca0973d09bcb98996
+8638327150a075ac05d1173af19b1c7d862a30e3
@@ -1 +1 @@
-b15df277ab4b1bde5025f55ca2d78d2e066722e2
+de78cb0b18239aafe6c7a8caab4112989cc0690c
@@ -1507,7 +1507,10 @@ namespace System.Threading
             throw new NotSupportedException ("Timeout is too big. Maximum is Int32.MaxValue");
 
             RegisteredWaitHandle waiter = new RegisteredWaitHandle (waitObject, callBack, state, new TimeSpan (0, 0, 0, 0, (int) millisecondsTimeOutInterval), executeOnlyOnce);
-            QueueUserWorkItem (new WaitCallback (waiter.Wait), null);
+            if (compressStack)
+                QueueUserWorkItem (new WaitCallback (waiter.Wait), null);
+            else
+                UnsafeQueueUserWorkItem (new WaitCallback (waiter.Wait), null);
 
             return waiter;
 #endif
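The fix above routes the wait registration through UnsafeQueueUserWorkItem when compressStack is false; the practical difference is that the unsafe variant does not capture and flow the caller's ExecutionContext. A standalone demo of that distinction (not the Mono source), observable via AsyncLocal<T>:

    using System;
    using System.Threading;

    class ContextFlowDemo
    {
        static readonly AsyncLocal<string> Ambient = new AsyncLocal<string>();

        static void Main()
        {
            Ambient.Value = "caller-context";
            var done = new CountdownEvent(2);

            ThreadPool.QueueUserWorkItem(_ =>
            {
                // ExecutionContext flowed: the AsyncLocal value is visible here
                Console.WriteLine("safe:   " + (Ambient.Value ?? "<null>"));
                done.Signal();
            });

            ThreadPool.UnsafeQueueUserWorkItem(_ =>
            {
                // No context capture: the AsyncLocal value does not flow
                Console.WriteLine("unsafe: " + (Ambient.Value ?? "<null>"));
                done.Signal();
            }, null);

            done.Wait();
        }
    }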