
Retire spinlocks

Use Mutex instead.

This is in preparation for merging with the master branch,
where we still don't have spinlocks.

Eventually spinlocks will be re-added in a future
patch, once c++11 has been merged.

No functional change.
c++11
Marco Costalba 2015-03-10 21:50:45 +01:00
parent 6027652773
commit c0a1676a65
3 changed files with 27 additions and 39 deletions
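Note: the thread.h hunk below shows that Spinlock had already been reduced to a thin wrapper around Mutex (acquire/release simply forwarding to lock/unlock), so retiring it removes an indirection rather than changing locking behaviour. For reference, a minimal C++11 spin lock of the kind that could later be re-added might look like the sketch below. This is illustrative only and not part of this patch; the class name is hypothetical.

// Illustrative sketch only, not from this patch: a typical C++11 spin lock
// built on std::atomic_flag. Unlike Mutex, it busy-waits instead of sleeping.
#include <atomic>

class SpinlockSketch {
  std::atomic_flag flag = ATOMIC_FLAG_INIT;   // cleared == unlocked
public:
  void acquire() { while (flag.test_and_set(std::memory_order_acquire)) {} } // spin until free
  void release() { flag.clear(std::memory_order_release); }
};

A Mutex puts a waiting thread to sleep under contention, whereas a spin lock keeps it busy-waiting, which can be cheaper for the very short critical sections in the split-point code; that is why spinlocks are expected to return once the c++11 branch is merged.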


@@ -765,7 +765,7 @@ moves_loop: // When in check and at SpNode search starts from here
continue;
moveCount = ++splitPoint->moveCount;
- splitPoint->spinlock.release();
+ splitPoint->mutex.unlock();
}
else
++moveCount;
@@ -834,7 +834,7 @@ moves_loop: // When in check and at SpNode search starts from here
&& moveCount >= FutilityMoveCounts[improving][depth])
{
if (SpNode)
- splitPoint->spinlock.acquire();
+ splitPoint->mutex.lock();
continue;
}
@@ -853,7 +853,7 @@ moves_loop: // When in check and at SpNode search starts from here
if (SpNode)
{
- splitPoint->spinlock.acquire();
+ splitPoint->mutex.lock();
if (bestValue > splitPoint->bestValue)
splitPoint->bestValue = bestValue;
}
@@ -865,7 +865,7 @@ moves_loop: // When in check and at SpNode search starts from here
if (predictedDepth < 4 * ONE_PLY && pos.see_sign(move) < VALUE_ZERO)
{
if (SpNode)
- splitPoint->spinlock.acquire();
+ splitPoint->mutex.lock();
continue;
}
@@ -965,7 +965,7 @@ moves_loop: // When in check and at SpNode search starts from here
// Step 18. Check for new best move
if (SpNode)
{
- splitPoint->spinlock.acquire();
+ splitPoint->mutex.lock();
bestValue = splitPoint->bestValue;
alpha = splitPoint->alpha;
}
@@ -1526,13 +1526,13 @@ void Thread::idle_loop() {
// If this thread has been assigned work, launch a search
while (searching)
{
- Threads.spinlock.acquire();
+ Threads.mutex.lock();
assert(activeSplitPoint);
SplitPoint* sp = activeSplitPoint;
- Threads.spinlock.release();
+ Threads.mutex.unlock();
Stack stack[MAX_PLY+4], *ss = stack+2; // To allow referencing (ss-2) and (ss+2)
Position pos(*sp->pos, this);
@@ -1540,7 +1540,7 @@ void Thread::idle_loop() {
std::memcpy(ss-2, sp->ss-2, 5 * sizeof(Stack));
ss->splitPoint = sp;
- sp->spinlock.acquire();
+ sp->mutex.lock();
assert(activePosition == nullptr);
@@ -1578,7 +1578,7 @@ void Thread::idle_loop() {
// After releasing the lock we can't access any SplitPoint related data
// in a safe way because it could have been released under our feet by
// the sp master.
- sp->spinlock.release();
+ sp->mutex.unlock();
// Try to late join to another split point if none of its slaves has
// already finished.
@@ -1618,8 +1618,8 @@ void Thread::idle_loop() {
sp = bestSp;
// Recheck the conditions under lock protection
- Threads.spinlock.acquire();
- sp->spinlock.acquire();
+ Threads.mutex.lock();
+ sp->mutex.lock();
if ( sp->allSlavesSearching
&& sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
@@ -1630,8 +1630,8 @@ void Thread::idle_loop() {
searching = true;
}
- sp->spinlock.release();
- Threads.spinlock.release();
+ sp->mutex.unlock();
+ Threads.mutex.unlock();
}
}
@@ -1687,7 +1687,7 @@ void check_time() {
else if (Limits.nodes)
{
- Threads.spinlock.acquire();
+ Threads.mutex.lock();
int64_t nodes = RootPos.nodes_searched();
@@ -1698,7 +1698,7 @@ void check_time() {
{
SplitPoint& sp = th->splitPoints[i];
- sp.spinlock.acquire();
+ sp.mutex.lock();
nodes += sp.nodes;
@@ -1706,10 +1706,10 @@ void check_time() {
if (sp.slavesMask.test(idx) && Threads[idx]->activePosition)
nodes += Threads[idx]->activePosition->nodes_searched();
- sp.spinlock.release();
+ sp.mutex.unlock();
}
- Threads.spinlock.release();
+ Threads.mutex.unlock();
if (nodes >= Limits.nodes)
Signals.stop = true;


@@ -164,8 +164,8 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
// Try to allocate available threads and ask them to start searching setting
// 'searching' flag. This must be done under lock protection to avoid concurrent
// allocation of the same slave by another master.
- Threads.spinlock.acquire();
- sp.spinlock.acquire();
+ Threads.mutex.lock();
+ sp.mutex.lock();
sp.allSlavesSearching = true; // Must be set under lock protection
++splitPointsSize;
@@ -187,8 +187,8 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
// it will instantly launch a search, because its 'searching' flag is set.
// The thread will return from the idle loop when all slaves have finished
// their work at this split point.
- sp.spinlock.release();
- Threads.spinlock.release();
+ sp.mutex.unlock();
+ Threads.mutex.unlock();
Thread::idle_loop(); // Force a call to base class idle_loop()
@@ -201,8 +201,8 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
// We have returned from the idle loop, which means that all threads are
// finished. Note that setting 'searching' and decreasing splitPointsSize must
// be done under lock protection to avoid a race with Thread::available_to().
- Threads.spinlock.acquire();
- sp.spinlock.acquire();
+ Threads.mutex.lock();
+ sp.mutex.lock();
searching = true;
--splitPointsSize;
@@ -212,8 +212,8 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
*bestMove = sp.bestMove;
*bestValue = sp.bestValue;
- sp.spinlock.release();
- Threads.spinlock.release();
+ sp.mutex.unlock();
+ Threads.mutex.unlock();
}


@@ -41,18 +41,6 @@ const size_t MAX_SPLITPOINTS_PER_THREAD = 8;
const size_t MAX_SLAVES_PER_SPLITPOINT = 4;
- /// Spinlock class wraps low level atomic operations to provide a spin lock
- class Spinlock {
-   Mutex m; // WARNING: Diasabled spinlocks to test on fishtest
- public:
-   void acquire() { m.lock(); }
-   void release() { m.unlock(); }
- };
/// SplitPoint struct stores information shared by the threads searching in
/// parallel below the same split point. It is populated at splitting time.
@@ -72,7 +60,7 @@ struct SplitPoint {
SplitPoint* parentSplitPoint;
// Shared variable data
- Spinlock spinlock;
+ Mutex mutex;
std::bitset<MAX_THREADS> slavesMask;
volatile bool allSlavesSearching;
volatile uint64_t nodes;
@@ -163,7 +151,7 @@ struct ThreadPool : public std::vector<Thread*> {
void start_thinking(const Position&, const Search::LimitsType&, Search::StateStackPtr&);
Depth minimumSplitDepth;
- Spinlock spinlock;
+ Mutex mutex;
ConditionVariable sleepCondition;
TimerThread* timer;
};