/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * The scheduler and thread-related functionality
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#define KEEP_LOCKCLOSURE
#include "Rts.h"

#include "sm/Storage.h"
#include "RtsUtils.h"
#include "StgRun.h"
#include "Schedule.h"
#include "Interpreter.h"
#include "Printer.h"
#include "RtsSignals.h"
#include "sm/Sanity.h"
#include "Stats.h"
#include "STM.h"
#include "Prelude.h"
#include "ThreadLabels.h"
#include "Updates.h"
#include "Proftimer.h"
#include "ProfHeap.h"
#include "Weak.h"

/* PARALLEL_HASKELL includes go here */
#if defined(PARALLEL_RTS)
#include "PEOpCodes.h"
#include "MPSystem.h"
#include "parallel/RTTables.h"
#include "parallel/ParallelRts.h"
#include "parallel/ParTicky.h"
#endif

#include "sm/GC.h" // waitForGcThreads, releaseGCThreads, N
#include "Sparks.h"
#include "Capability.h"
#include "Task.h"
#include "AwaitEvent.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif
#include "Trace.h"
#include "RaiseAsync.h"
#include "Threads.h"
#include "Timer.h"
#include "ThreadPaused.h"

#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <string.h>
#include <stdlib.h>
#include <stdarg.h>

#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif

/* -----------------------------------------------------------------------------
 * Global variables
 * -------------------------------------------------------------------------- */

#if !defined(THREADED_RTS)
// Blocked/sleeping threads
StgTSO *blocked_queue_hd = NULL;
StgTSO *blocked_queue_tl = NULL;
StgTSO *sleeping_queue = NULL;    // perhaps replace with a hash table?
#endif

/* Threads blocked on blackholes.
 * LOCK: sched_mutex+capability, or all capabilities
 */
StgTSO *blackhole_queue = NULL;

/* The blackhole_queue should be checked for threads to wake up.  See
 * Schedule.h for more thorough comment.
 * LOCK: none (doesn't matter if we miss an update)
 */
rtsBool blackholes_need_checking = rtsFalse;

/* Set to true when the latest garbage collection failed to reclaim
 * enough space, and the runtime should proceed to shut itself down in
 * an orderly fashion (emitting profiling info etc.)
 */
rtsBool heap_overflow = rtsFalse;

/* flag that tracks whether we have done any execution in this time slice.
 * LOCK: currently none, perhaps we should lock (but needs to be
 * updated in the fast path of the scheduler).
 *
 * NB. must be StgWord, we do xchg() on it.
 */
volatile StgWord recent_activity = ACTIVITY_YES;

/* if this flag is set as well, give up execution
 * LOCK: none (changes monotonically)
 */
volatile StgWord sched_state = SCHED_RUNNING;

/* This is used in `TSO.h' and gcc 2.96 insists that this variable actually
 * exists - earlier gccs apparently didn't.
 * -= chak
 */
StgTSO dummy_tso;

/*
 * Set to TRUE when entering a shutdown state (via shutdownHaskellAndExit()) --
 * in an MT setting, needed to signal that a worker thread shouldn't hang around
 * in the scheduler when it is out of work.
 */
rtsBool shutting_down_scheduler = rtsFalse;

/*
 * This mutex protects most of the global scheduler data in
 * the THREADED_RTS runtime.
*/ #if defined(THREADED_RTS) Mutex sched_mutex; #endif #if !defined(mingw32_HOST_OS) #define FORKPROCESS_PRIMOP_SUPPORTED #endif /* ----------------------------------------------------------------------------- * static function prototypes * -------------------------------------------------------------------------- */ static Capability *schedule (Capability *initialCapability, Task *task); // // These function all encapsulate parts of the scheduler loop, and are // abstracted only to make the structure and control flow of the // scheduler clearer. // static void schedulePreLoop (void); static void scheduleFindWork (Capability *cap, Task *task); #if defined(THREADED_RTS) static void scheduleYield (Capability **pcap, Task *task, rtsBool); #endif static void scheduleStartSignalHandlers (Capability *cap); static void scheduleCheckBlockedThreads (Capability *cap); static void scheduleCheckWakeupThreads(Capability *cap USED_IF_NOT_THREADS); static void scheduleCheckBlackHoles (Capability *cap); static void scheduleDetectDeadlock (Capability *cap, Task *task); static void schedulePushWork(Capability *cap, Task *task); #if defined(THREADED_RTS) static void scheduleActivateSpark(Capability *cap); #endif #if defined(PARALLEL_RTS) static rtsBool scheduleActivateSpark(Capability *cap, Task *task); #endif #if defined(PARALLEL_RTS) static void scheduleGetRemoteWork(Capability *cap); static void scheduleSendPendingMessages(void); static void processMessages(Capability *cap); static void startNewProcess(Capability *cap, StgClosure *graph); #endif static void schedulePostRunThread(Capability *cap, StgTSO *t); static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t ); static void scheduleHandleStackOverflow( Capability *cap, Task *task, StgTSO *t); static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next ); static void scheduleHandleThreadBlocked( StgTSO *t ); static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task, StgTSO *t ); static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc); static Capability *scheduleDoGC(Capability *cap, Task *task, rtsBool force_major); static rtsBool checkBlackHoles(Capability *cap); #ifdef PARALLEL_RTS static rtsBool checkBlockedFetches (Capability *cap); #endif static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso); static StgTSO *threadStackUnderflow(Capability *cap, Task *task, StgTSO *tso); static void deleteThread (Capability *cap, StgTSO *tso); static void deleteAllThreads (Capability *cap); #ifdef FORKPROCESS_PRIMOP_SUPPORTED static void deleteThread_(Capability *cap, StgTSO *tso); #endif /* ----------------------------------------------------------------------------- * Putting a thread on the run queue: different scheduling policies * -------------------------------------------------------------------------- */ STATIC_INLINE void addToRunQueue( Capability *cap, StgTSO *t ) { // this does round-robin scheduling; good for concurrency appendToRunQueue(cap,t); } /* --------------------------------------------------------------------------- Main scheduling loop. We use round-robin scheduling, each thread returning to the scheduler loop when one of these conditions is detected: * out of heap space * timer expires (thread yields) * thread blocks * thread ends * stack overflow GRAN version: In a GranSim setup this loop iterates over the global event queue. This revolves around the global event queue, which determines what to do next. 
Therefore, it's more complicated than either the concurrent or the parallel (GUM) setup. This version has been entirely removed (JB 2008/08). GUM version: GUM iterates over incoming messages. It starts with nothing to do (thus CurrentTSO == END_TSO_QUEUE), and sends out a fish whenever it has nothing to do; in-between doing the actual reductions (shared code below) it processes the incoming messages and deals with delayed operations (see PendingFetches). This is not the ugliest code you could imagine, but it's bloody close. (JB 2008/08) This version was formerly indicated by a PP-Flag PAR, now by PP-flag PARALLEL_HASKELL. The Eden RTS (in GHC-6.x) uses it, as well as future GUM versions. This file has been refurbished to only contain valid code, which is however incomplete, refers to invalid includes etc. (HWL 2009/12) The merged GUM/Eden RTS uses the cpp flag PARALLEL_RTS. ------------------------------------------------------------------------ */ static Capability * schedule (Capability *initialCapability, Task *task) { StgTSO *t; Capability *cap; StgThreadReturnCode ret; nat prev_what_next; rtsBool ready_to_gc; #if defined(THREADED_RTS) || defined(PARALLEL_RTS) rtsBool first = rtsTrue; rtsBool force_yield = rtsFalse; #endif #if defined(PARALLEL_RTS) #define TERMINATION_CONDITION (rtsTrue) #else #define TERMINATION_CONDITION rtsTrue #endif cap = initialCapability; // Pre-condition: this task owns initialCapability. // The sched_mutex is *NOT* held // NB. on return, we still hold a capability. debugTrace (DEBUG_sched, "cap %d: schedule()", initialCapability->no); schedulePreLoop(); // ----------------------------------------------------------- // Scheduler loop starts here: while (TERMINATION_CONDITION) { // Check whether we have re-entered the RTS from Haskell without // going via suspendThread()/resumeThread (i.e. a 'safe' foreign // call). if (cap->in_haskell) { errorBelch("schedule: re-entered unsafely.\n" " Perhaps a 'foreign import unsafe' should be 'safe'?"); stg_exit(EXIT_FAILURE); } // The interruption / shutdown sequence. // // In order to cleanly shut down the runtime, we want to: // * make sure that all main threads return to their callers // with the state 'Interrupted'. // * clean up all OS threads assocated with the runtime // * free all memory etc. // // So the sequence for ^C goes like this: // // * ^C handler sets sched_state := SCHED_INTERRUPTING and // arranges for some Capability to wake up // // * all threads in the system are halted, and the zombies are // placed on the run queue for cleaning up. We acquire all // the capabilities in order to delete the threads, this is // done by scheduleDoGC() for convenience (because GC already // needs to acquire all the capabilities). We can't kill // threads involved in foreign calls. // // * somebody calls shutdownHaskell(), which calls exitScheduler() // // * sched_state := SCHED_SHUTTING_DOWN // // * all workers exit when the run queue on their capability // drains. All main threads will also exit when their TSO // reaches the head of the run queue and they can return. // // * eventually all Capabilities will shut down, and the RTS can // exit. 
// // * We might be left with threads blocked in foreign calls, // we should really attempt to kill these somehow (TODO); switch (sched_state) { case SCHED_RUNNING: break; case SCHED_INTERRUPTING: debugTrace(DEBUG_sched, "SCHED_INTERRUPTING"); #if defined(THREADED_RTS) discardSparksCap(cap); #endif /* scheduleDoGC() deletes all the threads */ cap = scheduleDoGC(cap,task,rtsFalse); // after scheduleDoGC(), we must be shutting down. Either some // other Capability did the final GC, or we did it above, // either way we can fall through to the SCHED_SHUTTING_DOWN // case now. ASSERT(sched_state == SCHED_SHUTTING_DOWN); // fall through case SCHED_SHUTTING_DOWN: debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN"); // If we are a worker, just exit. If we're a bound thread // then we will exit below when we've removed our TSO from // the run queue. if (task->tso == NULL && emptyRunQueue(cap)) { return cap; } break; default: barf("sched_state: %d", sched_state); } /* find new work by: * . activating a spark, if possible * . send a FISH to find remote work (PARALLEL_RTS only) * it also checks for sends and processes */ scheduleFindWork(cap, task); ASSERT(n_capabilities == 1); /* work pushing, currently relevant only for THREADED_RTS: (pushes threads, wakes up idle capabilities for stealing) */ schedulePushWork(cap,task); scheduleDetectDeadlock(cap,task); #if defined(THREADED_RTS) || defined(PARALLEL_RTS) cap = task->cap; // reload cap, it might have changed #endif // Normally, the only way we can get here with no threads to // run is if a keyboard interrupt received during // scheduleCheckBlockedThreads() or scheduleDetectDeadlock(). // Additionally, it is not fatal for the // threaded RTS to reach here with no threads to run. // // win32: might be here due to awaitEvent() being abandoned // as a result of a console event having been delivered. #if defined(THREADED_RTS) if (first) { // XXX: ToDo // // don't yield the first time, we want a chance to run this // // thread for a bit, even if there are others banging at the // // door. // first = rtsFalse; // ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); } yield: scheduleYield(&cap,task,force_yield); force_yield = rtsFalse; #endif #if defined(THREADED_RTS) || defined(PARALLEL_RTS) if (emptyRunQueue(cap)) continue; // look for work again #endif #if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS) && !defined(PARALLEL_RTS) if ( emptyRunQueue(cap) ) { ASSERT(sched_state >= SCHED_INTERRUPTING); } #endif // // Get a thread to run // t = popRunQueue(cap); #if defined(PARALLEL_RTS) // task->tso = t; #endif // Sanity check the thread we're about to run. This can be // expensive if there is lots of thread switching going on... IF_DEBUG(sanity,checkTSO(t)); // TODO: a VARIANT of the following should be done in the PARALLEL_RTS // without such a variant interaction with the THREADED_RTS is broken! #if defined(THREADED_RTS) // Check whether we can run this thread in the current task. // If not, we have to pass our capability to the right task. { Task *bound = t->bound; if (bound) { if (bound == task) { // yes, the Haskell thread is bound to the current native thread } else { debugTrace(DEBUG_sched, "thread %lu bound to another OS thread", (unsigned long)t->id); // no, bound to a different Haskell thread: pass to that thread pushOnRunQueue(cap,t); continue; } } else { // The thread we want to run is unbound. 
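            // An unbound thread may only be run by a worker Task, i.e. one
            // with task->tso == NULL; a Task that is bound to some other TSO
            // has to hand the thread back and keep waiting for its own.
            //
            // Illustrative sketch only (kept out of the build): the dispatch
            // rule that the surrounding code implements, written as a single
            // predicate.  The helper name canRunHere() is hypothetical.
#if 0
            static rtsBool canRunHere (Task *task, StgTSO *t)
            {
                if (t->bound != NULL) {
                    return t->bound == task;  // bound TSO: only its own Task may run it
                }
                return task->tso == NULL;     // unbound TSO: only a worker Task may run it
            }
#endif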
if (task->tso) { debugTrace(DEBUG_sched, "this OS thread cannot run thread %lu", (unsigned long)t->id); // no, the current native thread is bound to a different // Haskell thread, so pass it to any worker thread pushOnRunQueue(cap,t); continue; } } } #endif // If we're shutting down, and this thread has not yet been // killed, kill it now. This sometimes happens when a finalizer // thread is created by the final GC, or a thread previously // in a foreign call returns. if (sched_state >= SCHED_INTERRUPTING && !(t->what_next == ThreadComplete || t->what_next == ThreadKilled)) { deleteThread(cap,t); } /* context switches are initiated by the timer signal, unless * the user specified "context switch as often as possible", with * +RTS -C0 */ if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0 && !emptyThreadQueues(cap)) { cap->context_switch = 1; } run_thread: // CurrentTSO is the thread to run. t might be different if we // loop back to run_thread, so make sure to set CurrentTSO after // that. cap->r.rCurrentTSO = t; #if 0 && defined(PARALLEL_RTS) debugTrace(DEBUG_sched, "-->> running thread %ld %s ...", (long)t->id, whatNext_strs[t->what_next]); #endif startHeapProfTimer(); // Check for exceptions blocked on this thread maybePerformBlockedException (cap, t); // ---------------------------------------------------------------------- // Run the current thread ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); ASSERT(t->cap == cap); ASSERT(t->bound ? t->bound->cap == cap : 1); prev_what_next = t->what_next; errno = t->saved_errno; #if mingw32_HOST_OS SetLastError(t->saved_winerror); #endif cap->in_haskell = rtsTrue; dirty_TSO(cap,t); #if defined(THREADED_RTS) if (recent_activity == ACTIVITY_DONE_GC) { // ACTIVITY_DONE_GC means we turned off the timer signal to // conserve power (see #1623). Re-enable it here. nat prev; prev = xchg((P_)&recent_activity, ACTIVITY_YES); if (prev == ACTIVITY_DONE_GC) { startTimer(); } } else { recent_activity = ACTIVITY_YES; } #endif traceEventRunThread(cap, t); switch (prev_what_next) { case ThreadKilled: case ThreadComplete: /* Thread already finished, return to scheduler. */ ret = ThreadFinished; break; case ThreadRunGHC: { StgRegTable *r; // cost funtion will added here // par_ticky_before_StgRun(); r = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r); // par_ticky_after_StgRun(); // and end here cap = regTableToCapability(r); ret = r->rRet; break; } case ThreadInterpret: cap = interpretBCO(cap); ret = cap->r.rRet; break; default: barf("schedule: invalid what_next field"); } cap->in_haskell = rtsFalse; // The TSO might have moved, eg. if it re-entered the RTS and a GC // happened. So find the new location: t = cap->r.rCurrentTSO; // We have run some Haskell code: there might be blackhole-blocked // threads to wake up now. // Lock-free test here should be ok, we're just setting a flag. if ( blackhole_queue != END_TSO_QUEUE ) { blackholes_need_checking = rtsTrue; } // And save the current errno in this thread. // XXX: possibly bogus for SMP because this thread might already // be running again, see code below. t->saved_errno = errno; #if mingw32_HOST_OS // Similarly for Windows error code t->saved_winerror = GetLastError(); #endif traceEventStopThread(cap, t, ret); #if defined(THREADED_RTS) // If ret is ThreadBlocked, and this Task is bound to the TSO that // blocked, we are in limbo - the TSO is now owned by whatever it // is blocked on, and may in fact already have been woken up, // perhaps even on a different Capability. It may be the case // that task->cap != cap. 
We better yield this Capability // immediately and return to normaility. if (ret == ThreadBlocked) { force_yield = rtsTrue; goto yield; } #endif ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); ASSERT(t->cap == cap); // ---------------------------------------------------------------------- // Costs for the scheduler are assigned to CCS_SYSTEM stopHeapProfTimer(); #if defined(PROFILING) CCCS = CCS_SYSTEM; #endif schedulePostRunThread(cap,t); if (ret != StackOverflow) { t = threadStackUnderflow(cap,task,t); } ready_to_gc = rtsFalse; switch (ret) { case HeapOverflow: ready_to_gc = scheduleHandleHeapOverflow(cap,t); break; case StackOverflow: scheduleHandleStackOverflow(cap,task,t); break; case ThreadYielding: if (scheduleHandleYield(cap, t, prev_what_next)) { // shortcut for switching between compiler/interpreter: goto run_thread; } break; case ThreadBlocked: scheduleHandleThreadBlocked(t); break; case ThreadFinished: if (scheduleHandleThreadFinished(cap, task, t)) return cap; ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); break; default: barf("schedule: invalid thread return code %d", (int)ret); } if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) { cap = scheduleDoGC(cap,task,rtsFalse); } } /* end of while() */ #if defined(PARALLEL_RTS) /* the parallel system might end up here (non-main PEs) */ IF_PAR_DEBUG(verbose, debugBelch("== Leaving schedule() after having received Finish\n")); return cap; #endif } /* ---------------------------------------------------------------------------- * Setting up the scheduler loop * ------------------------------------------------------------------------- */ static void schedulePreLoop(void) { // initialisation for scheduler - what cannot go into initScheduler() } /* ----------------------------------------------------------------------------- * scheduleFindWork() * * Search for work to do, and handle messages from elsewhere. * -------------------------------------------------------------------------- */ static void scheduleFindWork (Capability *cap, Task *task) { scheduleStartSignalHandlers(cap); // Only check the black holes here if we've nothing else to do. // During normal execution, the black hole list only gets checked // at GC time, to avoid repeatedly traversing this possibly long // list each time around the scheduler. if (emptyRunQueue(cap)) {scheduleCheckBlackHoles(cap); } #if defined(PARALLEL_RTS) IF_DEBUG(sanity, checkBHQ(blackhole_queue, rtsFalse)); IF_DEBUG(sanity, checkBFQ(cap)); /* In case of an empty run queue, this function must: * a) send all buffered messages away (PendingFetches) (GUM only) * b) look for local work, trying to activate a spark (GUM/SMP only) * c) look for remote work, sending a PP_FISH message (GUM only) * d) process any arrived messages (GUM/Eden) * * If the run queue is not empty, scheduler only calls * processMessages if there *are* any pending messages. */ // (a) checking BHs might have filled PendingFetches; send them now scheduleSendPendingMessages(); // buffered messages... #endif scheduleCheckWakeupThreads(cap); scheduleCheckBlockedThreads(cap); #if defined (PARALLEL_RTS) // no work? // (b) search for LOCAL work in spark pool if (emptyRunQueue(cap)) { if (scheduleActivateSpark(cap, task)) { return;} } #endif #if defined (THREADED_RTS) // no work? // (b) search for LOCAL work in spark pool if (emptyRunQueue(cap)) { scheduleActivateSpark(cap); } #endif #if defined(PARALLEL_RTS) // still no work? 
send a FISH // (c) search for REMOTE work by sending a FISH if (emptyRunQueue(cap)) {scheduleGetRemoteWork(cap); } // finally, check for new messages; could be lifted into scheduler // (d) process any arrived messages if (emptyRunQueue(cap) || MP_probe() ) { // TRY: use && here, but lose response time // nothing to do or messages available for us // TODO: this should be a blocking receive, but this needs // explicit checks whether the fish delay has expired; // this is currently only done in scheduleGetRemoteWork //if (next_fish_to_send_at==0) { processMessages(cap); // } // this call will set sched_state for termination as well } #endif } #if defined(PARALLEL_RTS) /* ------------------------------------------------------------------------- * processMessages() * * receive messages from other machines, process them. * ------------------------------------------------------------------------- * processMessages * called from scheduleFindWork when there *are* messages * or when there is nothing to do (the call is BLOCKING) * * For message codes, see PEOpCodes.h * At this level, there are basically 3 message types: * System messages: PP_FINISH, (PP_READY, PP_PETIDS not here) * Control messages: PP_RFORK, PP_TERMINATE * Data messages: PP_DATA, PP_HEAD, PP_CONSTR, PP_CONNECT * processMessages receives them and executes the required action. * * This function used to live inside HLComms.c, but has now moved to * the scheduler to bring Capabilities and such into scope. * The former HLComms.c is now DataComm.c and contains processing * and sending methods for data messages. */ // static data... static rtsPackBuffer *recvBuffer = NULL; static void initRecvBuffer(void); // done on demand in processMessages static void initRecvBuffer(void) { if (recvBuffer == NULL) { recvBuffer = (rtsPackBuffer*) stgMallocBytes(sizeof(StgWord)*DATASPACEWORDS, "recvBuffer"); } } // called at shutdown, declared in Parallel.h void freeRecvBuffer(void) { if (recvBuffer != NULL) { stgFree(recvBuffer); } } static void processMessages(Capability *cap) { OpCode opcode; nat pe; Port sender, receiver; StgTSO* tso; // for terminate messages rtsBool eventEmitted = rtsFalse; /* this makes it non-blocking; deprecated; check at call-site instead!*/ if (!MP_probe()) { return ; } initRecvBuffer(); IF_PAR_DEBUG(verbose, debugBelch("__ processMessages: run_queue has %lu elems, blackhole_queue has %lu elems; blockedFetches has %lu elems\n", run_queue_len(cap), queue_len(blackhole_queue), GALA_list_len(blockedFetches))); IF_PAR_DEBUG(verbose, { debugBelch("\nBHQ: "); print_queue(blackhole_queue); debugBelch("\nBFQ: "); print_BF_queue(cap);}); do { // using raw MP interface... And we unpack at most the maximum // size (MPSystem will determine and unpack the real size). MP_recv(sizeof(StgWord)*DATASPACEWORDS, (long*) recvBuffer, &opcode, &pe); ASSERT(LOOKS_LIKE_PE(pe)); ASSERT(ISOPCODE(opcode)); // if ( (!eventEmitted) && EDENTRACE) { // edentrace: start communication event // eventEmitted = rtsTrue; // } IF_PAR_DEBUG(verbose, debugBelch("Received %s (Code %x) from %d\n", getOpName(opcode),opcode,pe)); switch (opcode) { /* system messages (one valid) */ case PP_FINISH: IF_PAR_DEBUG(verbose, debugBelch("== received FINISH from [%d]\n", pe)); if (IAmMainThread) { /* One of the child PEs has stopped (internal error). We could * inform the other children and go on, but the system is * unstable in case of global memory. We abort execution. 
*/ } else { // not IAmMainThread ASSERT(pe == 1); // only the main PE (with logical No.1) may // send a FINISH to children. } // this will stop the main scheduling loop, makes all threads // join and shut down the entire instance. sched_state = SCHED_INTERRUPTING; break; case PP_NEWPE: case PP_READY: case PP_PETIDS: barf("MP-System message %x found on scheduler level", opcode); /* When a new PE joins then potentially FISH & REVAL message may reach PES before they are notified of the new PEs existence. The only solution is to bounce/fail these messages back to the sender. But we will worry about it once we start seeing these race conditions! Currently, we assume a closed system. TODO: do something about supporting an open system! */ break; /* control messages */ case PP_RFORK: { StgClosure *graph; globalAddr **gamap = NULL; nat nGAs = 0; ASSERT(isRtsPort(recvBuffer->receiver) && recvBuffer->receiver.machine == thisPE); // edentrace: emit an event receiveMsg(recvBuffer->sender, // recvBuffer->receiver, RFORK) IF_PAR_DEBUG(verbose, debugBelch("UNSAFE call to UnpackGraph")); graph = UnpackGraph(recvBuffer, gamap, &nGAs, recvBuffer->receiver, cap); startNewProcess(cap, graph); break; } case PP_TERMINATE: // remote request to terminate a local thread // ports: sender = a remote inport, receiver = a local thread // receiver should be (thisPE, a process, a ThreadID) receiver = recvBuffer->receiver; sender = recvBuffer->sender; // checks if 1. this thread exists // 2. belongs to the process // TODO: this is currently unimplemented. what we want is to // kill the thread with thread->id == receiver.id tso = findTSOByP(receiver); if (tso == NULL) { // nothing to do break; } // checks if 3. (still) has receiver set to this sender if (equalPorts(*(MyReceiver(tso)), sender)) { // edentrace: emit event receiveMessage(recvBuffer->sender, // recvBuffer->receiver, TERMINATE) // terminate this thread (it may not catch ThreadKilled!) deleteThread(cap, tso); } else { // otherwise: nothing to do, ignore message IF_PAR_DEBUG(ports, debugBelch("WARN: Request from port (%d,%d,%d) " "to terminate thread %d (not connected).\n", (int) sender.machine, (int) sender.process, (int) sender.id, (int) tso->id)); } break; /* data messages: */ case PP_DATA: case PP_HEAD: case PP_CONSTR: // unpack ports, check 1:1 connection, // unpack data in the heap, update Blackhole. // All done inside DataComms.c processDataMsg(cap, opcode, recvBuffer); // received some data, check blocked threads blackholes_need_checking = rtsTrue; break; case PP_CONNECT: // connect receiver port to sender (checking 1:1, one connection allowed) connectInportByP(recvBuffer->receiver, recvBuffer->sender); break; case PP_FETCH: // request for data processFetch(recvBuffer ,cap); break; case PP_RESUME: // data being delivered processResume(cap, pe,recvBuffer ); blackholes_need_checking = rtsTrue; break; case PP_ACK: // ack a successful SCHEDULE message; contains a gagamap processAck(cap , recvBuffer); blackholes_need_checking = rtsTrue; break; case PP_FISH: // searching for remote work processFish(cap,opcode,pe,recvBuffer); blackholes_need_checking = rtsTrue; break; case PP_FREE: // free some weight for a GA processFree(cap); blackholes_need_checking = rtsTrue; break; case PP_SCHEDULE: // new work processSchedule(cap, pe, recvBuffer); blackholes_need_checking = rtsTrue; break; case PP_INF: // unpackstatic(cap,recvBuffer); break; #ifdef DIST // GdH only case PP_REVAL: processReval(task); break; #endif default: /* Anything we're not prepared to deal with. 
*/ barf("PE %d: Unexpected opcode %x from %x", thisPE, opcode, pe); } /* switch */ } while (sched_state < SCHED_INTERRUPTING && // stop shortcut MP_probe()); // While there are messages: process them // do the check if we have processed some data message (flag is set) scheduleCheckBlackHoles(cap); // edentrace: // if ( eventEmitted && EDENTRACE) { // traceEvent( FETE_END_COMM | RECORD_TRACE, 0,0); // } return; } /* processMessages */ /* startNewProcess * start a new process to evaluate data we usually received via * processMessages (called from there). * * As opposed to "forking" a thread from haskell, a new processID is * assigned and the new process not supposed to share heap data with * other existing threads. */ void startNewProcess(Capability *cap, StgClosure* graph) { StgTSO *tso; IF_PAR_DEBUG(verbose, debugBelch("Starting a new process for graph @ %p.\n", graph)); // create a thread, push graph, set to a new process, schedule the thread // besides registering a new process, this is an imitation of what // happens in forkzh tso = createIOThread(cap, RtsFlags.GcFlags.initialStkSize, graph); // create a new process table entry, set this TSO as first thread newProcess(tso); IF_DEBUG(sanity, checkTSO(tso)); IF_PAR_DEBUG(procs, printTSO(tso)); // schedule the thread (will go to the end of the run queue) scheduleThread(cap, tso); return; } #endif // PARALLEL_RTS #if defined(THREADED_RTS) STATIC_INLINE rtsBool shouldYieldCapability (Capability *cap, Task *task) { // we need to yield this capability to someone else if.. // - another thread is initiating a GC // - another Task is returning from a foreign call // - the thread at the head of the run queue cannot be run // by this Task (it is bound to another Task, or it is unbound // and this task it bound). return (waiting_for_gc || cap->returning_tasks_hd != NULL || (!emptyRunQueue(cap) && (task->tso == NULL ? cap->run_queue_hd->bound != NULL : cap->run_queue_hd->bound != task))); } // This is the single place where a Task goes to sleep. There are // two reasons it might need to sleep: // - there are no threads to run // - we need to yield this Capability to someone else // (see shouldYieldCapability()) // // Careful: the scheduler loop is quite delicate. Make sure you run // the tests in testsuite/concurrent (all ways) after modifying this, // and also check the benchmarks in nofib/parallel for regressions. static void scheduleYield (Capability **pcap, Task *task, rtsBool force_yield) { Capability *cap = *pcap; // if we have work, and we don't need to give up the Capability, continue. // // The force_yield flag is used when a bound thread blocks. This // is a particularly tricky situation: the current Task does not // own the TSO any more, since it is on some queue somewhere, and // might be woken up or manipulated by another thread at any time. // The TSO and Task might be migrated to another Capability. // Certain invariants might be in doubt, such as task->bound->cap // == cap. We have to yield the current Capability immediately, // no messing around. // if (!force_yield && !shouldYieldCapability(cap,task) && (!emptyRunQueue(cap) || !emptyWakeupQueue(cap) || blackholes_need_checking || sched_state >= SCHED_INTERRUPTING)) return; // otherwise yield (sleep), and keep yielding if necessary. do { yieldCapability(&cap,task); } while (shouldYieldCapability(cap,task)); // note there may still be no threads on the run queue at this // point, the caller has to check. 
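    // Illustrative sketch only (kept out of the build): stripped of the
    // force_yield flag, the blackhole check and the run/wakeup-queue tests,
    // the shape of this function is "return early if we may keep the
    // Capability, otherwise yield in a loop until we may".  The name
    // scheduleYield_sketch is hypothetical; the other names refer to the
    // real definitions above.
#if 0
    static void scheduleYield_sketch (Capability **pcap, Task *task)
    {
        Capability *cap = *pcap;
        if (!shouldYieldCapability(cap,task)) {
            return;                           // keep the Capability and run
        }
        do {
            yieldCapability(&cap,task);       // sleep; may wake holding a different Capability
        } while (shouldYieldCapability(cap,task));
        *pcap = cap;                          // caller still has to check for an empty run queue
    }
#endif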
*pcap = cap; return; } #endif /* ----------------------------------------------------------------------------- * schedulePushWork() * * Push work to other Capabilities if we have some. * -------------------------------------------------------------------------- */ static void schedulePushWork(Capability *cap USED_IF_THREADS, Task *task USED_IF_THREADS) { /* following code not for PARALLEL_HASKELL. I kept the call general, future GUM versions might use pushing in a distributed setup */ #if defined(THREADED_RTS) Capability *free_caps[n_capabilities], *cap0; nat i, n_free_caps; // migration can be turned off with +RTS -qg if (!RtsFlags.ParFlags.migrate) return; // Check whether we have more threads on our run queue, or sparks // in our pool, that we could hand to another Capability. if (cap->run_queue_hd == END_TSO_QUEUE) { if (sparkPoolSizeCap(cap) < 2) return; } else { if (cap->run_queue_hd->_link == END_TSO_QUEUE && sparkPoolSizeCap(cap) < 1) return; } // First grab as many free Capabilities as we can. for (i=0, n_free_caps=0; i < n_capabilities; i++) { cap0 = &capabilities[i]; if (cap != cap0 && tryGrabCapability(cap0,task)) { if (!emptyRunQueue(cap0) || cap->returning_tasks_hd != NULL) { // it already has some work, we just grabbed it at // the wrong moment. Or maybe it's deadlocked! releaseCapability(cap0); } else { free_caps[n_free_caps++] = cap0; } } } // we now have n_free_caps free capabilities stashed in // free_caps[]. Share our run queue equally with them. This is // probably the simplest thing we could do; improvements we might // want to do include: // // - giving high priority to moving relatively new threads, on // the gournds that they haven't had time to build up a // working set in the cache on this CPU/Capability. // // - giving low priority to moving long-lived threads if (n_free_caps > 0) { StgTSO *prev, *t, *next; rtsBool pushed_to_all; debugTrace(DEBUG_sched, "cap %d: %s and %d free capabilities, sharing...", cap->no, (!emptyRunQueue(cap) && cap->run_queue_hd->_link != END_TSO_QUEUE)? "excess threads on run queue":"sparks to share (>=2)", n_free_caps); i = 0; pushed_to_all = rtsFalse; if (cap->run_queue_hd != END_TSO_QUEUE) { prev = cap->run_queue_hd; t = prev->_link; prev->_link = END_TSO_QUEUE; for (; t != END_TSO_QUEUE; t = next) { next = t->_link; t->_link = END_TSO_QUEUE; if (t->what_next == ThreadRelocated || t->bound == task // don't move my bound thread || tsoLocked(t)) { // don't move a locked thread setTSOLink(cap, prev, t); prev = t; } else if (i == n_free_caps) { pushed_to_all = rtsTrue; i = 0; // keep one for us setTSOLink(cap, prev, t); prev = t; } else { debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no); appendToRunQueue(free_caps[i],t); traceEventMigrateThread (cap, t, free_caps[i]->no); if (t->bound) { t->bound->cap = free_caps[i]; } t->cap = free_caps[i]; i++; } } cap->run_queue_tl = prev; } #ifdef SPARK_PUSHING /* JB I left this code in place, it would work but is not necessary */ // If there are some free capabilities that we didn't push any // threads to, then try to push a spark to each one. 
if (!pushed_to_all) { StgClosure *spark; // i is the next free capability to push to for (; i < n_free_caps; i++) { if (emptySparkPoolCap(free_caps[i])) { spark = tryStealSpark(cap->sparks,1); if (spark != NULL) { debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no); traceEventStealSpark(free_caps[i], t, cap->no); newSpark(&(free_caps[i]->r), spark); } } } } #endif /* SPARK_PUSHING */ // release the capabilities for (i = 0; i < n_free_caps; i++) { task->cap = free_caps[i]; releaseAndWakeupCapability(free_caps[i]); } } task->cap = cap; // reset to point to our Capability. #endif /* THREADED_RTS */ } /* ---------------------------------------------------------------------------- * Start any pending signal handlers * ------------------------------------------------------------------------- */ #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS) static void scheduleStartSignalHandlers(Capability *cap) { if (RtsFlags.MiscFlags.install_signal_handlers && signals_pending()) { // safe outside the lock startSignalHandlers(cap); } } #else static void scheduleStartSignalHandlers(Capability *cap STG_UNUSED) { } #endif /* ---------------------------------------------------------------------------- * Check for blocked threads that can be woken up. * ------------------------------------------------------------------------- */ static void scheduleCheckBlockedThreads(Capability *cap USED_IF_NOT_THREADS) { #if !defined(THREADED_RTS) // // Check whether any waiting threads need to be woken up. If the // run queue is empty, and there are no other tasks running, we // can wait indefinitely for something to happen. // if ( !emptyQueue(blocked_queue_hd) || !emptyQueue(sleeping_queue) ) { awaitEvent( emptyRunQueue(cap) && !blackholes_need_checking ); } #endif } /* ---------------------------------------------------------------------------- * Check for threads woken up by other Capabilities * ------------------------------------------------------------------------- */ static void scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS) { #if defined(THREADED_RTS) // Any threads that were woken up by other Capabilities get // appended to our run queue. if (!emptyWakeupQueue(cap)) { ACQUIRE_LOCK(&cap->lock); if (emptyRunQueue(cap)) { cap->run_queue_hd = cap->wakeup_queue_hd; cap->run_queue_tl = cap->wakeup_queue_tl; } else { setTSOLink(cap, cap->run_queue_tl, cap->wakeup_queue_hd); cap->run_queue_tl = cap->wakeup_queue_tl; } cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE; RELEASE_LOCK(&cap->lock); } #endif } /* ---------------------------------------------------------------------------- * Check for threads blocked on BLACKHOLEs that can be woken up * ------------------------------------------------------------------------- */ static void scheduleCheckBlackHoles (Capability *cap) { if ( blackholes_need_checking ) // check without the lock first { ACQUIRE_LOCK(&sched_mutex); if ( blackholes_need_checking ) { blackholes_need_checking = rtsFalse; // important that we reset the flag *before* checking the // blackhole queue, otherwise we could get deadlock. This // happens as follows: we wake up a thread that // immediately runs on another Capability, blocks on a // blackhole, and then we reset the blackholes_need_checking flag. checkBlackHoles(cap); #ifdef PARALLEL_RTS checkBlockedFetches(cap); #endif } RELEASE_LOCK(&sched_mutex); } } /* ---------------------------------------------------------------------------- * Detect deadlock conditions and attempt to resolve them. 
* ------------------------------------------------------------------------- */ static void scheduleDetectDeadlock (Capability *cap, Task *task) { #if defined(PARALLEL_RTS) return; #endif /* * Detect deadlock: when we have no threads to run, there are no * threads blocked, waiting for I/O, or sleeping, and all the * other tasks are waiting for work, we must have a deadlock of * some description. */ if ( emptyThreadQueues(cap) ) { #if defined(THREADED_RTS) /* * In the threaded RTS, we only check for deadlock if there * has been no activity in a complete timeslice. This means * we won't eagerly start a full GC just because we don't have * any threads to run currently. */ if (recent_activity != ACTIVITY_INACTIVE) return; #endif debugTrace(DEBUG_sched, "deadlocked, forcing major GC..."); // Garbage collection can release some new threads due to // either (a) finalizers or (b) threads resurrected because // they are unreachable and will therefore be sent an // exception. Any threads thus released will be immediately // runnable. cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/); // when force_major == rtsTrue. scheduleDoGC sets // recent_activity to ACTIVITY_DONE_GC and turns off the timer // signal. if ( !emptyRunQueue(cap) ) return; #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS) /* If we have user-installed signal handlers, then wait * for signals to arrive rather then bombing out with a * deadlock. */ if ( RtsFlags.MiscFlags.install_signal_handlers && anyUserHandlers() ) { debugTrace(DEBUG_sched, "still deadlocked, waiting for signals..."); awaitUserSignals(); if (signals_pending()) { startSignalHandlers(cap); } // either we have threads to run, or we were interrupted: ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING); return; } #endif #if !defined(THREADED_RTS) /* Probably a real deadlock. Send the current main thread the * Deadlock exception. */ if (task->tso) { switch (task->tso->why_blocked) { case BlockedOnSTM: case BlockedOnBlackHole: case BlockedOnException: case BlockedOnMVar: throwToSingleThreaded(cap, task->tso, (StgClosure *)nonTermination_closure); return; default: barf("deadlock: main thread blocked in a strange way"); } } return; #endif } } /* ---------------------------------------------------------------------------- * Send pending messages (PARALLEL_HASKELL only) * ------------------------------------------------------------------------- */ #if defined(PARALLEL_RTS) static void scheduleSendPendingMessages(void) { #if 0 if (PendingFetches != END_TSO_QUEUE) { processPendingFetches(); } #warning "ToCheck: sendOldBuffers disabled in Eden-style code" fprintf(stderr,"\n\n RtsFlags.ParFlags.BufferTim = %d \n\n",RtsFlags.ParFlags.BufferTime); if (RtsFlags.ParFlags.BufferTime) { // if we use message buffering, we must send away all message // packets which have become too old... 
sendOldBuffers(); } #endif } #endif /* ---------------------------------------------------------------------------- * Activate spark threads (PARALLEL_HASKELL and THREADED_RTS) * ------------------------------------------------------------------------- */ #if defined(PARALLEL_RTS) static rtsBool scheduleActivateSpark(Capability *cap, Task *task) { #ifndef EAGER_THREAD_CREATION ASSERT(emptyRunQueue(cap)); #endif if (anySparks(cap) && (advisory_thread_count < RtsFlags.ParFlags.maxThreads) ) { IF_PAR_DEBUG(verbose, fprintf(stderr," createSparkThread: estim spark pool size = %d \n", sparkPoolSizeCap(cap)) ); createSparkThread(cap, task); debugTrace(DEBUG_sched, "creating a spark thread"); return rtsTrue; } return rtsFalse; } #endif // PARALLEL_HASKELL #if defined(THREADED_RTS) static void scheduleActivateSpark(Capability *cap) { #ifndef EAGER_THREAD_CREATION ASSERT(emptyRunQueue(cap)); #endif if (anySparks(cap) #if defined(PARALLEL_RTS) && (advisory_thread_count < RtsFlags.ParFlags.maxThreads) #endif ) { createSparkThread(cap); debugTrace(DEBUG_sched, "creating a spark thread"); } } #endif // THREADED_RTS /* ---------------------------------------------------------------------------- * Communicate with remote nodes (PARALLEL_HASKELL only) * * remote nodes can: * send us work: processMessages, * hold work we want: scheduleGetRemoteWork * await messages: <- fetching, not implemented -> --------------------------------------------------------------------------- */ /* scheduleGetRemoteWork: * called from scheduler if idle. * This function flushes message buffers, sends a fish message and * blocks waiting for messages. Returns rtsTrue when finishing * message was received. * ------------------------------------------------------------------------- */ #if defined(PARALLEL_RTS) static void scheduleGetRemoteWork(Capability *cap) { nat pe; rtsTime delay ; rtsBool receivedFinish = rtsFalse; #if 0 #warning "ToCheck: sendImmediately disabled in Eden-style code" if (RtsFlags.ParFlags.BufferTime) { IF_PAR_DEBUG(verbose, debugBelch("...send all pending data,")); { nat i; for (i=1; i <= nPEs; i++) sendImmediately(i); // send all messages away immediately } } #endif /* We get here, if we have no work, tried to activate a local spark, but still have no work. We try to get a remote spark, by sending a FISH message. This should only be activated if we run an Eden-style RTS. Thread migration should be added here, and triggered when a sequence of fishes returns without work. */ delay = (rtsTime) RtsFlags.ParFlags.fishDelay; // delay = (RtsFlags.ParFlags.fishDelay!=0ll ? RtsFlags.ParFlags.fishDelay : 0ll); // delay = (PARFLAGS_FISHDELAY!=011 ? PARFLAGS_FISHDELAY : 011); /* * We really have absolutely no work. Send out a fish * (there may be some out there already), and wait for * something to arrive. We clearly can't run any threads * until a SCHEDULE or RESUME arrives, and so that's what * we're hoping to see. (Of course, we still have to * respond to other types of messages.) */ rtsTime now = msTime() /*CURRENT_TIME*/; IF_PAR_DEBUG(fish, // verbose, debugBelch("-- scheduleGetRemoteWork: now=%llu; outstandingFishes=%u; advisory_thread_count=%u\n", (ullong) now, (nat)outstandingFishes, (nat)advisory_thread_count)); if (outstandingFishes < RtsFlags.ParFlags.maxFishes && advisory_thread_count < RtsFlags.ParFlags.maxThreads) { // send a FISH, but when? if (((last_fish_arrived_at == 0 ) || (last_fish_arrived_at+delay <= now))/* && thisPE != 1 */) { // send FISH now! 
/* outstandingFishes is set in sendFish, processFish; avoid flooding system with fishes via delay */ // if (thisPE==1) { // with this code, PE 1 will NEVER send a FISH!!! -- HWL if (thisPE==1 && last_fish_arrived_at+delay*RtsFlags.ParFlags.fishDelayFactor > now) { next_fish_to_send_at = last_fish_arrived_at+delay*RtsFlags.ParFlags.fishDelayFactor; IF_PAR_DEBUG(fish, debugBelch("-- First fish from PE 1 delayed by a factor of %u; delay = %u; next fish to send at =%llu\n", RtsFlags.ParFlags.fishDelayFactor, ((nat)delay)*RtsFlags.ParFlags.fishDelayFactor, (ullong)next_fish_to_send_at)); return ; } else { // all other PEs next_fish_to_send_at = 0; // i.e. send fish now } } else { /* ToDo: this should be done in the main scheduling loop to avoid the busy wait here; not so bad if fish delay is very small */ // int iq = 0; // DEBUGGING -- HWL next_fish_to_send_at = last_fish_arrived_at+delay; // remember when to send #ifdef DEBUG IF_PAR_DEBUG(fish, // verbose, if (outstandingFishes < RtsFlags.ParFlags.maxFishes && (last_fish_arrived_at!=0 && last_fish_arrived_at+delay > now)) { debugBelch("--$$ <%llu> delaying FISH by %u until %llu (last fish %llu, delay %llu)\n", (nat)delay, (ullong) now, (ullong) last_fish_arrived_at+delay, (ullong) last_fish_arrived_at, (ullong) delay); }); #endif return; /* send a fish when ready, but process messages that arrive in the meantime */ } // JB: IMHO, this should all be hidden inside sendFish(...) pe = choosePE(); IF_PAR_DEBUG(fish, // verbose, debugBelch("--$$ <%llu> sending fish to %u; active/total threads=%d/%d\n", now,pe,run_queue_len(cap),advisory_thread_count)); sendFish(pe, thisPE, NEW_FISH_AGE, NEW_FISH_HISTORY, RtsFlags.ParFlags.fishHunger); // NEW_FISH_HUNGER); // add spark event to .gr profile /*if (RtsFlags.ParFlags.ParStats.Sparks) DumpRawGranEvent(thisPE, pe, SP_REQUEST, ((StgTSO *)NULL), (StgClosure *)NULL, 0, spark_queue_len(cap));*/ #if defined(PAR_TICKY) // Global statistics: count no. of fishes if (RtsFlags.ParFlags.ParStats.Global && RtsFlags.GcFlags.giveStats > NO_GC_STATS) { globalParStats.tot_fish_mess++; } #endif /* delayed fishes must have been sent by now! */ // TODO: check whether this var is still needed next_fish_to_send_at = 0; } //processMessages(cap); // blocking receive... } #endif /* PARALLEL_RTS */ /* ---------------------------------------------------------------------------- * After running a thread... * ------------------------------------------------------------------------- */ static void schedulePostRunThread (Capability *cap, StgTSO *t) { // We have to be able to catch transactions that are in an // infinite loop as a result of seeing an inconsistent view of // memory, e.g. // // atomically $ do // [a,b] <- mapM readTVar [ta,tb] // when (a == b) loop // // and a is never equal to b given a consistent view of memory. // if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) { if (!stmValidateNestOfTransactions (t -> trec)) { debugTrace(DEBUG_sched | DEBUG_stm, "trec %p found wasting its time", t); // strip the stack back to the // ATOMICALLY_FRAME, aborting the (nested) // transaction, and saving the stack of any // partially-evaluated thunks on the heap. 
throwToSingleThreaded_(cap, t, NULL, rtsTrue); // ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME); } } /* some statistics gathering in the parallel case */ } /* ----------------------------------------------------------------------------- * Handle a thread that returned to the scheduler with ThreadHeepOverflow * -------------------------------------------------------------------------- */ static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t ) { // did the task ask for a large block? if (cap->r.rHpAlloc > BLOCK_SIZE) { // if so, get one and push it on the front of the nursery. bdescr *bd; lnat blocks; blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE; debugTrace(DEBUG_sched, "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n", (long)t->id, what_next_strs[t->what_next], blocks); // don't do this if the nursery is (nearly) full, we'll GC first. if (cap->r.rCurrentNursery->link != NULL || cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop // if the nursery has only one block. ACQUIRE_SM_LOCK bd = allocGroup( blocks ); RELEASE_SM_LOCK cap->r.rNursery->n_blocks += blocks; // link the new group into the list bd->link = cap->r.rCurrentNursery; bd->u.back = cap->r.rCurrentNursery->u.back; if (cap->r.rCurrentNursery->u.back != NULL) { cap->r.rCurrentNursery->u.back->link = bd; } else { cap->r.rNursery->blocks = bd; } cap->r.rCurrentNursery->u.back = bd; // initialise it as a nursery block. We initialise the // step, gen_no, and flags field of *every* sub-block in // this large block, because this is easier than making // sure that we always find the block head of a large // block whenever we call Bdescr() (eg. evacuate() and // isAlive() in the GC would both have to do this, at // least). { bdescr *x; for (x = bd; x < bd + blocks; x++) { initBdescr(x,g0,g0); x->free = x->start; x->flags = 0; } } // This assert can be a killer if the app is doing lots // of large block allocations. IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery)); // now update the nursery to point to the new block cap->r.rCurrentNursery = bd; // we might be unlucky and have another thread get on the // run queue before us and steal the large block, but in that // case the thread will just end up requesting another large // block. pushOnRunQueue(cap,t); return rtsFalse; /* not actually GC'ing */ } } if (cap->r.rHpLim == NULL || cap->context_switch) { // Sometimes we miss a context switch, e.g. when calling // primitives in a tight loop, MAYBE_GC() doesn't check the // context switch flag, and we end up waiting for a GC. // See #1984, and concurrent/should_run/1984 cap->context_switch = 0; addToRunQueue(cap,t); } else { pushOnRunQueue(cap,t); } return rtsTrue; /* actual GC is done at the end of the while loop in schedule() */ } /* ----------------------------------------------------------------------------- * Handle a thread that returned to the scheduler with ThreadStackOverflow * -------------------------------------------------------------------------- */ static void scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t) { /* just adjust the stack for this thread, then pop it back * on the run queue. */ { /* enlarge the stack */ StgTSO *new_t = threadStackOverflow(cap, t); /* The TSO attached to this Task may have moved, so update the * pointer to it. 
*/ if (task->tso == t) { task->tso = new_t; } pushOnRunQueue(cap,new_t); } } /* ----------------------------------------------------------------------------- * Handle a thread that returned to the scheduler with ThreadYielding * -------------------------------------------------------------------------- */ static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next ) { // Reset the context switch flag. We don't do this just before // running the thread, because that would mean we would lose ticks // during GC, which can lead to unfair scheduling (a thread hogs // the CPU because the tick always arrives during GC). This way // penalises threads that do a lot of allocation, but that seems // better than the alternative. cap->context_switch = 0; /* put the thread back on the run queue. Then, if we're ready to * GC, check whether this is the last task to stop. If so, wake * up the GC thread. getThread will block during a GC until the * GC is finished. */ #ifdef DEBUG if (t->what_next != prev_what_next) { debugTrace(DEBUG_sched, "--<< thread %ld (%s) stopped to switch evaluators", (long)t->id, what_next_strs[t->what_next]); } #endif ASSERT(t->_link == END_TSO_QUEUE); // Shortcut if we're just switching evaluators: don't bother // doing stack squeezing (which can be expensive), just run the // thread. if (t->what_next != prev_what_next) { return rtsTrue; } IF_DEBUG(sanity, //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id); checkTSO(t)); addToRunQueue(cap,t); return rtsFalse; } /* ----------------------------------------------------------------------------- * Handle a thread that returned to the scheduler with ThreadBlocked * -------------------------------------------------------------------------- */ static void scheduleHandleThreadBlocked( StgTSO *t #if !defined(DEBUG) STG_UNUSED #endif ) { // We don't need to do anything. The thread is blocked, and it // has tidied up its stack and placed itself on whatever queue // it needs to be on. // ASSERT(t->why_blocked != NotBlocked); // Not true: for example, // - in THREADED_RTS, the thread may already have been woken // up by another Capability. This actually happens: try // conc023 +RTS -N2. // - the thread may have woken itself up already, because // threadPaused() might have raised a blocked throwTo // exception, see maybePerformBlockedException(). #ifdef DEBUG traceThreadStatus(DEBUG_sched, t); #endif } /* ----------------------------------------------------------------------------- * Handle a thread that returned to the scheduler with ThreadFinished * -------------------------------------------------------------------------- */ static rtsBool scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t) { /* Need to check whether this was a main thread, and if so, * return with the return value. * * We also end up here if the thread kills itself with an * uncaught exception, see Exception.cmm. */ debugTrace(DEBUG_sched, "--++ thread %lu finished", (unsigned long)t->id); // , whatNext_strs[t->what_next]); IF_PAR_DEBUG(verbose, debugBelch("--++ thread %lu finished\n", (unsigned long)t->id)); // blocked exceptions can now complete, even if the thread was in // blocked mode (see #2910). This unconditionally calls // lockTSO(), which ensures that we don't miss any threads that // are engaged in throwTo() with this thread as a target. 
awakenBlockedExceptionQueue (cap, t); #if defined(PARALLEL_RTS) // Remove thread from its process: completed or killed // threads are left on the run_queue in order for the // garbage collection to find and evacuate them. removeTSO(t->id); #endif // // Check whether the thread that just completed was a bound // thread, and if so return with the result. // // There is an assumption here that all thread completion goes // through this point; we need to make sure that if a thread // ends up in the ThreadKilled state, that it stays on the run // queue so it can be dealt with here. // if (t->bound) { if (t->bound != task) { #if !defined(THREADED_RTS) // Must be a bound thread that is not the topmost one. Leave // it on the run queue until the stack has unwound to the // point where we can deal with this. Leaving it on the run // queue also ensures that the garbage collector knows about // this thread and its return value (it gets dropped from the // step->threads list so there's no other way to find it). appendToRunQueue(cap,t); return rtsFalse; #else // this cannot happen in the threaded RTS, because a // bound thread can only be run by the appropriate Task. barf("finished bound thread that isn't mine"); #endif } #if !defined(PARALLEL_RTS) // TODO: use a different assertion for parallel code here ASSERT(task->tso == t); #endif if (t->what_next == ThreadComplete) { if (task->ret) { // NOTE: return val is tso->sp[1] (see StgStartup.hc) *(task->ret) = (StgClosure *)task->tso->sp[1]; } task->stat = Success; } else { if (task->ret) { *(task->ret) = NULL; } if (sched_state >= SCHED_INTERRUPTING) { if (heap_overflow) { task->stat = HeapExhausted; } else { task->stat = Interrupted; } } else { task->stat = Killed; } } #ifdef DEBUG removeThreadLabel((StgWord)task->tso->id); #endif return rtsTrue; // tells schedule() to return } return rtsFalse; } /* ----------------------------------------------------------------------------- * Perform a heap census * -------------------------------------------------------------------------- */ static rtsBool scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED ) { // When we have +RTS -i0 and we're heap profiling, do a census at // every GC. This lets us get repeatable runs for debugging. if (performHeapProfile || (RtsFlags.ProfFlags.profileInterval==0 && RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) { return rtsTrue; } else { return rtsFalse; } } /* ----------------------------------------------------------------------------- * Perform a garbage collection if necessary * -------------------------------------------------------------------------- */ static Capability * scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major) { rtsBool heap_census; #ifdef THREADED_RTS /* extern static volatile StgWord waiting_for_gc; lives inside capability.c */ rtsBool gc_type, prev_pending_gc; nat i; #endif if (sched_state == SCHED_SHUTTING_DOWN) { // The final GC has already been done, and the system is // shutting down. We'll probably deadlock if we try to GC // now. return cap; } #ifdef THREADED_RTS if (sched_state < SCHED_INTERRUPTING && RtsFlags.ParFlags.parGcEnabled && N >= RtsFlags.ParFlags.parGcGen && ! oldest_gen->mark) { gc_type = PENDING_GC_PAR; } else { gc_type = PENDING_GC_SEQ; } // In order to GC, there must be no threads running Haskell code. // Therefore, the GC thread needs to hold *all* the capabilities, // and release them after the GC has completed. 
// // This seems to be the simplest way: previous attempts involved // making all the threads with capabilities give up their // capabilities and sleep except for the *last* one, which // actually did the GC. But it's quite hard to arrange for all // the other tasks to sleep and stay asleep. // /* Other capabilities are prevented from running yet more Haskell threads if waiting_for_gc is set. Tested inside yieldCapability() and releaseCapability() in Capability.c */ prev_pending_gc = cas(&waiting_for_gc, 0, gc_type); if (prev_pending_gc) { do { debugTrace(DEBUG_sched, "someone else is trying to GC (%d)...", prev_pending_gc); ASSERT(cap); yieldCapability(&cap,task); } while (waiting_for_gc); return cap; // NOTE: task->cap might have changed here } setContextSwitches(); // The final shutdown GC is always single-threaded, because it's // possible that some of the Capabilities have no worker threads. if (gc_type == PENDING_GC_SEQ) { traceEventRequestSeqGc(cap); } else { traceEventRequestParGc(cap); debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads"); } // do this while the other Capabilities stop: if (cap) scheduleCheckBlackHoles(cap); if (gc_type == PENDING_GC_SEQ) { // single-threaded GC: grab all the capabilities for (i=0; i < n_capabilities; i++) { debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities); if (cap != &capabilities[i]) { Capability *pcap = &capabilities[i]; // we better hope this task doesn't get migrated to // another Capability while we're waiting for this one. // It won't, because load balancing happens while we have // all the Capabilities, but even so it's a slightly // unsavoury invariant. task->cap = pcap; waitForReturnCapability(&pcap, task); if (pcap != &capabilities[i]) { barf("scheduleDoGC: got the wrong capability"); } } } } else { // multi-threaded GC: make sure all the Capabilities donate one // GC thread each. waitForGcThreads(cap); } #else /* !THREADED_RTS */ // do this while the other Capabilities stop: if (cap) scheduleCheckBlackHoles(cap); #endif IF_DEBUG(scheduler, printAllThreads()); delete_threads_and_gc: /* * We now have all the capabilities; if we're in an interrupting * state, then we should take the opportunity to delete all the * threads in the system. */ if (sched_state == SCHED_INTERRUPTING) { deleteAllThreads(cap); sched_state = SCHED_SHUTTING_DOWN; } heap_census = scheduleNeedHeapProfile(rtsTrue); traceEventGcStart(cap); #if defined(THREADED_RTS) // reset waiting_for_gc *before* GC, so that when the GC threads // emerge they don't immediately re-enter the GC. waiting_for_gc = 0; GarbageCollect(force_major || heap_census, gc_type, cap); #else GarbageCollect(force_major || heap_census, 0, cap); #endif traceEventGcEnd(cap); if (recent_activity == ACTIVITY_INACTIVE && force_major) { // We are doing a GC because the system has been idle for a // timeslice and we need to check for deadlock. Record the // fact that we've done a GC and turn off the timer signal; // it will get re-enabled if we run any threads after the GC. 
recent_activity = ACTIVITY_DONE_GC; stopTimer(); } else { // the GC might have taken long enough for the timer to set // recent_activity = ACTIVITY_INACTIVE, but we aren't // necessarily deadlocked: recent_activity = ACTIVITY_YES; } #if defined(THREADED_RTS) if (gc_type == PENDING_GC_PAR) { releaseGCThreads(cap); } #endif if (heap_census) { debugTrace(DEBUG_sched, "performing heap census"); heapCensus(); performHeapProfile = rtsFalse; } if (heap_overflow && sched_state < SCHED_INTERRUPTING) { // GC set the heap_overflow flag, so we should proceed with // an orderly shutdown now. Ultimately we want the main // thread to return to its caller with HeapExhausted, at which // point the caller should call hs_exit(). The first step is // to delete all the threads. // // Another way to do this would be to raise an exception in // the main thread, which we really should do because it gives // the program a chance to clean up. But how do we find the // main thread? It should presumably be the same one that // gets ^C exceptions, but that's all done on the Haskell side // (GHC.TopHandler). sched_state = SCHED_INTERRUPTING; goto delete_threads_and_gc; } #ifdef SPARKBALANCE /* JB Once we are all together... this would be the place to balance all spark pools. No concurrent stealing or adding of new sparks can occur. Should be defined in Sparks.c. */ balanceSparkPoolsCaps(n_capabilities, capabilities); #endif #if defined(THREADED_RTS) if (gc_type == PENDING_GC_SEQ) { // release our stash of capabilities. for (i = 0; i < n_capabilities; i++) { if (cap != &capabilities[i]) { task->cap = &capabilities[i]; releaseCapability(&capabilities[i]); } } } if (cap) { task->cap = cap; } else { task->cap = NULL; } #endif #if defined(PARALLEL_RTS) // NB: rebuildLAGAtable in now called from GC.c:752 #endif return cap; } /* --------------------------------------------------------------------------- * Singleton fork(). Do not copy any running threads. * ------------------------------------------------------------------------- */ pid_t forkProcess(HsStablePtr *entry #ifndef FORKPROCESS_PRIMOP_SUPPORTED STG_UNUSED #endif ) { #ifdef FORKPROCESS_PRIMOP_SUPPORTED Task *task; pid_t pid; StgTSO* t,*next; Capability *cap; nat g; #if defined(THREADED_RTS) if (RtsFlags.ParFlags.nNodes > 1) { errorBelch("forking not supported with +RTS -N greater than 1"); stg_exit(EXIT_FAILURE); } #endif debugTrace(DEBUG_sched, "forking!"); // ToDo: for SMP, we should probably acquire *all* the capabilities cap = rts_lock(); // no funny business: hold locks while we fork, otherwise if some // other thread is holding a lock when the fork happens, the data // structure protected by the lock will forever be in an // inconsistent state in the child. See also #1391. ACQUIRE_LOCK(&sched_mutex); ACQUIRE_LOCK(&cap->lock); ACQUIRE_LOCK(&cap->running_task->lock); pid = fork(); if (pid) { // parent RELEASE_LOCK(&sched_mutex); RELEASE_LOCK(&cap->lock); RELEASE_LOCK(&cap->running_task->lock); // just return the pid rts_unlock(cap); return pid; } else { // child #if defined(THREADED_RTS) initMutex(&sched_mutex); initMutex(&cap->lock); initMutex(&cap->running_task->lock); #endif // Now, all OS threads except the thread that forked are // stopped. We need to stop all Haskell threads, including // those involved in foreign calls. Also we need to delete // all Tasks, because they correspond to OS threads that are // now gone. 
for (g = 0; g < RtsFlags.GcFlags.generations; g++) { for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) { if (t->what_next == ThreadRelocated) { next = t->_link; } else { next = t->global_link; // don't allow threads to catch the ThreadKilled // exception, but we do want to raiseAsync() because these // threads may be evaluating thunks that we need later. deleteThread_(cap,t); } } } // Empty the run queue. It seems tempting to let all the // killed threads stay on the run queue as zombies to be // cleaned up later, but some of them correspond to bound // threads for which the corresponding Task does not exist. cap->run_queue_hd = END_TSO_QUEUE; cap->run_queue_tl = END_TSO_QUEUE; // Any suspended C-calling Tasks are no more, their OS threads // don't exist now: cap->suspended_ccalling_tasks = NULL; // Empty the threads lists. Otherwise, the garbage // collector may attempt to resurrect some of these threads. for (g = 0; g < RtsFlags.GcFlags.generations; g++) { generations[g].threads = END_TSO_QUEUE; } // Wipe the task list, except the current Task. ACQUIRE_LOCK(&sched_mutex); for (task = all_tasks; task != NULL; task=task->all_link) { if (task != cap->running_task) { #if defined(THREADED_RTS) initMutex(&task->lock); // see #1391 #endif discardTask(task); } } RELEASE_LOCK(&sched_mutex); #if defined(THREADED_RTS) // Wipe our spare workers list, they no longer exist. New // workers will be created if necessary. cap->spare_workers = NULL; cap->returning_tasks_hd = NULL; cap->returning_tasks_tl = NULL; #endif // On Unix, all timers are reset in the child, so we need to start // the timer again. initTimer(); startTimer(); #if defined(THREADED_RTS) cap = ioManagerStartCap(cap); #endif cap = rts_evalStableIO(cap, entry, NULL); // run the action rts_checkSchedStatus("forkProcess",cap); rts_unlock(cap); hs_exit(); // clean up and exit stg_exit(EXIT_SUCCESS); } #else /* !FORKPROCESS_PRIMOP_SUPPORTED */ barf("forkProcess#: primop not supported on this platform, sorry!\n"); #endif } /* --------------------------------------------------------------------------- * Delete all the threads in the system * ------------------------------------------------------------------------- */ static void deleteAllThreads ( Capability *cap ) { // NOTE: only safe to call if we own all capabilities. StgTSO* t, *next; nat g; debugTrace(DEBUG_sched,"deleting all threads"); for (g = 0; g < RtsFlags.GcFlags.generations; g++) { for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) { if (t->what_next == ThreadRelocated) { next = t->_link; } else { next = t->global_link; deleteThread(cap,t); } } } // The run queue now contains a bunch of ThreadKilled threads. We // must not throw these away: the main thread(s) will be in there // somewhere, and the main scheduler loop has to deal with it. // Also, the run queue is the only thing keeping these threads from // being GC'd, and we don't want the "main thread has been GC'd" panic. #if !defined(THREADED_RTS) ASSERT(blocked_queue_hd == END_TSO_QUEUE); ASSERT(sleeping_queue == END_TSO_QUEUE); #endif } /* ----------------------------------------------------------------------------- Managing the suspended_ccalling_tasks list. 
Locks required: sched_mutex -------------------------------------------------------------------------- */ STATIC_INLINE void suspendTask (Capability *cap, Task *task) { ASSERT(task->next == NULL && task->prev == NULL); task->next = cap->suspended_ccalling_tasks; task->prev = NULL; if (cap->suspended_ccalling_tasks) { cap->suspended_ccalling_tasks->prev = task; } cap->suspended_ccalling_tasks = task; } STATIC_INLINE void recoverSuspendedTask (Capability *cap, Task *task) { if (task->prev) { task->prev->next = task->next; } else { ASSERT(cap->suspended_ccalling_tasks == task); cap->suspended_ccalling_tasks = task->next; } if (task->next) { task->next->prev = task->prev; } task->next = task->prev = NULL; } /* --------------------------------------------------------------------------- * Suspending & resuming Haskell threads. * * When making a "safe" call to C (aka _ccall_GC), the task gives back * its capability before calling the C function. This allows another * task to pick up the capability and carry on running Haskell * threads. It also means that if the C call blocks, it won't lock * the whole system. * * The Haskell thread making the C call is put to sleep for the * duration of the call, on the susepended_ccalling_threads queue. We * give out a token to the task, which it can use to resume the thread * on return from the C function. * ------------------------------------------------------------------------- */ void * suspendThread (StgRegTable *reg) { Capability *cap; int saved_errno; StgTSO *tso; Task *task; #if mingw32_HOST_OS StgWord32 saved_winerror; #endif saved_errno = errno; #if mingw32_HOST_OS saved_winerror = GetLastError(); #endif /* assume that *reg is a pointer to the StgRegTable part of a Capability. */ cap = regTableToCapability(reg); task = cap->running_task; tso = cap->r.rCurrentTSO; traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL); // XXX this might not be necessary --SDM tso->what_next = ThreadRunGHC; threadPaused(cap,tso); if ((tso->flags & TSO_BLOCKEX) == 0) { tso->why_blocked = BlockedOnCCall; tso->flags |= TSO_BLOCKEX; tso->flags &= ~TSO_INTERRUPTIBLE; } else { tso->why_blocked = BlockedOnCCall_NoUnblockExc; } // Hand back capability task->suspended_tso = tso; ACQUIRE_LOCK(&cap->lock); suspendTask(cap,task); cap->in_haskell = rtsFalse; releaseCapability_(cap,rtsFalse); RELEASE_LOCK(&cap->lock); errno = saved_errno; #if mingw32_HOST_OS SetLastError(saved_winerror); #endif return task; } StgRegTable * resumeThread (void *task_) { StgTSO *tso; Capability *cap; Task *task = task_; int saved_errno; #if mingw32_HOST_OS StgWord32 saved_winerror; #endif saved_errno = errno; #if mingw32_HOST_OS saved_winerror = GetLastError(); #endif cap = task->cap; // Wait for permission to re-enter the RTS with the result. waitForReturnCapability(&cap,task); // we might be on a different capability now... but if so, our // entry on the suspended_ccalling_tasks list will also have been // migrated. 
// Remove the thread from the suspended list recoverSuspendedTask(cap,task); tso = task->suspended_tso; task->suspended_tso = NULL; tso->_link = END_TSO_QUEUE; // no write barrier reqd traceEventRunThread(cap, tso); if (tso->why_blocked == BlockedOnCCall) { // avoid locking the TSO if we don't have to if (tso->blocked_exceptions != END_TSO_QUEUE) { awakenBlockedExceptionQueue(cap,tso); } tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE); } /* Reset blocking status */ tso->why_blocked = NotBlocked; cap->r.rCurrentTSO = tso; cap->in_haskell = rtsTrue; errno = saved_errno; #if mingw32_HOST_OS SetLastError(saved_winerror); #endif /* We might have GC'd, mark the TSO dirty again */ dirty_TSO(cap,tso); IF_DEBUG(sanity, checkTSO(tso)); return &cap->r; } /* --------------------------------------------------------------------------- * scheduleThread() * * scheduleThread puts a thread on the end of the runnable queue. * This will usually be done immediately after a thread is created. * The caller of scheduleThread must create the thread using e.g. * createThread and push an appropriate closure * on this thread's stack before the scheduler is invoked. * ------------------------------------------------------------------------ */ void scheduleThread(Capability *cap, StgTSO *tso) { // The thread goes at the *end* of the run-queue, to avoid possible // starvation of any threads already on the queue. appendToRunQueue(cap,tso); } void scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso) { #if defined(THREADED_RTS) tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't // move this thread from now on. cpu %= RtsFlags.ParFlags.nNodes; if (cpu == cap->no) { appendToRunQueue(cap,tso); } else { traceEventMigrateThread (cap, tso, capabilities[cpu].no); wakeupThreadOnCapability(cap, &capabilities[cpu], tso); } #else appendToRunQueue(cap,tso); #endif } Capability * scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap) { Task *task; // We already created/initialised the Task task = cap->running_task; // This TSO is now a bound thread; make the Task and TSO // point to each other. tso->bound = task; tso->cap = cap; task->tso = tso; task->ret = ret; task->stat = NoStatus; appendToRunQueue(cap,tso); debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id); cap = schedule(cap,task); ASSERT(task->stat != NoStatus); ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id); return cap; } /* ---------------------------------------------------------------------------- * Starting Tasks * ------------------------------------------------------------------------- */ #if defined(PARALLEL_RTS) // Entry point into the scheduling loop for the parallel runtime // system. Every PE (running instance of the runtime system) except // the main PE (the one which was first started) will call this // function to run an (initially empty) scheduler. Mostly similar to // "workerStart" for the threaded runtime system - just below, but the // executing thread operates on the main capability. void startEmptyScheduler(Capability *cap) { // schedule() runs without a lock. cap = schedule(cap, cap->running_task); } #endif #if defined(THREADED_RTS) void OSThreadProcAttr workerStart(Task *task) { Capability *cap; // See startWorkerTask(). 
ACQUIRE_LOCK(&task->lock); cap = task->cap; RELEASE_LOCK(&task->lock); if (RtsFlags.ParFlags.setAffinity) { setThreadAffinity(cap->no, n_capabilities); } // set the thread-local pointer to the Task: taskEnter(task); // schedule() runs without a lock. cap = schedule(cap,task); // On exit from schedule(), we have a Capability, but possibly not // the same one we started with. // During shutdown, the requirement is that after all the // Capabilities are shut down, all workers that are shutting down // have finished workerTaskStop(). This is why we hold on to // cap->lock until we've finished workerTaskStop() below. // // There may be workers still involved in foreign calls; those // will just block in waitForReturnCapability() because the // Capability has been shut down. // ACQUIRE_LOCK(&cap->lock); releaseCapability_(cap,rtsFalse); workerTaskStop(task); RELEASE_LOCK(&cap->lock); } #endif /* --------------------------------------------------------------------------- * initScheduler() * * Initialise the scheduler. This resets all the queues - if the * queues contained any threads, they'll be garbage collected at the * next pass. * * ------------------------------------------------------------------------ */ void initScheduler(void) { #if !defined(THREADED_RTS) blocked_queue_hd = END_TSO_QUEUE; blocked_queue_tl = END_TSO_QUEUE; sleeping_queue = END_TSO_QUEUE; #endif blackhole_queue = END_TSO_QUEUE; #if defined(PARALLEL_RTS) blockedFetches = (GALA*)NULL; #endif sched_state = SCHED_RUNNING; recent_activity = ACTIVITY_YES; #if defined(THREADED_RTS) /* Initialise the mutex and condition variables used by * the scheduler. */ initMutex(&sched_mutex); #endif ACQUIRE_LOCK(&sched_mutex); /* A capability holds the state a native thread needs in * order to execute STG code. At least one capability is * floating around (only THREADED_RTS builds have more than one). */ initCapabilities(); initTaskManager(); #if defined(THREADED_RTS) || defined(PARALLEL_RTS) initSparkPools(); #endif #if defined(THREADED_RTS) /* * Eagerly start one worker to run each Capability, except for * Capability 0. The idea is that we're probably going to start a * bound thread on Capability 0 pretty soon, so we don't want a * worker task hogging it. */ { nat i; Capability *cap; for (i = 1; i < n_capabilities; i++) { cap = &capabilities[i]; ACQUIRE_LOCK(&cap->lock); startWorkerTask(cap, workerStart); RELEASE_LOCK(&cap->lock); } } #endif RELEASE_LOCK(&sched_mutex); } void exitScheduler( rtsBool wait_foreign #if !defined(THREADED_RTS) __attribute__((unused)) #endif ) /* see Capability.c, shutdownCapability() */ { Task *task = NULL; task = newBoundTask(); // If we haven't killed all the threads yet, do it now. if (sched_state < SCHED_SHUTTING_DOWN) { sched_state = SCHED_INTERRUPTING; waitForReturnCapability(&task->cap,task); scheduleDoGC(task->cap,task,rtsFalse); releaseCapability(task->cap); } sched_state = SCHED_SHUTTING_DOWN; #if defined(THREADED_RTS) { nat i; for (i = 0; i < n_capabilities; i++) { shutdownCapability(&capabilities[i], task, wait_foreign); } } #endif boundTaskExiting(task); } void freeScheduler( void ) { nat still_running; ACQUIRE_LOCK(&sched_mutex); still_running = freeTaskManager(); // We can only free the Capabilities if there are no Tasks still // running. We might have a Task about to return from a foreign // call into waitForReturnCapability(), for example (actually, // this should be the *only* thing that a still-running Task can // do at this point, and it will block waiting for the // Capability). 
    if (still_running == 0) {
        freeCapabilities();
        if (n_capabilities != 1) {
            stgFree(capabilities);
        }
    }
    RELEASE_LOCK(&sched_mutex);

#if defined(THREADED_RTS)
    closeMutex(&sched_mutex);
#endif
}

/* -----------------------------------------------------------------------------
   performGC

   This is the interface to the garbage collector from Haskell land.
   We provide this so that external C code can allocate and garbage
   collect when called from Haskell via _ccall_GC.
   -------------------------------------------------------------------------- */

static void
performGC_(rtsBool force_major)
{
    Task *task;

    // We must grab a new Task here, because the existing Task may be
    // associated with a particular Capability, and chained onto the
    // suspended_ccalling_tasks queue.
    task = newBoundTask();

    waitForReturnCapability(&task->cap,task);
    scheduleDoGC(task->cap,task,force_major);
    releaseCapability(task->cap);
    boundTaskExiting(task);
}

void
performGC(void)
{
    performGC_(rtsFalse);
}

void
performMajorGC(void)
{
    performGC_(rtsTrue);
}

/* -----------------------------------------------------------------------------
   Stack overflow

   If the thread has reached its maximum stack size, then raise the
   StackOverflow exception in the offending thread.  Otherwise
   relocate the TSO into a larger chunk of memory and adjust its stack
   size appropriately.
   -------------------------------------------------------------------------- */

static StgTSO *
threadStackOverflow(Capability *cap, StgTSO *tso)
{
    nat new_stack_size, stack_words;
    lnat new_tso_size;
    StgPtr new_sp;
    StgTSO *dest;

    IF_DEBUG(sanity,checkTSO(tso));

    // don't allow throwTo() to modify the blocked_exceptions queue
    // while we are moving the TSO:
    lockClosure((StgClosure *)tso);

    if (tso->stack_size >= tso->max_stack_size
        && !(tso->flags & TSO_BLOCKEX)) {
        // NB. never raise a StackOverflow exception if the thread is
        // inside Control.Exception.block.  It is impractical to protect
        // against stack overflow exceptions, since virtually anything
        // can raise one (even 'catch'), so this is the only sensible
        // thing to do here.  See bug #767.
        //

        if (tso->flags & TSO_SQUEEZED) {
            unlockTSO(tso);
            return tso;
        }
        // #3677: In a stack overflow situation, stack squeezing may
        // reduce the stack size, but we don't know whether it has been
        // reduced enough for the stack check to succeed if we try
        // again.  Fortunately stack squeezing is idempotent, so all we
        // need to do is record whether *any* squeezing happened.  If we
        // are at the stack's absolute -K limit, and stack squeezing
        // happened, then we try running the thread again.  The
        // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
        // squeezing happened or not.

        debugTrace(DEBUG_gc,
                   "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
                   (long)tso->id, tso, (long)tso->stack_size,
                   (long)tso->max_stack_size);
        IF_DEBUG(gc,
                 /* If we're debugging, just print out the top of the stack */
                 printStackChunk(tso->sp,
                                 stg_min(tso->stack+tso->stack_size,
                                         tso->sp+64)));

        // Send this thread the StackOverflow exception
        unlockTSO(tso);
        throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
        return tso;
    }

    // We also want to avoid enlarging the stack if squeezing has
    // already released some of it.  However, we don't want to get into
    // a pathological situation where a thread has a nearly full stack
    // (near its current limit, but not near the absolute -K limit),
    // keeps allocating a little bit, squeezing removes a little bit,
    // and then it runs again.
So to avoid this, if we squeezed *and* // there is still less than BLOCK_SIZE_W words free, then we enlarge // the stack anyway. if ((tso->flags & TSO_SQUEEZED) && ((W_)(tso->sp - tso->stack) >= BLOCK_SIZE_W)) { unlockTSO(tso); return tso; } /* Try to double the current stack size. If that takes us over the * maximum stack size for this thread, then use the maximum instead * (that is, unless we're already at or over the max size and we * can't raise the StackOverflow exception (see above), in which * case just double the size). Finally round up so the TSO ends up as * a whole number of blocks. */ if (tso->stack_size >= tso->max_stack_size) { new_stack_size = tso->stack_size * 2; } else { new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size); } new_tso_size = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) + TSO_STRUCT_SIZE)/sizeof(W_); new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */ new_stack_size = new_tso_size - TSO_STRUCT_SIZEW; debugTrace(DEBUG_sched, "increasing stack size from %ld words to %d.", (long)tso->stack_size, new_stack_size); dest = (StgTSO *)allocate(cap,new_tso_size); TICK_ALLOC_TSO(new_stack_size,0); /* copy the TSO block and the old stack into the new area */ memcpy(dest,tso,TSO_STRUCT_SIZE); stack_words = tso->stack + tso->stack_size - tso->sp; new_sp = (P_)dest + new_tso_size - stack_words; memcpy(new_sp, tso->sp, stack_words * sizeof(W_)); /* relocate the stack pointers... */ dest->sp = new_sp; dest->stack_size = new_stack_size; /* Mark the old TSO as relocated. We have to check for relocated * TSOs in the garbage collector and any primops that deal with TSOs. * * It's important to set the sp value to just beyond the end * of the stack, so we don't attempt to scavenge any part of the * dead TSO's stack. */ tso->what_next = ThreadRelocated; setTSOLink(cap,tso,dest); tso->sp = (P_)&(tso->stack[tso->stack_size]); tso->why_blocked = NotBlocked; unlockTSO(dest); unlockTSO(tso); IF_DEBUG(sanity,checkTSO(dest)); #if 0 IF_DEBUG(scheduler,printTSO(dest)); #endif return dest; } static StgTSO * threadStackUnderflow (Capability *cap, Task *task, StgTSO *tso) { bdescr *bd, *new_bd; lnat free_w, tso_size_w; StgTSO *new_tso; tso_size_w = tso_sizeW(tso); if (tso_size_w < MBLOCK_SIZE_W || // TSO is less than 2 mblocks (since the first mblock is // shorter than MBLOCK_SIZE_W) (tso_size_w - BLOCKS_PER_MBLOCK*BLOCK_SIZE_W) % MBLOCK_SIZE_W != 0 || // or TSO is not a whole number of megablocks (ensuring // precondition of splitLargeBlock() below) (tso_size_w <= round_up_to_mblocks(RtsFlags.GcFlags.initialStkSize)) || // or TSO is smaller than the minimum stack size (rounded up) (nat)(tso->stack + tso->stack_size - tso->sp) > tso->stack_size / 4) // or stack is using more than 1/4 of the available space { // then do nothing return tso; } // don't allow throwTo() to modify the blocked_exceptions queue // while we are moving the TSO: lockClosure((StgClosure *)tso); // this is the number of words we'll free free_w = round_to_mblocks(tso_size_w/2); bd = Bdescr((StgPtr)tso); new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W); bd->free = bd->start + TSO_STRUCT_SIZEW; new_tso = (StgTSO *)new_bd->start; memcpy(new_tso,tso,TSO_STRUCT_SIZE); new_tso->stack_size = new_bd->free - new_tso->stack; // The original TSO was dirty and probably on the mutable // list. The new TSO is not yet on the mutable list, so we better // put it there. 
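    // Clearing the dirty bits that the memcpy above copied from the old
    // TSO ensures that the dirty_TSO() call below really does record
    // new_tso on the mutable list (a TSO that is already marked dirty
    // would not be added again).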
new_tso->dirty = 0; new_tso->flags &= ~TSO_LINK_DIRTY; dirty_TSO(cap, new_tso); debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu", (long)tso->id, tso_size_w, tso_sizeW(new_tso)); tso->what_next = ThreadRelocated; tso->_link = new_tso; // no write barrier reqd: same generation // The TSO attached to this Task may have moved, so update the // pointer to it. if (task->tso == tso) { task->tso = new_tso; } unlockTSO(new_tso); unlockTSO(tso); IF_DEBUG(sanity,checkTSO(new_tso)); return new_tso; } /* --------------------------------------------------------------------------- Interrupt execution - usually called inside a signal handler so it mustn't do anything fancy. ------------------------------------------------------------------------ */ void interruptStgRts(void) { sched_state = SCHED_INTERRUPTING; setContextSwitches(); #if defined(THREADED_RTS) wakeUpRts(); #endif } /* ----------------------------------------------------------------------------- Wake up the RTS This function causes at least one OS thread to wake up and run the scheduler loop. It is invoked when the RTS might be deadlocked, or an external event has arrived that may need servicing (eg. a keyboard interrupt). In the single-threaded RTS we don't do anything here; we only have one thread anyway, and the event that caused us to want to wake up will have interrupted any blocking system call in progress anyway. -------------------------------------------------------------------------- */ #if defined(THREADED_RTS) void wakeUpRts(void) { // This forces the IO Manager thread to wakeup, which will // in turn ensure that some OS thread wakes up and runs the // scheduler loop, which will cause a GC and deadlock check. ioManagerWakeup(); } #endif /* ----------------------------------------------------------------------------- * checkBlackHoles() * * Check the blackhole_queue for threads that can be woken up. We do * this periodically: before every GC, and whenever the run queue is * empty. * * An elegant solution might be to just wake up all the blocked * threads with awakenBlockedQueue occasionally: they'll go back to * sleep again if the object is still a BLACKHOLE. Unfortunately this * doesn't give us a way to tell whether we've actually managed to * wake up any threads, so we would be busy-waiting. 
* * -------------------------------------------------------------------------- */ static rtsBool checkBlackHoles (Capability *cap) { StgTSO **prev, *t; rtsBool any_woke_up = rtsFalse; StgHalfWord type; // blackhole_queue is global: ASSERT_LOCK_HELD(&sched_mutex); debugTrace(DEBUG_sched, "checking threads blocked on black holes"); // ASSUMES: sched_mutex prev = &blackhole_queue; t = blackhole_queue; while (t != END_TSO_QUEUE) { if (t->what_next == ThreadRelocated) { t = t->_link; continue; } ASSERT(t->why_blocked == BlockedOnBlackHole || t->why_blocked == BlockedOnFetchMe || t->why_blocked == BlockedOnFetchMe_NoSend); ASSERT(checkClosure(UNTAG_CLOSURE(t->block_info.closure))); type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type; #if defined(PARALLEL_RTS) if (type == FETCH_ME) { // FM => send message (if not sent already) and stay on queue globalAddr *local_ga, *remote_ga; IF_DEBUG(sanity,checkTSO(t)); switch (t->why_blocked) { case BlockedOnFetchMe: // first time an FM is entered, send Fetch case BlockedOnBlackHole: // if closure is an RBH, also send a Fetch // grab GA from the payload of the FM remote_ga = (globalAddr *)((StgFetchMe *)(t->block_info.closure))->ga; // TODO: enable: ASSERT(LOOKS_LIKE_GA(remote_ga)); /* Assign a brand-new global address to the newly created FMBQ */ /* This was essential in the old GUM, check whether still needed! */ // StgWord tag ; // tag = GET_CLOSURE_TAG(t->block_info.closure); local_ga = makeGlobal(t->block_info.closure, rtsFalse); // tag-safe /* TODO: ENABLE splitWeight below; this was a workaround for a hard-to-find * bug way back in GUM 0.xx !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */ // splitWeight(remote_ga, local_ga); ASSERT(local_ga->pe==thisPE); IF_PAR_DEBUG(verbose, { char str1[MAX_GA_STR_LEN]; char str2[MAX_GA_STR_LEN]; showGA(remote_ga, str1); showGA(local_ga, str2); debugBelch("\t TSO %d is about to send Fetch message from FM %p for GA %s, needed locally by %s\n", t->id, t->block_info.closure, // remote_ga->pe, remote_ga->slot, remote_ga->weight str1, str2);};); // printLAGAtable(); sendFetch(remote_ga, local_ga, 0/*load*/); // now turn it into a FM_BQ, to avoid repeated sends // SET_INFO(t->block_info.closure, &stg_FETCH_ME_BQ_info); // mustafa t->why_blocked = BlockedOnFetchMe_NoSend; // maybe use lockCLosure from SMP.h instead? break; case BlockedOnFetchMe_NoSend: /* nothing; fetch request has been sent already */ break; default: debugTrace(DEBUG_sched, "\t unexpected why_blocked %d for TSO %p (%lu) on closure %p (%s)\n", t->why_blocked, t, (unsigned long)t->id, t->block_info.closure, info_type(UNTAG_CLOSURE(t->block_info.closure))); break; } /* NB: we do NOT take the TSO off the queue here, because we don't have the data, yet. All we did is to ask for the data and we need to wait for the arrival. Once it arrives, the closure will be updated, and the next time around in this loop the TSO will we awoken. 
-- HWL */ prev = &t->_link; t = t->_link; continue; } if (type == RBH) { // RBH => do nothing; once updated, the TSO will be woken debugTrace(DEBUG_sched, "\t still blocked on an RBH %p (TSO %d)\n", t->block_info.closure, t->id); /* another TSO might have sent a FETCH request in this instance of checkBHQ; in this case t->why_blocked needs to be changed from BlockedOnFM to BlockedOnFMnoSend to maintain the global invariant that t->why_blocked==BlockedOnFM ==> get_itbl(t->blocked_info.closure)->type==FETCH_ME */ if (t->why_blocked==BlockedOnFetchMe) { t->why_blocked=BlockedOnFetchMe_NoSend; } prev = &t->_link; t = t->_link; continue; } #endif // the main sequential case if (type != BLACKHOLE && type != CAF_BLACKHOLE) { debugTrace(DEBUG_sched, "\t about to be unblocked from BH %p\n", t->block_info.closure); // TODO: write a RESUME event to the log file IF_DEBUG(sanity,checkTSO(t)); t = unblockOne(cap, t); *prev = t; any_woke_up = rtsTrue; } else { debugTrace(DEBUG_sched, "\t still blocked on BH %p\n", t->block_info.closure); prev = &t->_link; t = t->_link; } } return any_woke_up; } #if defined(PARALLEL_RTS) #if 0 /* ----------------------------------------------------------------------------- checkGhostTSOs is similar to checkBlackHoles, traversing the ghost_TSO_queue. However, since these are no proper TSOs, they must not go on any other queue once the data is available. ----------------------------------------------------------------------------- */ static rtsBool checkGhostTSOs (Capability *cap) { // fprintf(stderr, "\n\n checking Ghost TSOs, blocked GhostTSOson black holes\n\n"); StgTSO **prev, *t, *t0; StgClosure *closure; rtsBool any_woke_up = rtsFalse; StgHalfWord type; // blackhole_queue is global: ASSERT_LOCK_HELD(&sched_mutex); debugTrace(DEBUG_sched, "checking Ghost TSOs, blocked on black holes"); // ASSUMES: sched_mutex prev = &ghost_TSO_queue; t = ghost_TSO_queue; while (t != END_TSO_QUEUE) { if (t->what_next == ThreadRelocated) { t = t->_link; continue; } closure = UNTAG_CLOSURE(t->block_info.closure); // TODO: modify spec of ghostTSO and enable this assertion again: ASSERT(isGhostrTSO(t)); ASSERT(t->why_blocked == BlockedOnFetchMe || t->why_blocked == BlockedOnFetchMe_NoSend || t->why_blocked == BlockedOnRemoteFetch); // initially for a ghost TSO type = get_itbl(closure)->type; /* if a fetch for a FETCH_ME arrives, it is immediately forwarded to the other PE (HLComms.c::processFetch); no GhostTSO is generated in this case, therefore the following assertion */ // NO: it might have been a BH orig, hence the ghost TSO, but then transfered somewhere else, hence now a FETCH_ME: ASSERT(type != FETCH_ME); // ******************************************************* START CODE DUPLICATION from checkBlackholes if (type == FETCH_ME) { // FM => send message (if not sent already) and stay on queue globalAddr fmbq_ga; globalAddr *local_ga, *remote_ga; IF_PAR_DEBUG(verbose, debugBelch("%%%%== ghost TSO blocked on fetch; local data, for which there is a fetch request, has been stolen by another PE; closure=%p (%s)\n", closure, info_type(closure))); // NO: this is a ghost TSO: IF_DEBUG(sanity,checkTSO(t)); switch (t->why_blocked) { case BlockedOnFetchMe: // first time an FM is entered, send Fetch case BlockedOnRemoteFetch: // if it's a ghost TSO, make sure a Fetch is sent now case BlockedOnBlackHole: // if closure is an RBH, also send a Fetch // grab GA from the payload of the FM remote_ga = (globalAddr *)((StgFetchMe *)(t->block_info.closure))->ga; // TODO: enable: 
ASSERT(LOOKS_LIKE_GA(remote_ga)); /* Assign a brand-new global address to the newly created FMBQ */ /* This was essential in the old GUM, check whether still needed! */ // StgWord tag ; // tag = GET_CLOSURE_TAG(t->block_info.closure); local_ga = makeGlobal(t->block_info.closure, rtsFalse); // tag-safe splitWeight(&fmbq_ga, local_ga); IF_PAR_DEBUG(verbose, debugBelch("\t about to send Fetch message from FM %p for GA ((%x, %d, %x))\n", t->block_info.closure, remote_ga->pe, remote_ga->slot, remote_ga->weight)); // printLAGAtable(); sendFetch(remote_ga, &fmbq_ga, 0/*load*/); // now turn it into a FM_BQ, to avoid repeated sends // SET_INFO(t->block_info.closure, &stg_FETCH_ME_BQ_info); // mustafa t->why_blocked = BlockedOnFetchMe_NoSend; // maybe use lockCLosure from SMP.h instead? break; case BlockedOnFetchMe_NoSend: /* nothing; fetch request has been sent already */ break; default: debugTrace(DEBUG_sched, "\t unexpected why_blocked %d for TSO %p (%lu) on closure %p (%s)\n", t->why_blocked, t, (unsigned long)t->id, t->block_info.closure, info_type(UNTAG_CLOSURE(t->block_info.closure))); break; } /* NB: we do NOT take the TSO off the queue here, because we don't have the data, yet. All we did is to ask for the data and we need to wait for the arrival. Once it arrives, the closure will be updated, and the next time around in this loop the TSO will we awoken. -- HWL */ prev = &t->_link; t = t->_link; continue; } if (type == RBH) { // RBH => do nothing; once updated, the TSO will be woken debugTrace(DEBUG_sched, "\t still blocked on an RBH %p (TSO %d)\n", t->block_info.closure, t->id); /* another TSO might have sent a FETCH request in this instance of checkBHQ; in this case t->why_blocked needs to be changed from BlockedOnFM to BlockedOnFMnoSend to maintain the global invariant that t->why_blocked==BlockedOnFM ==> get_itbl(t->blocked_info.closure)->type==FETCH_ME */ if (t->why_blocked==BlockedOnFetchMe) { t->why_blocked=BlockedOnFetchMe_NoSend; } prev = &t->_link; t = t->_link; continue; } // ******************************************************* END CODE DUPLICATION // the main case of a BH having been replaced by the result data if (type != BLACKHOLE && type != CAF_BLACKHOLE) { debugTrace(DEBUG_sched, "\t about to unblock Ghost TSO %d @ %p (serve fetch request) on closure %p for GA ((%u, %d, %x))\n", t->id, t, t->block_info.closure, ((globalAddr*)t->blocked_exceptions)->pe, ((globalAddr*)t->blocked_exceptions)->slot, ((globalAddr*)t->blocked_exceptions)->weight); /* In the past we used a PendingFetches queue here. Now we send the resume message directly */ IF_PAR_DEBUG(fetch, // mpcomm, { char str[MAX_GA_STR_LEN]; showGA(((globalAddr*)t->blocked_exceptions), str); debugBelch("%%%% packAndSendResume for GA %s, data at %p (%s); ghost TSO %d\n", str, closure, info_type(closure), t->id);};); // send a resume right here, right now -- HWL GUM6 packAndSendResume(closure, t); // remove Ghost TSO from queue; no waking up needed (it's garbage now) t0 = t; *prev = t->_link; // take this TSO off the queue t = t->_link; t0->_link = END_TSO_QUEUE; any_woke_up = rtsFalse; } else { debugTrace(DEBUG_sched, "\t Ghost TSO still blocked on BH %p\n", closure); prev = &t->_link; t = t->_link; } } return any_woke_up; } #endif /* ----------------------------------------------------------------------------- checkBlockedFetches is similar to checkBlackHoles, traversing the blockedFetches queue. We separate this queue from the black_hole_queue to isolate parallel specific code. 
   NB: At some point in the past GUM6 used special ghost TSOs for the
   same purpose.  These proved too hairy to maintain and have therefore
   been nuked.
   ----------------------------------------------------------------------------- */

static rtsBool
checkBlockedFetches (Capability *cap)
{
    GALA **prev, *bf, *bf_gc;
    StgClosure *closure;
    rtsBool any_woke_up = rtsFalse;
    StgHalfWord type;

    // blockedFetches queue is global:
    ASSERT_LOCK_HELD(&sched_mutex);

    debugTrace(DEBUG_sched, "checking blockedFetches");

    // ASSUMES: sched_mutex
    prev = &blockedFetches;
    bf = blockedFetches;
    while (bf != (GALA*)NULL) {
        closure = UNTAG_CLOSURE(bf->la);
        type = get_itbl(closure)->type;

        // ******************* START CODE DUPLICATION from checkBlackHoles
        if (type == FETCH_ME) {
            // FM => send message (if not sent already) and stay on queue
            globalAddr *local_ga, *remote_ga;

            IF_PAR_DEBUG(verbose,
                         debugBelch("%%%%== blockedFetch: closure=%p (%s)\n",
                                    closure, info_type(closure)));
            // NO: this is a ghost TSO: IF_DEBUG(sanity,checkTSO(t));

            if (!(bf->preferred)) { // have we sent a fetch already?
                                    // we abuse the preferred field here
                // grab GA from the payload of the FM
                remote_ga = (globalAddr *)((StgFetchMe *)(closure))->ga;
                // TODO: enable: ASSERT(LOOKS_LIKE_GA(remote_ga));

                /* Assign a brand-new global address to the newly created FMBQ */
                /* This was essential in the old GUM, check whether still needed! */
                // StgWord tag;
                // tag = GET_CLOSURE_TAG(t->block_info.closure);
                local_ga = makeGlobal(closure, rtsFalse); // tag-safe
                /* TODO: ENABLE splitWeight below; this was a workaround for a
                 * hard-to-find bug way back in GUM 0.xx !!! */
                // splitWeight(remote_ga, local_ga);
                ASSERT(local_ga->pe==thisPE);

                IF_PAR_DEBUG(verbose,
                             { char str1[MAX_GA_STR_LEN];
                               char str2[MAX_GA_STR_LEN];
                               showGA(remote_ga, str1);
                               showGA(local_ga, str2);
                               debugBelch("\t blockedFetch @ %p is about to send Fetch message from FM %p for GA %s, needed locally by %s\n",
                                          bf, closure,
                                          // remote_ga->pe, remote_ga->slot, remote_ga->weight
                                          str1, str2);};);
                // printLAGAtable();

                sendFetch(remote_ga, local_ga, 0/*load*/);
                bf->preferred = rtsTrue; // remember that we have sent a fetch
            } else {
                /* nothing; fetch request has been sent already */
            }

            /* NB: we do NOT take the blocked fetch off the queue here,
               because we don't have the data, yet.  All we did is to ask
               for the data and we need to wait for the arrival.  Once it
               arrives, the closure will be updated, and the next time
               around in this loop the blocked fetch will be served.
               -- HWL */
            prev = &bf->next;
            bf = bf->next;
            continue;
        }

        if (type == RBH) {
            // RBH => do nothing; once updated, the TSO will be woken
            debugTrace(DEBUG_sched, "\t still blocked on an RBH %p\n",
                       closure);
            /* another TSO might have sent a FETCH request in this instance of
               checkBHQ; in this case t->why_blocked needs to be changed from
               BlockedOnFM to BlockedOnFMnoSend to maintain the global invariant
               that t->why_blocked==BlockedOnFM ==>
                 get_itbl(t->blocked_info.closure)->type==FETCH_ME
            if (t->why_blocked==BlockedOnFetchMe) {
              t->why_blocked=BlockedOnFetchMe_NoSend;
            }
            */
            prev = &bf->next;
            bf = bf->next;
            continue;
        }
        // ******************* END CODE DUPLICATION

        // the main case of a BH having been replaced by the result data
        if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
            debugTrace(DEBUG_sched,
                       "\t about to unblock blockedFetch %p (serve fetch request) on closure %p\n",
                       bf, closure);
            /* In the past we used a PendingFetches queue here.
               Now we send the resume message directly */
            IF_PAR_DEBUG(fetch, // mpcomm,
                         { char str[MAX_GA_STR_LEN];
                           showGA(((globalAddr*)&(bf->ga)), str);
                           debugBelch("%%%% packAndSendResume for GA %s, data at %p (%s); blockedFetch %p\n",
                                      str, closure, info_type(closure), bf);};);

            // send a resume right here, right now -- HWL GUM6
            packAndSendResume(closure, bf);

            // remove bf from the queue, and return it to the freelist
            bf_gc = bf;              // this is now garbage
            *prev = bf->next;        // take this blocked fetch off the queue
            bf = bf->next;           // bf is next elem
            deallocGALA(bf_gc);      // put it on the freelist
            any_woke_up = rtsFalse;
        } else {
            debugTrace(DEBUG_sched, "\t blocked Fetch still blocked on BH %p\n",
                       closure);
            prev = &bf->next;
            bf = bf->next;
        }
    }
    return any_woke_up;
}
#endif

/* -----------------------------------------------------------------------------
   Deleting threads

   This is used for interruption (^C) and forking, and corresponds to
   raising an exception but without letting the thread catch the
   exception.
   -------------------------------------------------------------------------- */

static void
deleteThread (Capability *cap, StgTSO *tso)
{
    // NOTE: must only be called on a TSO that we have exclusive
    // access to, because we will call throwToSingleThreaded() below.
    // The TSO must be on the run queue of the Capability we own, or
    // we must own all Capabilities.

    if (tso->why_blocked != BlockedOnCCall &&
        tso->why_blocked != BlockedOnCCall_NoUnblockExc
#if defined(PARALLEL_RTS)
        && tso->why_blocked != BlockedOnFetchMe
#endif
        ) {
        throwToSingleThreaded(cap,tso,NULL);
    }
}

#ifdef FORKPROCESS_PRIMOP_SUPPORTED
static void
deleteThread_(Capability *cap, StgTSO *tso)
{ // for forkProcess only:
  // like deleteThread(), but we delete threads in foreign calls, too.

    if (tso->why_blocked == BlockedOnCCall ||
        tso->why_blocked == BlockedOnCCall_NoUnblockExc) {
        unblockOne(cap,tso);
        tso->what_next = ThreadKilled;
    } else {
        deleteThread(cap,tso);
    }
}
#endif

/* -----------------------------------------------------------------------------
   raiseExceptionHelper

   This function is called by the raise# primitive, just so that we can
   move some of the tricky bits of raising an exception from C-- into
   C.  Who knows, it might be a useful re-usable thing here too.
   -------------------------------------------------------------------------- */

StgWord
raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
{
    Capability *cap = regTableToCapability(reg);
    StgThunk *raise_closure = NULL;
    StgPtr p, next;
    StgRetInfoTable *info;
    //
    // This closure represents the expression 'raise# E' where E
    // is the exception raised.  It is used to overwrite all the
    // thunks which are currently under evaluation.
    //
    // OLD COMMENT (we don't have MIN_UPD_SIZE now):
    // LDV profiling: stg_raise_info has THUNK as its closure
    // type. Since a THUNK takes at least MIN_UPD_SIZE words in its
    // payload, MIN_UPD_SIZE is more appropriate than 1.  It seems that
    // 1 does not cause any problem unless profiling is performed.
    // However, when LDV profiling goes on, we need to linearly scan
    // the small object pool, where raise_closure is stored, so we should
    // use MIN_UPD_SIZE.
    //
    // raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
    //                                            sizeofW(StgClosure)+1);
    //
    //
    // Walk up the stack, looking for the catch frame.  On the way,
    // we update any closures pointed to from update frames with the
    // raise closure that we just built.
    //
    p = tso->sp;
    while(1) {
        info = get_ret_itbl((StgClosure *)p);
        next = p + stack_frame_sizeW((StgClosure *)p);
        switch (info->i.type) {

        case UPDATE_FRAME:
            // Only create raise_closure if we need to.
            if (raise_closure == NULL) {
                raise_closure =
                    (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
                SET_HDR(raise_closure, &stg_raise_info, CCCS);
                raise_closure->payload[0] = exception;
            }
            UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
            p = next;
            continue;

        case ATOMICALLY_FRAME:
            debugTrace(DEBUG_stm, "found ATOMICALLY_FRAME at %p", p);
            tso->sp = p;
            return ATOMICALLY_FRAME;

        case CATCH_FRAME:
            tso->sp = p;
            return CATCH_FRAME;

        case CATCH_STM_FRAME:
            debugTrace(DEBUG_stm, "found CATCH_STM_FRAME at %p", p);
            tso->sp = p;
            return CATCH_STM_FRAME;

        case STOP_FRAME:
            tso->sp = p;
            return STOP_FRAME;

        case CATCH_RETRY_FRAME:
        default:
            p = next;
            continue;
        }
    }
}

/* -----------------------------------------------------------------------------
   findRetryFrameHelper

   This function is called by the retry# primitive.  It traverses the stack
   leaving tso->sp referring to the frame which should handle the retry.

   This should either be a CATCH_RETRY_FRAME (if the retry# is within an
   orElse#) or should be an ATOMICALLY_FRAME (if the retry# reaches the top
   level).

   We skip CATCH_STM_FRAMEs (aborting and rolling back the nested tx that
   they create) because retries are not considered to be exceptions, despite
   the similar implementation.

   We should not expect to see CATCH_FRAME or STOP_FRAME because those
   should not be created within memory transactions.
   -------------------------------------------------------------------------- */

StgWord
findRetryFrameHelper (StgTSO *tso)
{
    StgPtr           p, next;
    StgRetInfoTable *info;

    p = tso -> sp;
    while (1) {
        info = get_ret_itbl((StgClosure *)p);
        next = p + stack_frame_sizeW((StgClosure *)p);
        switch (info->i.type) {

        case ATOMICALLY_FRAME:
            debugTrace(DEBUG_stm,
                       "found ATOMICALLY_FRAME at %p during retry", p);
            tso->sp = p;
            return ATOMICALLY_FRAME;

        case CATCH_RETRY_FRAME:
            debugTrace(DEBUG_stm,
                       "found CATCH_RETRY_FRAME at %p during retry", p);
            tso->sp = p;
            return CATCH_RETRY_FRAME;

        case CATCH_STM_FRAME: {
            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = trec -> enclosing_trec;
            debugTrace(DEBUG_stm,
                       "found CATCH_STM_FRAME at %p during retry", p);
            debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer);
            stmAbortTransaction(tso -> cap, trec);
            stmFreeAbortedTRec(tso -> cap, trec);
            tso -> trec = outer;
            p = next;
            continue;
        }

        default:
            ASSERT(info->i.type != CATCH_FRAME);
            ASSERT(info->i.type != STOP_FRAME);
            p = next;
            continue;
        }
    }
}

/* -----------------------------------------------------------------------------
   resurrectThreads is called after garbage collection on the list of
   threads found to be garbage.  Each of these threads will be woken
   up and sent a signal: BlockedOnDeadMVar if the thread was blocked
   on an MVar, or NonTermination if the thread was blocked on a Black
   Hole.

   Locks: assumes we hold *all* the capabilities.
   -------------------------------------------------------------------------- */

void
resurrectThreads (StgTSO *threads)
{
    StgTSO *tso, *next;
    Capability *cap;
    generation *gen;

    for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
        next = tso->global_link;

        gen = Bdescr((P_)tso)->gen;
        tso->global_link = gen->threads;
        gen->threads = tso;

        debugTrace(DEBUG_sched,
                   "resurrecting thread %lu", (unsigned long)tso->id);

        // Wake up the thread on the Capability it was last on
        cap = tso->cap;

        switch (tso->why_blocked) {
        case BlockedOnMVar:
            /* Called by GC - sched_mutex lock is currently held. */
            throwToSingleThreaded(cap, tso,
                                  (StgClosure *)blockedIndefinitelyOnMVar_closure);
            break;
        case BlockedOnBlackHole:
            throwToSingleThreaded(cap, tso,
                                  (StgClosure *)nonTermination_closure);
            break;
        case BlockedOnSTM:
            throwToSingleThreaded(cap, tso,
                                  (StgClosure *)blockedIndefinitelyOnSTM_closure);
            break;
        case NotBlocked:
            /* This might happen if the thread was blocked on a black hole
             * belonging to a thread that we've just woken up (raiseAsync
             * can wake up threads, remember...).
             */
            continue;
        case BlockedOnException:
            // throwTo should never block indefinitely: if the target
            // thread dies or completes, throwTo returns.
            barf("resurrectThreads: thread BlockedOnException");
            break;
        default:
            barf("resurrectThreads: thread blocked in a strange way");
        }
    }
}

/* -----------------------------------------------------------------------------
   performPendingThrowTos is called after garbage collection, and
   passed a list of threads that were found to have pending throwTos
   (tso->blocked_exceptions was not empty), and were blocked.
   Normally this doesn't happen, because we would deliver the
   exception directly if the target thread is blocked, but there are
   small windows where it might occur on a multiprocessor (see
   throwTo()).

   NB. we must be holding all the capabilities at this point, just
   like resurrectThreads().
   -------------------------------------------------------------------------- */

void
performPendingThrowTos (StgTSO *threads)
{
    StgTSO *tso, *next;
    Capability *cap;
    Task *task, *saved_task;
    generation *gen;

    task = myTask();
    cap = task->cap;

    for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
        next = tso->global_link;

#if defined(PARALLEL_RTS)
        // TODO: replace this with ASSERT(!isGhostTSO(tso)); we should only
        // call this function with exception_threads, but all ghost TSOs are
        // only on the ghost_TSO_queue
        if (isGhostTSO(tso)) {
            continue;
        }
#endif

        gen = Bdescr((P_)tso)->gen;
        tso->global_link = gen->threads;
        gen->threads = tso;

        debugTrace(DEBUG_sched, "performing blocked throwTo to thread %lu",
                   (unsigned long)tso->id);

        // We must pretend this Capability belongs to the current Task
        // for the time being, as invariants will be broken otherwise.
        // In fact the current Task has exclusive access to the system
        // at this point, so this is just bookkeeping:
        task->cap = tso->cap;
        saved_task = tso->cap->running_task;
        tso->cap->running_task = task;
        maybePerformBlockedException(tso->cap, tso);
        tso->cap->running_task = saved_task;
    }

    // Restore our original Capability:
    task->cap = cap;
}
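
/* -----------------------------------------------------------------------------
   Illustration: how suspendThread()/resumeThread() are meant to be used.

   This fragment is a sketch only, kept out of the build with #if 0.
   It shows the bracket that surrounds a "safe" foreign call: hand the
   Capability back with suspendThread(), make the (possibly blocking)
   C call, then reclaim a Capability with resumeThread().  In reality
   the compiler emits the equivalent calling sequence; the names
   do_safe_ccall and some_blocking_c_function are invented for this
   example.
   -------------------------------------------------------------------------- */
#if 0
static StgRegTable *
do_safe_ccall (StgRegTable *reg)
{
    void *tok;

    // hand back our Capability; other Haskell threads keep running
    tok = suspendThread(reg);

    // the foreign call may block without stalling the rest of the RTS
    some_blocking_c_function();

    // wait for a Capability (possibly a different one) and carry on
    return resumeThread(tok);
}
#endif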