Author: bonefish Date: 2011-06-04 23:53:17 +0200 (Sat, 04 Jun 2011) New Revision: 41915 Changeset: https://dev.haiku-os.org/changeset/41915 Modified: haiku/branches/developer/bonefish/signals/headers/private/kernel/UserTimer.h haiku/branches/developer/bonefish/signals/headers/private/kernel/cpu.h haiku/branches/developer/bonefish/signals/headers/private/kernel/team.h haiku/branches/developer/bonefish/signals/headers/private/kernel/thread_types.h haiku/branches/developer/bonefish/signals/src/system/kernel/UserTimer.cpp haiku/branches/developer/bonefish/signals/src/system/kernel/scheduler/scheduler.cpp haiku/branches/developer/bonefish/signals/src/system/kernel/team.cpp haiku/branches/developer/bonefish/signals/src/system/kernel/thread.cpp Log: * Added nextThread/previousThread parameter to user_timer_stop_cpu_timers()/ user_timer_continue_cpu_timers(). * Added cpu_ent::previous_thread, so we actually know which thread ran before the current one. * team_get_usage_info(): Renamed to common_get_team_info(), removed the return parameter and made it team.cpp static again. * Added Team::CPUTime() instead. * Implemented TeamTimeUserTimer. * Added Deactivate() methods to TeamTimeUserTimer and ThreadTimeUserTimer, DeactivateCPUTimeUserTimers() methods to Team and Thread and call the latter in thread_exit(), respectively team_shutdown_team(). * thread_exit(): - Update the thread's kernel_time once more before adding it to the team's dead_threads_kernel_time. - Call user_timer_stop_cpu_timers(), so CPU timers for the thread/team get stopped/updated. * Team: - Derive from KernelReferenceable instead of BReferenceable. - Added missing CPU time user timer methods. * Don't allow CPU time clocks or timers for the kernel team. 
Modified: haiku/branches/developer/bonefish/signals/headers/private/kernel/UserTimer.h =================================================================== --- haiku/branches/developer/bonefish/signals/headers/private/kernel/UserTimer.h 2011-06-04 21:25:04 UTC (rev 41914) +++ haiku/branches/developer/bonefish/signals/headers/private/kernel/UserTimer.h 2011-06-04 21:53:17 UTC (rev 41915) @@ -85,11 +85,22 @@ bigtime_t& _interval, uint32& _overrunCount); + void Deactivate(); + + void Update(Thread* unscheduledThread); + +protected: + virtual void HandleTimer(); + private: + void _Update(bool unscheduling); + +private: team_id fTeamID; Team* fTeam; bigtime_t fNextTime; bigtime_t fInterval; + int32 fRunningThreads; public: // conceptually package private @@ -108,6 +119,8 @@ bigtime_t& _interval, uint32& _overrunCount); + void Deactivate(); + void Start(); void Stop(); @@ -173,8 +186,9 @@ status_t user_timer_get_clock(clockid_t clockID, bigtime_t& _time); -void user_timer_stop_cpu_timers(Thread* thread); -void user_timer_continue_cpu_timers(Thread* thread); +void user_timer_stop_cpu_timers(Thread* thread, Thread* nextThread); +void user_timer_continue_cpu_timers(Thread* thread, + Thread* previousThread); status_t _user_get_clock(clockid_t clockID, bigtime_t* _time); status_t _user_set_clock(clockid_t clockID, bigtime_t time); Modified: haiku/branches/developer/bonefish/signals/headers/private/kernel/cpu.h =================================================================== --- haiku/branches/developer/bonefish/signals/headers/private/kernel/cpu.h 2011-06-04 21:25:04 UTC (rev 41914) +++ haiku/branches/developer/bonefish/signals/headers/private/kernel/cpu.h 2011-06-04 21:53:17 UTC (rev 41915) @@ -51,6 +51,7 @@ jmp_buf fault_jump_buffer; Thread* running_thread; + Thread* previous_thread; bool invoke_scheduler; bool invoke_scheduler_if_idle; bool disabled; Modified: haiku/branches/developer/bonefish/signals/headers/private/kernel/team.h 
=================================================================== --- haiku/branches/developer/bonefish/signals/headers/private/kernel/team.h 2011-06-04 21:25:04 UTC (rev 41914) +++ haiku/branches/developer/bonefish/signals/headers/private/kernel/team.h 2011-06-04 21:53:17 UTC (rev 41915) @@ -61,9 +61,6 @@ bool team_associate_data(AssociatedData* data); bool team_dissociate_data(AssociatedData* data); -status_t team_get_usage_info(team_id id, int32 who, team_usage_info* info, - uint32 flags, bigtime_t* _timeOffset); - // used in syscalls.c thread_id _user_load_image(const char* const* flatArgs, size_t flatArgsSize, int32 argCount, int32 envCount, int32 priority, uint32 flags, Modified: haiku/branches/developer/bonefish/signals/headers/private/kernel/thread_types.h =================================================================== --- haiku/branches/developer/bonefish/signals/headers/private/kernel/thread_types.h 2011-06-04 21:25:04 UTC (rev 41914) +++ haiku/branches/developer/bonefish/signals/headers/private/kernel/thread_types.h 2011-06-04 21:53:17 UTC (rev 41915) @@ -205,7 +205,7 @@ }; -struct Team : TeamThreadIteratorEntry<team_id>, BReferenceable, +struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable, AssociatedDataOwner { DoublyLinkedListLink<Team> global_list_link; Team *hash_next; // next in hash @@ -264,9 +264,10 @@ struct team_debug_info debug_info; + // protected by scheduler lock bigtime_t dead_threads_kernel_time; bigtime_t dead_threads_user_time; - bigtime_t cpu_clock_offset; // protected by fLock + bigtime_t cpu_clock_offset; // user group information; protected by fLock, the *_uid/*_gid fields also // by the scheduler lock @@ -362,9 +363,19 @@ bool CheckAddUserDefinedTimer(); void UserDefinedTimersRemoved(int32 count); + void UserTimerActivated(TeamTimeUserTimer* timer) + { fCPUTimeUserTimers.Add(timer); } + void UserTimerDeactivated(TeamTimeUserTimer* timer) + { fCPUTimeUserTimers.Remove(timer); } + void 
DeactivateCPUTimeUserTimers(); bool HasActiveCPUTimeUserTimers() const { return !fCPUTimeUserTimers.IsEmpty(); } + TeamTimeUserTimerList::ConstIterator + CPUTimeUserTimerIterator() const + { return fCPUTimeUserTimers.GetIterator(); } + bigtime_t CPUTime(bool ignoreCurrentRun) const; + private: Team(team_id id, bool kernel); @@ -566,6 +577,7 @@ { fCPUTimeUserTimers.Add(timer); } void UserTimerDeactivated(ThreadTimeUserTimer* timer) { fCPUTimeUserTimers.Remove(timer); } + void DeactivateCPUTimeUserTimers(); bool HasActiveCPUTimeUserTimers() const { return !fCPUTimeUserTimers.IsEmpty(); } ThreadTimeUserTimerList::ConstIterator Modified: haiku/branches/developer/bonefish/signals/src/system/kernel/UserTimer.cpp =================================================================== --- haiku/branches/developer/bonefish/signals/src/system/kernel/UserTimer.cpp 2011-06-04 21:25:04 UTC (rev 41914) +++ haiku/branches/developer/bonefish/signals/src/system/kernel/UserTimer.cpp 2011-06-04 21:53:17 UTC (rev 41915) @@ -347,7 +347,54 @@ TeamTimeUserTimer::Schedule(bigtime_t nextTime, bigtime_t interval, uint32 flags, bigtime_t& _oldRemainingTime, bigtime_t& _oldInterval) { - // TODO:... + InterruptsSpinLocker schedulerLocker(gSchedulerLock); + + // get the current time, but only if needed + bool nowValid = fTeam != NULL; + bigtime_t now = nowValid ? fTeam->CPUTime(false) : 0; + + // Cancel the old timer, if still scheduled, and get the previous values. + if (fTeam != NULL) { + if (fScheduled) { + cancel_timer(&fTimer); + fScheduled = false; + } + + _oldRemainingTime = fNextTime - now; + _oldInterval = fInterval; + + fTeam->UserTimerDeactivated(this); + fTeam->ReleaseReference(); + fTeam = NULL; + } else { + _oldRemainingTime = B_INFINITE_TIMEOUT; + _oldInterval = 0; + } + + // schedule the new timer + fNextTime = nextTime; + fInterval = interval; + + if (fNextTime == B_INFINITE_TIMEOUT) + return; + + // Get the team. 
If it doesn't exist anymore, just don't schedule the + // timer anymore. + fTeam = Team::Get(fTeamID); + if (fTeam == NULL) + return; + + // convert relative to absolute timeouts + if ((flags & B_RELATIVE_TIMEOUT) != 0) { + if (!nowValid) + now = fTeam->CPUTime(false); + fNextTime += now; + } + + fTeam->UserTimerActivated(this); + + // schedule/update the kernel timer + Update(NULL); } @@ -355,10 +402,131 @@ TeamTimeUserTimer::GetInfo(bigtime_t& _remainingTime, bigtime_t& _interval, uint32& _overrunCount) { - // TODO:... + InterruptsSpinLocker schedulerLocker(gSchedulerLock); + + if (fTeam != NULL) { + _remainingTime = fNextTime - fTeam->CPUTime(false); + _interval = fInterval; + } else { + _remainingTime = B_INFINITE_TIMEOUT; + _interval = 0; + } + + _overrunCount = fOverrunCount; } +/*! Deactivates the timer, if it is activated. + + The caller must hold the scheduler lock. +*/ +void +TeamTimeUserTimer::Deactivate() +{ + if (fTeam == NULL) + return; + + // unschedule, if scheduled + if (fScheduled) { + cancel_timer(&fTimer); + fScheduled = false; + } + + // deactivate + fTeam->UserTimerDeactivated(this); + fTeam->ReleaseReference(); + fTeam = NULL; +} + + +/*! Starts/stops the timer as necessary, if it is active. + + Called whenever threads of the team whose CPU time is referred to by the + timer are scheduled or unscheduled (or leave the team), or when the timer + was just set. Schedules a kernel timer for the remaining time, respectively + cancels it. + + The caller must hold the scheduler lock. + + \param unscheduledThread If not \c NULL, this is the thread that is + currently running and which is in the process of being unscheduled. 
+*/ +void +TeamTimeUserTimer::Update(Thread* unscheduledThread) +{ + if (fTeam == NULL) + return; + + // determine how many of the team's threads are currently running + fRunningThreads = 0; + int32 cpuCount = smp_get_num_cpus(); + for (int32 i = 0; i < cpuCount; i++) { + Thread* thread = gCPU[i].running_thread; + if (thread != unscheduledThread && thread->team == fTeam) + fRunningThreads++; + } + + _Update(unscheduledThread != NULL); +} + + +void +TeamTimeUserTimer::HandleTimer() +{ + UserTimer::HandleTimer(); + + // If the timer is not periodic, it is no longer active. Otherwise + // reschedule the kernel timer. + if (fTeam != NULL) { + if (fInterval == 0) { + fTeam->UserTimerDeactivated(this); + fTeam->ReleaseReference(); + fTeam = NULL; + } else + _Update(false); + } +} + + +/*! Schedules/cancels the kernel timer as necessary. + + \c fRunningThreads must be up-to-date. + The caller must hold the scheduler lock. + + \param unscheduling \c true, when the current thread is in the process of + being unscheduled. +*/ +void +TeamTimeUserTimer::_Update(bool unscheduling) +{ + // unschedule the kernel timer, if scheduled + if (fScheduled) + cancel_timer(&fTimer); + + // if no more threads are running, we're done + if (fRunningThreads == 0) { + fScheduled = false; + return; + } + + // There are still threads running. Reschedule the kernel timer. + bigtime_t now = fTeam->CPUTime(unscheduling); + + fTimer.schedule_time = system_time() + + ROUNDUP(fNextTime - now, fRunningThreads); + fTimer.period = 0; + // We reschedule periodic timers manually in HandleTimer() to avoid + // rounding errors. + + add_timer(&fTimer, &_HandleTimerHook, fNextTime, + B_ONE_SHOT_ABSOLUTE_TIMER | B_TIMER_ACQUIRE_SCHEDULER_LOCK); + // We use B_TIMER_ACQUIRE_SCHEDULER_LOCK to avoid race conditions + // between setting/canceling the timer and the event handler. + + fScheduled = true; +} + + // #pragma mark - ThreadTimeUserTimer @@ -450,6 +618,29 @@ } +/*! Deactivates the timer, if it is activated. 
+ + The caller must hold the scheduler lock. +*/ +void +ThreadTimeUserTimer::Deactivate() +{ + if (fThread == NULL) + return; + + // unschedule, if scheduled + if (fScheduled) { + cancel_timer(&fTimer); + fScheduled = false; + } + + // deactivate + fThread->UserTimerDeactivated(this); + fThread->ReleaseReference(); + fThread = NULL; +} + + /*! Starts the timer, if it is active. Called when the thread whose CPU time is referred to by the timer is @@ -666,6 +857,9 @@ // access its clock. if (clockID <= 0) return B_BAD_VALUE; + if (clockID == team_get_kernel_team_id()) + return B_NOT_ALLOWED; + Team* timedTeam = Team::GetAndLock(clockID); if (timedTeam == NULL) return B_BAD_VALUE; @@ -817,8 +1011,7 @@ { Thread* thread = thread_get_current_thread(); InterruptsSpinLocker schedulerLocker(gSchedulerLock); - _time = thread->user_time + thread->kernel_time - + thread->cpu_clock_offset; + _time = thread->CPUTime(false); return B_OK; } @@ -832,18 +1025,22 @@ } else { if (clockID < 0) return B_BAD_VALUE; + if (clockID == team_get_kernel_team_id()) + return B_NOT_ALLOWED; + teamID = clockID; } - // get the usage info - team_usage_info info; - bigtime_t timeOffset; - status_t error = team_get_usage_info(teamID, B_TEAM_USAGE_SELF, - &info, B_CHECK_PERMISSION, &timeOffset); - if (error != B_OK) - return error; + // get the team + Team* team = Team::Get(teamID); + if (team == NULL) + return B_BAD_VALUE; + BReference<Team> teamReference(team, true); - _time = info.user_time + info.kernel_time + timeOffset; + // get the time + InterruptsSpinLocker schedulerLocker(gSchedulerLock); + _time = team->CPUTime(false); + return B_OK; } } @@ -851,7 +1048,7 @@ void -user_timer_stop_cpu_timers(Thread* thread) +user_timer_stop_cpu_timers(Thread* thread, Thread* nextThread) { // stop thread timers for (ThreadTimeUserTimerList::ConstIterator it @@ -860,16 +1057,28 @@ timer->Stop(); } - // stop team timers - // TODO:... 
+ // update team timers + if (nextThread == NULL || nextThread->team != thread->team) { + for (TeamTimeUserTimerList::ConstIterator it + = thread->team->CPUTimeUserTimerIterator(); + TeamTimeUserTimer* timer = it.Next();) { + timer->Update(thread); + } + } } void -user_timer_continue_cpu_timers(Thread* thread) +user_timer_continue_cpu_timers(Thread* thread, Thread* previousThread) { - // start team timers - // TODO:... + // update team timers + if (previousThread == NULL || previousThread->team != thread->team) { + for (TeamTimeUserTimerList::ConstIterator it + = thread->team->CPUTimeUserTimerIterator(); + TeamTimeUserTimer* timer = it.Next();) { + timer->Update(NULL); + } + } // start thread timers for (ThreadTimeUserTimerList::ConstIterator it @@ -939,26 +1148,22 @@ } else { if (clockID < 0) return B_BAD_VALUE; + if (clockID == team_get_kernel_team_id()) + return B_NOT_ALLOWED; + teamID = clockID; } - // get the usage info - team_usage_info info; - bigtime_t timeOffset; - status_t error = team_get_usage_info(teamID, B_TEAM_USAGE_SELF, - &info, B_CHECK_PERMISSION, &timeOffset); - if (error != B_OK) - return error; - - // lock the team and adjust the time offset - Team* team = Team::GetAndLock(teamID); + // get the team + Team* team = Team::Get(teamID); if (team == NULL) return B_BAD_VALUE; + BReference<Team> teamReference(team, true); - team->cpu_clock_offset = time - (info.user_time + info.kernel_time); + // set the time offset + InterruptsSpinLocker schedulerLocker(gSchedulerLock); + team->cpu_clock_offset += time - team->CPUTime(false); - team->UnlockAndReleaseReference(); - return B_OK; } } Modified: haiku/branches/developer/bonefish/signals/src/system/kernel/scheduler/scheduler.cpp =================================================================== --- haiku/branches/developer/bonefish/signals/src/system/kernel/scheduler/scheduler.cpp 2011-06-04 21:25:04 UTC (rev 41914) +++ haiku/branches/developer/bonefish/signals/src/system/kernel/scheduler/scheduler.cpp 
2011-06-04 21:53:17 UTC (rev 41915) @@ -59,7 +59,7 @@ // stop CPU time based user timers if (fromThread->HasActiveCPUTimeUserTimers() || fromThread->team->HasActiveCPUTimeUserTimers()) { - user_timer_stop_cpu_timers(fromThread); + user_timer_stop_cpu_timers(fromThread, toThread); } // update CPU and Thread structures and perform the context switch @@ -67,6 +67,7 @@ toThread->previous_cpu = toThread->cpu = cpu; fromThread->cpu = NULL; cpu->running_thread = toThread; + cpu->previous_thread = fromThread; arch_thread_set_current_thread(toThread); arch_thread_context_switch(fromThread, toThread); @@ -78,7 +79,7 @@ // continue CPU time based user timers if (fromThread->HasActiveCPUTimeUserTimers() || fromThread->team->HasActiveCPUTimeUserTimers()) { - user_timer_continue_cpu_timers(fromThread); + user_timer_continue_cpu_timers(fromThread, cpu->previous_thread); } // notify the user debugger code Modified: haiku/branches/developer/bonefish/signals/src/system/kernel/team.cpp =================================================================== --- haiku/branches/developer/bonefish/signals/src/system/kernel/team.cpp 2011-06-04 21:25:04 UTC (rev 41914) +++ haiku/branches/developer/bonefish/signals/src/system/kernel/team.cpp 2011-06-04 21:53:17 UTC (rev 41915) @@ -886,6 +886,46 @@ } +void +Team::DeactivateCPUTimeUserTimers() +{ + while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head()) + timer->Deactivate(); +} + + +/*! Returns the team's current total CPU time (kernel + user + offset). + + The caller must hold the scheduler lock. + + \param ignoreCurrentRun If \c true and the current thread is one of the team's + threads, don't add the time since the last time \c last_time was + updated. Should be used in "thread unscheduled" scheduler callbacks, + since although the thread is still running at that time, its time has + already been stopped. + \return The team's current total CPU time. 
+*/ +bigtime_t +Team::CPUTime(bool ignoreCurrentRun) const +{ + bigtime_t time = cpu_clock_offset + dead_threads_kernel_time + + dead_threads_user_time; + + for (Thread* thread = thread_list; thread != NULL; + thread = thread->team_next) { + time += thread->kernel_time + thread->user_time; + } + + if (!ignoreCurrentRun) { + Thread* thread = thread_get_current_thread(); + if (thread->team == this) + time += system_time() - thread->last_time; + } + + return time; +} + + // #pragma mark - ProcessGroup @@ -2492,6 +2532,74 @@ } +static status_t +common_get_team_usage_info(team_id id, int32 who, team_usage_info* info, + uint32 flags) +{ + if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN) + return B_BAD_VALUE; + + // get the team + Team* team = Team::GetAndLock(id); + if (team == NULL) + return B_BAD_TEAM_ID; + BReference<Team> teamReference(team, true); + TeamLocker teamLocker(team, true); + + if ((flags & B_CHECK_PERMISSION) != 0) { + uid_t uid = geteuid(); + if (uid != 0 && uid != team->effective_uid) + return B_NOT_ALLOWED; + } + + bigtime_t kernelTime = 0; + bigtime_t userTime = 0; + + switch (who) { + case B_TEAM_USAGE_SELF: + { + Thread* thread = team->thread_list; + + for (; thread != NULL; thread = thread->team_next) { + kernelTime += thread->kernel_time; + userTime += thread->user_time; + } + + kernelTime += team->dead_threads_kernel_time; + userTime += team->dead_threads_user_time; + break; + } + + case B_TEAM_USAGE_CHILDREN: + { + Team* child = team->children; + for (; child != NULL; child = child->siblings_next) { + TeamLocker childLocker(child); + + Thread* thread = team->thread_list; + + for (; thread != NULL; thread = thread->team_next) { + kernelTime += thread->kernel_time; + userTime += thread->user_time; + } + + kernelTime += child->dead_threads_kernel_time; + userTime += child->dead_threads_user_time; + } + + kernelTime += team->dead_children.kernel_time; + userTime += team->dead_children.user_time; + break; + } + } + + info->kernel_time = 
kernelTime; + info->user_time = userTime; + + return B_OK; +} + + // #pragma mark - Private kernel API @@ -2862,6 +2970,14 @@ // delete all timers team->DeleteUserTimers(false); + // deactivate CPU time user timers for the team + InterruptsSpinLocker schedulerLocker(gSchedulerLock); + + if (team->HasActiveCPUTimeUserTimers()) + team->DeactivateCPUTimeUserTimers(); + + schedulerLocker.Unlock(); + // kill all threads but the main thread team_death_entry deathEntry; deathEntry.condition.Init(team, "team death"); @@ -3340,77 +3456,6 @@ } -status_t -team_get_usage_info(team_id id, int32 who, team_usage_info* info, uint32 flags, - bigtime_t* _timeOffset) -{ - if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN) - return B_BAD_VALUE; - - // get the team - Team* team = Team::GetAndLock(id); - if (team == NULL) - return B_BAD_TEAM_ID; - BReference<Team> teamReference(team, true); - TeamLocker teamLocker(team, true); - - if ((flags & B_CHECK_PERMISSION) != 0) { - uid_t uid = geteuid(); - if (uid != 0 && uid != team->effective_uid) - return B_NOT_ALLOWED; - } - - bigtime_t kernelTime = 0; - bigtime_t userTime = 0; - - switch (who) { - case B_TEAM_USAGE_SELF: - { - Thread* thread = team->thread_list; - - for (; thread != NULL; thread = thread->team_next) { - kernelTime += thread->kernel_time; - userTime += thread->user_time; - } - - kernelTime += team->dead_threads_kernel_time; - userTime += team->dead_threads_user_time; - - if (_timeOffset != NULL) - *_timeOffset = team->cpu_clock_offset; - break; - } - - case B_TEAM_USAGE_CHILDREN: - { - Team* child = team->children; - for (; child != NULL; child = child->siblings_next) { - TeamLocker childLocker(child); - - Thread* thread = team->thread_list; - - for (; thread != NULL; thread = thread->team_next) { - kernelTime += thread->kernel_time; - userTime += thread->user_time; - } - - kernelTime += child->dead_threads_kernel_time; - userTime += child->dead_threads_user_time; - } - - kernelTime += 
team->dead_children.kernel_time; - userTime += team->dead_children.user_time; - break; - } - } - - info->kernel_time = kernelTime; - info->user_time = userTime; - - return B_OK; -} - - // #pragma mark - Public kernel API @@ -3573,7 +3618,7 @@ if (size != sizeof(team_usage_info)) return B_BAD_VALUE; - return team_get_usage_info(id, who, info, 0, NULL); + return common_get_team_usage_info(id, who, info, 0); } @@ -4083,8 +4128,8 @@ return B_BAD_VALUE; team_usage_info info; - status_t status = team_get_usage_info(team, who, &info, B_CHECK_PERMISSION, - NULL); + status_t status = common_get_team_usage_info(team, who, &info, + B_CHECK_PERMISSION); if (userInfo == NULL || !IS_USER_ADDRESS(userInfo) || user_memcpy(userInfo, &info, size) != B_OK) { Modified: haiku/branches/developer/bonefish/signals/src/system/kernel/thread.cpp =================================================================== --- haiku/branches/developer/bonefish/signals/src/system/kernel/thread.cpp 2011-06-04 21:25:04 UTC (rev 41914) +++ haiku/branches/developer/bonefish/signals/src/system/kernel/thread.cpp 2011-06-04 21:53:17 UTC (rev 41915) @@ -466,6 +466,14 @@ } +void +Thread::DeactivateCPUTimeUserTimers() +{ + while (ThreadTimeUserTimer* timer = fCPUTimeUserTimers.Head()) + timer->Deactivate(); +} + + // #pragma mark - ThreadListIterator @@ -683,7 +691,7 @@ // start CPU time based user timers if (thread->HasActiveCPUTimeUserTimers() || thread->team->HasActiveCPUTimeUserTimers()) { - user_timer_continue_cpu_timers(thread); + user_timer_continue_cpu_timers(thread, thread->cpu->previous_thread); } // notify the user debugger code @@ -1942,9 +1950,22 @@ // team needs to be an atomic operation // remember how long this thread lasted + bigtime_t now = system_time(); + thread->kernel_time += now - thread->last_time; + thread->last_time = now; team->dead_threads_kernel_time += thread->kernel_time; team->dead_threads_user_time += thread->user_time; + // stop/update thread/team CPU time user timers + if 
(thread->HasActiveCPUTimeUserTimers() + || team->HasActiveCPUTimeUserTimers()) { + user_timer_stop_cpu_timers(thread, NULL); + } + + // deactivate CPU time user timers for the thread + if (thread->HasActiveCPUTimeUserTimers()) + thread->DeactivateCPUTimeUserTimers(); + // put the thread into the kernel team until it dies remove_thread_from_team(team, thread); insert_thread_into_team(kernelTeam, thread);