1
0
Fork 0
mirror of https://github.com/Atmosphere-NX/Atmosphere.git synced 2024-12-22 10:22:08 +00:00
Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp

96 lines
3.8 KiB
C++
Raw Normal View History

/*
 * Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_spin_lock.hpp>
#include <mesosphere/kern_k_current_context.hpp>
#include <mesosphere/kern_k_scoped_lock.hpp>
2020-02-01 00:25:17 +00:00
namespace ams::kern {
class KThread;
template<typename T>
concept KSchedulerLockable = !std::is_reference<T>::value && requires(T) {
{ T::DisableScheduling() } -> std::same_as<void>;
{ T::EnableScheduling(std::declval<u64>()) } -> std::same_as<void>;
{ T::UpdateHighestPriorityThreads() } -> std::convertible_to<u64>;
};
2020-02-01 00:25:17 +00:00
template<typename SchedulerType> requires KSchedulerLockable<SchedulerType>
2020-02-01 00:25:17 +00:00
class KAbstractSchedulerLock {
private:
KAlignedSpinLock m_spin_lock;
s32 m_lock_count;
KThread *m_owner_thread;
2020-02-01 00:25:17 +00:00
public:
constexpr ALWAYS_INLINE KAbstractSchedulerLock() : m_spin_lock(), m_lock_count(0), m_owner_thread(nullptr) { MESOSPHERE_ASSERT_THIS(); }
2020-02-01 00:25:17 +00:00
ALWAYS_INLINE bool IsLockedByCurrentThread() const {
MESOSPHERE_ASSERT_THIS();
return m_owner_thread == GetCurrentThreadPointer();
2020-02-01 00:25:17 +00:00
}
MESOSPHERE_ALWAYS_INLINE_IF_RELEASE void Lock() {
2020-02-01 00:25:17 +00:00
MESOSPHERE_ASSERT_THIS();
if (this->IsLockedByCurrentThread()) {
2023-02-21 15:08:08 +00:00
/* If we already own the lock, the lock count should be > 0. */
/* For debug, ensure this is true. */
MESOSPHERE_ASSERT(m_lock_count > 0);
2020-02-01 00:25:17 +00:00
} else {
/* Otherwise, we want to disable scheduling and acquire the spinlock. */
SchedulerType::DisableScheduling();
m_spin_lock.Lock();
2020-02-01 00:25:17 +00:00
/* For debug, ensure that our state is valid. */
MESOSPHERE_ASSERT(m_lock_count == 0);
MESOSPHERE_ASSERT(m_owner_thread == nullptr);
2020-02-01 00:25:17 +00:00
2023-02-21 15:08:08 +00:00
/* Take ownership of the lock. */
m_owner_thread = GetCurrentThreadPointer();
2020-02-01 00:25:17 +00:00
}
2023-02-21 15:08:08 +00:00
/* Increment the lock count. */
m_lock_count++;
2020-02-01 00:25:17 +00:00
}
MESOSPHERE_ALWAYS_INLINE_IF_RELEASE void Unlock() {
2020-02-01 00:25:17 +00:00
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(m_lock_count > 0);
2020-02-01 00:25:17 +00:00
/* Release an instance of the lock. */
if ((--m_lock_count) == 0) {
2021-10-26 01:34:47 +01:00
/* Perform a memory barrier here. */
cpu::DataMemoryBarrierInnerShareable();
2020-02-01 00:25:17 +00:00
/* We're no longer going to hold the lock. Take note of what cores need scheduling. */
const u64 cores_needing_scheduling = SchedulerType::UpdateHighestPriorityThreads();
/* Note that we no longer hold the lock, and unlock the spinlock. */
m_owner_thread = nullptr;
m_spin_lock.Unlock();
2020-02-01 00:25:17 +00:00
/* Enable scheduling, and perform a rescheduling operation. */
SchedulerType::EnableScheduling(cores_needing_scheduling);
2020-02-01 00:25:17 +00:00
}
}
};
}