Mirror of https://github.com/Atmosphere-NX/Atmosphere.git

Use scoped_lock, etc

TuxSH 2018-11-05 14:12:38 +01:00 committed by Michael Scire
parent 698fa9fcb0
commit cd1f74154d
7 changed files with 21 additions and 21 deletions

@@ -51,13 +51,13 @@ class KObjectAllocator {
     void RegisterObject(T &obj) noexcept
     {
-        std::lock_guard guard{mutex};
+        std::scoped_lock guard{mutex};
         allocatedSet.insert(obj);
     }

     void UnregisterObject(T &obj) noexcept
     {
-        std::lock_guard guard{mutex};
+        std::scoped_lock guard{mutex};
         allocatedSet.erase(obj);
     }
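
Why std::scoped_lock over std::lock_guard: with class template argument deduction (C++17) the single-mutex form is a drop-in replacement, and std::scoped_lock can additionally take several lockables at once, acquiring them with a deadlock-avoidance algorithm. A standalone sketch with illustrative names (not mesosphere code):

```cpp
#include <mutex>
#include <set>

struct Registry {
    std::mutex mutex;
    std::set<int> ids;

    void Register(int id) {
        std::scoped_lock guard{mutex};  // single lockable: same behaviour as std::lock_guard
        ids.insert(id);
    }
};

// The multi-lockable form is what std::lock_guard cannot express:
void MergeInto(Registry &dst, Registry &src) {
    std::scoped_lock guard{dst.mutex, src.mutex};  // locks both without risking deadlock
    dst.ids.insert(src.ids.begin(), src.ids.end());
    src.ids.clear();
}
```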

@@ -8,7 +8,7 @@ namespace mesosphere
 void KAlarm::AddAlarmable(IAlarmable &alarmable)
 {
-    std::lock_guard guard{spinlock};
+    std::scoped_lock guard{spinlock};
     alarmables.insert(alarmable);
     KSystemClock::SetAlarm(alarmables.cbegin()->GetAlarmTime());
@@ -16,7 +16,7 @@ void KAlarm::AddAlarmable(IAlarmable &alarmable)
 void KAlarm::RemoveAlarmable(const IAlarmable &alarmable)
 {
-    std::lock_guard guard{spinlock};
+    std::scoped_lock guard{spinlock};
     alarmables.erase(alarmable);
     KSystemClock::SetAlarm(alarmables.cbegin()->GetAlarmTime());
@@ -26,8 +26,8 @@ void KAlarm::HandleAlarm()
 {
     {
         KCriticalSection &critsec = KScheduler::GetCriticalSection();
-        std::lock_guard criticalSection{critsec};
-        std::lock_guard guard{spinlock};
+        std::scoped_lock criticalSection{critsec};
+        std::scoped_lock guard{spinlock};
         KSystemClock::SetInterruptMasked(true); // mask timer interrupt
         KSystemClock::time_point currentTime = KSystemClock::now(), maxAlarmTime;
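
Note that std::scoped_lock, like std::lock_guard, only requires its arguments to satisfy BasicLockable (lock()/unlock()), which is why the kernel's spinlock and KCriticalSection types can be guarded with it directly. A hypothetical minimal BasicLockable spinlock, just to illustrate the requirement (mesosphere's real KSpinLock differs):

```cpp
#include <atomic>
#include <mutex>

class SpinLock {
public:
    void lock() noexcept {
        while (flag_.test_and_set(std::memory_order_acquire)) {
            // busy-wait until the current holder calls unlock()
        }
    }
    void unlock() noexcept {
        flag_.clear(std::memory_order_release);
    }
private:
    std::atomic_flag flag_ = ATOMIC_FLAG_INIT;
};

SpinLock g_spinlock;

void DoProtectedWork() {
    std::scoped_lock guard{g_spinlock};  // accepted because SpinLock is BasicLockable
    // ... touch state protected by g_spinlock ...
}
```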

@@ -8,27 +8,27 @@ KResourceLimit KResourceLimit::defaultInstance{};
 size_t KResourceLimit::GetCurrentValue(KResourceLimit::Category category) const
 {
     // Caller should check category
-    std::lock_guard guard{condvar.mutex()};
+    std::scoped_lock guard{condvar.mutex()};
     return currentValues[(uint)category];
 }

 size_t KResourceLimit::GetLimitValue(KResourceLimit::Category category) const
 {
     // Caller should check category
-    std::lock_guard guard{condvar.mutex()};
+    std::scoped_lock guard{condvar.mutex()};
     return limitValues[(uint)category];
 }

 size_t KResourceLimit::GetRemainingValue(KResourceLimit::Category category) const
 {
     // Caller should check category
-    std::lock_guard guard{condvar.mutex()};
+    std::scoped_lock guard{condvar.mutex()};
     return limitValues[(uint)category] - currentValues[(uint)category];
 }

 bool KResourceLimit::SetLimitValue(KResourceLimit::Category category, size_t value)
 {
-    std::lock_guard guard{condvar.mutex()};
+    std::scoped_lock guard{condvar.mutex()};
     if ((long)value < 0 || currentValues[(uint)category] > value) {
         return false;
     } else {
@@ -40,7 +40,7 @@ bool KResourceLimit::SetLimitValue(KResourceLimit::Category category, size_t value)
 void KResourceLimit::Release(KResourceLimit::Category category, size_t count, size_t realCount)
 {
     // Caller should ensure parameters are correct
-    std::lock_guard guard{condvar.mutex()};
+    std::scoped_lock guard{condvar.mutex()};
     currentValues[(uint)category] -= count;
     realValues[(uint)category] -= realCount;
     condvar.notify_all();
@@ -48,7 +48,7 @@ void KResourceLimit::Release(KResourceLimit::Category category, size_t count, size_t realCount)
 bool KResourceLimit::ReserveDetail(KResourceLimit::Category category, size_t count, const KSystemClock::time_point &timeoutTime)
 {
-    std::lock_guard guard{condvar.mutex()};
+    std::scoped_lock guard{condvar.mutex()};
     if ((long)count <= 0 || realValues[(uint)category] >= limitValues[(uint)category]) {
         return false;
     }

@@ -20,7 +20,7 @@ SharedPtr<KAutoObject> KHandleTable::GetAutoObject(Handle handle) const
         // Note: official kernel locks the spinlock here, but we don't need to.
         return nullptr;
     } else {
-        std::lock_guard guard{spinlock};
+        std::scoped_lock guard{spinlock};
         return IsValid(handle) ? entries[handle.index].object : nullptr;
     }
 }
@@ -50,7 +50,7 @@ bool KHandleTable::Close(Handle handle)
     if (handle.IsAliasOrFree()) {
         return false;
     } else {
-        std::lock_guard guard{spinlock};
+        std::scoped_lock guard{spinlock};
         if (IsValid(handle)) {
             entries[-firstFreeIndex].id = firstFreeIndex;
             firstFreeIndex = -(s16)handle.index;
@@ -67,7 +67,7 @@ bool KHandleTable::Generate(Handle &out, SharedPtr<KAutoObject> obj)
 {
     // Note: nullptr is accepted, for deferred-init.
-    std::lock_guard guard{spinlock};
+    std::scoped_lock guard{spinlock};
     if (numActive >= capacity) {
         return false; // caller should return 0xD201
     }
@@ -93,7 +93,7 @@ bool KHandleTable::Generate(Handle &out, SharedPtr<KAutoObject> obj)
 bool KHandleTable::Set(SharedPtr<KAutoObject> obj, Handle handle)
 {
     if (!handle.IsAliasOrFree() && IsValid(handle)) {
-        std::lock_guard guard{spinlock};
+        std::scoped_lock guard{spinlock};
         entries[handle.index].object = std::move(obj);
         return true;
     } else {
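
In KHandleTable the guard is created only on the branch that actually touches the table, so the early-out paths never take the lock, and locking inside a const member such as GetAutoObject only works if the lock member is mutable (or otherwise lockable through const). A standalone sketch of that branch-scoped guard shape, with illustrative names:

```cpp
#include <array>
#include <cstddef>
#include <mutex>
#include <optional>

class Table {
public:
    std::optional<int> Get(std::size_t index) const {
        if (index >= entries_.size()) {
            return std::nullopt;            // early-out path takes no lock at all
        } else {
            std::scoped_lock guard{lock_};  // released automatically at this return
            return entries_[index];
        }
    }

    bool Set(std::size_t index, int value) {
        if (index >= entries_.size()) {
            return false;
        }
        std::scoped_lock guard{lock_};
        entries_[index] = value;
        return true;
    }

private:
    mutable std::mutex lock_;               // mutable so const readers can still lock it
    std::array<int, 64> entries_{};
};
```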

@@ -10,7 +10,7 @@ void KConditionVariable::wait_until_impl(const KSystemClock::time_point &timeoutPoint)
     // Official kernel counts number of waiters, but that isn't necessary
     {
         KThread *currentThread = KCoreContext::GetCurrentInstance().GetCurrentThread();
-        std::lock_guard guard{KScheduler::GetCriticalSection()};
+        KScopedCriticalSection criticalSection{};
         mutex_.unlock();
         if (currentThread->WaitForKernelSync(waiterList)) {
             (void)timeoutPoint; //TODO!
@@ -23,7 +23,7 @@ void KConditionVariable::wait_until_impl(const KSystemClock::time_point &timeoutPoint)
 void KConditionVariable::notify_one() noexcept
 {
-    std::lock_guard guard{KScheduler::GetCriticalSection()};
+    KScopedCriticalSection criticalSection{};
     auto t = waiterList.begin();
     if (t != waiterList.end()) {
         t->ResumeFromKernelSync();
@@ -32,7 +32,7 @@ void KConditionVariable::notify_one() noexcept
 void KConditionVariable::notify_all() noexcept
 {
-    std::lock_guard guard{KScheduler::GetCriticalSection()};
+    KScopedCriticalSection criticalSection{};
     KThread::ResumeAllFromKernelSync(waiterList);
 }
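
KScopedCriticalSection is not defined in this diff; from the way it is used (default-constructed, scope-bound) it is presumably an RAII wrapper that enters the scheduler's critical section on construction and leaves it on destruction, i.e. roughly what std::scoped_lock{KScheduler::GetCriticalSection()} expressed before, without naming the lock at each call site. A plausible sketch under that assumption, with stand-in types so it compiles on its own:

```cpp
#include <mutex>

// Stand-ins for the kernel types; in mesosphere these are KCriticalSection and
// KScheduler::GetCriticalSection().
struct CriticalSection {
    void lock() { m.lock(); }
    void unlock() { m.unlock(); }
    std::mutex m;
};

inline CriticalSection &GetCriticalSection() {
    static CriticalSection cs;
    return cs;
}

// Assumed shape of KScopedCriticalSection, not the actual mesosphere definition.
class ScopedCriticalSection {
public:
    ScopedCriticalSection() { GetCriticalSection().lock(); }
    ~ScopedCriticalSection() { GetCriticalSection().unlock(); }
    ScopedCriticalSection(const ScopedCriticalSection &) = delete;
    ScopedCriticalSection &operator=(const ScopedCriticalSection &) = delete;
};

void NotifyOneSketch() {
    ScopedCriticalSection criticalSection{};  // enter on construction, leave at scope exit
    // ... resume one waiter while inside the critical section ...
}
```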

@@ -9,7 +9,7 @@ void KMutex::lock_slow_path(KThread &owner, KThread &requester)
 {
     // Requester is currentThread most of (all ?) the time
     KCriticalSection &critsec = KScheduler::GetCriticalSection();
-    std::lock_guard criticalSection{critsec};
+    std::scoped_lock criticalSection{critsec};
     if (KCoreContext::GetCurrentInstance().GetScheduler()->IsActive()) {
         requester.SetWantedMutex((uiptr)this);
         owner.AddMutexWaiter(requester);

@@ -32,7 +32,7 @@ void KThread::AdjustScheduling(ushort oldMaskFull)
 void KThread::Reschedule(KThread::SchedulingStatus newStatus)
 {
-    //std::lock_guard criticalSection{KScheduler::GetCriticalSection()};
+    //KScopedCriticalSection criticalSection{};
     // TODO check the above ^
     AdjustScheduling(SetSchedulingStatusField(newStatus));
 }