Mirror of https://github.com/Atmosphere-NX/Atmosphere.git

kern: optimize KHandleTable to use indices instead of pointers

Michael Scire 2021-04-07 14:07:10 -07:00 committed by SciresM
parent 4407237f5b
commit b4498734e4
2 changed files with 83 additions and 106 deletions
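
The old layout packed everything into a single Entry: the object pointer plus a union holding either the (linear_id, type) metadata or a pointer to the next free Entry, with the free list chained through those pointers. The new layout splits the table into two parallel arrays, EntryInfo metadata and KAutoObject pointers, and threads the free list through s32 indices instead. Below is a minimal, self-contained sketch of that index-threaded free list; the class and member names are illustrative and a plain void pointer stands in for KAutoObject, so this is not the kernel code itself, just the shape of the technique.

#include <algorithm>
#include <cassert>
#include <cstdint>

// Fixed-size table whose free list is threaded through entry *indices*
// (next_free_index) rather than entry pointers: one array of EntryInfo
// metadata, one array of object pointers, and a free-list head index.
class IndexedTable {
    private:
        static constexpr int32_t MaxTableSize = 1024;

        // While an entry is free, the union holds the next free index;
        // while it is in use, it holds the (linear_id, type) metadata.
        union EntryInfo {
            struct {
                uint16_t linear_id;
                uint16_t type;
            } info;
            int32_t next_free_index;
        };
    private:
        EntryInfo m_entry_infos[MaxTableSize]{};
        void     *m_objects[MaxTableSize]{};   // stands in for KAutoObject *
        int32_t   m_free_head_index = -1;      // -1 == free list empty
        uint16_t  m_table_size      = MaxTableSize;
        uint16_t  m_count           = 0;
        uint16_t  m_max_count       = 0;
    public:
        IndexedTable() {
            // Thread the free list through indices: entry i links to i - 1,
            // so after the loop the head is the highest index.
            for (int32_t i = 0; i < m_table_size; ++i) {
                m_objects[i]                     = nullptr;
                m_entry_infos[i].next_free_index = i - 1;
                m_free_head_index                = i;
            }
        }

        int32_t AllocateEntry() {
            assert(m_count < m_table_size);

            const int32_t index = m_free_head_index;
            m_free_head_index   = m_entry_infos[index].next_free_index;
            m_max_count         = std::max<uint16_t>(m_max_count, ++m_count);
            return index;
        }

        void FreeEntry(int32_t index) {
            assert(m_count > 0);

            m_objects[index]                     = nullptr;
            m_entry_infos[index].next_free_index = m_free_head_index;
            m_free_head_index                    = index;
            --m_count;
        }
};

With indices, a free-list link occupies the same four bytes as the in-use (linear_id, type) metadata, and handle lookup becomes plain array indexing into m_entry_infos/m_objects rather than chasing Entry pointers through a separately tracked table.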

View file

@@ -53,46 +53,29 @@ namespace ams::kern {
                 return pack.Get<HandleEncoded>();
             }
 
-            class Entry {
-                private:
-                    union {
-                        struct {
-                            u16 linear_id;
-                            u16 type;
-                        } info;
-                        Entry *next_free_entry;
-                    } m_meta;
-                    KAutoObject *m_object;
-                public:
-                    constexpr Entry() : m_meta(), m_object(nullptr) { /* ... */ }
-
-                    constexpr ALWAYS_INLINE void SetFree(Entry *next) {
-                        m_object = nullptr;
-                        m_meta.next_free_entry = next;
-                    }
-
-                    constexpr ALWAYS_INLINE void SetUsed(KAutoObject *obj, u16 linear_id, u16 type) {
-                        m_object = obj;
-                        m_meta.info = { linear_id, type };
-                    }
-
-                    constexpr ALWAYS_INLINE KAutoObject *GetObject() const { return m_object; }
-                    constexpr ALWAYS_INLINE Entry *GetNextFreeEntry() const { return m_meta.next_free_entry; }
-                    constexpr ALWAYS_INLINE u16 GetLinearId() const { return m_meta.info.linear_id; }
-                    constexpr ALWAYS_INLINE u16 GetType() const { return m_meta.info.type; }
-            };
+            union EntryInfo {
+                struct {
+                    u16 linear_id;
+                    u16 type;
+                } info;
+                s32 next_free_index;
+
+                constexpr ALWAYS_INLINE u16 GetLinearId() const { return info.linear_id; }
+                constexpr ALWAYS_INLINE u16 GetType() const { return info.type; }
+                constexpr ALWAYS_INLINE s32 GetNextFreeIndex() const { return next_free_index; }
+            };
         private:
-            mutable KSpinLock m_lock;
-            Entry *m_table;
-            Entry *m_free_head;
-            Entry m_entries[MaxTableSize];
+            EntryInfo m_entry_infos[MaxTableSize];
+            KAutoObject *m_objects[MaxTableSize];
+            s32 m_free_head_index;
             u16 m_table_size;
             u16 m_max_count;
             u16 m_next_linear_id;
             u16 m_count;
+            mutable KSpinLock m_lock;
         public:
             constexpr KHandleTable() :
-                m_lock(), m_table(nullptr), m_free_head(nullptr), m_entries(), m_table_size(0), m_max_count(0), m_next_linear_id(MinLinearId), m_count(0)
+                m_entry_infos(), m_objects(), m_free_head_index(-1), m_table_size(0), m_max_count(0), m_next_linear_id(MinLinearId), m_count(0), m_lock()
             { MESOSPHERE_ASSERT_THIS(); }
 
             constexpr NOINLINE Result Initialize(s32 size) {
@@ -101,19 +84,18 @@ namespace ams::kern {
                 R_UNLESS(size <= static_cast<s32>(MaxTableSize), svc::ResultOutOfMemory());
 
                 /* Initialize all fields. */
-                m_table = m_entries;
-                m_max_count = 0;
                 m_table_size = (size <= 0) ? MaxTableSize : size;
                 m_next_linear_id = MinLinearId;
                 m_count = 0;
+                m_max_count = 0;
+                m_free_head_index = -1;
 
                 /* Free all entries. */
-                for (size_t i = 0; i < static_cast<size_t>(m_table_size - 1); i++) {
-                    m_entries[i].SetFree(std::addressof(m_entries[i + 1]));
+                for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
+                    m_objects[i] = nullptr;
+                    m_entry_infos[i].next_free_index = i - 1;
+                    m_free_head_index = i;
                 }
-                m_entries[m_table_size - 1].SetFree(nullptr);
-                m_free_head = std::addressof(m_entries[0]);
 
                 return ResultSuccess();
             }
@@ -134,7 +116,7 @@ namespace ams::kern {
                 if constexpr (std::is_same<T, KAutoObject>::value) {
                     return this->GetObjectImpl(handle);
                 } else {
-                    if (auto *obj = this->GetObjectImpl(handle); obj != nullptr) {
+                    if (auto *obj = this->GetObjectImpl(handle); AMS_LIKELY(obj != nullptr)) {
                         return obj->DynamicCast<T*>();
                     } else {
                         return nullptr;
@@ -256,27 +238,29 @@ namespace ams::kern {
             NOINLINE Result Add(ams::svc::Handle *out_handle, KAutoObject *obj, u16 type);
             NOINLINE void Register(ams::svc::Handle handle, KAutoObject *obj, u16 type);
 
-            constexpr ALWAYS_INLINE Entry *AllocateEntry() {
+            constexpr ALWAYS_INLINE s32 AllocateEntry() {
                 MESOSPHERE_ASSERT_THIS();
                 MESOSPHERE_ASSERT(m_count < m_table_size);
 
-                Entry *entry = m_free_head;
-                m_free_head = entry->GetNextFreeEntry();
+                const auto index = m_free_head_index;
 
-                m_count++;
-                m_max_count = std::max(m_max_count, m_count);
+                m_free_head_index = m_entry_infos[index].GetNextFreeIndex();
+                m_max_count = std::max(m_max_count, ++m_count);
 
-                return entry;
+                return index;
             }
 
-            constexpr ALWAYS_INLINE void FreeEntry(Entry *entry) {
+            constexpr ALWAYS_INLINE void FreeEntry(s32 index) {
                 MESOSPHERE_ASSERT_THIS();
                 MESOSPHERE_ASSERT(m_count > 0);
 
-                entry->SetFree(m_free_head);
-                m_free_head = entry;
+                m_objects[index] = nullptr;
+                m_entry_infos[index].next_free_index = m_free_head_index;
 
-                m_count--;
+                m_free_head_index = index;
+                --m_count;
             }
 
             constexpr ALWAYS_INLINE u16 AllocateLinearId() {
@@ -287,13 +271,7 @@ namespace ams::kern {
                 return id;
             }
 
-            constexpr ALWAYS_INLINE size_t GetEntryIndex(Entry *entry) {
-                const size_t index = entry - m_table;
-                MESOSPHERE_ASSERT(index < m_table_size);
-                return index;
-            }
-
-            constexpr ALWAYS_INLINE Entry *FindEntry(ams::svc::Handle handle) const {
+            constexpr ALWAYS_INLINE bool IsValidHandle(ams::svc::Handle handle) const {
                 MESOSPHERE_ASSERT_THIS();
 
                 /* Unpack the handle. */
@@ -306,38 +284,38 @@ namespace ams::kern {
                 MESOSPHERE_UNUSED(reserved);
 
                 /* Validate our indexing information. */
-                if (raw_value == 0) {
-                    return nullptr;
+                if (AMS_UNLIKELY(raw_value == 0)) {
+                    return false;
                 }
-                if (linear_id == 0) {
-                    return nullptr;
+                if (AMS_UNLIKELY(linear_id == 0)) {
+                    return false;
                 }
-                if (index >= m_table_size) {
-                    return nullptr;
+                if (AMS_UNLIKELY(index >= m_table_size)) {
+                    return false;
                 }
 
-                /* Get the entry, and ensure our serial id is correct. */
-                Entry *entry = std::addressof(m_table[index]);
-                if (entry->GetObject() == nullptr) {
-                    return nullptr;
+                /* Check that there's an object, and our serial id is correct. */
+                if (AMS_UNLIKELY(m_objects[index] == nullptr)) {
+                    return false;
                 }
-                if (entry->GetLinearId() != linear_id) {
-                    return nullptr;
+                if (AMS_UNLIKELY(m_entry_infos[index].GetLinearId() != linear_id)) {
+                    return false;
                 }
 
-                return entry;
+                return true;
             }
 
             constexpr ALWAYS_INLINE KAutoObject *GetObjectImpl(ams::svc::Handle handle) const {
                 MESOSPHERE_ASSERT_THIS();
 
                 /* Handles must not have reserved bits set. */
-                if (GetHandleBitPack(handle).Get<HandleReserved>() != 0) {
+                const auto handle_pack = GetHandleBitPack(handle);
+                if (AMS_UNLIKELY(handle_pack.Get<HandleReserved>() != 0)) {
                     return nullptr;
                 }
 
-                if (Entry *entry = this->FindEntry(handle); entry != nullptr) {
-                    return entry->GetObject();
+                if (AMS_LIKELY(this->IsValidHandle(handle))) {
+                    return m_objects[handle_pack.Get<HandleIndex>()];
                 } else {
                     return nullptr;
                 }
@@ -347,18 +325,17 @@ namespace ams::kern {
                 MESOSPHERE_ASSERT_THIS();
 
                 /* Index must be in bounds. */
-                if (index >= m_table_size || m_table == nullptr) {
+                if (AMS_UNLIKELY(index >= m_table_size)) {
                     return nullptr;
                 }
 
-                /* Ensure entry has an object. */
-                Entry *entry = std::addressof(m_table[index]);
-                if (entry->GetObject() == nullptr) {
+                if (KAutoObject *obj = m_objects[index]; obj != nullptr) {
+                    *out_handle = EncodeHandle(index, m_entry_infos[index].GetLinearId());
+                    return obj;
+                } else {
                     return nullptr;
                 }
-
-                *out_handle = EncodeHandle(index, entry->GetLinearId());
-                return entry->GetObject();
             }
     };

View file

@@ -21,23 +21,18 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();
 
         /* Get the table and clear our record of it. */
-        Entry *saved_table = nullptr;
         u16 saved_table_size = 0;
         {
             KScopedDisableDispatch dd;
             KScopedSpinLock lk(m_lock);
 
-            std::swap(m_table, saved_table);
             std::swap(m_table_size, saved_table_size);
         }
 
         /* Close and free all entries. */
         for (size_t i = 0; i < saved_table_size; i++) {
-            Entry *entry = std::addressof(saved_table[i]);
-
-            if (KAutoObject *obj = entry->GetObject(); obj != nullptr) {
+            if (KAutoObject *obj = m_objects[i]; obj != nullptr) {
                 obj->Close();
-                this->FreeEntry(entry);
             }
         }
@@ -48,12 +43,13 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();
 
         /* Don't allow removal of a pseudo-handle. */
-        if (ams::svc::IsPseudoHandle(handle)) {
+        if (AMS_UNLIKELY(ams::svc::IsPseudoHandle(handle))) {
             return false;
         }
 
         /* Handles must not have reserved bits set. */
-        if (GetHandleBitPack(handle).Get<HandleReserved>() != 0) {
+        const auto handle_pack = GetHandleBitPack(handle);
+        if (AMS_UNLIKELY(handle_pack.Get<HandleReserved>() != 0)) {
             return false;
         }
@@ -63,9 +59,11 @@ namespace ams::kern {
             KScopedDisableDispatch dd;
             KScopedSpinLock lk(m_lock);
 
-            if (Entry *entry = this->FindEntry(handle); entry != nullptr) {
-                obj = entry->GetObject();
-                this->FreeEntry(entry);
+            if (AMS_LIKELY(this->IsValidHandle(handle))) {
+                const auto index = handle_pack.Get<HandleIndex>();
+
+                obj = m_objects[index];
+                this->FreeEntry(index);
             } else {
                 return false;
             }
@@ -87,10 +85,14 @@ namespace ams::kern {
         /* Allocate entry, set output handle. */
         {
             const auto linear_id = this->AllocateLinearId();
-            Entry *entry = this->AllocateEntry();
-            entry->SetUsed(obj, linear_id, type);
+            const auto index = this->AllocateEntry();
+
+            m_entry_infos[index].info = { .linear_id = linear_id, .type = type };
+            m_objects[index] = obj;
+
             obj->Open();
 
-            *out_handle = EncodeHandle(this->GetEntryIndex(entry), linear_id);
+            *out_handle = EncodeHandle(index, linear_id);
         }
 
         return ResultSuccess();
@@ -104,7 +106,7 @@ namespace ams::kern {
         /* Never exceed our capacity. */
         R_UNLESS(m_count < m_table_size, svc::ResultOutOfHandles());
 
-        *out_handle = EncodeHandle(this->GetEntryIndex(this->AllocateEntry()), this->AllocateLinearId());
+        *out_handle = EncodeHandle(this->AllocateEntry(), this->AllocateLinearId());
 
         return ResultSuccess();
     }
@@ -122,13 +124,10 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(linear_id != 0);
         MESOSPHERE_UNUSED(linear_id, reserved);
 
-        if (index < m_table_size) {
-            /* Free the entry. */
+        if (AMS_LIKELY(index < m_table_size)) {
            /* NOTE: This code does not check the linear id. */
-            Entry *entry = std::addressof(m_table[index]);
-            MESOSPHERE_ASSERT(entry->GetObject() == nullptr);
-
-            this->FreeEntry(entry);
+            MESOSPHERE_ASSERT(m_objects[index] == nullptr);
+            this->FreeEntry(index);
         }
     }
@@ -146,12 +145,13 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(linear_id != 0);
         MESOSPHERE_UNUSED(reserved);
 
-        if (index < m_table_size) {
+        if (AMS_LIKELY(index < m_table_size)) {
             /* Set the entry. */
-            Entry *entry = std::addressof(m_table[index]);
-            MESOSPHERE_ASSERT(entry->GetObject() == nullptr);
+            MESOSPHERE_ASSERT(m_objects[index] == nullptr);
 
-            entry->SetUsed(obj, linear_id, type);
+            m_entry_infos[index].info = { .linear_id = linear_id, .type = type };
+            m_objects[index] = obj;
+
             obj->Open();
         }
     }