NVDRV: Fix Open/Close and make sure each device is correctly created.
parent de0e8eff42
commit af35dbcf63

14 changed files with 296 additions and 204 deletions
@@ -3,9 +3,11 @@
 // SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
 // or any later version Refer to the license.txt file included.
 
+#include <bit>
 #include <cstdlib>
 #include <cstring>
 
+#include <fmt/format.h>
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
@@ -22,8 +24,19 @@ namespace Service::Nvidia::Devices {
 nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
                          NvCore::Container& core_)
     : nvdevice{system_}, events_interface{events_interface_}, core{core_},
-      syncpoint_manager{core_.GetSyncpointManager()} {}
-
-nvhost_ctrl::~nvhost_ctrl() = default;
+      syncpoint_manager{core_.GetSyncpointManager()} {
+    events_interface.RegisterForSignal(this);
+}
+
+nvhost_ctrl::~nvhost_ctrl() {
+    events_interface.UnregisterForSignal(this);
+    for (auto& event : events) {
+        if (!event.registered) {
+            continue;
+        }
+        events_interface.FreeEvent(event.kevent);
+    }
+}
 
 NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                              std::vector<u8>& output) {
@@ -87,7 +100,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
     SCOPE_EXIT({
         std::memcpy(output.data(), &params, sizeof(params));
         if (must_unmark_fail) {
-            events_interface.fails[event_id] = 0;
+            events[event_id].fails = 0;
         }
     });
@@ -116,12 +129,12 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
     auto& gpu = system.GPU();
     const u32 target_value = params.fence.value;
 
-    auto lock = events_interface.Lock();
+    auto lock = NvEventsLock();
 
     u32 slot = [&]() {
         if (is_allocation) {
            params.value.raw = 0;
-           return events_interface.FindFreeEvent(fence_id);
+           return FindFreeNvEvent(fence_id);
        } else {
            return params.value.raw;
        }
@@ -130,7 +143,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
     must_unmark_fail = true;
 
     const auto check_failing = [&]() {
-        if (events_interface.fails[slot] > 2) {
+        if (events[slot].fails > 2) {
             {
                 auto lk = system.StallProcesses();
                 gpu.WaitFence(fence_id, target_value);
@@ -142,6 +155,10 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
         return false;
     };
 
+    if (slot >= MaxNvEvents) {
+        return NvResult::BadParameter;
+    }
+
     if (params.timeout == 0) {
         if (check_failing()) {
             return NvResult::Success;
@@ -149,17 +166,13 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
         return NvResult::Timeout;
     }
 
-    if (slot >= MaxNvEvents) {
-        return NvResult::BadParameter;
-    }
-
-    auto* event = events_interface.events[slot];
-
-    if (!event) {
-        return NvResult::BadParameter;
-    }
-
-    if (events_interface.IsBeingUsed(slot)) {
+    auto& event = events[slot];
+
+    if (!event.registered) {
+        return NvResult::BadParameter;
+    }
+
+    if (event.IsBeingUsed()) {
         return NvResult::BadParameter;
     }
@@ -169,9 +182,9 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
 
     params.value.raw = 0;
 
-    events_interface.status[slot].store(EventState::Waiting, std::memory_order_release);
-    events_interface.assigned_syncpt[slot] = fence_id;
-    events_interface.assigned_value[slot] = target_value;
+    event.status.store(EventState::Waiting, std::memory_order_release);
+    event.assigned_syncpt = fence_id;
+    event.assigned_value = target_value;
     if (is_allocation) {
         params.value.syncpoint_id_for_allocation.Assign(static_cast<u16>(fence_id));
         params.value.event_allocated.Assign(1);
@@ -189,15 +202,17 @@ NvResult nvhost_ctrl::FreeEvent(u32 slot) {
         return NvResult::BadParameter;
     }
 
-    if (!events_interface.registered[slot]) {
+    auto& event = events[slot];
+
+    if (!event.registered) {
         return NvResult::Success;
     }
 
-    if (events_interface.IsBeingUsed(slot)) {
+    if (event.IsBeingUsed()) {
         return NvResult::Busy;
     }
 
-    events_interface.Free(slot);
+    FreeNvEvent(slot);
     return NvResult::Success;
 }
@@ -210,15 +225,15 @@ NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::ve
         return NvResult::BadParameter;
     }
 
-    auto lock = events_interface.Lock();
+    auto lock = NvEventsLock();
 
-    if (events_interface.registered[event_id]) {
+    if (events[event_id].registered) {
         const auto result = FreeEvent(event_id);
         if (result != NvResult::Success) {
             return result;
         }
     }
-    events_interface.Create(event_id);
+    CreateNvEvent(event_id);
     return NvResult::Success;
 }
@@ -229,7 +244,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input,
     const u32 event_id = params.user_event_id & 0x00FF;
     LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
 
-    auto lock = events_interface.Lock();
+    auto lock = NvEventsLock();
     return FreeEvent(event_id);
 }
@@ -244,44 +259,121 @@ NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::v
         return NvResult::BadParameter;
     }
 
-    auto lock = events_interface.Lock();
+    auto lock = NvEventsLock();
 
-    if (events_interface.status[event_id].exchange(
-            EventState::Cancelling, std::memory_order_acq_rel) == EventState::Waiting) {
-        system.GPU().CancelSyncptInterrupt(events_interface.assigned_syncpt[event_id],
-                                           events_interface.assigned_value[event_id]);
-        syncpoint_manager.RefreshSyncpoint(events_interface.assigned_syncpt[event_id]);
+    auto& event = events[event_id];
+    if (event.status.exchange(EventState::Cancelling, std::memory_order_acq_rel) ==
+        EventState::Waiting) {
+        system.GPU().CancelSyncptInterrupt(event.assigned_syncpt, event.assigned_value);
+        syncpoint_manager.RefreshSyncpoint(event.assigned_syncpt);
     }
-    events_interface.fails[event_id]++;
-    events_interface.status[event_id].store(EventState::Cancelled, std::memory_order_release);
-    events_interface.events[event_id]->GetWritableEvent().Clear();
+    event.fails++;
+    event.status.store(EventState::Cancelled, std::memory_order_release);
+    event.kevent->GetWritableEvent().Clear();
 
     return NvResult::Success;
 }
 
 Kernel::KEvent* nvhost_ctrl::QueryEvent(u32 event_id) {
-    const auto event = SyncpointEventValue{.raw = event_id};
+    const auto desired_event = SyncpointEventValue{.raw = event_id};
 
-    const bool allocated = event.event_allocated.Value() != 0;
-    const u32 slot{allocated ? event.partial_slot.Value() : static_cast<u32>(event.slot)};
+    const bool allocated = desired_event.event_allocated.Value() != 0;
+    const u32 slot{allocated ? desired_event.partial_slot.Value()
+                             : static_cast<u32>(desired_event.slot)};
     if (slot >= MaxNvEvents) {
         ASSERT(false);
         return nullptr;
     }
 
-    const u32 syncpoint_id{allocated ? event.syncpoint_id_for_allocation.Value()
-                                     : event.syncpoint_id.Value()};
+    const u32 syncpoint_id{allocated ? desired_event.syncpoint_id_for_allocation.Value()
+                                     : desired_event.syncpoint_id.Value()};
 
-    auto lock = events_interface.Lock();
+    auto lock = NvEventsLock();
 
-    if (events_interface.registered[slot] &&
-        events_interface.assigned_syncpt[slot] == syncpoint_id) {
-        ASSERT(events_interface.events[slot]);
-        return events_interface.events[slot];
+    auto& event = events[slot];
+    if (event.registered && event.assigned_syncpt == syncpoint_id) {
+        ASSERT(event.kevent);
+        return event.kevent;
     }
     // Is this possible in hardware?
     ASSERT_MSG(false, "Slot:{}, SyncpointID:{}, requested", slot, syncpoint_id);
     return nullptr;
 }
 
+std::unique_lock<std::mutex> nvhost_ctrl::NvEventsLock() {
+    return std::unique_lock<std::mutex>(events_mutex);
+}
+
+void nvhost_ctrl::CreateNvEvent(u32 event_id) {
+    auto& event = events[event_id];
+    ASSERT(!event.kevent);
+    ASSERT(!event.registered);
+    ASSERT(!event.IsBeingUsed());
+    event.kevent = events_interface.CreateEvent(fmt::format("NVCTRL::NvEvent_{}", event_id));
+    event.status = EventState::Available;
+    event.registered = true;
+    const u64 mask = 1ULL << event_id;
+    event.fails = 0;
+    events_mask |= mask;
+    event.assigned_syncpt = 0;
+}
+
+void nvhost_ctrl::FreeNvEvent(u32 event_id) {
+    auto& event = events[event_id];
+    ASSERT(event.kevent);
+    ASSERT(event.registered);
+    ASSERT(!event.IsBeingUsed());
+    events_interface.FreeEvent(event.kevent);
+    event.kevent = nullptr;
+    event.status = EventState::Available;
+    event.registered = false;
+    const u64 mask = ~(1ULL << event_id);
+    events_mask &= mask;
+}
+
+u32 nvhost_ctrl::FindFreeNvEvent(u32 syncpoint_id) {
+    u32 slot{MaxNvEvents};
+    u32 free_slot{MaxNvEvents};
+    for (u32 i = 0; i < MaxNvEvents; i++) {
+        auto& event = events[i];
+        if (event.registered) {
+            if (!event.IsBeingUsed()) {
+                slot = i;
+                if (event.assigned_syncpt == syncpoint_id) {
+                    return slot;
+                }
+            }
+        } else if (free_slot == MaxNvEvents) {
+            free_slot = i;
+        }
+    }
+    if (free_slot < MaxNvEvents) {
+        CreateNvEvent(free_slot);
+        return free_slot;
+    }
+
+    if (slot < MaxNvEvents) {
+        return slot;
+    }
+
+    LOG_CRITICAL(Service_NVDRV, "Failed to allocate an event");
+    return 0;
+}
+
+void nvhost_ctrl::SignalNvEvent(u32 syncpoint_id, u32 value) {
+    const u32 max = MaxNvEvents - std::countl_zero(events_mask);
+    const u32 min = std::countr_zero(events_mask);
+    for (u32 i = min; i < max; i++) {
+        auto& event = events[i];
+        if (event.assigned_syncpt != syncpoint_id || event.assigned_value != value) {
+            continue;
+        }
+        if (event.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
+            EventState::Waiting) {
+            event.kevent->GetWritableEvent().Signal();
+        }
+        event.status.store(EventState::Signalled, std::memory_order_release);
+    }
+}
+
 } // namespace Service::Nvidia::Devices
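The SignalNvEvent added above bounds its scan with the per-device events_mask rather than walking every one of the MaxNvEvents slots. A standalone sketch of that bound computation, using only <bit> from the standard library; the mask value, slot count and output are invented for illustration:

#include <bit>
#include <cstdint>
#include <iostream>

int main() {
    constexpr std::uint32_t MaxNvEvents = 64;
    // Pretend slots 3, 5 and 20 are currently registered.
    std::uint64_t events_mask = (1ULL << 3) | (1ULL << 5) | (1ULL << 20);

    // Same bound computation as SignalNvEvent: skip leading/trailing zero bits.
    const std::uint32_t max = MaxNvEvents - std::countl_zero(events_mask); // one past highest set bit
    const std::uint32_t min = std::countr_zero(events_mask);               // lowest set bit

    for (std::uint32_t i = min; i < max; ++i) {
        if ((events_mask >> i) & 1) {
            std::cout << "would check slot " << i << '\n';
        }
    }
}

With an empty mask, countl_zero returns 64, so max becomes 0 and the loop body never runs.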
@@ -53,7 +53,49 @@ public:
     };
     static_assert(sizeof(SyncpointEventValue) == sizeof(u32));
 
+    void SignalNvEvent(u32 syncpoint_id, u32 value);
+
 private:
+    struct InternalEvent {
+        // Mask representing registered events
+
+        // Each kernel event associated to an NV event
+        Kernel::KEvent* kevent{};
+        // The status of the current NVEvent
+        std::atomic<EventState> status{};
+
+        // Tells the NVEvent that it has failed.
+        u32 fails{};
+        // When an NVEvent is waiting on GPU interrupt, this is the sync_point
+        // associated with it.
+        u32 assigned_syncpt{};
+        // This is the value of the GPU interrupt for which the NVEvent is waiting
+        // for.
+        u32 assigned_value{};
+
+        // Tells if an NVEvent is registered or not
+        bool registered{};
+
+        bool IsBeingUsed() {
+            const auto current_status = status.load(std::memory_order_acquire);
+            return current_status == EventState::Waiting ||
+                   current_status == EventState::Cancelling ||
+                   current_status == EventState::Signalling;
+        }
+    };
+
+    std::unique_lock<std::mutex> NvEventsLock();
+
+    void CreateNvEvent(u32 event_id);
+
+    void FreeNvEvent(u32 event_id);
+
+    u32 FindFreeNvEvent(u32 syncpoint_id);
+
+    std::array<InternalEvent, MaxNvEvents> events{};
+    std::mutex events_mutex;
+    u64 events_mask{};
+
     struct IocSyncptReadParams {
         u32_le id{};
         u32_le value{};
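InternalEvent above keeps its status in a std::atomic<EventState> so that IsBeingUsed() and the signal/cancel paths can coordinate without holding the events mutex. A minimal sketch of that exchange-based transition; the enumerator list is limited to the states referenced in this diff, and the surrounding driver code is invented:

#include <atomic>
#include <iostream>

// States referenced by the diff above; any other enumerators are omitted.
enum class EventState { Available, Waiting, Cancelling, Signalling, Signalled, Cancelled };

bool IsBeingUsed(const std::atomic<EventState>& status) {
    const auto current_status = status.load(std::memory_order_acquire);
    return current_status == EventState::Waiting ||
           current_status == EventState::Cancelling ||
           current_status == EventState::Signalling;
}

int main() {
    std::atomic<EventState> status{EventState::Waiting};
    std::cout << "in use before signal: " << IsBeingUsed(status) << '\n';

    // Same pattern as SignalNvEvent: only a slot still in Waiting fires the event.
    if (status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
        EventState::Waiting) {
        std::cout << "kernel event would be signalled here\n";
    }
    status.store(EventState::Signalled, std::memory_order_release);

    std::cout << "in use after signal: " << IsBeingUsed(status) << '\n';
}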
@@ -13,10 +13,13 @@ namespace Service::Nvidia::Devices {
 
 nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_)
     : nvdevice{system_}, events_interface{events_interface_} {
-    error_notifier_event = events_interface.CreateNonCtrlEvent("CtrlGpuErrorNotifier");
-    unknown_event = events_interface.CreateNonCtrlEvent("CtrlGpuUknownEvent");
+    error_notifier_event = events_interface.CreateEvent("CtrlGpuErrorNotifier");
+    unknown_event = events_interface.CreateEvent("CtrlGpuUknownEvent");
 }
 
-nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default;
+nvhost_ctrl_gpu::~nvhost_ctrl_gpu() {
+    events_interface.FreeEvent(error_notifier_event);
+    events_interface.FreeEvent(unknown_event);
+}
 
 NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                                  std::vector<u8>& output) {
@@ -30,13 +30,17 @@ nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
     channel_fence.id = syncpoint_manager.AllocateSyncpoint();
     channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id);
     sm_exception_breakpoint_int_report_event =
-        events_interface.CreateNonCtrlEvent("GpuChannelSMExceptionBreakpointInt");
+        events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt");
     sm_exception_breakpoint_pause_report_event =
-        events_interface.CreateNonCtrlEvent("GpuChannelSMExceptionBreakpointPause");
-    error_notifier_event = events_interface.CreateNonCtrlEvent("GpuChannelErrorNotifier");
+        events_interface.CreateEvent("GpuChannelSMExceptionBreakpointPause");
+    error_notifier_event = events_interface.CreateEvent("GpuChannelErrorNotifier");
 }
 
-nvhost_gpu::~nvhost_gpu() = default;
+nvhost_gpu::~nvhost_gpu() {
+    events_interface.FreeEvent(sm_exception_breakpoint_int_report_event);
+    events_interface.FreeEvent(sm_exception_breakpoint_pause_report_event);
+    events_interface.FreeEvent(error_notifier_event);
+}
 
 NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                             std::vector<u8>& output) {
@@ -10,6 +10,8 @@
 
 namespace Service::Nvidia::Devices {
 
+u32 nvhost_nvdec::next_id{};
+
 nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core)
     : nvhost_nvdec_common{system_, core} {}
 nvhost_nvdec::~nvhost_nvdec() = default;
@@ -24,7 +24,7 @@ public:
     void OnClose(DeviceFD fd) override;
 
 private:
-    u32 next_id{};
+    static u32 next_id;
 };
 
 } // namespace Service::Nvidia::Devices
@@ -45,6 +45,8 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s
 }
 } // Anonymous namespace
 
+std::unordered_map<DeviceFD, u32> nvhost_nvdec_common::fd_to_id{};
+
 nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_)
     : nvdevice{system_}, core{core_},
       syncpoint_manager{core.GetSyncpointManager()}, nvmap{core.GetNvMapFile()} {}
@@ -115,7 +115,7 @@ protected:
 
     Kernel::KEvent* QueryEvent(u32 event_id) override;
 
-    std::unordered_map<DeviceFD, u32> fd_to_id{};
+    static std::unordered_map<DeviceFD, u32> fd_to_id;
     s32_le nvmap_fd{};
     u32_le submit_timeout{};
     NvCore::Container& core;
@@ -8,6 +8,9 @@
 #include "video_core/renderer_base.h"
 
 namespace Service::Nvidia::Devices {
 
+u32 nvhost_vic::next_id{};
+
 nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core)
     : nvhost_nvdec_common{system_, core} {}
 
@@ -23,6 +23,6 @@ public:
     void OnClose(DeviceFD fd) override;
 
 private:
-    u32 next_id{};
+    static u32 next_id;
 };
 } // namespace Service::Nvidia::Devices
@@ -3,7 +3,6 @@
 // SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
 // or any later version Refer to the license.txt file included.
 
-#include <bit>
 #include <utility>
 
 #include <fmt/format.h>
@@ -30,101 +29,39 @@
 
 namespace Service::Nvidia {
 
-EventInterface::EventInterface(Module& module_) : module{module_} {
-    events_mask = 0;
-    for (u32 i = 0; i < MaxNvEvents; i++) {
-        status[i] = EventState::Available;
-        events[i] = nullptr;
-        registered[i] = false;
-    }
-}
-
-EventInterface::~EventInterface() {
-    auto lk = Lock();
-    for (u32 i = 0; i < MaxNvEvents; i++) {
-        if (registered[i]) {
-            module.service_context.CloseEvent(events[i]);
-            events[i] = nullptr;
-            registered[i] = false;
-        }
-    }
-    for (auto* event : basic_events) {
-        module.service_context.CloseEvent(event);
-    }
-}
-
-std::unique_lock<std::mutex> EventInterface::Lock() {
-    return std::unique_lock<std::mutex>(events_mutex);
-}
-
-void EventInterface::Signal(u32 event_id) {
-    if (status[event_id].exchange(EventState::Signalling, std::memory_order_acq_rel) ==
-        EventState::Waiting) {
-        events[event_id]->GetWritableEvent().Signal();
-    }
-    status[event_id].store(EventState::Signalled, std::memory_order_release);
-}
-
-void EventInterface::Create(u32 event_id) {
-    ASSERT(!events[event_id]);
-    ASSERT(!registered[event_id]);
-    ASSERT(!IsBeingUsed(event_id));
-    events[event_id] =
-        module.service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", event_id));
-    status[event_id] = EventState::Available;
-    registered[event_id] = true;
-    const u64 mask = 1ULL << event_id;
-    fails[event_id] = 0;
-    events_mask |= mask;
-    assigned_syncpt[event_id] = 0;
-}
-
-void EventInterface::Free(u32 event_id) {
-    ASSERT(events[event_id]);
-    ASSERT(registered[event_id]);
-    ASSERT(!IsBeingUsed(event_id));
-    module.service_context.CloseEvent(events[event_id]);
-    events[event_id] = nullptr;
-    status[event_id] = EventState::Available;
-    registered[event_id] = false;
-    const u64 mask = ~(1ULL << event_id);
-    events_mask &= mask;
-}
-
-u32 EventInterface::FindFreeEvent(u32 syncpoint_id) {
-    u32 slot{MaxNvEvents};
-    u32 free_slot{MaxNvEvents};
-    for (u32 i = 0; i < MaxNvEvents; i++) {
-        if (registered[i]) {
-            if (!IsBeingUsed(i)) {
-                slot = i;
-                if (assigned_syncpt[i] == syncpoint_id) {
-                    return slot;
-                }
-            }
-        } else if (free_slot == MaxNvEvents) {
-            free_slot = i;
-        }
-    }
-    if (free_slot < MaxNvEvents) {
-        Create(free_slot);
-        return free_slot;
-    }
-
-    if (slot < MaxNvEvents) {
-        return slot;
-    }
-
-    LOG_CRITICAL(Service_NVDRV, "Failed to allocate an event");
-    return 0;
-}
-
-Kernel::KEvent* EventInterface::CreateNonCtrlEvent(std::string name) {
+EventInterface::EventInterface(Module& module_) : module{module_} {}
+
+EventInterface::~EventInterface() = default;
+
+void EventInterface::RegisterForSignal(Devices::nvhost_ctrl* device) {
+    std::unique_lock<std::mutex> lk(guard);
+    on_signal.push_back(device);
+}
+
+void EventInterface::UnregisterForSignal(Devices::nvhost_ctrl* device) {
+    std::unique_lock<std::mutex> lk(guard);
+    auto it = std::find(on_signal.begin(), on_signal.end(), device);
+    if (it != on_signal.end()) {
+        on_signal.erase(it);
+    }
+}
+
+void EventInterface::Signal(u32 syncpoint_id, u32 value) {
+    std::unique_lock<std::mutex> lk(guard);
+    for (auto* device : on_signal) {
+        device->SignalNvEvent(syncpoint_id, value);
+    }
+}
+
+Kernel::KEvent* EventInterface::CreateEvent(std::string name) {
     Kernel::KEvent* new_event = module.service_context.CreateEvent(std::move(name));
-    basic_events.push_back(new_event);
     return new_event;
 }
 
+void EventInterface::FreeEvent(Kernel::KEvent* event) {
+    module.service_context.CloseEvent(event);
+}
+
 void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger,
                        Core::System& system) {
     auto module_ = std::make_shared<Module>(system);
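EventInterface is reduced above to a registry: nvhost_ctrl instances register themselves, and Signal() fans a syncpoint notification out to every registered device under a mutex. A stripped-down sketch of that register/unregister/signal pattern; the device type and the main() driver are stand-ins, and only the three member-function names match the diff:

#include <iostream>
#include <list>
#include <mutex>

// Simplified stand-in for Devices::nvhost_ctrl.
struct FakeCtrlDevice {
    void SignalNvEvent(unsigned syncpoint_id, unsigned value) {
        std::cout << "device notified: syncpt=" << syncpoint_id << " value=" << value << '\n';
    }
};

class EventInterfaceSketch {
public:
    void RegisterForSignal(FakeCtrlDevice* device) {
        std::scoped_lock lk{guard};
        on_signal.push_back(device);
    }
    void UnregisterForSignal(FakeCtrlDevice* device) {
        std::scoped_lock lk{guard};
        on_signal.remove(device); // erases every matching pointer
    }
    void Signal(unsigned syncpoint_id, unsigned value) {
        std::scoped_lock lk{guard};
        for (auto* device : on_signal) {
            device->SignalNvEvent(syncpoint_id, value);
        }
    }

private:
    std::mutex guard;
    std::list<FakeCtrlDevice*> on_signal;
};

int main() {
    EventInterfaceSketch events;
    FakeCtrlDevice ctrl;
    events.RegisterForSignal(&ctrl);
    events.Signal(7, 42);
    events.UnregisterForSignal(&ctrl);
}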
@@ -138,18 +75,50 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
 
 Module::Module(Core::System& system)
     : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} {
-    devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, container);
-    devices["/dev/nvhost-gpu"] =
-        std::make_shared<Devices::nvhost_gpu>(system, events_interface, container);
-    devices["/dev/nvhost-ctrl-gpu"] =
-        std::make_shared<Devices::nvhost_ctrl_gpu>(system, events_interface);
-    devices["/dev/nvmap"] = std::make_shared<Devices::nvmap>(system, container);
-    devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, container);
-    devices["/dev/nvhost-ctrl"] =
-        std::make_shared<Devices::nvhost_ctrl>(system, events_interface, container);
-    devices["/dev/nvhost-nvdec"] = std::make_shared<Devices::nvhost_nvdec>(system, container);
-    devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system);
-    devices["/dev/nvhost-vic"] = std::make_shared<Devices::nvhost_vic>(system, container);
+    builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_as_gpu>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-gpu"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_gpu>(system, events_interface, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-ctrl-gpu"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_ctrl_gpu>(system, events_interface);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvmap"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvmap>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvdisp_disp0"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvdisp_disp0>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-ctrl"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_ctrl>(system, events_interface, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-nvdec"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_nvdec>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-nvjpg"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device = std::make_shared<Devices::nvhost_nvjpg>(system);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-vic"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_vic>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
 }
 
 Module::~Module() = default;
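Module::Module above no longer pre-creates every device; it stores one builder lambda per node name, and Module::Open (next hunk) runs the matching builder to construct the device and record it in open_files under a fresh fd. A reduced, self-contained sketch of that name-to-factory map; the Device type, node name and fd handling here are placeholders:

#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

using DeviceFD = int;

struct Device {
    explicit Device(std::string name_) : name(std::move(name_)) {}
    std::string name;
};

int main() {
    std::unordered_map<DeviceFD, std::shared_ptr<Device>> open_files;
    std::unordered_map<std::string, std::function<std::shared_ptr<Device>(DeviceFD)>> builders;

    // Each builder constructs its device lazily and records it under the new fd.
    builders["/dev/nvhost-ctrl"] = [&open_files](DeviceFD fd) {
        auto device = std::make_shared<Device>("nvhost_ctrl");
        open_files.emplace(fd, device);
        return device;
    };

    // Open(): look the node up, allocate an fd, run the builder.
    DeviceFD next_fd = 1;
    const std::string node = "/dev/nvhost-ctrl";
    if (auto it = builders.find(node); it != builders.end()) {
        const DeviceFD fd = next_fd++;
        auto device = it->second(fd);
        std::cout << "opened " << node << " as fd " << fd << " (" << device->name << ")\n";
    }
}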
@@ -169,18 +138,18 @@ NvResult Module::VerifyFD(DeviceFD fd) const {
 }
 
 DeviceFD Module::Open(const std::string& device_name) {
-    if (devices.find(device_name) == devices.end()) {
+    auto it = builders.find(device_name);
+    if (it == builders.end()) {
         LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
         return INVALID_NVDRV_FD;
     }
 
-    auto device = devices[device_name];
     const DeviceFD fd = next_fd++;
+    auto& builder = it->second;
+    auto device = builder(fd)->second;
 
     device->OnOpen(fd);
 
-    open_files[fd] = std::move(device);
-
     return fd;
 }
@@ -256,14 +225,7 @@ NvResult Module::Close(DeviceFD fd) {
 }
 
 void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) {
-    const u32 max = MaxNvEvents - std::countl_zero(events_interface.events_mask);
-    const u32 min = std::countr_zero(events_interface.events_mask);
-    for (u32 i = min; i < max; i++) {
-        if (events_interface.assigned_syncpt[i] == syncpoint_id &&
-            events_interface.assigned_value[i] == value) {
-            events_interface.Signal(i);
-        }
-    }
+    events_interface.Signal(syncpoint_id, value);
 }
 
 NvResult Module::QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event) {
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include <functional>
 #include <memory>
 #include <string>
 #include <unordered_map>
@@ -38,7 +39,8 @@ class SyncpointManager;
 
 namespace Devices {
 class nvdevice;
-}
+class nvhost_ctrl;
+} // namespace Devices
 
 class Module;
@@ -47,47 +49,19 @@ public:
     EventInterface(Module& module_);
     ~EventInterface();
 
-    // Mask representing registered events
-    u64 events_mask{};
-    // Each kernel event associated to an NV event
-    std::array<Kernel::KEvent*, MaxNvEvents> events{};
-    // The status of the current NVEvent
-    std::array<std::atomic<EventState>, MaxNvEvents> status{};
-    // Tells if an NVEvent is registered or not
-    std::array<bool, MaxNvEvents> registered{};
-    // Tells the NVEvent that it has failed.
-    std::array<u32, MaxNvEvents> fails{};
-    // When an NVEvent is waiting on GPU interrupt, this is the sync_point
-    // associated with it.
-    std::array<u32, MaxNvEvents> assigned_syncpt{};
-    // This is the value of the GPU interrupt for which the NVEvent is waiting
-    // for.
-    std::array<u32, MaxNvEvents> assigned_value{};
-    // Constant to denote an unasigned syncpoint.
-    static constexpr u32 unassigned_syncpt = 0xFFFFFFFF;
+    void RegisterForSignal(Devices::nvhost_ctrl*);
+    void UnregisterForSignal(Devices::nvhost_ctrl*);
 
-    bool IsBeingUsed(u32 event_id) {
-        const auto current_status = status[event_id].load(std::memory_order_acquire);
-        return current_status == EventState::Waiting || current_status == EventState::Cancelling ||
-               current_status == EventState::Signalling;
-    }
+    void Signal(u32 syncpoint_id, u32 value);
 
-    std::unique_lock<std::mutex> Lock();
+    Kernel::KEvent* CreateEvent(std::string name);
 
-    void Signal(u32 event_id);
-
-    void Create(u32 event_id);
-
-    void Free(u32 event_id);
-
-    u32 FindFreeEvent(u32 syncpoint_id);
-
-    Kernel::KEvent* CreateNonCtrlEvent(std::string name);
+    void FreeEvent(Kernel::KEvent* event);
 
 private:
-    std::mutex events_mutex;
     Module& module;
-    std::vector<Kernel::KEvent*> basic_events;
+    std::mutex guard;
+    std::list<Devices::nvhost_ctrl*> on_signal;
 };
 
 class Module final {
@@ -97,9 +71,9 @@ public:
 
     /// Returns a pointer to one of the available devices, identified by its name.
     template <typename T>
-    std::shared_ptr<T> GetDevice(const std::string& name) {
-        auto itr = devices.find(name);
-        if (itr == devices.end())
+    std::shared_ptr<T> GetDevice(DeviceFD fd) {
+        auto itr = open_files.find(fd);
+        if (itr == open_files.end())
             return nullptr;
         return std::static_pointer_cast<T>(itr->second);
     }
@@ -132,8 +106,9 @@ private:
     /// Id to use for the next open file descriptor.
     DeviceFD next_fd = 1;
 
+    using FilesContainerType = std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>>;
     /// Mapping of file descriptors to the devices they reference.
-    std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>> open_files;
+    FilesContainerType open_files;
 
     /// Mapping of device node names to their implementation.
     std::unordered_map<std::string, std::shared_ptr<Devices::nvdevice>> devices;
@@ -147,6 +122,7 @@ private:
 
     void CreateEvent(u32 event_id);
     void FreeEvent(u32 event_id);
+    std::unordered_map<std::string, std::function<FilesContainerType::iterator(DeviceFD)>> builders;
 };
 
 /// Registers all NVDRV services with the specified service manager.
@@ -105,10 +105,15 @@ NVFlinger::~NVFlinger() {
             display.GetLayer(layer).Core().NotifyShutdown();
         }
     }
 
+    if (nvdrv) {
+        nvdrv->Close(disp_fd);
+    }
 }
 
 void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
     nvdrv = std::move(instance);
+    disp_fd = nvdrv->Open("/dev/nvdisp_disp0");
 }
 
 std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
@@ -276,7 +281,7 @@ void NVFlinger::Compose() {
         // Now send the buffer to the GPU for drawing.
         // TODO(Subv): Support more than just disp0. The display device selection is probably based
        // on which display we're drawing (Default, Internal, External, etc)
-        auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>("/dev/nvdisp_disp0");
+        auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd);
         ASSERT(nvdisp);
 
         Common::Rectangle<int> crop_rect{
@@ -116,6 +116,7 @@ private:
     void SplitVSync(std::stop_token stop_token);
 
     std::shared_ptr<Nvidia::Module> nvdrv;
+    s32 disp_fd;
 
     std::list<VI::Display> displays;