
VideoCore/GPU: Delegate subchannel engines to the dma pusher.

Fernando Sahmkow 2020-04-27 22:07:21 -04:00
parent 90e5694230
commit b87422a86f
3 changed files with 49 additions and 4 deletions
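
For readers skimming the diff, here is a minimal standalone sketch of the delegation this commit introduces: puller methods (below 0x40) stay with the GPU front-end, while everything else is forwarded directly to whichever engine is bound on the method's subchannel. All names below (Engine, PrintingEngine, Pusher) are simplified illustrations, not yuzu's actual types.

#include <array>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for Tegra::Engines::EngineInterface.
struct Engine {
    virtual ~Engine() = default;
    virtual void CallMethod(std::uint32_t method, std::uint32_t argument, bool is_last_call) = 0;
};

struct PrintingEngine : Engine {
    void CallMethod(std::uint32_t method, std::uint32_t argument, bool is_last_call) override {
        std::printf("engine method 0x%x <- 0x%x (last=%d)\n", static_cast<unsigned>(method),
                    static_cast<unsigned>(argument), is_last_call ? 1 : 0);
    }
};

// Methods below the puller threshold are handled by the GPU front-end;
// anything above it goes straight to the engine bound on that subchannel,
// which is the delegation the commit adds to DmaPusher.
class Pusher {
public:
    static constexpr std::uint32_t non_puller_methods = 0x40;

    void BindSubchannel(Engine* engine, std::uint32_t subchannel) {
        subchannels[subchannel] = engine;
    }

    void CallMethod(std::uint32_t method, std::uint32_t argument, std::uint32_t subchannel) const {
        if (method < non_puller_methods) {
            std::printf("puller method 0x%x handled by the GPU front-end\n",
                        static_cast<unsigned>(method));
        } else {
            subchannels[subchannel]->CallMethod(method, argument, true);
        }
    }

private:
    std::array<Engine*, 8> subchannels{};
};

int main() {
    PrintingEngine maxwell_3d;
    Pusher pusher;
    pusher.BindSubchannel(&maxwell_3d, 0);
    pusher.CallMethod(0x20, 0, 0); // puller range: stays with the front-end
    pusher.CallMethod(0x6f, 1, 0); // engine range: delegated to the bound engine
}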

src/video_core/dma_pusher.cpp

@@ -27,6 +27,8 @@ void DmaPusher::DispatchCalls() {
    dma_pushbuffer_subindex = 0;
    dma_state.is_last_call = true;
    while (system.IsPoweredOn()) {
        if (!Step()) {
            break;
@@ -82,9 +84,11 @@ bool DmaPusher::Step() {
                    index);
                CallMultiMethod(&command_header.argument, max_write);
                dma_state.method_count -= max_write;
                dma_state.is_last_call = true;
                index += max_write;
                continue;
            } else {
                dma_state.is_last_call = dma_state.method_count <= 1;
                CallMethod(command_header.argument);
            }
@@ -144,12 +148,22 @@ void DmaPusher::SetState(const CommandHeader& command_header) {
}

void DmaPusher::CallMethod(u32 argument) const {
    if (dma_state.method < non_puller_methods) {
        gpu.CallMethod({dma_state.method, argument, dma_state.subchannel, dma_state.method_count});
    } else {
        subchannels[dma_state.subchannel]->CallMethod(dma_state.method, argument,
                                                      dma_state.is_last_call);
    }
}

void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
    if (dma_state.method < non_puller_methods) {
        gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
                            dma_state.method_count);
    } else {
        subchannels[dma_state.subchannel]->CallMultiMethod(dma_state.method, base_start,
                                                           num_methods, dma_state.method_count);
    }
}

} // namespace Tegra
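
One consumer-side view of the new is_last_call flag: Step() sets it to true after a CallMultiMethod burst and to method_count <= 1 before a single CallMethod, so a bound engine can tell when the current method sequence has finished. The sketch below is an illustrative engine, not yuzu's actual Kepler/Maxwell code; it only shows the kind of "flush on last call" logic the flag enables, and kDataMethod is a hypothetical register index.

#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative engine that accumulates data words pushed to a single method
// and flushes once the pusher reports the last call of the burst.
class UploadEngine {
public:
    void CallMethod(std::uint32_t method, std::uint32_t argument, bool is_last_call) {
        if (method == kDataMethod) {
            staging.push_back(argument);
        }
        if (is_last_call) {
            Flush();
        }
    }

private:
    static constexpr std::uint32_t kDataMethod = 0x6d; // hypothetical register index

    void Flush() {
        std::printf("flushing %zu words\n", staging.size());
        staging.clear();
    }

    std::vector<std::uint32_t> staging;
};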

src/video_core/dma_pusher.h

@@ -4,11 +4,13 @@
#pragma once
#include <array>
#include <vector>
#include <queue>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "video_core/engines/engine_interface.h"
namespace Core {
class System;
@@ -69,7 +71,13 @@ public:
    void DispatchCalls();

    void BindSubchannel(Tegra::Engines::EngineInterface* engine, u32 subchannel_id) {
        subchannels[subchannel_id] = engine;
    }

private:
    static constexpr u32 non_puller_methods = 0x40;
    static constexpr u32 max_subchannels = 8;

    bool Step();
    void SetState(const CommandHeader& command_header);
@@ -88,6 +96,7 @@ private:
        u32 method_count; ///< Current method count
        u32 length_pending; ///< Large NI command length pending
        bool non_incrementing; ///< Current command's NI flag
        bool is_last_call; ///< Whether this is the last call of the current method sequence
    };

    DmaState dma_state{};
@@ -96,6 +105,8 @@ private:
    GPUVAddr dma_mget{}; ///< main pushbuffer last read address
    bool ib_enable{true}; ///< IB mode enabled

    std::array<Tegra::Engines::EngineInterface*, max_subchannels> subchannels{};

    GPU& gpu;
    Core::System& system;
};
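
The header now stores Tegra::Engines::EngineInterface pointers, but the interface itself is not part of this diff. Based on the two call sites in dma_pusher.cpp above, its shape is presumably along these lines (an inference, not the exact contents of video_core/engines/engine_interface.h):

#pragma once

#include "common/common_types.h"

namespace Tegra::Engines {

class EngineInterface {
public:
    virtual ~EngineInterface() = default;

    /// Write the value to the register identified by method.
    virtual void CallMethod(u32 method, u32 method_argument, bool is_last_call) = 0;

    /// Write multiple values to the register identified by method.
    virtual void CallMultiMethod(u32 method, const u32* base_start, u32 amount,
                                 u32 methods_pending) = 0;
};

} // namespace Tegra::Engines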

src/video_core/gpu.cpp

@@ -347,7 +347,27 @@ void GPU::ProcessBindMethod(const MethodCall& method_call) {
    // Bind the current subchannel to the desired engine id.
    LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
              method_call.argument);
    const auto engine_id = static_cast<EngineID>(method_call.argument);
    bound_engines[method_call.subchannel] = engine_id;
    switch (engine_id) {
    case EngineID::FERMI_TWOD_A:
        dma_pusher->BindSubchannel(fermi_2d.get(), method_call.subchannel);
        break;
    case EngineID::MAXWELL_B:
        dma_pusher->BindSubchannel(maxwell_3d.get(), method_call.subchannel);
        break;
    case EngineID::KEPLER_COMPUTE_B:
        dma_pusher->BindSubchannel(kepler_compute.get(), method_call.subchannel);
        break;
    case EngineID::MAXWELL_DMA_COPY_A:
        dma_pusher->BindSubchannel(maxwell_dma.get(), method_call.subchannel);
        break;
    case EngineID::KEPLER_INLINE_TO_MEMORY_B:
        dma_pusher->BindSubchannel(kepler_memory.get(), method_call.subchannel);
        break;
    default:
        UNIMPLEMENTED_MSG("Unimplemented engine");
    }
}
void GPU::ProcessSemaphoreTriggerMethod() {