Atmosphere/thermosphere/src/irq.c

/*
* Copyright (c) 2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "irq.h"
#include "core_ctx.h"
#include "debug_log.h"
#include "vgic.h"
#include "timer.h"
#include "guest_timers.h"
#include "transport_interface.h"
#include "debug_pause.h"
IrqManager g_irqManager = {0};

static void initGic(void)
{
    // Reinits the GICD and GICC (for non-secure mode, obviously)
    if (currentCoreCtx->isBootCore && !currentCoreCtx->warmboot) {
        // Disable interrupt handling & global interrupt distribution
        gicd->ctlr = 0;

        // Get some info
        g_irqManager.numSharedInterrupts = 32 * (gicd->typer & 0x1F); // number of interrupt lines / 32

        // Unimplemented priority bits (least significant) are RAZ/WI
        gicd->ipriorityr[0] = 0xFF;
        g_irqManager.priorityShift = 8 - __builtin_popcount(gicd->ipriorityr[0]);
        g_irqManager.numPriorityLevels = (u8)BIT(__builtin_popcount(gicd->ipriorityr[0]));
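        // For example (illustrative figures, not read from the hardware): on a GIC that
        // implements 5 priority bits, ipriorityr[0] reads back as 0xF8 after the write above,
        // so priorityShift = 8 - 5 = 3 and numPriorityLevels = BIT(5) = 32.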

        g_irqManager.numCpuInterfaces = (u8)(1 + ((gicd->typer >> 5) & 7));
        g_irqManager.numListRegisters = (u8)(1 + (gich->vtr & 0x3F));
    }

    // Only one core will reset the GIC state for the shared peripheral interrupts
    u32 numInterrupts = 32;
    if (currentCoreCtx->isBootCore) {
        numInterrupts += g_irqManager.numSharedInterrupts;
    }

    // Filter all interrupts
    gicc->pmr = 0;

    // Disable interrupt preemption
    gicc->bpr = 7;

    // Note: the GICD I...n regs are banked for private interrupts

    // Disable all interrupts, clear active status, clear pending status
    for (u32 i = 0; i < numInterrupts / 32; i++) {
        gicd->icenabler[i] = 0xFFFFFFFF;
        gicd->icactiver[i] = 0xFFFFFFFF;
        gicd->icpendr[i] = 0xFFFFFFFF;
    }

    // Set priorities to lowest
    for (u32 i = 0; i < numInterrupts; i++) {
        gicd->ipriorityr[i] = 0xFF;
    }

    // Reset icfgr, itargetsr for shared peripheral interrupts
    for (u32 i = 32 / 16; i < numInterrupts / 16; i++) {
        gicd->icfgr[i] = 0x55555555;
    }

    for (u32 i = 32; i < numInterrupts; i++) {
        gicd->itargetsr[i] = 0;
    }

    // Now, reenable interrupts

    // Enable the distributor
    if (currentCoreCtx->isBootCore) {
        gicd->ctlr = 1;
    }

    // Enable the CPU interface. Set EOIModeNS=1 (split prio drop & deactivate priority)
    gicc->ctlr = BIT(9) | 1;
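    // With EOImodeNS set, a write to GICC_EOIR only performs the priority drop; the interrupt
    // remains active until GICC_DIR is written. handleIrqException below relies on this split
    // to decide whether to deactivate an interrupt itself or leave that to the vgic code.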
    // Disable interrupt filtering
    gicc->pmr = 0xFF;

    currentCoreCtx->gicInterfaceMask = gicd->itargetsr[0];
}

static inline bool checkRescheduleEmulatedPtimer(ExceptionStackFrame *frame)
{
    // Evaluate if the timer has really expired in the PoV of the guest kernel.
    // If not, reschedule (add missed time delta) it & exit early
    u64 cval = currentCoreCtx->emulPtimerCval;
    u64 vct = computeCntvct(frame);

    if (cval > vct) {
        // It has not: reschedule the timer
        // Note: this isn't 100% precise esp. on QEMU so it may take a few tries...
        writeEmulatedPhysicalCompareValue(frame, cval);
        return false;
    }

    return true;
}

static inline bool checkGuestTimerInterrupts(ExceptionStackFrame *frame, u16 irqId)
{
    // Two things may have happened here: we lost the race against the guest disabling its timer
    // interrupts, or the virtual timer may have fired before we updated voff while executing a top half.
    if (irqId == TIMER_IRQID(NS_VIRT_TIMER)) {
        u64 cval = GET_SYSREG(cntp_cval_el0);
        return cval <= computeCntvct(frame);
    } else if (irqId == TIMER_IRQID(NS_PHYS_TIMER)) {
        return checkRescheduleEmulatedPtimer(frame);
    } else {
        return true;
    }
}

static void doConfigureInterrupt(u16 id, u8 prio, bool isLevelSensitive)
{
    // Disable the interrupt while it is being reconfigured
    gicd->icenabler[id / 32] = BIT(id % 32);

    if (id >= 32) {
        // Shared peripheral interrupt: set its trigger mode (2-bit field per interrupt in icfgr)
        // and route it to all CPU interfaces
        u32 cfgr = gicd->icfgr[id / 16];
        cfgr &= ~(3 << IRQ_CFGR_SHIFT(id));
        cfgr |= (!isLevelSensitive ? 3 : 1) << IRQ_CFGR_SHIFT(id);
        gicd->icfgr[id / 16] = cfgr;
        gicd->itargetsr[id] = 0xFF; // all cpu interfaces
    }

    // Clear any stale pending state, set the priority, then reenable the interrupt
    gicd->icpendr[id / 32] = BIT(id % 32);
    gicd->ipriorityr[id] = (prio << g_irqManager.priorityShift) & 0xFF;
    gicd->isenabler[id / 32] = BIT(id % 32);
}
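// For reference, initIrq below uses doConfigureInterrupt(GIC_IRQID_MAINTENANCE, IRQ_PRIORITY_HOST, true)
// to register the level-sensitive GIC maintenance interrupt, and registers the Thermosphere SGIs
// with host priority as well.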

void initIrq(void)
{
    u64 flags = recursiveSpinlockLockMaskIrq(&g_irqManager.lock);

    initGic();
    vgicInit();

    // Configure the interrupts we use here
    for (u32 i = 0; i < ThermosphereSgi_Max; i++) {
        doConfigureInterrupt(i, IRQ_PRIORITY_HOST, false);
    }

    doConfigureInterrupt(GIC_IRQID_MAINTENANCE, IRQ_PRIORITY_HOST, true);

    recursiveSpinlockUnlockRestoreIrq(&g_irqManager.lock, flags);
}

void configureInterrupt(u16 id, u8 prio, bool isLevelSensitive)
{
    u64 flags = recursiveSpinlockLockMaskIrq(&g_irqManager.lock);
    doConfigureInterrupt(id, prio, isLevelSensitive);
    recursiveSpinlockUnlockRestoreIrq(&g_irqManager.lock, flags);
}

void irqSetAffinity(u16 id, u8 affinity)
{
    u64 flags = recursiveSpinlockLockMaskIrq(&g_irqManager.lock);
    gicd->itargetsr[id] = affinity;
    recursiveSpinlockUnlockRestoreIrq(&g_irqManager.lock, flags);
}

bool irqIsGuest(u16 id)
{
    if (id >= 32 + g_irqManager.numSharedInterrupts) {
        DEBUG("vgic: %u not supported by physical distributor\n", (u32)id);
        return false;
    }

    bool ret = true;
    ret = ret && id != GIC_IRQID_MAINTENANCE;
    ret = ret && id != GIC_IRQID_NS_PHYS_HYP_TIMER;

    // If the following interrupts don't exist, that's fine, they're defined as GIC_IRQID_SPURIOUS in that case
    // (for which the function isn't called, anyway)
    ret = ret && id != GIC_IRQID_NS_VIRT_HYP_TIMER;
    ret = ret && id != GIC_IRQID_SEC_PHYS_HYP_TIMER;
    ret = ret && id != GIC_IRQID_SEC_VIRT_HYP_TIMER;

    ret = ret && transportInterfaceFindByIrqId(id) == NULL;

    return ret;
}

void handleIrqException(ExceptionStackFrame *frame, bool isLowerEl, bool isA32)
{
    (void)isLowerEl;
    (void)isA32;

    // Acknowledge the interrupt. Interrupt goes from pending to active.
    u32 iar = gicc->iar;
    u32 irqId = iar & 0x3FF;
    u32 srcCore = (iar >> 10) & 7;
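    // Note: GICC_IAR[9:0] holds the interrupt ID, and [12:10] the CPUID field, which identifies
    // the core that requested the interrupt and is only meaningful for SGIs.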

    frame->esr_el2.ec = Exception_Uncategorized;

    DEBUG("EL2 [core %d]: Received irq %x\n", (int)currentCoreCtx->coreId, irqId);

    if (irqId == GIC_IRQID_SPURIOUS) {
        // Spurious interrupt received
        return;
    } else if (!checkGuestTimerInterrupts(frame, irqId)) {
        // Deactivate the interrupt, return early
        gicc->eoir = iar;
        gicc->dir = iar;
        return;
    }

    bool isGuestInterrupt = false;
    bool isMaintenanceInterrupt = false;
    bool hasBottomHalf = false;

    switch (irqId) {
        case ThermosphereSgi_ExecuteFunction:
            executeFunctionInterruptHandler(srcCore);
            break;
        case ThermosphereSgi_VgicUpdate:
            // Nothing in particular to do here
            break;
        case ThermosphereSgi_DebugPause:
            debugPauseSgiHandler();
            break;
        case GIC_IRQID_MAINTENANCE:
            isMaintenanceInterrupt = true;
            break;
        case TIMER_IRQID(CURRENT_TIMER):
            timerInterruptHandler();
            break;
        default:
            isGuestInterrupt = irqId >= 16;
            break;
    }

    TransportInterface *transportIface = irqId >= 32 ? transportInterfaceIrqHandlerTopHalf(irqId) : NULL;
    hasBottomHalf = hasBottomHalf || transportIface != NULL;

    // Priority drop
    gicc->eoir = iar;
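    // Because EOImodeNS was set in initGic, this write only lowers the running priority and the
    // interrupt stays active: host-handled interrupts are deactivated explicitly via gicc->dir
    // below, while interrupts forwarded to the guest are left for the vgic code to complete.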

    isGuestInterrupt = isGuestInterrupt && transportIface == NULL && irqIsGuest(irqId);

    recursiveSpinlockLock(&g_irqManager.lock);

    if (!isGuestInterrupt) {
        if (isMaintenanceInterrupt) {
            vgicMaintenanceInterruptHandler();
        }
        // Deactivate the interrupt
        gicc->dir = iar;
    } else {
        vgicEnqueuePhysicalIrq(irqId);
    }

    // Update vgic state
    vgicUpdateState();

    recursiveSpinlockUnlock(&g_irqManager.lock);

    // Bottom half part
    if (hasBottomHalf) {
        exceptionEnterInterruptibleHypervisorCode();
        unmaskIrq();
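        // The bottom half runs with IRQs unmasked, so its longer-running work does not
        // keep interrupts masked on this core.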
        if (transportIface != NULL) {
            transportInterfaceIrqHandlerBottomHalf(transportIface);
        }
    }
}