
irq: inline restore_critical_section
Signed-off-by: hujun5 <hujun5@xiaomi.com>
hujun260 committed Aug 29, 2024
1 parent 263f895 commit cbdb555
Showing 3 changed files with 23 additions and 44 deletions.
14 changes: 13 additions & 1 deletion include/nuttx/irq.h
@@ -280,7 +280,19 @@ void leave_critical_section(irqstate_t flags) noinstrument_function;
****************************************************************************/

#ifdef CONFIG_SMP
void restore_critical_section(void);
#  define restore_critical_section() \
  do { \
    FAR struct tcb_s *tcb; \
    int me = this_cpu(); \
    tcb = current_task(me); \
    if (tcb->irqcount <= 0) \
      { \
        if ((g_cpu_irqset & (1 << me)) != 0) \
          { \
            cpu_irqlock_clear(); \
          } \
      } \
  } while (0)
#else
# define restore_critical_section()
#endif
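
For context, a minimal sketch of how the inlined macro might be exercised from kernel code where the scheduler internals (this_cpu(), current_task(), g_cpu_irqset, cpu_irqlock_clear()) are visible. The wrapper name example_irq_exit() is hypothetical; the actual call sites of restore_critical_section() are unchanged by this commit.

#include <nuttx/config.h>
#include <nuttx/irq.h>

/* Hypothetical interrupt-exit path (sketch only): after a possible context
 * switch, the macro checks the incoming task's irqcount and, if this CPU's
 * bit is still set in g_cpu_irqset, releases the IRQ lock via
 * cpu_irqlock_clear().
 */

static inline void example_irq_exit(void)
{
  restore_critical_section();
}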
43 changes: 0 additions & 43 deletions sched/irq/irq_csection.c
@@ -589,47 +589,4 @@ void leave_critical_section(irqstate_t flags)
  up_irq_restore(flags);
}
#endif

/****************************************************************************
* Name: restore_critical_section
*
* Description:
* Restore the critical_section
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
****************************************************************************/

#ifdef CONFIG_SMP
void restore_critical_section(void)
{
  /* NOTE: The following logic for adjusting the global IRQ controls was
   * derived from nxsched_add_readytorun() and sched_removedreadytorun().
   * Here we handle only the clearing logic, deferring the release of the
   * IRQ lock until after the context switch.
   */

  FAR struct tcb_s *tcb;
  int me = this_cpu();

  /* Adjust the global IRQ controls.  If irqcount is greater than zero,
   * then this task/this CPU holds the IRQ lock.
   */

  tcb = current_task(me);
  DEBUGASSERT(g_cpu_nestcount[me] <= 0);
  if (tcb->irqcount <= 0)
    {
      if ((g_cpu_irqset & (1 << me)) != 0)
        {
          cpu_irqlock_clear();
        }
    }
}
#endif /* CONFIG_SMP */

#endif /* CONFIG_IRQCOUNT */
10 changes: 10 additions & 0 deletions sched/sched/sched.h
@@ -281,6 +281,16 @@ extern volatile clock_t g_cpuload_total;

extern volatile cpu_set_t g_cpu_lockset;

/* This is the spinlock that enforces critical sections when interrupts are
* disabled.
*/

extern volatile spinlock_t g_cpu_irqlock;

/* Used to keep track of which CPU(s) hold the IRQ lock. */

extern volatile cpu_set_t g_cpu_irqset;

/* Used to lock tasklist to prevent from concurrent access */

extern volatile spinlock_t g_cpu_tasklistlock;
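
The declarations above are what the inlined macro in include/nuttx/irq.h relies on. Below is a minimal sketch of what cpu_irqlock_clear() is assumed to do, based only on these declarations; the real helper is defined elsewhere in the scheduler headers and is not part of this diff.

/* Assumed behavior (sketch only): clear every CPU's claim on the IRQ lock
 * and release the underlying spinlock.  The real definition may differ,
 * e.g. by using an instrumentation-free unlock variant.
 */

#  define cpu_irqlock_clear() \
     do \
       { \
         g_cpu_irqset = 0; \
         spin_unlock(&g_cpu_irqlock); \
       } \
     while (0)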
