Skip to content

Commit

Permalink
spinlock: Spinlock optimisations
Browse files Browse the repository at this point in the history
Signed-off-by: Pedro Falcato <[email protected]>
  • Loading branch information
heatd committed Mar 8, 2023
1 parent 888da27 commit 4fb5421
Show file tree
Hide file tree
Showing 11 changed files with 236 additions and 124 deletions.
6 changes: 5 additions & 1 deletion kernel/include/onyx/compiler.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016 - 2022 Pedro Falcato
* Copyright (c) 2016 - 2023 Pedro Falcato
* This file is part of Onyx, and is released under the terms of the MIT License
* check LICENSE at the root directory for more information
*
Expand Down Expand Up @@ -45,6 +45,10 @@
#define _strong_alias_c_name(name, aliasname) \
extern "C" __typeof(name) aliasname __attribute__((alias(#name)));

#define __always_inline static inline __attribute__((always_inline))
#define __noinline __attribute__((noinline))
#define __packed __attribute__((packed))

#define NO_ASAN __attribute__((no_sanitize_address))

#define USED_FUNC __attribute__((used))
Expand Down
84 changes: 80 additions & 4 deletions kernel/include/onyx/percpu.h
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
/*
* Copyright (c) 2019 Pedro Falcato
* This file is part of Carbon, and is released under the terms of the MIT License
* Copyright (c) 2019 - 2023 Pedro Falcato
* This file is part of Onyx, and is released under the terms of the MIT License
* check LICENSE at the root directory for more information
*
* SPDX-License-Identifier: MIT
*/
#ifndef _CARBON_PERCPU_H
#define _CARBON_PERCPU_H
#ifndef _ONYX_PERCPU_H
#define _ONYX_PERCPU_H
#include <stdbool.h>

#include <onyx/compiler.h>
Expand Down Expand Up @@ -67,6 +69,80 @@
} \
})

// INC/DEC operations can use the single-instruction intrinsic ops defined
// by the arch percpu header (inc_per_cpu_N/dec_per_cpu_N), or they fall
// back to add_per_cpu with +1/-1.
// The sizeof() switch dispatches to the correctly-sized arch intrinsic;
// sizeof is a compile-time constant, so the switch folds to a single case.
// NOTE: the original diff defined inc_per_cpu twice (two identical
// #ifdef inc_per_cpu_1 blocks) - the duplicate redefinition is removed here.
#ifdef inc_per_cpu_1

#define inc_per_cpu(var)            \
    ({                              \
        switch (sizeof(var))        \
        {                           \
            case 1:                 \
                inc_per_cpu_1(var); \
                break;              \
            case 2:                 \
                inc_per_cpu_2(var); \
                break;              \
            case 4:                 \
                inc_per_cpu_4(var); \
                break;              \
            case 8:                 \
                inc_per_cpu_8(var); \
                break;              \
        }                           \
    })
#else
// No arch intrinsic available - emulate with a generic per-cpu add
#define inc_per_cpu(var) add_per_cpu(var, 1)
#endif

#ifdef dec_per_cpu_1

#define dec_per_cpu(var)            \
    ({                              \
        switch (sizeof(var))        \
        {                           \
            case 1:                 \
                dec_per_cpu_1(var); \
                break;              \
            case 2:                 \
                dec_per_cpu_2(var); \
                break;              \
            case 4:                 \
                dec_per_cpu_4(var); \
                break;              \
            case 8:                 \
                dec_per_cpu_8(var); \
                break;              \
        }                           \
    })
#else
// No arch intrinsic available - emulate with a generic per-cpu add of -1
#define dec_per_cpu(var) add_per_cpu(var, -1)
#endif

#else

extern "C" unsigned long __raw_asm_get_per_cpu(size_t off, size_t size);
Expand Down
60 changes: 60 additions & 0 deletions kernel/include/onyx/preempt.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
/*
* Copyright (c) 2016 - 2023 Pedro Falcato
* This file is part of Onyx, and is released under the terms of the MIT License
* check LICENSE at the root directory for more information
*
* SPDX-License-Identifier: MIT
*/
#ifndef _ONYX_PREEMPT_H
#define _ONYX_PREEMPT_H

#include <onyx/compiler.h>
#include <onyx/percpu.h>

#include <platform/irq.h>

void sched_enable_pulse();

void sched_handle_preempt(bool may_softirq);

extern unsigned long preemption_counter;

// True when this CPU's preemption-disable counter is nonzero, i.e. at
// least one sched_disable_preempt() (or spin_lock) is outstanding here.
__always_inline bool sched_is_preemption_disabled()
{
    unsigned long count = get_per_cpu(preemption_counter);
    return count != 0;
}

// Read this CPU's preemption-disable nesting counter (0 = preemptible).
__always_inline unsigned long sched_get_preempt_counter()
{
    return get_per_cpu(preemption_counter);
}

// Drop one level of preemption disabling WITHOUT checking for pending
// work (no softirq handling, no resched). Callers that want those
// checks should use sched_enable_preempt() instead.
__always_inline void __sched_enable_preempt()
{
    dec_per_cpu(preemption_counter);
}

// Re-enable preemption but deliberately skip the softirq/resched
// processing done by sched_enable_preempt() - just drops the counter.
__always_inline void sched_enable_preempt_no_softirq()
{
    __sched_enable_preempt();
}

// Drop one level of preemption disabling. If this was the outermost
// level and IRQs are enabled, run pending deferred work (softirqs,
// rescheduling) via sched_handle_preempt().
__always_inline void sched_enable_preempt()
{
    __sched_enable_preempt();

    // Still inside a nested preempt-off region? Nothing more to do.
    if (get_per_cpu(preemption_counter) != 0)
        return;

    // With IRQs masked we must not handle softirqs/resched here.
    if (irq_is_disabled())
        return;

    sched_handle_preempt(true);
}

// Disable preemption on this CPU. Nests: each call must be paired with
// a sched_enable_preempt()/sched_enable_preempt_no_softirq().
__always_inline void sched_disable_preempt()
{
    inc_per_cpu(preemption_counter);
}

#endif
17 changes: 2 additions & 15 deletions kernel/include/onyx/scheduler.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016 - 2022 Pedro Falcato
* Copyright (c) 2016 - 2023 Pedro Falcato
* This file is part of Onyx, and is released under the terms of the MIT License
* check LICENSE at the root directory for more information
*
Expand All @@ -16,6 +16,7 @@
#include <onyx/cputime.h>
#include <onyx/list.h>
#include <onyx/percpu.h>
#include <onyx/preempt.h>
#include <onyx/signal.h>
#include <onyx/spinlock.h>

Expand Down Expand Up @@ -142,8 +143,6 @@ void thread_set_state(thread_t *thread, int state);

void thread_wake_up(thread_t *thread);

bool sched_is_preemption_disabled(void);

void sched_sleep_until_wake(void);

void thread_wake_up_ftx(thread_t *thread);
Expand All @@ -154,16 +153,6 @@ void sched_start_thread(thread_t *thread);

void sched_wake_up_available_threads(void);

void sched_enable_preempt(void);

void sched_disable_preempt(void);

void sched_enable_preempt_no_softirq(void);

void sched_enable_preempt_for_cpu(unsigned int cpu);

void sched_disable_preempt_for_cpu(unsigned int cpu);

void sched_block(struct thread *thread);

void __sched_block(struct thread *thread, unsigned long cpuflags);
Expand Down Expand Up @@ -233,8 +222,6 @@ static inline void sched_sleep_ms(unsigned long ms)
sched_sleep(ms * NS_PER_MS);
}

void sched_enable_pulse(void);

#ifdef __cplusplus

namespace native
Expand Down
29 changes: 21 additions & 8 deletions kernel/include/onyx/spinlock.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016 - 2021 Pedro Falcato
* Copyright (c) 2016 - 2023 Pedro Falcato
* This file is part of Onyx, and is released under the terms of the MIT License
* check LICENSE at the root directory for more information
*
Expand All @@ -17,6 +17,7 @@
#include <stdbool.h>

#include <onyx/compiler.h>
#include <onyx/preempt.h>
#include <onyx/smp.h>
#include <onyx/utils.h>

Expand All @@ -36,13 +37,12 @@ struct spinlock
};

#ifdef __cplusplus
extern "C" {
extern "C"
{
#endif

void spin_lock(struct spinlock *lock);
void spin_unlock(struct spinlock *lock);
void spin_lock_preempt(struct spinlock *lock);
void spin_unlock_preempt(struct spinlock *lock);
void __spin_lock(struct spinlock *lock);
void __spin_unlock(struct spinlock *lock);
int spin_try_lock(struct spinlock *lock);

#ifndef __ONLY_INCLUDE_BASIC_C_DEFS
Expand All @@ -67,13 +67,13 @@ CONSTEXPR static inline void spinlock_init(struct spinlock *s)
// Acquire the lock with local IRQs disabled; returns the previous IRQ
// flags, to be passed back to spin_unlock_irqrestore(). IRQs are
// disabled *before* taking the lock so an interrupt handler on this CPU
// cannot deadlock trying to take the same lock.
// Uses __spin_lock (no preempt-counter manipulation) - presumably
// because masked IRQs already prevent preemption here; confirm against
// the scheduler's IRQ-path assumptions.
static inline FUNC_NO_DISCARD unsigned long spin_lock_irqsave(struct spinlock *lock)
{
    unsigned long flags = irq_save_and_disable();
    __spin_lock(lock);
    return flags;
}

static inline void spin_unlock_irqrestore(struct spinlock *lock, unsigned long old_flags)
{
spin_unlock_preempt(lock);
__spin_unlock(lock);
irq_restore(old_flags);
}

Expand All @@ -82,6 +82,19 @@ static inline bool spin_lock_held(struct spinlock *lock)
return lock->lock == get_cpu_nr() + 1;
}

// Acquire the spinlock. Preemption is disabled before the lock is
// taken, so the holder cannot be scheduled away while spinning or while
// holding the lock.
static inline void spin_lock(struct spinlock *lock)
{
    sched_disable_preempt();

    __spin_lock(lock);
}

// Release the spinlock, then re-enable preemption. The lock is dropped
// first so any deferred work triggered by sched_enable_preempt() runs
// outside the critical section.
static inline void spin_unlock(struct spinlock *lock)
{
    __spin_unlock(lock);
    sched_enable_preempt();
}

#define MUST_HOLD_LOCK(lock) assert(spin_lock_held(lock) != false)

#ifdef __cplusplus
Expand Down
19 changes: 5 additions & 14 deletions kernel/include/onyx/x86/include/platform/irq.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,8 @@ struct irq_context
};

#ifdef __cplusplus
extern "C" {
extern "C"
{
#endif

static inline unsigned long x86_save_flags(void)
Expand All @@ -39,12 +40,12 @@ static inline void irq_disable(void)
__asm__ __volatile__("cli");
}

static inline void irq_enable(void)
// Enable local interrupts on this CPU (x86 STI).
static inline void irq_enable()
{
    __asm__ __volatile__("sti");
}

static inline unsigned long irq_save_and_disable(void)
static inline unsigned long irq_save_and_disable()
{
unsigned long old = x86_save_flags();
irq_disable();
Expand All @@ -54,26 +55,16 @@ static inline unsigned long irq_save_and_disable(void)

#define CPU_FLAGS_NO_IRQ (0)

static inline bool irq_is_disabled(void)
// True if local interrupts are currently masked (EFLAGS.IF clear).
static inline bool irq_is_disabled()
{
    return !(x86_save_flags() & EFLAGS_INT_ENABLED);
}

void softirq_try_handle(void);
bool sched_is_preemption_disabled(void);
void sched_try_to_resched_if_needed(void);

// Restore IRQ state previously captured by irq_save_and_disable().
// Interrupts are only re-enabled if they were enabled at save time;
// restoring a flags word with IF clear leaves them masked.
static inline void irq_restore(unsigned long flags)
{
    if (flags & EFLAGS_INT_ENABLED)
    {
        irq_enable();

        // NOTE(review): this softirq/resched hook appears to be the
        // pre-change code path (removed by this commit in favour of
        // sched_enable_preempt handling) - confirm against the final tree.
        if (!sched_is_preemption_disabled())
        {
            softirq_try_handle();
            sched_try_to_resched_if_needed();
        }
    }
}

Expand Down
24 changes: 21 additions & 3 deletions kernel/include/onyx/x86/percpu.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021 Pedro Falcato
* Copyright (c) 2021 - 2023 Pedro Falcato
* This file is part of Onyx, and is released under the terms of the MIT License
* check LICENSE at the root directory for more information
*
Expand All @@ -16,17 +16,19 @@
// Clang doesn't implement %p yet
// TODO: Implement and submit a PR?
// clang-format off
#define __PCPU_VAR " %%gs:%c1"
#define ____PCPU_VAR(index) " %%gs:%c" index
#define __PCPU_CONSTRAINT(var) "i"((unsigned long) &var)
// clang-format on
#else
// GCC rejects the trick we use for clang, so use the "proper" solution here
// clang-format off
#define __PCPU_VAR " %%gs:%p1"
#define ____PCPU_VAR(index) " %%gs:%p" index
#define __PCPU_CONSTRAINT(var) "m"(var)
// clang-format on
#endif

#define __PCPU_VAR ____PCPU_VAR("1")

#define get_per_cpu_x86_internal(var, suffix, type) \
({ \
type val; \
Expand Down Expand Up @@ -89,4 +91,20 @@
#define add_per_cpu_4(var, val) add_per_cpu_internal(var, val, "l", uint32_t)
#define add_per_cpu_8(var, val) add_per_cpu_internal(var, val, "q", uint64_t)

// Single-instruction per-cpu increment: "inc{b,w,l,q} %gs:<var>".
// The size suffix is chosen by the inc_per_cpu_N wrapper; the type
// argument is unused here, kept for symmetry with add_per_cpu_internal.
#define inc_per_cpu_internal(var, suffix, type) \
    ({ __asm__ __volatile__("inc" suffix ____PCPU_VAR("0")::__PCPU_CONSTRAINT(var)); })

#define inc_per_cpu_1(var) inc_per_cpu_internal(var, "b", uint8_t)
#define inc_per_cpu_2(var) inc_per_cpu_internal(var, "w", uint16_t)
#define inc_per_cpu_4(var) inc_per_cpu_internal(var, "l", uint32_t)
#define inc_per_cpu_8(var) inc_per_cpu_internal(var, "q", uint64_t)

// Single-instruction per-cpu decrement: "dec{b,w,l,q} %gs:<var>".
#define dec_per_cpu_internal(var, suffix, type) \
    ({ __asm__ __volatile__("dec" suffix ____PCPU_VAR("0")::__PCPU_CONSTRAINT(var)); })

#define dec_per_cpu_1(var) dec_per_cpu_internal(var, "b", uint8_t)
#define dec_per_cpu_2(var) dec_per_cpu_internal(var, "w", uint16_t)
#define dec_per_cpu_4(var) dec_per_cpu_internal(var, "l", uint32_t)
#define dec_per_cpu_8(var) dec_per_cpu_internal(var, "q", uint64_t)

#endif
File renamed without changes.
Loading

0 comments on commit 4fb5421

Please sign in to comment.