Changeset 78de83de in mainline


Timestamp: 2018-09-07T15:41:29Z (6 years ago)
Author: Jiří Zárevúcky <jiri.zarevucky@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: 077842c
Parents: 508b0df1
Message: Improve kernel spinlock and AS refcount.
Location: kernel/generic
Files: 1 added, 5 edited

  • kernel/generic/include/adt/cht.h

    r508b0df1 → r78de83de

     #define KERN_CONC_HASH_TABLE_H_
     
    +#include <atomic.h>
     #include <stdint.h>
     #include <adt/list.h>
  • kernel/generic/include/mm/as.h

    r508b0df1 → r78de83de

     #include <lib/elf.h>
     #include <arch.h>
    +#include <lib/refcount.h>
     
     #define AS                   THE->as
    …
     
             /** Number of references (i.e. tasks that reference this as). */
    -        atomic_t refcount;
    +        atomic_refcount_t refcount;
     
             mutex_t lock;
  • kernel/generic/include/synch/spinlock.h

    r508b0df1 → r78de83de

     #define KERN_SPINLOCK_H_
     
    +#include <assert.h>
    +#include <stdatomic.h>
     #include <stdbool.h>
    -#include <barrier.h>
    -#include <assert.h>
     #include <preemption.h>
    -#include <atomic.h>
     #include <arch/asm.h>
    …
     
     typedef struct spinlock {
    -        atomic_t val;
    +        atomic_flag flag;
     
     #ifdef CONFIG_DEBUG_SPINLOCK
    …
             spinlock_t lock_name = { \
                     .name = desc_name, \
    -                .val = { 0 } \
    +                .flag = ATOMIC_FLAG_INIT \
             }
     
    …
             static spinlock_t lock_name = { \
                     .name = desc_name, \
    -                .val = { 0 } \
    +                .flag = ATOMIC_FLAG_INIT \
             }
     
    …
     #define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
             spinlock_t lock_name = { \
    -                .val = { 0 } \
    +                .flag = ATOMIC_FLAG_INIT \
             }
     
     #define SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
             static spinlock_t lock_name = { \
    -                .val = { 0 } \
    +                .flag = ATOMIC_FLAG_INIT \
             }
     
    …
     NO_TRACE static inline void spinlock_unlock_nondebug(spinlock_t *lock)
     {
    -        /*
    -         * Prevent critical section code from bleeding out this way down.
    -         */
    -        CS_LEAVE_BARRIER();
    -
    -        atomic_set(&lock->val, 0);
    +        atomic_flag_clear_explicit(&lock->flag, memory_order_release);
             preemption_enable();
     }
    …
                     .lock = { \
                             .name = desc_name, \
    -                        .val = { 0 } \
    +                        .flag = ATOMIC_FLAG_INIT \
                     }, \
                     .guard = false, \
    …
                     .lock = { \
                             .name = desc_name, \
    -                        .val = { 0 } \
    +                        .flag = ATOMIC_FLAG_INIT \
                     }, \
                     .guard = false, \
    …
             irq_spinlock_t lock_name = { \
                     .lock = { \
    -                        .val = { 0 } \
    +                        .flag = ATOMIC_FLAG_INIT \
                     }, \
                     .guard = false, \
    …
             static irq_spinlock_t lock_name = { \
                     .lock = { \
    -                        .val = { 0 } \
    +                        .flag = ATOMIC_FLAG_INIT \
                     }, \
                     .guard = false, \
  • kernel/generic/src/mm/as.c

    r508b0df1 → r78de83de

                     as->asid = ASID_INVALID;
     
    -        atomic_set(&as->refcount, 0);
    +        refcount_init(&as->refcount);
             as->cpu_refcount = 0;
     
    …
     
             assert(as != AS);
    -        assert(atomic_get(&as->refcount) == 0);
    +        assert(refcount_unique(&as->refcount));
     
             /*
    …
     NO_TRACE void as_hold(as_t *as)
     {
    -        atomic_inc(&as->refcount);
    +        refcount_up(&as->refcount);
     }
     
    …
      * destroys the address space.
      *
    - * @param asAddress space to be released.
    + * @param as Address space to be released.
      *
      */
     NO_TRACE void as_release(as_t *as)
     {
    -        if (atomic_predec(&as->refcount) == 0)
    +        if (refcount_down(&as->refcount))
                     as_destroy(as);
     }
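The refcount calls above come from the new <lib/refcount.h> header (the file added by this changeset). As a rough illustration only, here is a minimal sketch of a reference counter with the semantics these call sites rely on, written against C11 <stdatomic.h>: the count starts at one, refcount_up takes another reference, refcount_down returns true when the last reference is dropped, and refcount_unique checks that the caller holds the only remaining reference. The actual HelenOS implementation may differ in detail.

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
            atomic_int count;   /* number of live references */
    } atomic_refcount_t;

    /* Start with a single (owning) reference. */
    static inline void refcount_init(atomic_refcount_t *rc)
    {
            atomic_store_explicit(&rc->count, 1, memory_order_relaxed);
    }

    /* Take an additional reference; a relaxed increment is sufficient. */
    static inline void refcount_up(atomic_refcount_t *rc)
    {
            atomic_fetch_add_explicit(&rc->count, 1, memory_order_relaxed);
    }

    /* True if the caller holds the only remaining reference. */
    static inline bool refcount_unique(atomic_refcount_t *rc)
    {
            return atomic_load_explicit(&rc->count, memory_order_acquire) == 1;
    }

    /*
     * Drop one reference. Returns true when the last reference is gone and
     * the caller should destroy the object. The release decrement paired
     * with the acquire fence makes writes done by other former holders
     * visible to the destroying thread.
     */
    static inline bool refcount_down(atomic_refcount_t *rc)
    {
            if (atomic_fetch_sub_explicit(&rc->count, 1, memory_order_release) == 1) {
                    atomic_thread_fence(memory_order_acquire);
                    return true;
            }
            return false;
    }

Note that the call sites suggest the counting convention also changed: the old code initialized the counter to zero and destroyed the address space when atomic_predec() returned zero, while the new interface counts the initial owner, so as_destroy() now asserts refcount_unique() rather than a zero count.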
  • kernel/generic/src/synch/spinlock.c

    r508b0df1 → r78de83de

     void spinlock_initialize(spinlock_t *lock, const char *name)
     {
    -        atomic_set(&lock->val, 0);
    +        atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
     #ifdef CONFIG_DEBUG_SPINLOCK
             lock->name = name;
    …
     
             preemption_disable();
    -        while (test_and_set(&lock->val)) {
    +        while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
                     /*
                      * We need to be careful about particular locks
    …
             if (deadlock_reported)
                     printf("cpu%u: not deadlocked\n", CPU->id);
    -
    -        /*
    -         * Prevent critical section code from bleeding out this way up.
    -         */
    -        CS_ENTER_BARRIER();
     }
     
    …
             ASSERT_SPINLOCK(spinlock_locked(lock), lock);
     
    -        /*
    -         * Prevent critical section code from bleeding out this way down.
    -         */
    -        CS_LEAVE_BARRIER();
    -
    -        atomic_set(&lock->val, 0);
    +        atomic_flag_clear_explicit(&lock->flag, memory_order_release);
             preemption_enable();
     }
    …
     {
             preemption_disable();
    -        bool ret = !test_and_set(&lock->val);
    -
    -        /*
    -         * Prevent critical section code from bleeding out this way up.
    -         */
    -        CS_ENTER_BARRIER();
    +        bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);
     
             if (!ret)
    …
     bool spinlock_locked(spinlock_t *lock)
     {
    -        return atomic_get(&lock->val) != 0;
    +        // XXX: Atomic flag doesn't support simple atomic read (by design),
    +        //      so instead we test_and_set and then clear if necessary.
    +        //      This function is only used inside assert, so we don't need
    +        //      any preemption_disable/enable here.
    +
    +        bool ret = atomic_flag_test_and_set_explicit(&lock->flag, memory_order_relaxed);
    +        if (!ret)
    +                atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
    +        return ret;
     }
     
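Taken together, the spinlock changes drop the custom atomic_t plus CS_ENTER_BARRIER()/CS_LEAVE_BARRIER() combination in favour of a C11 atomic_flag, where acquire ordering on test-and-set and release ordering on clear provide the same one-way fences around the critical section. Below is a minimal stand-alone sketch of that scheme for illustration only; it leaves out HelenOS specifics such as preemption control, deadlock reporting and the CONFIG_DEBUG_SPINLOCK bookkeeping, and the demo_* names are invented for this example.

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
            atomic_flag flag;   /* clear = unlocked, set = locked */
    } demo_spinlock_t;

    #define DEMO_SPINLOCK_INIT { .flag = ATOMIC_FLAG_INIT }

    static inline void demo_lock(demo_spinlock_t *lock)
    {
            /* Acquire ordering keeps the critical section from floating above the lock. */
            while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
                    /* spin */
            }
    }

    static inline bool demo_trylock(demo_spinlock_t *lock)
    {
            /* Succeeds only if the flag was previously clear (unlocked). */
            return !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);
    }

    static inline void demo_unlock(demo_spinlock_t *lock)
    {
            /* Release ordering keeps the critical section from floating below the unlock. */
            atomic_flag_clear_explicit(&lock->flag, memory_order_release);
    }

Because the ordering constraints are attached to the flag operations themselves, no separate barrier macros are needed: accesses inside the critical section cannot be reordered before the acquiring test-and-set or after the releasing clear.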