Changeset 515f1b1 in mainline


Timestamp:  2024-01-21T16:01:39Z (3 months ago)
Author:     Jiří Zárevúcky <zarevucky.jiri@…>
Branches:   master
Children:   3fa4e22a
Parents:    11909ce3
Message:    Organize thread_t fields by access constraints

File:
1 edited

  • kernel/generic/include/proc/thread.h

--- kernel/generic/include/proc/thread.h (r11909ce3)
+++ kernel/generic/include/proc/thread.h (r515f1b1)
@@ -95,4 +95,8 @@
         waitq_t join_wq;
 
+        /** Thread accounting. */
+        atomic_time_stat_t ucycles;
+        atomic_time_stat_t kcycles;
+
         /** Lock protecting thread structure.
          *
@@ -101,5 +105,28 @@
         IRQ_SPINLOCK_DECLARE(lock);
 
-        char name[THREAD_NAME_BUFLEN];
+        /** Architecture-specific data. */
+        thread_arch_t arch;
+
+#ifdef CONFIG_UDEBUG
+        /**
+         * If true, the scheduler will print a stack trace
+         * to the kernel console upon scheduling this thread.
+         */
+        atomic_int_fast8_t btrace;
+
+        /** Debugging stuff */
+        udebug_thread_t udebug;
+#endif /* CONFIG_UDEBUG */
+
+        /*
+         * Immutable fields.
+         *
+         * These fields are only modified during initialization, and are not
+         * changed at any time between initialization and destruction.
+         * Can be accessed without synchronization in most places.
+         */
+
+        /** Thread ID. */
+        thread_id_t tid;
 
         /** Function implementing the thread. */
@@ -108,4 +135,32 @@
         void *thread_arg;
 
+        char name[THREAD_NAME_BUFLEN];
+
+        /** Thread is executed in user space. */
+        bool uspace;
+
+        /** Thread doesn't affect accumulated accounting. */
+        bool uncounted;
+
+        /** Containing task. */
+        task_t *task;
+
+        /** Thread's kernel stack. */
+        uint8_t *kstack;
+
+        /*
+         * Local fields.
+         *
+         * These fields can be safely accessed from code that _controls execution_
+         * of this thread. Code controls execution of a thread if either:
+         *  - it runs in the context of said thread AND interrupts are disabled
+         *    (interrupts can and will access these fields)
+         *  - the thread is not running, and the code accessing it can legally
+         *    add/remove the thread to/from a runqueue, i.e., either:
+         *    - it is allowed to enqueue thread in a new runqueue
+         *    - it holds the lock to the runqueue containing the thread
+         *
+         */
+
         /**
          * From here, the stored context is restored
@@ -114,4 +169,6 @@
         context_t saved_context;
 
+        // TODO: we only need one of the two bools below
+
         /**
          * True if this thread is executing copy_from_uspace().
@@ -126,4 +183,9 @@
         bool in_copy_to_uspace;
 
+        /*
+         * FPU context is a special case. If lazy FPU switching is disabled,
+         * it acts as a regular local field. However, if lazy switching is enabled,
+         * the context is synchronized via CPU->fpu_lock
+         */
 #ifdef CONFIG_FPU
         fpu_context_t fpu_context;
@@ -134,4 +196,7 @@
         unsigned int nomigrate;
 
+        /** Thread was migrated to another CPU and has not run yet. */
+        bool stolen;
+
         /** Thread state. */
         atomic_int_fast32_t state;
@@ -139,40 +204,10 @@
         /** Thread CPU. */
         _Atomic(cpu_t *) cpu;
-        /** Containing task. */
-        task_t *task;
-        /** Thread was migrated to another CPU and has not run yet. */
-        bool stolen;
-        /** Thread is executed in user space. */
-        bool uspace;
-
-        /** Thread accounting. */
-        atomic_time_stat_t ucycles;
-        atomic_time_stat_t kcycles;
+
+        /** Thread's priority. Implemented as index to CPU->rq */
+        atomic_int_fast32_t priority;
+
         /** Last sampled cycle. */
         uint64_t last_cycle;
-        /** Thread doesn't affect accumulated accounting. */
-        bool uncounted;
-
-        /** Thread's priority. Implemented as index to CPU->rq */
-        atomic_int_fast32_t priority;
-        /** Thread ID. */
-        thread_id_t tid;
-
-        /** Architecture-specific data. */
-        thread_arch_t arch;
-
-        /** Thread's kernel stack. */
-        uint8_t *kstack;
-
-#ifdef CONFIG_UDEBUG
-        /**
-         * If true, the scheduler will print a stack trace
-         * to the kernel console upon scheduling this thread.
-         */
-        atomic_int_fast8_t btrace;
-
-        /** Debugging stuff */
-        udebug_thread_t udebug;
-#endif /* CONFIG_UDEBUG */
 } thread_t;
 
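The comment blocks introduced here divide thread_t members into access categories: fields protected by the thread's lock, immutable fields readable without synchronization, local fields touched only by code that controls the thread's execution, and the lazily switched FPU context. The sketch below is a minimal illustration of what those categories mean for callers; it is not part of the changeset. The helper functions are hypothetical, and the include paths, the THREAD macro, irq_spinlock_lock()/irq_spinlock_unlock(), and interrupts_disable()/interrupts_restore() are assumed to be the usual HelenOS kernel facilities.

#include <arch.h>               /* THREAD, the currently running thread (assumed) */
#include <proc/thread.h>        /* thread_t */
#include <synch/spinlock.h>     /* irq_spinlock_lock()/irq_spinlock_unlock() (assumed path) */

/* Immutable fields (tid, name, uspace, task, kstack, ...): written only
 * during initialization, so they may be read without taking thread->lock. */
static bool example_thread_is_uspace(thread_t *thread)
{
        return thread->uspace;
}

/* Fields protected by the thread lock: take thread->lock around any access
 * from code that does not otherwise own the thread. */
static void example_thread_touch_protected(thread_t *thread)
{
        irq_spinlock_lock(&thread->lock, true);
        /* ... read or modify the lock-protected fields here ... */
        irq_spinlock_unlock(&thread->lock, true);
}

/* Local fields (saved_context, in_copy_to_uspace, ...): only code that
 * controls execution of the thread may touch them, e.g. the thread itself
 * with interrupts disabled. */
static void example_thread_touch_local(void)
{
        ipl_t ipl = interrupts_disable();
        THREAD->in_copy_to_uspace = false;
        interrupts_restore(ipl);
}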