在某些时候,softirq和tasklet很容易让人迷惑,因为两者实现机制极其相像,其间的差异异常细微(其实本质上根本没有差异,但是如果你使用系统缺省的代码行为,还是有些极其细微的差异,但是这种差异完全可以通过代码轻易地予以调整)。两者都和中断发生的延后处理有关,也即所谓的bottom half。搞清楚两者间的差异,对中断的延后处理时使用softirq还是tasklet就会更易做出决策。另一个中断延后处理是work queue,不过它是在进程上下文,这与softirq和 tasklet运行在中断上下文有本质性的区别。 在系统初始化期间: void __init softirq_init(void) { int cpu; for_each_possible_cpu(cpu) { per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head; } open_softirq(TASKLET_SOFTIRQ, tasklet_action); open_softirq(HI_SOFTIRQ, tasklet_hi_action); } void open_softirq(int nr, void (*action)(struct softirq_action *)) { softirq_vec[nr].action = action; } 所以当发现TASKLET_SOFTIRQ, HI_SOFTIRQ需要处理时,系统首先调用的是tasklet_action和tasklet_hi_action。 1. tasklet在driver中的使用 1. struct tasklet_struct 2. { 3. struct tasklet_struct *next; 4. unsigned long state; 5. atomic_t count; 6. void (*func)(unsigned long); 7. unsigned long data; 8. }; 有两种方式来初始化这个变量,静态的和动态的,分别是: 1. #define DECLARE_TASKLET(name, func, data) \ 2. struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
1. void tasklet_init(struct tasklet_struct *t, 2. void (*func)(unsigned long), unsigned long data) 3. { 4. t->next = NULL; 5. t->state = 0; 6. atomic_set(&t->count, 0); 7. t->func = func; 8. t->data = data; 9. } 在驱动程序的中断处理函数中加入tasklet的延迟操作: 1. static inline void tasklet_schedule(struct tasklet_struct *t) 2. { 3. if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) 4. __tasklet_schedule(t); 5. } 其中test_and_set_bit是个原子型操作,如果这个tasklet尚未被调度(即t->state的TASKLET_STATE_SCHED位原先为0),那么test_and_set_bit将会返回0,同时t->state被设定为TASKLET_STATE_SCHED, test和set之间不会被中断掉,从而保证同一tasklet不会被重复挂入队列。(注意这里检查的是state中的调度位,而不是count;count是否为0——即tasklet是否处于enable状态——是在后面的tasklet_action执行时才检查的。) 1. void __tasklet_schedule(struct tasklet_struct *t) 2. { 3. unsigned long flags; 4. 5. local_irq_save(flags); 6. t->next = NULL; 7. *__get_cpu_var(tasklet_vec).tail = t; 8. __get_cpu_var(tasklet_vec).tail = &(t->next); 9. raise_softirq_irqoff(TASKLET_SOFTIRQ); 10. local_irq_restore(flags); 11. } tasklet_vec属于per_cpu型变量,用以管理特定cpu上的tasklet队列,__tasklet_schedule将t加到tasklet_vec管理队列的末尾,同时将pending的第TASKLET_SOFTIRQ位置1。 当延后处理的时机成熟时,tasklet_action被调用: 1. static void tasklet_action(struct softirq_action *a) 2. { 3. struct tasklet_struct *list; 4. 5. local_irq_disable(); 6. list = __get_cpu_var(tasklet_vec).head; 7. __get_cpu_var(tasklet_vec).head = NULL; 8. __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head; 9. local_irq_enable(); 10. 11. while (list) { 12. struct tasklet_struct *t = list; 13. 14. list = list->next; 15. 16. if (tasklet_trylock(t)) { 17. if (!atomic_read(&t->count)) { 18. if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) 19. BUG(); 20. t->func(t->data); 21. tasklet_unlock(t); 22. continue; 23. } 24. tasklet_unlock(t); 25. } 26. 27. local_irq_disable(); 28. t->next = NULL; 29. *__get_cpu_var(tasklet_vec).tail = t; 30. __get_cpu_var(tasklet_vec).tail = &(t->next); 31. __raise_softirq_irqoff(TASKLET_SOFTIRQ); 32. local_irq_enable(); 33. } 34. } 由此可见,tasklet只能在当初提交它的CPU上运行。而且同一tasklet在同一时刻,只可能在一个cpu上运行,从这个角度,相对于其他softirq而言,无需考虑并行处理所带来的线程安全问题。 2. softirq在driver中的使用 1. 
void open_softirq(int nr, void (*action)(struct softirq_action *)) 2. { 3. softirq_vec[nr].action = action; 4. } softirq_vec是一全局数组: 1. static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; 在驱动程序的中断处理函数中加入softirq的延迟操作raise_softirq: 1. void raise_softirq(unsigned int nr) 2. { 3. unsigned long flags; 4. 5. local_irq_save(flags); 6. raise_softirq_irqoff(nr); 7. local_irq_restore(flags); 8. } 3. 非中断上下文中调用tasklet_schedule和raise_softirq 1. inline void raise_softirq_irqoff(unsigned int nr) 2. { 3. __raise_softirq_irqoff(nr); 4. 5. /* 6. * If we're in an interrupt or softirq, we're done 7. * (this also catches softirq-disabled code). We will 8. * actually run the softirq once we return from 9. * the irq or softirq. 10. * 11. * Otherwise we wake up ksoftirqd to make sure we 12. * schedule the softirq soon. 13. */ 14. if (!in_interrupt()) 15. wakeup_softirqd(); 16. } 所以如果不是在中断上下文中调用的tasklet_schedule或者raise_softirq,那么将会唤醒ksoftirqd来处理延后的操作。ksoftirqd的执行函数是run_ksoftirqd: 1. static int run_ksoftirqd(void * __bind_cpu) 2. { 3. set_current_state(TASK_INTERRUPTIBLE); 4. 5. while (!kthread_should_stop()) { 6. preempt_disable(); 7. if (!local_softirq_pending()) { 8. preempt_enable_no_resched(); 9. schedule(); 10. preempt_disable(); 11. } 12. 13. __set_current_state(TASK_RUNNING); 14. 15. while (local_softirq_pending()) { 16. /* Preempt disable stops cpu going offline. 17. If already offline, we'll be on wrong CPU: 18. don't process */ 19. if (cpu_is_offline((long)__bind_cpu)) 20. goto wait_to_die; 21. do_softirq(); 22. preempt_enable_no_resched(); 23. cond_resched(); 24. preempt_disable(); 25. rcu_note_context_switch((long)__bind_cpu); 26. } 27. preempt_enable(); 28. set_current_state(TASK_INTERRUPTIBLE); 29. } 30. __set_current_state(TASK_RUNNING); 31. return 0; 32. 33. wait_to_die: 34. preempt_enable(); 35. /* Wait for kthread_stop */ 36. set_current_state(TASK_INTERRUPTIBLE); 37. while (!kthread_should_stop()) { 38. schedule(); 39. 
set_current_state(TASK_INTERRUPTIBLE); 40. } 41. __set_current_state(TASK_RUNNING); 42. return 0; 43. } 如果pending上没有softirq需要处理,进程睡眠。当tasklet_schedule或者是raise_softirq在非中断上下文中唤醒该进程时,run_ksoftirqd会来检查pending上是否有softirq需要处理,如果有就处理之,当处理完所有的softirq时,进程再次睡眠。 经常说中断上下文啥的,那么内核究竟是如何来判断当前运行环境是否是中断上下文呢? 从下面的代码来看,内核复用了thread_info 的 preempt_count 变量来记录context 是否在硬中断或软中断处理上下文。 1. #define hardirq_count() (preempt_count() & HARDIRQ_MASK) 2. #define softirq_count() (preempt_count() & SOFTIRQ_MASK) 3. #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) 4. 5. /* 6. * Are we doing bottom half or hardware interrupt processing? 7. * Are we in a softirq context? Interrupt context? 8. */ 9. #define in_irq() (hardirq_count()) 10. #define in_softirq() (softirq_count()) 11. #define in_interrupt() (irq_count())