
semaphore

 zhouxitaa 2012-04-12

For this kind of thing, let's go straight to the kernel code:


 

#ifndef __LINUX_SEMAPHORE_H

#define __LINUX_SEMAPHORE_H

 

#include <linux/list.h>

#include <linux/spinlock.h>

 

/* Please don't access any members of this structure directly */

struct semaphore {

       spinlock_t              lock;

       unsigned int           count;

       struct list_head       wait_list;

};

 

#define __SEMAPHORE_INITIALIZER(name, n)                          \

{                                                             \

       .lock              = __SPIN_LOCK_UNLOCKED((name).lock),           \

       .count            = n,                                    \

       .wait_list = LIST_HEAD_INIT((name).wait_list),             \

}

 

#define DECLARE_MUTEX(name)     \      // renamed to DEFINE_SEMAPHORE(name) in 2.6.37; declares and initializes a semaphore with count 1

       struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)

 

static inline void sema_init(struct semaphore *sem, int val)

{

       static struct lock_class_key __key;

       *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);

       lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);

}

 

#define init_MUTEX(sem)           sema_init(sem, 1)     // for an already-declared struct semaphore variable, initialize its count to 1

#define init_MUTEX_LOCKED(sem)  sema_init(sem, 0) // initialize the count to 0, i.e. the protected resource starts out locked

 

extern void down(struct semaphore *sem);

extern int __must_check down_interruptible(struct semaphore *sem);

extern int __must_check down_killable(struct semaphore *sem);

extern int __must_check down_trylock(struct semaphore *sem); 

extern int __must_check down_timeout(struct semaphore *sem, long jiffies);

extern void up(struct semaphore *sem);

 

#endif /* __LINUX_SEMAPHORE_H */
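Before walking through the implementation, here is a minimal sketch of the two initialization styles the header provides. This is my own illustration, not kernel code; my_sem, struct my_dev and my_dev_setup() are made-up names:

#include <linux/semaphore.h>

/* Static: declares a semaphore whose count starts at 1 (mutex-style).
 * On kernels before 2.6.37 this would have been DECLARE_MUTEX(my_sem). */
static DEFINE_SEMAPHORE(my_sem);

/* Dynamic: the semaphore is embedded in a driver-private structure
 * (struct my_dev is hypothetical) and initialized at runtime. */
struct my_dev {
       struct semaphore sem;
};

static void my_dev_setup(struct my_dev *dev)
{
       sema_init(&dev->sem, 1);      /* count 1: at most one holder at a time */
}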

 

void down(struct semaphore *sem)

{

       unsigned long flags;

 

       spin_lock_irqsave(&sem->lock, flags);  // take the spinlock (with interrupts disabled)

       if (likely(sem->count > 0))  // likely() hints that this branch is the common case (a compiler optimization)

              sem->count--;

       else

              __down(sem);         // down() -> __down() -> __down_common()

       spin_unlock_irqrestore(&sem->lock, flags); // release the spinlock

}

EXPORT_SYMBOL(down);

 

static noinline void __sched __down(struct semaphore *sem)

{

       __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);

}

 

 

struct semaphore_waiter {

       struct list_head list;

       struct task_struct *task;

       int up;

};

 

 

 

static inline int __sched __down_common(struct semaphore *sem, long state,

                                                        long timeout)

{

       struct task_struct *task = current;    // get the current process's task_struct

       struct semaphore_waiter waiter;

 

       list_add_tail(&waiter.list, &sem->wait_list); // add the current task to wait_list (which was initialized in sema_init())

       waiter.task = task;

       waiter.up = 0;

 

       for (;;) {

              if (signal_pending_state(state, task))    // has a pending signal interrupted the wait?

                     goto interrupted;

              if (timeout <= 0)

                     goto timed_out;

              __set_task_state(task, state);   // set the task state: TASK_UNINTERRUPTIBLE here, or TASK_INTERRUPTIBLE if down_interruptible() was used

              spin_unlock_irq(&sem->lock);

              timeout = schedule_timeout(timeout); // with timeout == MAX_SCHEDULE_TIMEOUT this simply calls schedule()

              spin_lock_irq(&sem->lock);

              if (waiter.up)

                     return 0;

       }

 

 timed_out:

       list_del(&waiter.list);

       return -ETIME;

 

 interrupted:

       list_del(&waiter.list);

       return -EINTR;

}

 

// To sum up: when the semaphore cannot be obtained, all down() does is add the current task to the semaphore's wait list and mark it as interruptible or uninterruptible sleep; the task actually goes to sleep once execution reaches schedule_timeout() and the scheduler runs.
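To see how these entry points are used in practice, here is a minimal sketch of a driver-side calling pattern. It is my own example, not kernel source; it reuses the hypothetical struct my_dev from the sketch above and assumes <linux/jiffies.h> for msecs_to_jiffies():

static int my_dev_do_work(struct my_dev *dev)
{
       /* down_interruptible() is the usual choice in process context: the sleep
        * can be broken by a signal, in which case it returns -EINTR and the
        * protected resource must not be touched. */
       if (down_interruptible(&dev->sem))
              return -ERESTARTSYS;
       /* ... critical section protected by the semaphore ... */
       up(&dev->sem);

       /* down_timeout() bounds the wait; as __down_common() above shows,
        * it returns -ETIME if the semaphore is not obtained in time. */
       if (down_timeout(&dev->sem, msecs_to_jiffies(100)))
              return -EBUSY;
       /* ... critical section ... */
       up(&dev->sem);

       return 0;
}

Plain down() never fails, but it also cannot be interrupted, so it is generally reserved for cases where waiting indefinitely is acceptable.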

 

 

 

 

 

static noinline void __sched __up(struct semaphore *sem)

{

    struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,

                                   struct semaphore_waiter, list);

    list_del(&waiter->list);

    waiter->up = 1;

    wake_up_process(waiter->task);

}
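Since __up() only removes the first waiter from the list, marks waiter->up = 1 and calls wake_up_process(), up() never sleeps and can therefore be called from interrupt context. A common signaling pattern looks like the sketch below (my own illustration with made-up names, assuming <linux/interrupt.h> and <linux/semaphore.h> are included and that sema_init(&data_ready, 0) was done during setup so the semaphore starts "taken"):

static struct semaphore data_ready;

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
       /* New data has arrived: wake whoever is sleeping in down_interruptible(). */
       up(&data_ready);
       return IRQ_HANDLED;
}

static int my_wait_for_data(void)
{
       /* Sleeps until the interrupt handler calls up(); __up() sets waiter->up = 1
        * and wake_up_process() puts the task back on a runqueue. */
       return down_interruptible(&data_ready);
}

wake_up_process() in turn ends up in try_to_wake_up() in kernel/sched.c, which is the function listed next: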

 

 

static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)

{

    int cpu, orig_cpu, this_cpu, success = 0;

    unsigned long flags;

    long old_state;

    struct rq *rq;

 

    if (!sched_feat(SYNC_WAKEUPS))

       sync = 0;

 

#ifdef CONFIG_SMP

    if (sched_feat(LB_WAKEUP_UPDATE)) {

       struct sched_domain *sd;

 

       this_cpu = raw_smp_processor_id();

       cpu = task_cpu(p);

 

       for_each_domain(this_cpu, sd) {

              if (cpu_isset(cpu, sd->span)) {

                     update_shares(sd);

                     break;

              }

       }

    }

#endif

 

    smp_wmb();

    rq = task_rq_lock(p, &flags);

    old_state = p->state;

    if (!(old_state & state))

       goto out;

 

    if (p->se.on_rq)

       goto out_running;

 

    cpu = task_cpu(p);

    orig_cpu = cpu;

    this_cpu = smp_processor_id();

 

#ifdef CONFIG_SMP

    if (unlikely(task_running(rq, p)))

       goto out_activate;

 

    cpu = p->sched_class->select_task_rq(p, sync);

    if (cpu != orig_cpu) {

       set_task_cpu(p, cpu);

       task_rq_unlock(rq, &flags);

       /* might preempt at this point */

       rq = task_rq_lock(p, &flags);

       old_state = p->state;

       if (!(old_state & state))

              goto out;

       if (p->se.on_rq)

              goto out_running;

 

       this_cpu = smp_processor_id();

       cpu = task_cpu(p);

    }

 

#ifdef CONFIG_SCHEDSTATS

    schedstat_inc(rq, ttwu_count);

    if (cpu == this_cpu)

       schedstat_inc(rq, ttwu_local);

    else {

       struct sched_domain *sd;

       for_each_domain(this_cpu, sd) {

              if (cpu_isset(cpu, sd->span)) {

                     schedstat_inc(sd, ttwu_wake_remote);

                     break;

              }

       }

    }

#endif /* CONFIG_SCHEDSTATS */

 

out_activate:

#endif /* CONFIG_SMP */

    schedstat_inc(p, se.nr_wakeups);

    if (sync)

       schedstat_inc(p, se.nr_wakeups_sync);

    if (orig_cpu != cpu)

       schedstat_inc(p, se.nr_wakeups_migrate);

    if (cpu == this_cpu)

       schedstat_inc(p, se.nr_wakeups_local);

    else

       schedstat_inc(p, se.nr_wakeups_remote);

    update_rq_clock(rq);

    activate_task(rq, p, 1);

    success = 1;

 

out_running:

    trace_sched_wakeup(rq, p);

    check_preempt_curr(rq, p, sync);

 

    p->state = TASK_RUNNING;

#ifdef CONFIG_SMP

    if (p->sched_class->task_wake_up)

       p->sched_class->task_wake_up(rq, p);

#endif

out:

    current->se.last_wakeup = current->se.sum_exec_runtime;

 

    task_rq_unlock(rq, &flags);

 

    return success;

}

 

 
