
[hurd] 20/26: really add files


From: Samuel Thibault
Subject: [hurd] 20/26: really add files
Date: Tue, 22 Sep 2015 21:51:49 +0000

This is an automated email from the git hooks/post-receive script.

sthibault pushed a commit to branch dde
in repository hurd.

commit abe264a29caec5c85962b86761e2ed3b9780958a
Author: Samuel Thibault <address@hidden>
Date:   Fri Aug 28 19:59:22 2015 +0200

    really add files
---
 libdde_linux26/contrib/kernel/rcuclassic.c | 788 +++++++++++++++++++++++++++++
 libdde_linux26/contrib/lib/devres.c        | 351 +++++++++++++
 2 files changed, 1139 insertions(+)

diff --git a/libdde_linux26/contrib/kernel/rcuclassic.c b/libdde_linux26/contrib/kernel/rcuclassic.c
new file mode 100644
index 0000000..654c640
--- /dev/null
+++ b/libdde_linux26/contrib/kernel/rcuclassic.c
@@ -0,0 +1,788 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2001
+ *
+ * Authors: Dipankar Sarma <address@hidden>
+ *         Manfred Spraul <address@hidden>
+ *
+ * Based on the original work by Paul McKenney <address@hidden>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *             Documentation/RCU
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+       STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
+
+/* Definition for rcupdate control block. */
+static struct rcu_ctrlblk rcu_ctrlblk = {
+       .cur = -300,
+       .completed = -300,
+       .pending = -300,
+       .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
+       .cpumask = CPU_BITS_NONE,
+};
+static struct rcu_ctrlblk rcu_bh_ctrlblk = {
+       .cur = -300,
+       .completed = -300,
+       .pending = -300,
+       .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
+       .cpumask = CPU_BITS_NONE,
+};
+
+DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
+DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
+
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+                       struct rcu_ctrlblk *rcp)
+{
+       int cpu;
+       unsigned long flags;
+
+       set_need_resched();
+       spin_lock_irqsave(&rcp->lock, flags);
+       if (unlikely(!rcp->signaled)) {
+               rcp->signaled = 1;
+               /*
+                * Don't send IPI to itself. With irqs disabled,
+                * rdp->cpu is the current cpu.
+                *
+                * cpu_online_mask is updated by _cpu_down()
+                * using __stop_machine(). Since we're in an irqs-disabled
+                * section, __stop_machine() is not executing, hence
+                * cpu_online_mask is stable.
+                *
+                * However, a cpu might have been offlined _just_ before
+                * we disabled irqs while entering here.
+                * And the rcu subsystem might not yet have handled the
+                * CPU_DEAD notification, leading to the offlined cpu's bit
+                * being set in rcp->cpumask.
+                *
+                * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
+                * sending smp_send_reschedule() to an offlined CPU.
+                */
+               for_each_cpu_and(cpu,
+                                 to_cpumask(rcp->cpumask), cpu_online_mask) {
+                       if (cpu != rdp->cpu)
+                               smp_send_reschedule(cpu);
+               }
+       }
+       spin_unlock_irqrestore(&rcp->lock, flags);
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+                       struct rcu_ctrlblk *rcp)
+{
+       set_need_resched();
+}
+#endif
+
+static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
+               struct rcu_data *rdp)
+{
+       long batch;
+
+       head->next = NULL;
+       smp_mb(); /* Read of rcp->cur must happen after any change by caller. */
+
+       /*
+        * Determine the batch number of this callback.
+        *
+        * Using ACCESS_ONCE to avoid the following error when gcc eliminates
+        * the local variable "batch" and emits code like this:
+        *      1) rdp->batch = rcp->cur + 1 # gets old value
+        *      ......
+        *      2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
+        * then [*nxttail[0], *nxttail[1]) may contain callbacks
+        * whose batch# = rdp->batch; see the comment on struct rcu_data.
+        */
+       batch = ACCESS_ONCE(rcp->cur) + 1;
+
+       if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
+               /* process callbacks */
+               rdp->nxttail[0] = rdp->nxttail[1];
+               rdp->nxttail[1] = rdp->nxttail[2];
+               if (rcu_batch_after(batch - 1, rdp->batch))
+                       rdp->nxttail[0] = rdp->nxttail[2];
+       }
+
+       rdp->batch = batch;
+       *rdp->nxttail[2] = head;
+       rdp->nxttail[2] = &head->next;
+
+       if (unlikely(++rdp->qlen > qhimark)) {
+               rdp->blimit = INT_MAX;
+               force_quiescent_state(rdp, &rcu_ctrlblk);
+       }
+}
+
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+
+static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
+{
+       rcp->gp_start = jiffies;
+       rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
+}
+
+static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+       int cpu;
+       long delta;
+       unsigned long flags;
+
+       /* Only let one CPU complain about others per time interval. */
+
+       spin_lock_irqsave(&rcp->lock, flags);
+       delta = jiffies - rcp->jiffies_stall;
+       if (delta < 2 || rcp->cur != rcp->completed) {
+               spin_unlock_irqrestore(&rcp->lock, flags);
+               return;
+       }
+       rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+       spin_unlock_irqrestore(&rcp->lock, flags);
+
+       /* OK, time to rat on our buddy... */
+
+       printk(KERN_ERR "INFO: RCU detected CPU stalls:");
+       for_each_possible_cpu(cpu) {
+               if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
+                       printk(" %d", cpu);
+       }
+       printk(" (detected by %d, t=%ld jiffies)\n",
+              smp_processor_id(), (long)(jiffies - rcp->gp_start));
+}
+
+static void print_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+       unsigned long flags;
+
+       printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
+                       smp_processor_id(), jiffies,
+                       jiffies - rcp->gp_start);
+       dump_stack();
+       spin_lock_irqsave(&rcp->lock, flags);
+       if ((long)(jiffies - rcp->jiffies_stall) >= 0)
+               rcp->jiffies_stall =
+                       jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+       spin_unlock_irqrestore(&rcp->lock, flags);
+       set_need_resched();  /* kick ourselves to get things going. */
+}
+
+static void check_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+       long delta;
+
+       delta = jiffies - rcp->jiffies_stall;
+       if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
+               delta >= 0) {
+
+               /* We haven't checked in, so go dump stack. */
+               print_cpu_stall(rcp);
+
+       } else if (rcp->cur != rcp->completed && delta >= 2) {
+
+               /* They had two seconds to dump stack, so complain. */
+               print_other_cpu_stall(rcp);
+       }
+}
+
+#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
+{
+}
+
+static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/**
+ * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed.  RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void call_rcu(struct rcu_head *head,
+                               void (*func)(struct rcu_head *rcu))
+{
+       unsigned long flags;
+
+       head->func = func;
+       local_irq_save(flags);
+       __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
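+/*
+ * Usage sketch (illustrative; struct foo and its helpers are
+ * placeholders, not part of the upstream source): a typical caller
+ * embeds a struct rcu_head in its own structure, unpublishes the
+ * structure so new readers cannot reach it, then defers the actual
+ * free until a grace period has elapsed:
+ *
+ *      struct foo {
+ *              struct list_head list;
+ *              int data;
+ *              struct rcu_head rcu;
+ *      };
+ *
+ *      static void foo_reclaim(struct rcu_head *head)
+ *      {
+ *              kfree(container_of(head, struct foo, rcu));
+ *      }
+ *
+ *      static void foo_remove(struct foo *fp)
+ *      {
+ *              list_del_rcu(&fp->list);
+ *              call_rcu(&fp->rcu, foo_reclaim);
+ *      }
+ */
+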
+/**
+ * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by rcu_read_lock() and
+ * rcu_read_unlock() if in interrupt context, or rcu_read_lock_bh()
+ * and rcu_read_unlock_bh() if in process context. These may be nested.
+ */
+void call_rcu_bh(struct rcu_head *head,
+                               void (*func)(struct rcu_head *rcu))
+{
+       unsigned long flags;
+
+       head->func = func;
+       local_irq_save(flags);
+       __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu_bh);
+
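+/*
+ * Read-side sketch (illustrative; gp and do_something_with() are
+ * placeholders, not part of the upstream source): updates published
+ * via call_rcu_bh() pair with the _bh read-side primitives when the
+ * reader runs in process context:
+ *
+ *      rcu_read_lock_bh();
+ *      p = rcu_dereference(gp);
+ *      if (p)
+ *              do_something_with(p->data);
+ *      rcu_read_unlock_bh();
+ */
+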
+/*
+ * Return the number of RCU batches processed thus far.  Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed(void)
+{
+       return rcu_ctrlblk.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU bh batches processed thus far.  Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed_bh(void)
+{
+       return rcu_bh_ctrlblk.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+
+/* Raises the softirq for processing rcu_callbacks. */
+static inline void raise_rcu_softirq(void)
+{
+       raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Invoke the completed RCU callbacks. They are expected to be in
+ * a per-cpu list.
+ */
+static void rcu_do_batch(struct rcu_data *rdp)
+{
+       unsigned long flags;
+       struct rcu_head *next, *list;
+       int count = 0;
+
+       list = rdp->donelist;
+       while (list) {
+               next = list->next;
+               prefetch(next);
+               list->func(list);
+               list = next;
+               if (++count >= rdp->blimit)
+                       break;
+       }
+       rdp->donelist = list;
+
+       local_irq_save(flags);
+       rdp->qlen -= count;
+       local_irq_restore(flags);
+       if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+               rdp->blimit = blimit;
+
+       if (!rdp->donelist)
+               rdp->donetail = &rdp->donelist;
+       else
+               raise_rcu_softirq();
+}
+
+/*
+ * Grace period handling:
+ * The grace period handling consists of two steps:
+ * - A new grace period is started.
+ *   This is done by rcu_start_batch. The start is not broadcast to
+ *   all cpus; they must pick this up by comparing rcp->cur with
+ *   rdp->quiescbatch. All cpus are recorded in the
+ *   rcu_ctrlblk.cpumask bitmap.
+ * - All cpus must go through a quiescent state.
+ *   Since the start of the grace period is not broadcast, at least two
+ *   calls to rcu_check_quiescent_state are required:
+ *   The first call just notices that a new grace period is running. The
+ *   following calls check if there was a quiescent state since the beginning
+ *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
+ *   the bitmap is empty, then the grace period is completed.
+ *   rcu_check_quiescent_state calls rcu_start_batch to start the next grace
+ *   period (if necessary).
+ */
+
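+/*
+ * Worked example (illustrative, not from the upstream source):
+ * suppose rcp->cur == rcp->completed == 5 and a cpu queues a callback,
+ * setting rcp->pending to 6.  rcu_start_batch then advances rcp->cur
+ * to 6 and seeds rcp->cpumask with the online, non-tickless cpus.  As
+ * each cpu passes through a quiescent state, cpu_quiet clears it from
+ * the mask; once the mask empties, rcp->completed becomes 6 and the
+ * callbacks queued for batch 6 may be invoked.
+ */
+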
+/*
+ * Register a new batch of callbacks, and start it up if there is currently no
+ * active batch and the batch to be registered has not already occurred.
+ * Caller must hold rcu_ctrlblk.lock.
+ */
+static void rcu_start_batch(struct rcu_ctrlblk *rcp)
+{
+       if (rcp->cur != rcp->pending &&
+                       rcp->completed == rcp->cur) {
+               rcp->cur++;
+               record_gp_stall_check_time(rcp);
+
+               /*
+                * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
+                * barrier. Otherwise it can cause tickless idle CPUs to be
+                * included in rcp->cpumask, which will extend grace periods
+                * unnecessarily.
+                */
+               smp_mb();
+               cpumask_andnot(to_cpumask(rcp->cpumask),
+                              cpu_online_mask, nohz_cpu_mask);
+
+               rcp->signaled = 0;
+       }
+}
+
+/*
+ * A cpu went through a quiescent state since the beginning of the grace
+ * period. Clear it from the cpu mask and complete the grace period if it was
+ * the last cpu. Start another grace period if someone has further entries pending.
+ */
+static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
+{
+       cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
+       if (cpumask_empty(to_cpumask(rcp->cpumask))) {
+               /* batch completed ! */
+               rcp->completed = rcp->cur;
+               rcu_start_batch(rcp);
+       }
+}
+
+/*
+ * Check if the cpu has gone through a quiescent state (say context
+ * switch). If so and if it hasn't already done so in this RCU
+ * quiescent cycle, then indicate that it has done so.
+ */
+static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
+                                       struct rcu_data *rdp)
+{
+       unsigned long flags;
+
+       if (rdp->quiescbatch != rcp->cur) {
+               /* start new grace period: */
+               rdp->qs_pending = 1;
+               rdp->passed_quiesc = 0;
+               rdp->quiescbatch = rcp->cur;
+               return;
+       }
+
+       /* Grace period already completed for this cpu?
+        * qs_pending is checked instead of the actual bitmap to avoid
+        * cacheline thrashing.
+        */
+       if (!rdp->qs_pending)
+               return;
+
+       /*
+        * Was there a quiescent state since the beginning of the grace
+        * period? If no, then exit and wait for the next call.
+        */
+       if (!rdp->passed_quiesc)
+               return;
+       rdp->qs_pending = 0;
+
+       spin_lock_irqsave(&rcp->lock, flags);
+       /*
+        * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
+        * during cpu startup. Ignore the quiescent state.
+        */
+       if (likely(rdp->quiescbatch == rcp->cur))
+               cpu_quiet(rdp->cpu, rcp);
+
+       spin_unlock_irqrestore(&rcp->lock, flags);
+}
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
+ * locking requirements: the list it's pulling from has to belong to a cpu
+ * which is dead and hence not processing interrupts.
+ */
+static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
+                               struct rcu_head **tail, long batch)
+{
+       unsigned long flags;
+
+       if (list) {
+               local_irq_save(flags);
+               this_rdp->batch = batch;
+               *this_rdp->nxttail[2] = list;
+               this_rdp->nxttail[2] = tail;
+               local_irq_restore(flags);
+       }
+}
+
+static void __rcu_offline_cpu(struct rcu_data *this_rdp,
+                               struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+       unsigned long flags;
+
+       /*
+        * If the cpu going offline owns the grace period,
+        * we can block indefinitely waiting for it, so flush
+        * it here.
+        */
+       spin_lock_irqsave(&rcp->lock, flags);
+       if (rcp->cur != rcp->completed)
+               cpu_quiet(rdp->cpu, rcp);
+       rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
+       rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
+       spin_unlock(&rcp->lock);
+
+       this_rdp->qlen += rdp->qlen;
+       local_irq_restore(flags);
+}
+
+static void rcu_offline_cpu(int cpu)
+{
+       struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
+       struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
+
+       __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
+                                       &per_cpu(rcu_data, cpu));
+       __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
+                                       &per_cpu(rcu_bh_data, cpu));
+       put_cpu_var(rcu_data);
+       put_cpu_var(rcu_bh_data);
+}
+
+#else
+
+static void rcu_offline_cpu(int cpu)
+{
+}
+
+#endif
+
+/*
+ * This does the RCU processing work from softirq context.
+ */
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
+                                       struct rcu_data *rdp)
+{
+       unsigned long flags;
+       long completed_snap;
+
+       if (rdp->nxtlist) {
+               local_irq_save(flags);
+               completed_snap = ACCESS_ONCE(rcp->completed);
+
+               /*
+                * move the other grace-period-completed entries to
+                * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
+                */
+               if (!rcu_batch_before(completed_snap, rdp->batch))
+                       rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
+               else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
+                       rdp->nxttail[0] = rdp->nxttail[1];
+
+               /*
+                * The grace period for entries in
+                * [rdp->nxtlist, *rdp->nxttail[0]) has completed;
+                * move these entries to the donelist.
+                */
+               if (rdp->nxttail[0] != &rdp->nxtlist) {
+                       *rdp->donetail = rdp->nxtlist;
+                       rdp->donetail = rdp->nxttail[0];
+                       rdp->nxtlist = *rdp->nxttail[0];
+                       *rdp->donetail = NULL;
+
+                       if (rdp->nxttail[1] == rdp->nxttail[0])
+                               rdp->nxttail[1] = &rdp->nxtlist;
+                       if (rdp->nxttail[2] == rdp->nxttail[0])
+                               rdp->nxttail[2] = &rdp->nxtlist;
+                       rdp->nxttail[0] = &rdp->nxtlist;
+               }
+
+               local_irq_restore(flags);
+
+               if (rcu_batch_after(rdp->batch, rcp->pending)) {
+                       unsigned long flags2;
+
+                       /* and start it/schedule start if it's a new batch */
+                       spin_lock_irqsave(&rcp->lock, flags2);
+                       if (rcu_batch_after(rdp->batch, rcp->pending)) {
+                               rcp->pending = rdp->batch;
+                               rcu_start_batch(rcp);
+                       }
+                       spin_unlock_irqrestore(&rcp->lock, flags2);
+               }
+       }
+
+       rcu_check_quiescent_state(rcp, rdp);
+       if (rdp->donelist)
+               rcu_do_batch(rdp);
+}
+
+static void rcu_process_callbacks(struct softirq_action *unused)
+{
+       /*
+        * Memory references from any prior RCU read-side critical sections
+        * executed by the interrupted code must be seen before any RCU
+        * grace-period manipulations below.
+        */
+
+       smp_mb(); /* See above block comment. */
+
+       __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
+       __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
+
+       /*
+        * Memory references from any later RCU read-side critical sections
+        * executed by the interrupted code must be seen after any RCU
+        * grace-period manipulations above.
+        */
+
+       smp_mb(); /* See above block comment. */
+}
+
+static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+       /* Check for CPU stalls, if enabled. */
+       check_cpu_stall(rcp);
+
+       if (rdp->nxtlist) {
+               long completed_snap = ACCESS_ONCE(rcp->completed);
+
+               /*
+                * This cpu has pending rcu entries and the grace period
+                * for them has completed.
+                */
+               if (!rcu_batch_before(completed_snap, rdp->batch))
+                       return 1;
+               if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
+                               rdp->nxttail[0] != rdp->nxttail[1])
+                       return 1;
+               if (rdp->nxttail[0] != &rdp->nxtlist)
+                       return 1;
+
+               /*
+                * This cpu has pending rcu entries and the new batch
+                * for them hasn't been started or scheduled to start.
+                */
+               if (rcu_batch_after(rdp->batch, rcp->pending))
+                       return 1;
+       }
+
+       /* This cpu has finished callbacks to invoke */
+       if (rdp->donelist)
+               return 1;
+
+       /* The rcu core waits for a quiescent state from the cpu */
+       if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
+               return 1;
+
+       /* nothing to do */
+       return 0;
+}
+
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, returning 1 if so.  This function is part of the
+ * RCU implementation; it is -not- an exported member of the RCU API.
+ */
+int rcu_pending(int cpu)
+{
+       return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
+               __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
+}
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so.  This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+       struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
+
+       return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
+}
+
+/*
+ * Top-level function driving RCU grace-period detection, normally
+ * invoked from the scheduler-clock interrupt.  This function simply
+ * increments counters that are read only from softirq by this same
+ * CPU, so there are no memory barriers required.
+ */
+void rcu_check_callbacks(int cpu, int user)
+{
+       if (user ||
+           (idle_cpu(cpu) && rcu_scheduler_active &&
+            !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+               /*
+                * Get here if this CPU took its interrupt from user
+                * mode or from the idle loop, and if this is not a
+                * nested interrupt.  In this case, the CPU is in
+                * a quiescent state, so count it.
+                *
+                * Also do a memory barrier.  This is needed to handle
+                * the case where writes from a preempt-disable section
+                * of code get reordered into schedule() by this CPU's
+                * write buffer.  The memory barrier makes sure that
+                * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
+                * by other CPUs to happen after any such write.
+                */
+
+               smp_mb();  /* See above block comment. */
+               rcu_qsctr_inc(cpu);
+               rcu_bh_qsctr_inc(cpu);
+
+       } else if (!in_softirq()) {
+
+               /*
+                * Get here if this CPU did not take its interrupt from
+                * softirq, in other words, if it is not interrupting
+                * a rcu_bh read-side critical section.  This is an _bh
+                * critical section, so count it.  The memory barrier
+                * is needed for the same reason as is the above one.
+                */
+
+               smp_mb();  /* See above block comment. */
+               rcu_bh_qsctr_inc(cpu);
+       }
+       raise_rcu_softirq();
+}
+
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+                                               struct rcu_data *rdp)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&rcp->lock, flags);
+       memset(rdp, 0, sizeof(*rdp));
+       rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
+       rdp->donetail = &rdp->donelist;
+       rdp->quiescbatch = rcp->completed;
+       rdp->qs_pending = 0;
+       rdp->cpu = cpu;
+       rdp->blimit = blimit;
+       spin_unlock_irqrestore(&rcp->lock, flags);
+}
+
+static void __cpuinit rcu_online_cpu(int cpu)
+{
+       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+       struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
+
+       rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
+       rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
+       open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
+
+static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+                               unsigned long action, void *hcpu)
+{
+       long cpu = (long)hcpu;
+
+       switch (action) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               rcu_online_cpu(cpu);
+               break;
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               rcu_offline_cpu(cpu);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata rcu_nb = {
+       .notifier_call  = rcu_cpu_notify,
+};
+
+/*
+ * Initializes the rcu mechanism.  Assumed to be called early,
+ * that is, before the local timer (SMP) or jiffy timer (uniprocessor) is set up.
+ * Note that rcu_qsctr and friends are implicitly
+ * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
+ */
+void __init __rcu_init(void)
+{
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+       printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+       rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
+                       (void *)(long)smp_processor_id());
+       /* Register notifier for non-boot CPUs */
+       register_cpu_notifier(&rcu_nb);
+}
+
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
diff --git a/libdde_linux26/contrib/lib/devres.c b/libdde_linux26/contrib/lib/devres.c
new file mode 100644
index 0000000..72c8909
--- /dev/null
+++ b/libdde_linux26/contrib/lib/devres.c
@@ -0,0 +1,351 @@
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+void devm_ioremap_release(struct device *dev, void *res)
+{
+       iounmap(*(void __iomem **)res);
+}
+
+static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
+{
+       return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap().  Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
+                          unsigned long size)
+{
+       void __iomem **ptr, *addr;
+
+       ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       addr = ioremap(offset, size);
+       if (addr) {
+               *ptr = addr;
+               devres_add(dev, ptr);
+       } else
+               devres_free(ptr);
+
+       return addr;
+}
+EXPORT_SYMBOL(devm_ioremap);
+
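+/*
+ * Usage sketch (illustrative; foo_probe() is a placeholder and the
+ * platform-device helpers are assumed available): a probe routine
+ * needs no iounmap() in its error paths or remove routine, since
+ * devres drops the mapping on driver detach:
+ *
+ *      static int foo_probe(struct platform_device *pdev)
+ *      {
+ *              struct resource *res;
+ *              void __iomem *base;
+ *
+ *              res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ *              if (!res)
+ *                      return -EINVAL;
+ *              base = devm_ioremap(&pdev->dev, res->start,
+ *                                  resource_size(res));
+ *              if (!base)
+ *                      return -ENOMEM;
+ *              return 0;
+ *      }
+ */
+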
+/**
+ * devm_ioremap_nocache - Managed ioremap_nocache()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap_nocache().  Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
+                                  unsigned long size)
+{
+       void __iomem **ptr, *addr;
+
+       ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       addr = ioremap_nocache(offset, size);
+       if (addr) {
+               *ptr = addr;
+               devres_add(dev, ptr);
+       } else
+               devres_free(ptr);
+
+       return addr;
+}
+EXPORT_SYMBOL(devm_ioremap_nocache);
+
+/**
+ * devm_iounmap - Managed iounmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
+ */
+void devm_iounmap(struct device *dev, void __iomem *addr)
+{
+       iounmap(addr);
+       WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+                              (void *)addr));
+}
+EXPORT_SYMBOL(devm_iounmap);
+
+#ifdef CONFIG_HAS_IOPORT
+/*
+ * Generic iomap devres
+ */
+static void devm_ioport_map_release(struct device *dev, void *res)
+{
+       ioport_unmap(*(void __iomem **)res);
+}
+
+static int devm_ioport_map_match(struct device *dev, void *res,
+                                void *match_data)
+{
+       return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioport_map - Managed ioport_map()
+ * @dev: Generic device to map ioport for
+ * @port: Port to map
+ * @nr: Number of ports to map
+ *
+ * Managed ioport_map().  Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+                              unsigned int nr)
+{
+       void __iomem **ptr, *addr;
+
+       ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       addr = ioport_map(port, nr);
+       if (addr) {
+               *ptr = addr;
+               devres_add(dev, ptr);
+       } else
+               devres_free(ptr);
+
+       return addr;
+}
+EXPORT_SYMBOL(devm_ioport_map);
+
+/**
+ * devm_ioport_unmap - Managed ioport_unmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed ioport_unmap().  @addr must have been mapped using
+ * devm_ioport_map().
+ */
+void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+{
+       ioport_unmap(addr);
+       WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+                              devm_ioport_map_match, (void *)addr));
+}
+EXPORT_SYMBOL(devm_ioport_unmap);
+
+#ifdef CONFIG_PCI
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
+
+struct pcim_iomap_devres {
+       void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+       struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+       struct pcim_iomap_devres *this = res;
+       int i;
+
+       for (i = 0; i < PCIM_IOMAP_MAX; i++)
+               if (this->table[i])
+                       pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access iomap allocation table for @pdev.  If the iomap table doesn't
+ * exist and @pdev is managed, it will be allocated.  All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated but can
+ * be safely called without context and is guaranteed to succeed once
+ * allocated.
+ */
+void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
+{
+       struct pcim_iomap_devres *dr, *new_dr;
+
+       dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+       if (dr)
+               return dr->table;
+
+       new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+       if (!new_dr)
+               return NULL;
+       dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+       return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pci_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap().  Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+       void __iomem **tbl;
+
+       BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+       tbl = (void __iomem **)pcim_iomap_table(pdev);
+       if (!tbl || tbl[bar])   /* duplicate mappings not allowed */
+               return NULL;
+
+       tbl[bar] = pci_iomap(pdev, bar, maxlen);
+       return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+       void __iomem **tbl;
+       int i;
+
+       pci_iounmap(pdev, addr);
+
+       tbl = (void __iomem **)pcim_iomap_table(pdev);
+       BUG_ON(!tbl);
+
+       for (i = 0; i < PCIM_IOMAP_MAX; i++)
+               if (tbl[i] == addr) {
+                       tbl[i] = NULL;
+                       return;
+               }
+       WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
+{
+       void __iomem * const *iomap;
+       int i, rc;
+
+       iomap = pcim_iomap_table(pdev);
+       if (!iomap)
+               return -ENOMEM;
+
+       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+               unsigned long len;
+
+               if (!(mask & (1 << i)))
+                       continue;
+
+               rc = -EINVAL;
+               len = pci_resource_len(pdev, i);
+               if (!len)
+                       goto err_inval;
+
+               rc = pci_request_region(pdev, i, name);
+               if (rc)
+                       goto err_inval;
+
+               rc = -ENOMEM;
+               if (!pcim_iomap(pdev, i, 0))
+                       goto err_region;
+       }
+
+       return 0;
+
+ err_region:
+       pci_release_region(pdev, i);
+ err_inval:
+       while (--i >= 0) {
+               if (!(mask & (1 << i)))
+                       continue;
+               pcim_iounmap(pdev, iomap[i]);
+               pci_release_region(pdev, i);
+       }
+
+       return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
+
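+/*
+ * Usage sketch (illustrative; foo_pci_probe() is a placeholder and
+ * pcim_enable_device() is assumed available to make the device
+ * managed): request and map BAR 0, then fetch the cookie from the
+ * iomap table; region and mapping are both released automatically on
+ * driver detach:
+ *
+ *      static int foo_pci_probe(struct pci_dev *pdev,
+ *                               const struct pci_device_id *id)
+ *      {
+ *              void __iomem *mmio;
+ *              int rc;
+ *
+ *              rc = pcim_enable_device(pdev);
+ *              if (rc)
+ *                      return rc;
+ *              rc = pcim_iomap_regions(pdev, 1 << 0, "foo");
+ *              if (rc)
+ *                      return rc;
+ *              mmio = pcim_iomap_table(pdev)[0];
+ *              return 0;
+ *      }
+ */
+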
+/**
+ * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to iomap
+ * @name: Name used when requesting regions
+ *
+ * Request all PCI BARs and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions_request_all(struct pci_dev *pdev, u16 mask,
+                                  const char *name)
+{
+       int request_mask = ((1 << 6) - 1) & ~mask;
+       int rc;
+
+       rc = pci_request_selected_regions(pdev, request_mask, name);
+       if (rc)
+               return rc;
+
+       rc = pcim_iomap_regions(pdev, mask, name);
+       if (rc)
+               pci_release_selected_regions(pdev, request_mask);
+       return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions_request_all);
+
+/**
+ * pcim_iounmap_regions - Unmap and release PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to unmap and release
+ *
+ * Unmap and release regions specified by @mask.
+ */
+void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
+{
+       void __iomem * const *iomap;
+       int i;
+
+       iomap = pcim_iomap_table(pdev);
+       if (!iomap)
+               return;
+
+       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+               if (!(mask & (1 << i)))
+                       continue;
+
+               pcim_iounmap(pdev, iomap[i]);
+               pci_release_region(pdev, i);
+       }
+}
+EXPORT_SYMBOL(pcim_iounmap_regions);
+#endif
+#endif

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-hurd/hurd.git


