/*
 * NUMA irq-desc migration code
 *
 * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
 * the new "home node" of the IRQ.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

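/*
 * Allocate a kstat_irqs array for the new descriptor via init_kstat_irqs()
 * and, if a new array was actually allocated (i.e. it is not the one still
 * shared with the old descriptor), copy the accumulated per-CPU interrupt
 * counts across so no statistics are lost by the migration.
 */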
static void init_copy_kstat_irqs(struct irq_desc *old_desc,
                                 struct irq_desc *desc,
                                 int cpu, int nr)
{
        unsigned long bytes;

        init_kstat_irqs(desc, cpu, nr);

        if (desc->kstat_irqs != old_desc->kstat_irqs) {
                /* Copy the existing per-CPU counts into the new array */
                bytes = nr * sizeof(unsigned int);

                memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
        }
}

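/*
 * Release the old descriptor's kstat_irqs array unless the new descriptor
 * still points at the very same allocation.
 */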
static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
{
        if (old_desc->kstat_irqs == desc->kstat_irqs)
                return;

        kfree(old_desc->kstat_irqs);
        old_desc->kstat_irqs = NULL;
}

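/*
 * Populate the new descriptor: start from a byte-for-byte copy of the old
 * one, then re-initialize what must not be shared (the spinlock and its
 * lockdep class), record the new home CPU, and duplicate the per-CPU
 * statistics and the architecture's chip_data.
 */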
static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
                                   struct irq_desc *desc, int cpu)
{
        memcpy(desc, old_desc, sizeof(struct irq_desc));
        spin_lock_init(&desc->lock);
        desc->cpu = cpu;
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
        arch_init_copy_chip_data(old_desc, desc, cpu);
}

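/*
 * Undo the duplication done by init_copy_one_irq_desc(): drop the old
 * descriptor's statistics array and its architecture chip_data.
 */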
static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
{
        free_kstat_irqs(old_desc, desc);
        arch_free_chip_data(old_desc, desc);
}

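/*
 * Do the actual migration: with sparse_irq_lock held, re-check that the
 * descriptor in irq_desc_ptrs[] is still the one we were asked to move
 * (another CPU may have replaced it already), allocate a replacement on
 * the target CPU's node with GFP_ATOMIC, copy the old state into it,
 * publish it in irq_desc_ptrs[] and free the old descriptor.  On
 * allocation failure the old descriptor is kept and returned unchanged.
 */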
static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
                                             int cpu)
{
        struct irq_desc *desc;
        unsigned int irq;
        unsigned long flags;
        int node;

        irq = old_desc->irq;

        spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_desc_ptrs[irq];

        if (desc && old_desc != desc)
                goto out_unlock;

        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        if (!desc) {
                printk(KERN_ERR "irq %d: cannot get a new irq_desc for migration.\n", irq);
                /* still use the old one */
                desc = old_desc;
                goto out_unlock;
        }
        init_copy_one_irq_desc(irq, old_desc, desc, cpu);

        irq_desc_ptrs[irq] = desc;

        /* free the old one */
        free_one_irq_desc(old_desc, desc);
        kfree(old_desc);

out_unlock:
        spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}

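/*
 * Entry point for NUMA migration of an irq_desc.  Legacy IRQs
 * (irq < NR_IRQS_LEGACY) use statically allocated descriptors and are
 * returned untouched.  For the rest, a full migration via
 * __real_move_irq_desc() is only worth doing when the target CPU lives
 * on a different node than the current home CPU; otherwise updating
 * desc->cpu is enough.  Callers must use the returned pointer, which may
 * differ from the one passed in.
 */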
struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
{
        int old_cpu;
        int node, old_node;

        /* legacy IRQ descriptors are statically allocated, do not move them */
        if (desc->irq < NR_IRQS_LEGACY)
                return desc;

        old_cpu = desc->cpu;
        if (old_cpu != cpu) {
                node = cpu_to_node(cpu);
                old_node = cpu_to_node(old_cpu);
                if (old_node != node)
                        desc = __real_move_irq_desc(desc, cpu);
                else
                        desc->cpu = cpu;
        }

        return desc;
}

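/*
 * Usage sketch (hypothetical caller, not part of this file): an
 * architecture's affinity-change path that wants the descriptor allocated
 * on the node of the new target CPU might do something like
 *
 *         desc = move_irq_desc(desc, cpumask_first(mask));
 *
 * and must keep using the returned pointer afterwards, since the old
 * irq_desc may have been freed by the migration.
 */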