| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1 | /* | 
|  | 2 | * IUCV base infrastructure. | 
|  | 3 | * | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 4 | * Copyright IBM Corp. 2001, 2009 | 
|  | 5 | * | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 6 | * Author(s): | 
|  | 7 | *    Original source: | 
|  | 8 | *	Alan Altmark (Alan_Altmark@us.ibm.com)	Sept. 2000 | 
|  | 9 | *	Xenia Tkatschow (xenia@us.ibm.com) | 
|  | 10 | *    2Gb awareness and general cleanup: | 
|  | 11 | *	Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | 
|  | 12 | *    Rewritten for af_iucv: | 
|  | 13 | *	Martin Schwidefsky <schwidefsky@de.ibm.com> | 
| Ursula Braun | 672e405 | 2009-06-16 10:30:42 +0200 | [diff] [blame] | 14 | *    PM functions: | 
|  | 15 | *	Ursula Braun (ursula.braun@de.ibm.com) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 16 | * | 
|  | 17 | * Documentation used: | 
|  | 18 | *    The original source | 
|  | 19 | *    CP Programming Service, IBM document # SC24-5760 | 
|  | 20 | * | 
|  | 21 | * This program is free software; you can redistribute it and/or modify | 
|  | 22 | * it under the terms of the GNU General Public License as published by | 
|  | 23 | * the Free Software Foundation; either version 2, or (at your option) | 
|  | 24 | * any later version. | 
|  | 25 | * | 
|  | 26 | * This program is distributed in the hope that it will be useful, | 
|  | 27 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | 28 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | 29 | * GNU General Public License for more details. | 
|  | 30 | * | 
|  | 31 | * You should have received a copy of the GNU General Public License | 
|  | 32 | * along with this program; if not, write to the Free Software | 
|  | 33 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 
|  | 34 | */ | 
|  | 35 |  | 
| Ursula Braun | 8f7c502 | 2008-12-25 13:39:47 +0100 | [diff] [blame] | 36 | #define KMSG_COMPONENT "iucv" | 
|  | 37 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 
|  | 38 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 39 | #include <linux/module.h> | 
|  | 40 | #include <linux/moduleparam.h> | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 41 | #include <linux/spinlock.h> | 
|  | 42 | #include <linux/kernel.h> | 
|  | 43 | #include <linux/slab.h> | 
|  | 44 | #include <linux/init.h> | 
|  | 45 | #include <linux/interrupt.h> | 
|  | 46 | #include <linux/list.h> | 
|  | 47 | #include <linux/errno.h> | 
|  | 48 | #include <linux/err.h> | 
|  | 49 | #include <linux/device.h> | 
|  | 50 | #include <linux/cpu.h> | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 51 | #include <linux/reboot.h> | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 52 | #include <net/iucv/iucv.h> | 
|  | 53 | #include <asm/atomic.h> | 
|  | 54 | #include <asm/ebcdic.h> | 
|  | 55 | #include <asm/io.h> | 
|  | 56 | #include <asm/s390_ext.h> | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 57 | #include <asm/smp.h> | 
|  | 58 |  | 
/*
 * FLAGS:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPSRCCLS - Indicates you have specified a source class.
 * IPTRGCLS - Indicates you have specified a target class.
 * IPFGPID  - Indicates you have specified a pathid.
 * IPFGMID  - Indicates you have specified a message ID.
 * IPNORPY  - Indicates a one-way message. No reply expected.
 * IPALL    - Indicates that all paths are affected.
 *
 * NOTE(review): IUCV_IPSRCCLS and IUCV_IPTRGCLS share the value 0x01;
 * presumably the two flags are used with disjoint sets of IUCV
 * functions so distinct bits are not needed - per CP spec, confirm.
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80
|  | 76 |  | 
/*
 * Bus match callback for the iucv bus. IUCV devices are never bound
 * to drivers by the driver core's automatic matching, so this always
 * reports "no match".
 */
static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
	return 0;	/* no automatic driver binding on the iucv bus */
}
|  | 81 |  | 
/*
 * Power management state of the IUCV base code. The state is driven
 * by the dev_pm_ops callbacks below (freeze/thaw/restore); the names
 * mirror those transitions.
 */
enum iucv_pm_states {
	IUCV_PM_INITIAL = 0,	/* normal operation, no PM transition */
	IUCV_PM_FREEZING = 1,	/* freeze (suspend/hibernate) in progress */
	IUCV_PM_THAWING = 2,	/* thaw in progress */
	IUCV_PM_RESTORING = 3,	/* restore after hibernation in progress */
};
/* current PM state; presumably serialized by the PM core - TODO confirm */
static enum iucv_pm_states iucv_pm_state;
|  | 89 |  | 
/* Power management callbacks, implemented further down in this file. */
static int iucv_pm_prepare(struct device *);
static void iucv_pm_complete(struct device *);
static int iucv_pm_freeze(struct device *);
static int iucv_pm_thaw(struct device *);
static int iucv_pm_restore(struct device *);

/* dev_pm_ops shared by all devices on the iucv bus (see iucv_bus). */
static const struct dev_pm_ops iucv_pm_ops = {
	.prepare = iucv_pm_prepare,
	.complete = iucv_pm_complete,
	.freeze = iucv_pm_freeze,
	.thaw = iucv_pm_thaw,
	.restore = iucv_pm_restore,
};
|  | 103 |  | 
/* The iucv bus type; exported so IUCV users can register devices on it. */
struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
	.pm = &iucv_pm_ops,
};
EXPORT_SYMBOL(iucv_bus);

/* Root device, parent for devices registered on the iucv bus. */
struct device *iucv_root;
EXPORT_SYMBOL(iucv_root);

/* Nonzero when the IUCV facility can be used; presumably set during
 * module/subsystem initialization (not visible in this chunk). */
static int iucv_available;
|  | 115 |  | 
/* General IUCV interrupt structure; layout defined by the CP IUCV
 * external interrupt (see "CP Programming Services"). */
struct iucv_irq_data {
	u16 ippathid;	/* path id the interrupt refers to */
	u8  ipflags1;	/* interrupt flags */
	u8  iptype;	/* interrupt type */
	u32 res2[8];	/* remainder of the CP-defined interrupt buffer */
};

/* List element carrying a copy of one interrupt buffer while it is
 * queued on iucv_task_queue or iucv_work_queue. */
struct iucv_irq_list {
	struct list_head list;
	struct iucv_irq_data data;
};

/* Per-cpu interrupt buffers declared to CP (see iucv_declare_cpu). */
static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
/* cpus that have declared an interrupt buffer to CP */
static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
/* cpus that currently have iucv interrupts enabled */
static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
|  | 132 |  | 
/*
 * Queue of interrupt buffers for delivery via the tasklet
 * (fast but can't call smp_call_function).
 */
static LIST_HEAD(iucv_task_queue);

/*
 * The tasklet for fast delivery of iucv interrupts.
 */
static void iucv_tasklet_fn(unsigned long);
static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0);

/*
 * Queue of interrupt buffers for delivery via a work queue
 * (slower but can call smp_call_function).
 */
static LIST_HEAD(iucv_work_queue);

/*
 * The work element to deliver path pending interrupts.
 */
static void iucv_work_fn(struct work_struct *work);
static DECLARE_WORK(iucv_work, iucv_work_fn);

/*
 * Spinlock protecting iucv_task_queue and iucv_work_queue.
 */
static DEFINE_SPINLOCK(iucv_queue_lock);
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 161 |  | 
/* IUCV function codes, passed to CP in register 0 of the b2f0
 * instruction (see iucv_call_b2f0). Values are defined by CP; see
 * "CP Programming Services". */
enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
	IUCV_SETCONTROLMASK = 17,
};
|  | 179 |  | 
/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC.
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: array of iucv_path pointers, allocated with
 * iucv_max_pathid entries in iucv_enable and freed in iucv_disable.
 */
static struct iucv_path **iucv_path_table;
/* highest usable path id, determined by iucv_query_maxconn */
static unsigned long iucv_max_pathid;

/*
 * iucv_table_lock: spinlock protecting iucv_handler_list and
 * iucv_path_table.
 */
static DEFINE_SPINLOCK(iucv_table_lock);
|  | 203 |  | 
/*
 * iucv_active_cpu: contains the number of the cpu executing the tasklet
 * or the work handler (-1 when neither runs). Needed for
 * iucv_path_sever called from tasklet.
 */
static int iucv_active_cpu = -1;

/*
 * Mutex serializing iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers.
 */
static int iucv_nonsmp_handler;
|  | 219 |  | 
/*
 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
 * iucv_path_quiesce and iucv_path_sever. The field layout is dictated
 * by the CP IUCV interface (see "CP Programming Services").
 */
struct iucv_cmd_control {
	u16 ippathid;		/* path identification */
	u8  ipflags1;		/* flags, see the IUCV_IP* defines above */
	u8  iprcode;		/* return code set by CP (see iucv_call_b2f0) */
	u16 ipmsglim;
	u16 res1;
	u8  ipvmid[8];
	u8  ipuser[16];
	u8  iptarget[8];
} __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;		/* path identification */
	u8  ipflags1;		/* flags, see the IUCV_IP* defines above */
	u8  iprcode;		/* return code set by CP */
	u32 ipmsgid;		/* message id */
	u32 iptrgcls;		/* target class */
	u8  iprmmsg[8];		/* in-parameter-list message data */
	u32 ipsrccls;		/* source class */
	u32 ipmsgtag;		/* message tag */
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;		/* path identification */
	u8  ipflags1;		/* flags, see the IUCV_IP* defines above */
	u8  iprcode;		/* return code set by CP */
	u32 ipmsgid;		/* message id */
	u32 iptrgcls;		/* target class */
	u32 ipbfadr1;		/* buffer address (physical, see
				 * iucv_declare_cpu) */
	u32 ipbfln1f;
	u32 ipsrccls;		/* source class */
	u32 ipmsgtag;		/* message tag */
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;		/* path identification */
	u8  ipflags1;		/* flags, see the IUCV_IP* defines above */
	u8  iprcode;		/* return code set by CP */
	u32 ipmsgid;		/* message id */
	u8  ipaudit[3];
	u8  res1[5];
	u32 res2;
	u32 ipsrccls;		/* source class */
	u32 ipmsgtag;		/* message tag */
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_enable_cpu.
 */
struct iucv_cmd_set_mask {
	u8  ipmask;		/* interrupt mask, see iucv_allow_cpu */
	u8  res1[2];
	u8  iprcode;		/* return code set by CP */
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

/* One CP parameter block; each call uses exactly one member layout. */
union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for per-cpu IUCV command parameter block. The blocks are
 * allocated GFP_DMA (see the cpu notifier below).
 */
static union iucv_param *iucv_param[NR_CPUS];
/* second per-cpu parameter block; the name suggests it is reserved for
 * interrupt/tasklet context - TODO confirm against the users */
static union iucv_param *iucv_param_irq[NR_CPUS];
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 312 |  | 
|  | 313 | /** | 
|  | 314 | * iucv_call_b2f0 | 
|  | 315 | * @code: identifier of IUCV call to CP. | 
|  | 316 | * @parm: pointer to a struct iucv_parm block | 
|  | 317 | * | 
|  | 318 | * Calls CP to execute IUCV commands. | 
|  | 319 | * | 
|  | 320 | * Returns the result of the CP IUCV call. | 
|  | 321 | */ | 
|  | 322 | static inline int iucv_call_b2f0(int command, union iucv_param *parm) | 
|  | 323 | { | 
|  | 324 | register unsigned long reg0 asm ("0"); | 
|  | 325 | register unsigned long reg1 asm ("1"); | 
|  | 326 | int ccode; | 
|  | 327 |  | 
|  | 328 | reg0 = command; | 
|  | 329 | reg1 = virt_to_phys(parm); | 
|  | 330 | asm volatile( | 
|  | 331 | "	.long 0xb2f01000\n" | 
|  | 332 | "	ipm	%0\n" | 
|  | 333 | "	srl	%0,28\n" | 
|  | 334 | : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1) | 
|  | 335 | :  "m" (*parm) : "cc"); | 
|  | 336 | return (ccode == 1) ? parm->ctrl.iprcode : ccode; | 
|  | 337 | } | 
|  | 338 |  | 
/**
 * iucv_query_maxconn
 *
 * Determines the maximum number of connections that may be established
 * and stores it in iucv_max_pathid.
 *
 * Returns 0 on success, -ENOMEM if the parameter block could not be
 * allocated, or -EPERM if IUCV is not available.
 */
static int iucv_query_maxconn(void)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	void *param;
	int ccode;

	/* GFP_DMA: the parameter block must be in storage CP can address. */
	param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA);
	if (!param)
		return -ENOMEM;
	reg0 = IUCV_QUERY;
	reg1 = (unsigned long) param;
	asm volatile (
		"	.long	0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
	if (ccode == 0)
		/* CP returns the maximum number of path ids in register 1. */
		iucv_max_pathid = reg1;
	kfree(param);
	return ccode ? -EPERM : 0;
}
|  | 369 |  | 
/**
 * iucv_allow_cpu
 * @data: unused
 *
 * Allow iucv interrupts on this cpu. Must run on the cpu to be
 * enabled (invoked via smp_call_function_single / on_each_cpu).
 */
static void iucv_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/*
	 * Enable all iucv interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow nonpriority message pending interrupts
	 *	0x40 - Flag to allow priority message pending interrupts
	 *	0x20 - Flag to allow nonpriority message completion interrupts
	 *	0x10 - Flag to allow priority message completion interrupts
	 *	0x08 - Flag to allow IUCV control interrupts
	 */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/*
	 * Enable all iucv control interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow pending connections interrupts
	 *	0x40 - Flag to allow connection complete interrupts
	 *	0x20 - Flag to allow connection severed interrupts
	 *	0x10 - Flag to allow connection quiesced interrupts
	 *	0x08 - Flag to allow connection resumed interrupts
	 */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
	/* Set indication that iucv interrupts are allowed for this cpu. */
	cpu_set(cpu, iucv_irq_cpumask);
}
|  | 410 |  | 
/**
 * iucv_block_cpu
 * @data: unused
 *
 * Block iucv interrupts on this cpu. Must run on the cpu to be
 * blocked (invoked via smp_call_function_single / on_each_cpu).
 */
static void iucv_block_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Disable all iucv interrupts: an all-zero mask allows nothing. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpu_clear(cpu, iucv_irq_cpumask);
}
|  | 430 |  | 
/**
 * iucv_block_cpu_almost
 * @data: unused
 *
 * Allow connection-severed interrupts only on this cpu; all message
 * interrupts and the other control interrupts are blocked. Must run
 * on the cpu concerned.
 */
static void iucv_block_cpu_almost(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Allow iucv control interrupts only */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x08;
	iucv_call_b2f0(IUCV_SETMASK, parm);
	/* Allow iucv-severed interrupt only */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x20;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpu_clear(cpu, iucv_irq_cpumask);
}
|  | 455 |  | 
/**
 * iucv_declare_cpu
 * @data: unused
 *
 * Declare an interrupt buffer on this cpu. Must run on the cpu the
 * buffer is declared for. On success the cpu is added to
 * iucv_buffer_cpumask and its interrupt mask is initialized according
 * to iucv_nonsmp_handler; on failure a warning is logged and the cpu
 * is left without a buffer.
 */
static void iucv_declare_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;
	int rc;

	if (cpu_isset(cpu, iucv_buffer_cpumask))
		return;		/* buffer already declared for this cpu */

	/* Declare interrupt buffer. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
	rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
	if (rc) {
		/* Translate the CP return code into a log message. */
		char *err = "Unknown";
		switch (rc) {
		case 0x03:
			err = "Directory error";
			break;
		case 0x0a:
			err = "Invalid length";
			break;
		case 0x13:
			err = "Buffer already exists";
			break;
		case 0x3e:
			err = "Buffer overlap";
			break;
		case 0x5c:
			err = "Paging or storage error";
			break;
		}
		pr_warning("Defining an interrupt buffer on CPU %i"
			   " failed with 0x%02x (%s)\n", cpu, rc, err);
		return;
	}

	/* Set indication that an iucv buffer exists for this cpu. */
	cpu_set(cpu, iucv_buffer_cpumask);

	if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask))
		/* Enable iucv interrupts on this cpu. */
		iucv_allow_cpu(NULL);
	else
		/* Disable iucv interrupts on this cpu. */
		iucv_block_cpu(NULL);
}
|  | 510 |  | 
/**
 * iucv_retrieve_cpu
 * @data: unused
 *
 * Retrieve the interrupt buffer on this cpu and clear the cpu from
 * iucv_buffer_cpumask. Must run on the cpu the buffer was declared
 * for.
 */
static void iucv_retrieve_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	if (!cpu_isset(cpu, iucv_buffer_cpumask))
		return;		/* no buffer declared for this cpu */

	/* Block iucv interrupts. */
	iucv_block_cpu(NULL);

	/* Retrieve interrupt buffer. */
	parm = iucv_param_irq[cpu];
	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);

	/* Clear indication that an iucv buffer exists for this cpu. */
	cpu_clear(cpu, iucv_buffer_cpumask);
}
|  | 535 |  | 
/**
 * iucv_setmask_mp
 *
 * Allow iucv interrupts on all cpus that have a declared buffer.
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	/* Hold the hotplug lock so the set of online cpus is stable. */
	get_online_cpus();
	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
		    !cpu_isset(cpu, iucv_irq_cpumask))
			smp_call_function_single(cpu, iucv_allow_cpu,
						 NULL, 1);
	put_online_cpus();
}
|  | 554 |  | 
/**
 * iucv_setmask_up
 *
 * Allow iucv interrupts on a single cpu only; interrupts are blocked
 * on every other cpu that currently has them enabled.
 */
static void iucv_setmask_up(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all cpus but the first in iucv_irq_cpumask. */
	cpumask = iucv_irq_cpumask;
	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
	for_each_cpu_mask_nr(cpu, cpumask)
		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}
|  | 571 |  | 
/**
 * iucv_enable
 *
 * This function makes iucv ready for use. It allocates the pathid
 * table, declares an iucv interrupt buffer and enables the iucv
 * interrupts. Called when the first user has registered an iucv
 * handler.
 *
 * Returns 0 on success, -ENOMEM if the path table could not be
 * allocated or -EIO if no cpu could declare an interrupt buffer.
 */
static int iucv_enable(void)
{
	size_t alloc_size;
	int cpu, rc;

	/* Hold the hotplug lock so the set of online cpus is stable. */
	get_online_cpus();
	rc = -ENOMEM;
	/*
	 * NOTE(review): the table holds iucv_path pointers but is sized
	 * by sizeof(struct iucv_path) - over-allocates; confirm whether
	 * this is intentional.
	 */
	alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
	if (!iucv_path_table)
		goto out;
	/* Declare per cpu buffers. */
	rc = -EIO;
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
	if (cpus_empty(iucv_buffer_cpumask))
		/* No cpu could declare an iucv buffer. */
		goto out;
	put_online_cpus();
	return 0;
out:
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
	return rc;
}
|  | 606 |  | 
/**
 * iucv_disable
 *
 * This function shuts down iucv. It disables iucv interrupts, retrieves
 * the iucv interrupt buffer on every cpu and frees the pathid table.
 * Called after the last user unregisters its iucv handler.
 */
static void iucv_disable(void)
{
	get_online_cpus();
	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
}
|  | 622 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 623 | static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | 
|  | 624 | unsigned long action, void *hcpu) | 
|  | 625 | { | 
|  | 626 | cpumask_t cpumask; | 
|  | 627 | long cpu = (long) hcpu; | 
|  | 628 |  | 
|  | 629 | switch (action) { | 
|  | 630 | case CPU_UP_PREPARE: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 631 | case CPU_UP_PREPARE_FROZEN: | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 632 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), | 
|  | 633 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
|  | 634 | if (!iucv_irq_data[cpu]) | 
| Akinobu Mita | 92e99a9 | 2010-05-26 14:43:33 -0700 | [diff] [blame] | 635 | return notifier_from_errno(-ENOMEM); | 
|  | 636 |  | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 637 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), | 
|  | 638 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
| Akinobu Mita | d0236f8 | 2008-07-15 02:09:53 -0700 | [diff] [blame] | 639 | if (!iucv_param[cpu]) { | 
|  | 640 | kfree(iucv_irq_data[cpu]); | 
|  | 641 | iucv_irq_data[cpu] = NULL; | 
| Akinobu Mita | 92e99a9 | 2010-05-26 14:43:33 -0700 | [diff] [blame] | 642 | return notifier_from_errno(-ENOMEM); | 
| Akinobu Mita | d0236f8 | 2008-07-15 02:09:53 -0700 | [diff] [blame] | 643 | } | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 644 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), | 
|  | 645 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
|  | 646 | if (!iucv_param_irq[cpu]) { | 
|  | 647 | kfree(iucv_param[cpu]); | 
|  | 648 | iucv_param[cpu] = NULL; | 
|  | 649 | kfree(iucv_irq_data[cpu]); | 
|  | 650 | iucv_irq_data[cpu] = NULL; | 
| Akinobu Mita | 92e99a9 | 2010-05-26 14:43:33 -0700 | [diff] [blame] | 651 | return notifier_from_errno(-ENOMEM); | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 652 | } | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 653 | break; | 
|  | 654 | case CPU_UP_CANCELED: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 655 | case CPU_UP_CANCELED_FROZEN: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 656 | case CPU_DEAD: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 657 | case CPU_DEAD_FROZEN: | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 658 | kfree(iucv_param_irq[cpu]); | 
|  | 659 | iucv_param_irq[cpu] = NULL; | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 660 | kfree(iucv_param[cpu]); | 
|  | 661 | iucv_param[cpu] = NULL; | 
|  | 662 | kfree(iucv_irq_data[cpu]); | 
|  | 663 | iucv_irq_data[cpu] = NULL; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 664 | break; | 
|  | 665 | case CPU_ONLINE: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 666 | case CPU_ONLINE_FROZEN: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 667 | case CPU_DOWN_FAILED: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 668 | case CPU_DOWN_FAILED_FROZEN: | 
| Heiko Carstens | f1d3e4d | 2009-01-05 18:09:02 -0800 | [diff] [blame] | 669 | if (!iucv_path_table) | 
|  | 670 | break; | 
| Jens Axboe | 8691e5a | 2008-06-06 11:18:06 +0200 | [diff] [blame] | 671 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 672 | break; | 
|  | 673 | case CPU_DOWN_PREPARE: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 674 | case CPU_DOWN_PREPARE_FROZEN: | 
| Heiko Carstens | f1d3e4d | 2009-01-05 18:09:02 -0800 | [diff] [blame] | 675 | if (!iucv_path_table) | 
|  | 676 | break; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 677 | cpumask = iucv_buffer_cpumask; | 
|  | 678 | cpu_clear(cpu, cpumask); | 
|  | 679 | if (cpus_empty(cpumask)) | 
|  | 680 | /* Can't offline last IUCV enabled cpu. */ | 
| Akinobu Mita | 92e99a9 | 2010-05-26 14:43:33 -0700 | [diff] [blame] | 681 | return notifier_from_errno(-EINVAL); | 
| Jens Axboe | 8691e5a | 2008-06-06 11:18:06 +0200 | [diff] [blame] | 682 | smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 683 | if (cpus_empty(iucv_irq_cpumask)) | 
| Heiko Carstens | 3bb447f | 2007-07-27 12:29:08 +0200 | [diff] [blame] | 684 | smp_call_function_single(first_cpu(iucv_buffer_cpumask), | 
| Jens Axboe | 8691e5a | 2008-06-06 11:18:06 +0200 | [diff] [blame] | 685 | iucv_allow_cpu, NULL, 1); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 686 | break; | 
|  | 687 | } | 
|  | 688 | return NOTIFY_OK; | 
|  | 689 | } | 
|  | 690 |  | 
| Heiko Carstens | f1494ed | 2008-06-09 15:49:57 -0700 | [diff] [blame] | 691 | static struct notifier_block __refdata iucv_cpu_notifier = { | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 692 | .notifier_call = iucv_cpu_notify, | 
|  | 693 | }; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 694 |  | 
|  | 695 | /** | 
|  | 696 | * iucv_sever_pathid | 
|  | 697 | * @pathid: path identification number. | 
|  | 698 | * @userdata: 16-bytes of user data. | 
|  | 699 | * | 
|  | 700 | * Sever an iucv path to free up the pathid. Used internally. | 
|  | 701 | */ | 
|  | 702 | static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) | 
|  | 703 | { | 
|  | 704 | union iucv_param *parm; | 
|  | 705 |  | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 706 | parm = iucv_param_irq[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 707 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 708 | if (userdata) | 
|  | 709 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 
|  | 710 | parm->ctrl.ippathid = pathid; | 
|  | 711 | return iucv_call_b2f0(IUCV_SEVER, parm); | 
|  | 712 | } | 
|  | 713 |  | 
|  | 714 | /** | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 715 | * __iucv_cleanup_queue | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 716 | * @dummy: unused dummy argument | 
|  | 717 | * | 
|  | 718 | * Nop function called via smp_call_function to force work items from | 
|  | 719 | * pending external iucv interrupts to the work queue. | 
|  | 720 | */ | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 721 | static void __iucv_cleanup_queue(void *dummy) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 722 | { | 
|  | 723 | } | 
|  | 724 |  | 
|  | 725 | /** | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 726 | * iucv_cleanup_queue | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 727 | * | 
|  | 728 | * Function called after a path has been severed to find all remaining | 
|  | 729 | * work items for the now stale pathid. The caller needs to hold the | 
|  | 730 | * iucv_table_lock. | 
|  | 731 | */ | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 732 | static void iucv_cleanup_queue(void) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 733 | { | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 734 | struct iucv_irq_list *p, *n; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 735 |  | 
|  | 736 | /* | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 737 | * When a path is severed, the pathid can be reused immediatly | 
|  | 738 | * on a iucv connect or a connection pending interrupt. Remove | 
|  | 739 | * all entries from the task queue that refer to a stale pathid | 
|  | 740 | * (iucv_path_table[ix] == NULL). Only then do the iucv connect | 
|  | 741 | * or deliver the connection pending interrupt. To get all the | 
|  | 742 | * pending interrupts force them to the work queue by calling | 
|  | 743 | * an empty function on all cpus. | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 744 | */ | 
| Jens Axboe | 8691e5a | 2008-06-06 11:18:06 +0200 | [diff] [blame] | 745 | smp_call_function(__iucv_cleanup_queue, NULL, 1); | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 746 | spin_lock_irq(&iucv_queue_lock); | 
|  | 747 | list_for_each_entry_safe(p, n, &iucv_task_queue, list) { | 
|  | 748 | /* Remove stale work items from the task queue. */ | 
|  | 749 | if (iucv_path_table[p->data.ippathid] == NULL) { | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 750 | list_del(&p->list); | 
|  | 751 | kfree(p); | 
|  | 752 | } | 
|  | 753 | } | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 754 | spin_unlock_irq(&iucv_queue_lock); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 755 | } | 
|  | 756 |  | 
|  | 757 | /** | 
|  | 758 | * iucv_register: | 
|  | 759 | * @handler: address of iucv handler structure | 
|  | 760 | * @smp: != 0 indicates that the handler can deal with out of order messages | 
|  | 761 | * | 
|  | 762 | * Registers a driver with IUCV. | 
|  | 763 | * | 
|  | 764 | * Returns 0 on success, -ENOMEM if the memory allocation for the pathid | 
|  | 765 | * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus. | 
|  | 766 | */ | 
|  | 767 | int iucv_register(struct iucv_handler *handler, int smp) | 
|  | 768 | { | 
|  | 769 | int rc; | 
|  | 770 |  | 
|  | 771 | if (!iucv_available) | 
|  | 772 | return -ENOSYS; | 
|  | 773 | mutex_lock(&iucv_register_mutex); | 
|  | 774 | if (!smp) | 
|  | 775 | iucv_nonsmp_handler++; | 
|  | 776 | if (list_empty(&iucv_handler_list)) { | 
|  | 777 | rc = iucv_enable(); | 
|  | 778 | if (rc) | 
|  | 779 | goto out_mutex; | 
|  | 780 | } else if (!smp && iucv_nonsmp_handler == 1) | 
|  | 781 | iucv_setmask_up(); | 
|  | 782 | INIT_LIST_HEAD(&handler->paths); | 
|  | 783 |  | 
| Ursula Braun | 435bc9d | 2008-02-07 18:06:52 -0800 | [diff] [blame] | 784 | spin_lock_bh(&iucv_table_lock); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 785 | list_add_tail(&handler->list, &iucv_handler_list); | 
| Ursula Braun | 435bc9d | 2008-02-07 18:06:52 -0800 | [diff] [blame] | 786 | spin_unlock_bh(&iucv_table_lock); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 787 | rc = 0; | 
|  | 788 | out_mutex: | 
|  | 789 | mutex_unlock(&iucv_register_mutex); | 
|  | 790 | return rc; | 
|  | 791 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 792 | EXPORT_SYMBOL(iucv_register); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 793 |  | 
|  | 794 | /** | 
|  | 795 | * iucv_unregister | 
|  | 796 | * @handler:  address of iucv handler structure | 
|  | 797 | * @smp: != 0 indicates that the handler can deal with out of order messages | 
|  | 798 | * | 
|  | 799 | * Unregister driver from IUCV. | 
|  | 800 | */ | 
|  | 801 | void iucv_unregister(struct iucv_handler *handler, int smp) | 
|  | 802 | { | 
|  | 803 | struct iucv_path *p, *n; | 
|  | 804 |  | 
|  | 805 | mutex_lock(&iucv_register_mutex); | 
|  | 806 | spin_lock_bh(&iucv_table_lock); | 
|  | 807 | /* Remove handler from the iucv_handler_list. */ | 
|  | 808 | list_del_init(&handler->list); | 
|  | 809 | /* Sever all pathids still refering to the handler. */ | 
|  | 810 | list_for_each_entry_safe(p, n, &handler->paths, list) { | 
|  | 811 | iucv_sever_pathid(p->pathid, NULL); | 
|  | 812 | iucv_path_table[p->pathid] = NULL; | 
|  | 813 | list_del(&p->list); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 814 | iucv_path_free(p); | 
|  | 815 | } | 
|  | 816 | spin_unlock_bh(&iucv_table_lock); | 
|  | 817 | if (!smp) | 
|  | 818 | iucv_nonsmp_handler--; | 
|  | 819 | if (list_empty(&iucv_handler_list)) | 
|  | 820 | iucv_disable(); | 
|  | 821 | else if (!smp && iucv_nonsmp_handler == 0) | 
|  | 822 | iucv_setmask_mp(); | 
|  | 823 | mutex_unlock(&iucv_register_mutex); | 
|  | 824 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 825 | EXPORT_SYMBOL(iucv_unregister); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 826 |  | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 827 | static int iucv_reboot_event(struct notifier_block *this, | 
|  | 828 | unsigned long event, void *ptr) | 
|  | 829 | { | 
|  | 830 | int i, rc; | 
|  | 831 |  | 
|  | 832 | get_online_cpus(); | 
|  | 833 | on_each_cpu(iucv_block_cpu, NULL, 1); | 
|  | 834 | preempt_disable(); | 
|  | 835 | for (i = 0; i < iucv_max_pathid; i++) { | 
|  | 836 | if (iucv_path_table[i]) | 
|  | 837 | rc = iucv_sever_pathid(i, NULL); | 
|  | 838 | } | 
|  | 839 | preempt_enable(); | 
|  | 840 | put_online_cpus(); | 
|  | 841 | iucv_disable(); | 
|  | 842 | return NOTIFY_DONE; | 
|  | 843 | } | 
|  | 844 |  | 
/* Severs all active IUCV paths before the machine reboots. */
static struct notifier_block iucv_reboot_notifier = {
	.notifier_call = iucv_reboot_event,
};
|  | 848 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 849 | /** | 
|  | 850 | * iucv_path_accept | 
|  | 851 | * @path: address of iucv path structure | 
|  | 852 | * @handler: address of iucv handler structure | 
|  | 853 | * @userdata: 16 bytes of data reflected to the communication partner | 
|  | 854 | * @private: private data passed to interrupt handlers for this path | 
|  | 855 | * | 
|  | 856 | * This function is issued after the user received a connection pending | 
|  | 857 | * external interrupt and now wishes to complete the IUCV communication path. | 
|  | 858 | * | 
|  | 859 | * Returns the result of the CP IUCV call. | 
|  | 860 | */ | 
|  | 861 | int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, | 
|  | 862 | u8 userdata[16], void *private) | 
|  | 863 | { | 
|  | 864 | union iucv_param *parm; | 
|  | 865 | int rc; | 
|  | 866 |  | 
|  | 867 | local_bh_disable(); | 
| Hendrik Brueckner | d28ecab | 2009-09-16 04:37:23 +0000 | [diff] [blame] | 868 | if (cpus_empty(iucv_buffer_cpumask)) { | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 869 | rc = -EIO; | 
|  | 870 | goto out; | 
|  | 871 | } | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 872 | /* Prepare parameter block. */ | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 873 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 874 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 875 | parm->ctrl.ippathid = path->pathid; | 
|  | 876 | parm->ctrl.ipmsglim = path->msglim; | 
|  | 877 | if (userdata) | 
|  | 878 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 
|  | 879 | parm->ctrl.ipflags1 = path->flags; | 
|  | 880 |  | 
|  | 881 | rc = iucv_call_b2f0(IUCV_ACCEPT, parm); | 
|  | 882 | if (!rc) { | 
|  | 883 | path->private = private; | 
|  | 884 | path->msglim = parm->ctrl.ipmsglim; | 
|  | 885 | path->flags = parm->ctrl.ipflags1; | 
|  | 886 | } | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 887 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 888 | local_bh_enable(); | 
|  | 889 | return rc; | 
|  | 890 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 891 | EXPORT_SYMBOL(iucv_path_accept); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 892 |  | 
|  | 893 | /** | 
|  | 894 | * iucv_path_connect | 
|  | 895 | * @path: address of iucv path structure | 
|  | 896 | * @handler: address of iucv handler structure | 
|  | 897 | * @userid: 8-byte user identification | 
|  | 898 | * @system: 8-byte target system identification | 
|  | 899 | * @userdata: 16 bytes of data reflected to the communication partner | 
|  | 900 | * @private: private data passed to interrupt handlers for this path | 
|  | 901 | * | 
|  | 902 | * This function establishes an IUCV path. Although the connect may complete | 
|  | 903 | * successfully, you are not able to use the path until you receive an IUCV | 
|  | 904 | * Connection Complete external interrupt. | 
|  | 905 | * | 
|  | 906 | * Returns the result of the CP IUCV call. | 
|  | 907 | */ | 
|  | 908 | int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, | 
|  | 909 | u8 userid[8], u8 system[8], u8 userdata[16], | 
|  | 910 | void *private) | 
|  | 911 | { | 
|  | 912 | union iucv_param *parm; | 
|  | 913 | int rc; | 
|  | 914 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 915 | spin_lock_bh(&iucv_table_lock); | 
|  | 916 | iucv_cleanup_queue(); | 
| Hendrik Brueckner | d28ecab | 2009-09-16 04:37:23 +0000 | [diff] [blame] | 917 | if (cpus_empty(iucv_buffer_cpumask)) { | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 918 | rc = -EIO; | 
|  | 919 | goto out; | 
|  | 920 | } | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 921 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 922 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 923 | parm->ctrl.ipmsglim = path->msglim; | 
|  | 924 | parm->ctrl.ipflags1 = path->flags; | 
|  | 925 | if (userid) { | 
|  | 926 | memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid)); | 
|  | 927 | ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); | 
|  | 928 | EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); | 
|  | 929 | } | 
|  | 930 | if (system) { | 
|  | 931 | memcpy(parm->ctrl.iptarget, system, | 
|  | 932 | sizeof(parm->ctrl.iptarget)); | 
|  | 933 | ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); | 
|  | 934 | EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); | 
|  | 935 | } | 
|  | 936 | if (userdata) | 
|  | 937 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 
|  | 938 |  | 
|  | 939 | rc = iucv_call_b2f0(IUCV_CONNECT, parm); | 
|  | 940 | if (!rc) { | 
|  | 941 | if (parm->ctrl.ippathid < iucv_max_pathid) { | 
|  | 942 | path->pathid = parm->ctrl.ippathid; | 
|  | 943 | path->msglim = parm->ctrl.ipmsglim; | 
|  | 944 | path->flags = parm->ctrl.ipflags1; | 
|  | 945 | path->handler = handler; | 
|  | 946 | path->private = private; | 
|  | 947 | list_add_tail(&path->list, &handler->paths); | 
|  | 948 | iucv_path_table[path->pathid] = path; | 
|  | 949 | } else { | 
|  | 950 | iucv_sever_pathid(parm->ctrl.ippathid, | 
|  | 951 | iucv_error_pathid); | 
|  | 952 | rc = -EIO; | 
|  | 953 | } | 
|  | 954 | } | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 955 | out: | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 956 | spin_unlock_bh(&iucv_table_lock); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 957 | return rc; | 
|  | 958 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 959 | EXPORT_SYMBOL(iucv_path_connect); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 960 |  | 
|  | 961 | /** | 
|  | 962 | * iucv_path_quiesce: | 
|  | 963 | * @path: address of iucv path structure | 
|  | 964 | * @userdata: 16 bytes of data reflected to the communication partner | 
|  | 965 | * | 
|  | 966 | * This function temporarily suspends incoming messages on an IUCV path. | 
|  | 967 | * You can later reactivate the path by invoking the iucv_resume function. | 
|  | 968 | * | 
|  | 969 | * Returns the result from the CP IUCV call. | 
|  | 970 | */ | 
|  | 971 | int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]) | 
|  | 972 | { | 
|  | 973 | union iucv_param *parm; | 
|  | 974 | int rc; | 
|  | 975 |  | 
|  | 976 | local_bh_disable(); | 
| Hendrik Brueckner | d28ecab | 2009-09-16 04:37:23 +0000 | [diff] [blame] | 977 | if (cpus_empty(iucv_buffer_cpumask)) { | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 978 | rc = -EIO; | 
|  | 979 | goto out; | 
|  | 980 | } | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 981 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 982 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 983 | if (userdata) | 
|  | 984 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 
|  | 985 | parm->ctrl.ippathid = path->pathid; | 
|  | 986 | rc = iucv_call_b2f0(IUCV_QUIESCE, parm); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 987 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 988 | local_bh_enable(); | 
|  | 989 | return rc; | 
|  | 990 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 991 | EXPORT_SYMBOL(iucv_path_quiesce); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 992 |  | 
|  | 993 | /** | 
|  | 994 | * iucv_path_resume: | 
|  | 995 | * @path: address of iucv path structure | 
|  | 996 | * @userdata: 16 bytes of data reflected to the communication partner | 
|  | 997 | * | 
|  | 998 | * This function resumes incoming messages on an IUCV path that has | 
|  | 999 | * been stopped with iucv_path_quiesce. | 
|  | 1000 | * | 
|  | 1001 | * Returns the result from the CP IUCV call. | 
|  | 1002 | */ | 
|  | 1003 | int iucv_path_resume(struct iucv_path *path, u8 userdata[16]) | 
|  | 1004 | { | 
|  | 1005 | union iucv_param *parm; | 
|  | 1006 | int rc; | 
|  | 1007 |  | 
|  | 1008 | local_bh_disable(); | 
| Hendrik Brueckner | d28ecab | 2009-09-16 04:37:23 +0000 | [diff] [blame] | 1009 | if (cpus_empty(iucv_buffer_cpumask)) { | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1010 | rc = -EIO; | 
|  | 1011 | goto out; | 
|  | 1012 | } | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1013 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1014 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 1015 | if (userdata) | 
|  | 1016 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 
|  | 1017 | parm->ctrl.ippathid = path->pathid; | 
|  | 1018 | rc = iucv_call_b2f0(IUCV_RESUME, parm); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1019 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1020 | local_bh_enable(); | 
|  | 1021 | return rc; | 
|  | 1022 | } | 
|  | 1023 |  | 
|  | 1024 | /** | 
|  | 1025 | * iucv_path_sever | 
|  | 1026 | * @path: address of iucv path structure | 
|  | 1027 | * @userdata: 16 bytes of data reflected to the communication partner | 
|  | 1028 | * | 
|  | 1029 | * This function terminates an IUCV path. | 
|  | 1030 | * | 
|  | 1031 | * Returns the result from the CP IUCV call. | 
|  | 1032 | */ | 
|  | 1033 | int iucv_path_sever(struct iucv_path *path, u8 userdata[16]) | 
|  | 1034 | { | 
|  | 1035 | int rc; | 
|  | 1036 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1037 | preempt_disable(); | 
| Hendrik Brueckner | d28ecab | 2009-09-16 04:37:23 +0000 | [diff] [blame] | 1038 | if (cpus_empty(iucv_buffer_cpumask)) { | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1039 | rc = -EIO; | 
|  | 1040 | goto out; | 
|  | 1041 | } | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1042 | if (iucv_active_cpu != smp_processor_id()) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1043 | spin_lock_bh(&iucv_table_lock); | 
|  | 1044 | rc = iucv_sever_pathid(path->pathid, userdata); | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 1045 | iucv_path_table[path->pathid] = NULL; | 
|  | 1046 | list_del_init(&path->list); | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1047 | if (iucv_active_cpu != smp_processor_id()) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1048 | spin_unlock_bh(&iucv_table_lock); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1049 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1050 | preempt_enable(); | 
|  | 1051 | return rc; | 
|  | 1052 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1053 | EXPORT_SYMBOL(iucv_path_sever); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1054 |  | 
|  | 1055 | /** | 
|  | 1056 | * iucv_message_purge | 
|  | 1057 | * @path: address of iucv path structure | 
|  | 1058 | * @msg: address of iucv msg structure | 
|  | 1059 | * @srccls: source class of message | 
|  | 1060 | * | 
|  | 1061 | * Cancels a message you have sent. | 
|  | 1062 | * | 
|  | 1063 | * Returns the result from the CP IUCV call. | 
|  | 1064 | */ | 
|  | 1065 | int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, | 
|  | 1066 | u32 srccls) | 
|  | 1067 | { | 
|  | 1068 | union iucv_param *parm; | 
|  | 1069 | int rc; | 
|  | 1070 |  | 
|  | 1071 | local_bh_disable(); | 
| Hendrik Brueckner | d28ecab | 2009-09-16 04:37:23 +0000 | [diff] [blame] | 1072 | if (cpus_empty(iucv_buffer_cpumask)) { | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1073 | rc = -EIO; | 
|  | 1074 | goto out; | 
|  | 1075 | } | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1076 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1077 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 1078 | parm->purge.ippathid = path->pathid; | 
|  | 1079 | parm->purge.ipmsgid = msg->id; | 
|  | 1080 | parm->purge.ipsrccls = srccls; | 
|  | 1081 | parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID; | 
|  | 1082 | rc = iucv_call_b2f0(IUCV_PURGE, parm); | 
|  | 1083 | if (!rc) { | 
|  | 1084 | msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; | 
|  | 1085 | msg->tag = parm->purge.ipmsgtag; | 
|  | 1086 | } | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1087 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1088 | local_bh_enable(); | 
|  | 1089 | return rc; | 
|  | 1090 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1091 | EXPORT_SYMBOL(iucv_message_purge); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1092 |  | 
|  | 1093 | /** | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1094 | * iucv_message_receive_iprmdata | 
|  | 1095 | * @path: address of iucv path structure | 
|  | 1096 | * @msg: address of iucv msg structure | 
|  | 1097 | * @flags: how the message is received (IUCV_IPBUFLST) | 
|  | 1098 | * @buffer: address of data buffer or address of struct iucv_array | 
|  | 1099 | * @size: length of data buffer | 
|  | 1100 | * @residual: | 
|  | 1101 | * | 
|  | 1102 | * Internal function used by iucv_message_receive and __iucv_message_receive | 
|  | 1103 | * to receive RMDATA data stored in struct iucv_message. | 
|  | 1104 | */ | 
|  | 1105 | static int iucv_message_receive_iprmdata(struct iucv_path *path, | 
|  | 1106 | struct iucv_message *msg, | 
|  | 1107 | u8 flags, void *buffer, | 
|  | 1108 | size_t size, size_t *residual) | 
|  | 1109 | { | 
|  | 1110 | struct iucv_array *array; | 
|  | 1111 | u8 *rmmsg; | 
|  | 1112 | size_t copy; | 
|  | 1113 |  | 
|  | 1114 | /* | 
|  | 1115 | * Message is 8 bytes long and has been stored to the | 
|  | 1116 | * message descriptor itself. | 
|  | 1117 | */ | 
|  | 1118 | if (residual) | 
|  | 1119 | *residual = abs(size - 8); | 
|  | 1120 | rmmsg = msg->rmmsg; | 
|  | 1121 | if (flags & IUCV_IPBUFLST) { | 
|  | 1122 | /* Copy to struct iucv_array. */ | 
|  | 1123 | size = (size < 8) ? size : 8; | 
|  | 1124 | for (array = buffer; size > 0; array++) { | 
|  | 1125 | copy = min_t(size_t, size, array->length); | 
|  | 1126 | memcpy((u8 *)(addr_t) array->address, | 
|  | 1127 | rmmsg, copy); | 
|  | 1128 | rmmsg += copy; | 
|  | 1129 | size -= copy; | 
|  | 1130 | } | 
|  | 1131 | } else { | 
|  | 1132 | /* Copy to direct buffer. */ | 
|  | 1133 | memcpy(buffer, rmmsg, min_t(size_t, size, 8)); | 
|  | 1134 | } | 
|  | 1135 | return 0; | 
|  | 1136 | } | 
|  | 1137 |  | 
|  | 1138 | /** | 
|  | 1139 | * __iucv_message_receive | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1140 | * @path: address of iucv path structure | 
|  | 1141 | * @msg: address of iucv msg structure | 
|  | 1142 | * @flags: how the message is received (IUCV_IPBUFLST) | 
|  | 1143 | * @buffer: address of data buffer or address of struct iucv_array | 
|  | 1144 | * @size: length of data buffer | 
|  | 1145 | * @residual: | 
|  | 1146 | * | 
|  | 1147 | * This function receives messages that are being sent to you over | 
|  | 1148 | * established paths. This function will deal with RMDATA messages | 
|  | 1149 | * embedded in struct iucv_message as well. | 
|  | 1150 | * | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1151 | * Locking:	no locking | 
|  | 1152 | * | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1153 | * Returns the result from the CP IUCV call. | 
|  | 1154 | */ | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1155 | int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | 
|  | 1156 | u8 flags, void *buffer, size_t size, size_t *residual) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1157 | { | 
|  | 1158 | union iucv_param *parm; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1159 | int rc; | 
|  | 1160 |  | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1161 | if (msg->flags & IUCV_IPRMDATA) | 
|  | 1162 | return iucv_message_receive_iprmdata(path, msg, flags, | 
|  | 1163 | buffer, size, residual); | 
| Hendrik Brueckner | d28ecab | 2009-09-16 04:37:23 +0000 | [diff] [blame] | 1164 | if (cpus_empty(iucv_buffer_cpumask)) { | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1165 | rc = -EIO; | 
|  | 1166 | goto out; | 
|  | 1167 | } | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1168 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1169 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 1170 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | 
|  | 1171 | parm->db.ipbfln1f = (u32) size; | 
|  | 1172 | parm->db.ipmsgid = msg->id; | 
|  | 1173 | parm->db.ippathid = path->pathid; | 
|  | 1174 | parm->db.iptrgcls = msg->class; | 
|  | 1175 | parm->db.ipflags1 = (flags | IUCV_IPFGPID | | 
|  | 1176 | IUCV_IPFGMID | IUCV_IPTRGCLS); | 
|  | 1177 | rc = iucv_call_b2f0(IUCV_RECEIVE, parm); | 
|  | 1178 | if (!rc || rc == 5) { | 
|  | 1179 | msg->flags = parm->db.ipflags1; | 
|  | 1180 | if (residual) | 
|  | 1181 | *residual = parm->db.ipbfln1f; | 
|  | 1182 | } | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1183 | out: | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1184 | return rc; | 
|  | 1185 | } | 
|  | 1186 | EXPORT_SYMBOL(__iucv_message_receive); | 
|  | 1187 |  | 
|  | 1188 | /** | 
|  | 1189 | * iucv_message_receive | 
|  | 1190 | * @path: address of iucv path structure | 
|  | 1191 | * @msg: address of iucv msg structure | 
|  | 1192 | * @flags: how the message is received (IUCV_IPBUFLST) | 
|  | 1193 | * @buffer: address of data buffer or address of struct iucv_array | 
|  | 1194 | * @size: length of data buffer | 
|  | 1195 | * @residual: | 
|  | 1196 | * | 
|  | 1197 | * This function receives messages that are being sent to you over | 
|  | 1198 | * established paths. This function will deal with RMDATA messages | 
|  | 1199 | * embedded in struct iucv_message as well. | 
|  | 1200 | * | 
|  | 1201 | * Locking:	local_bh_enable/local_bh_disable | 
|  | 1202 | * | 
|  | 1203 | * Returns the result from the CP IUCV call. | 
|  | 1204 | */ | 
|  | 1205 | int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | 
|  | 1206 | u8 flags, void *buffer, size_t size, size_t *residual) | 
|  | 1207 | { | 
|  | 1208 | int rc; | 
|  | 1209 |  | 
|  | 1210 | if (msg->flags & IUCV_IPRMDATA) | 
|  | 1211 | return iucv_message_receive_iprmdata(path, msg, flags, | 
|  | 1212 | buffer, size, residual); | 
|  | 1213 | local_bh_disable(); | 
|  | 1214 | rc = __iucv_message_receive(path, msg, flags, buffer, size, residual); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1215 | local_bh_enable(); | 
|  | 1216 | return rc; | 
|  | 1217 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1218 | EXPORT_SYMBOL(iucv_message_receive); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1219 |  | 
/**
 * iucv_message_reject
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 *
 * The reject function refuses a specified message. Between the time you
 * are notified of a message and the time that you complete the message,
 * the message may be rejected.
 *
 * Locking:	local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	/* No CPU has an interrupt buffer declared: IUCV is unusable. */
	if (cpus_empty(iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Per-cpu parameter block; valid only while softirqs stay off. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ippathid = path->pathid;
	parm->db.ipmsgid = msg->id;
	parm->db.iptrgcls = msg->class;
	/* Identify the message fully: target class, message id, pathid. */
	parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
	rc = iucv_call_b2f0(IUCV_REJECT, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reject);
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1253 |  | 
/**
 * iucv_message_reply
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @reply: address of reply data buffer or address of struct iucv_array
 * @size: length of reply data buffer
 *
 * This function responds to the two-way messages that you receive. You
 * must identify completely the message to which you wish to reply. ie,
 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
 * the parameter list.
 *
 * Locking:	local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
		       u8 flags, void *reply, size_t size)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	/* No CPU has an interrupt buffer declared: IUCV is unusable. */
	if (cpus_empty(iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Per-cpu parameter block; valid only while softirqs stay off. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		/* Reply fits into the parameter list (at most 8 bytes);
		 * the rest of iprmmsg stays zero from the memset above. */
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags;
		parm->dpl.ipmsgid = msg->id;
		parm->dpl.iptrgcls = msg->class;
		memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
	} else {
		parm->db.ipbfadr1 = (u32)(addr_t) reply;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags;
		parm->db.ipmsgid = msg->id;
		parm->db.iptrgcls = msg->class;
	}
	rc = iucv_call_b2f0(IUCV_REPLY, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reply);
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1302 |  | 
|  | 1303 | /** | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1304 | * __iucv_message_send | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1305 | * @path: address of iucv path structure | 
|  | 1306 | * @msg: address of iucv msg structure | 
|  | 1307 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | 
|  | 1308 | * @srccls: source class of message | 
|  | 1309 | * @buffer: address of send buffer or address of struct iucv_array | 
|  | 1310 | * @size: length of send buffer | 
|  | 1311 | * | 
|  | 1312 | * This function transmits data to another application. Data to be | 
|  | 1313 | * transmitted is in a buffer and this is a one-way message and the | 
|  | 1314 | * receiver will not reply to the message. | 
|  | 1315 | * | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1316 | * Locking:	no locking | 
|  | 1317 | * | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1318 | * Returns the result from the CP IUCV call. | 
|  | 1319 | */ | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1320 | int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1321 | u8 flags, u32 srccls, void *buffer, size_t size) | 
|  | 1322 | { | 
|  | 1323 | union iucv_param *parm; | 
|  | 1324 | int rc; | 
|  | 1325 |  | 
| Hendrik Brueckner | d28ecab | 2009-09-16 04:37:23 +0000 | [diff] [blame] | 1326 | if (cpus_empty(iucv_buffer_cpumask)) { | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1327 | rc = -EIO; | 
|  | 1328 | goto out; | 
|  | 1329 | } | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1330 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1331 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 1332 | if (flags & IUCV_IPRMDATA) { | 
|  | 1333 | /* Message of 8 bytes can be placed into the parameter list. */ | 
|  | 1334 | parm->dpl.ippathid = path->pathid; | 
|  | 1335 | parm->dpl.ipflags1 = flags | IUCV_IPNORPY; | 
|  | 1336 | parm->dpl.iptrgcls = msg->class; | 
|  | 1337 | parm->dpl.ipsrccls = srccls; | 
|  | 1338 | parm->dpl.ipmsgtag = msg->tag; | 
|  | 1339 | memcpy(parm->dpl.iprmmsg, buffer, 8); | 
|  | 1340 | } else { | 
|  | 1341 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | 
|  | 1342 | parm->db.ipbfln1f = (u32) size; | 
|  | 1343 | parm->db.ippathid = path->pathid; | 
|  | 1344 | parm->db.ipflags1 = flags | IUCV_IPNORPY; | 
|  | 1345 | parm->db.iptrgcls = msg->class; | 
|  | 1346 | parm->db.ipsrccls = srccls; | 
|  | 1347 | parm->db.ipmsgtag = msg->tag; | 
|  | 1348 | } | 
|  | 1349 | rc = iucv_call_b2f0(IUCV_SEND, parm); | 
|  | 1350 | if (!rc) | 
|  | 1351 | msg->id = parm->db.ipmsgid; | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1352 | out: | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1353 | return rc; | 
|  | 1354 | } | 
|  | 1355 | EXPORT_SYMBOL(__iucv_message_send); | 
|  | 1356 |  | 
/**
 * iucv_message_send
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer and this is a one-way message and the
 * receiver will not reply to the message.
 *
 * Locking:	local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
		      u8 flags, u32 srccls, void *buffer, size_t size)
{
	int rc;

	/* __iucv_message_send() uses the per-cpu iucv_param block; keep
	 * softirqs off so the IUCV tasklet cannot reuse it concurrently. */
	local_bh_disable();
	rc = __iucv_message_send(path, msg, flags, srccls, buffer, size);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send);
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1385 |  | 
/**
 * iucv_message_send2way
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent and the reply is received
 *	   (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 * @answer: address of answer buffer or address of struct iucv_array
 * @asize: size of reply buffer
 * @residual: currently unused by this function — NOTE(review): confirm
 *	      whether callers expect it to be filled in
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer. The receiver of the send is expected to
 * reply to the message and a buffer is provided into which IUCV moves
 * the reply to this message.
 *
 * Locking:	local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
			  u8 flags, u32 srccls, void *buffer, size_t size,
			  void *answer, size_t asize, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	/* No CPU has an interrupt buffer declared: IUCV is unusable. */
	if (cpus_empty(iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Per-cpu parameter block; valid only while softirqs stay off. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	/* NOTE(review): ipflags1 is taken from path->flags rather than the
	 * @flags argument; @flags only selects the parameter-list variant
	 * here — confirm this asymmetry with __iucv_message_send() is
	 * intended. */
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = path->flags;	/* priority message */
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
		parm->dpl.ipbfln2f = (u32) asize;
		memcpy(parm->dpl.iprmmsg, buffer, 8);
	} else {
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = path->flags;	/* priority message */
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
		parm->db.ipbfadr1 = (u32)(addr_t) buffer;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ipbfadr2 = (u32)(addr_t) answer;
		parm->db.ipbfln2f = (u32) asize;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	/* On success CP assigned a message id; report it to the caller. */
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send2way);
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1447 |  | 
/**
 * iucv_path_pending
 * @data: Pointer to external interrupt buffer
 *
 * Process connection pending work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* External interrupt buffer layout for a connection pending (iptype 0x01)
 * event; the layout is dictated by CP, hence __packed and the res* pads. */
struct iucv_path_pending {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u16 ipmsglim;
	u16 res1;
	u8  ipvmid[8];
	u8  ipuser[16];
	u32 res3;
	u8  ippollfg;
	u8  res4[3];
} __packed;
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1467 |  | 
static void iucv_path_pending(struct iucv_irq_data *data)
{
	struct iucv_path_pending *ipp = (void *) data;
	struct iucv_handler *handler;
	struct iucv_path *path;
	char *error;

	/* The table slot for this new pathid must still be free. */
	BUG_ON(iucv_path_table[ipp->ippathid]);
	/* New pathid, handler found. Create a new path struct. */
	error = iucv_error_no_memory;
	path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
	if (!path)
		goto out_sever;
	path->pathid = ipp->ippathid;
	iucv_path_table[path->pathid] = path;
	/* Convert the peer's VM user id from EBCDIC before handing it on. */
	EBCASC(ipp->ipvmid, 8);

	/* Call registered handler until one is found that wants the path. */
	list_for_each_entry(handler, &iucv_handler_list, list) {
		if (!handler->path_pending)
			continue;
		/*
		 * Add path to handler to allow a call to iucv_path_sever
		 * inside the path_pending function. If the handler returns
		 * an error remove the path from the handler again.
		 */
		list_add(&path->list, &handler->paths);
		path->handler = handler;
		if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
			return;
		list_del(&path->list);
		path->handler = NULL;
	}
	/* No handler wanted the path. */
	iucv_path_table[path->pathid] = NULL;
	iucv_path_free(path);
	error = iucv_error_no_listener;
out_sever:
	/* Refuse the connection, telling the peer why. */
	iucv_sever_pathid(ipp->ippathid, error);
}
|  | 1508 |  | 
/**
 * iucv_path_complete
 * @data: Pointer to external interrupt buffer
 *
 * Process connection complete work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* External interrupt buffer layout for a connection complete (iptype 0x02)
 * event; the layout is dictated by CP, hence __packed and the res* pads. */
struct iucv_path_complete {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u16 ipmsglim;
	u16 res1;
	u8  res2[8];
	u8  ipuser[16];
	u32 res3;
	u8  ippollfg;
	u8  res4[3];
} __packed;
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1528 |  | 
|  | 1529 | static void iucv_path_complete(struct iucv_irq_data *data) | 
|  | 1530 | { | 
|  | 1531 | struct iucv_path_complete *ipc = (void *) data; | 
|  | 1532 | struct iucv_path *path = iucv_path_table[ipc->ippathid]; | 
|  | 1533 |  | 
| Hendrik Brueckner | b8942e3 | 2009-04-21 23:26:23 +0000 | [diff] [blame] | 1534 | if (path) | 
|  | 1535 | path->flags = ipc->ipflags1; | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1536 | if (path && path->handler && path->handler->path_complete) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1537 | path->handler->path_complete(path, ipc->ipuser); | 
|  | 1538 | } | 
|  | 1539 |  | 
/**
 * iucv_path_severed
 * @data: Pointer to external interrupt buffer
 *
 * Process connection severed work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* External interrupt buffer layout for a connection severed (iptype 0x03)
 * event; the layout is dictated by CP, hence __packed and the res* pads. */
struct iucv_path_severed {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1558 |  | 
|  | 1559 | static void iucv_path_severed(struct iucv_irq_data *data) | 
|  | 1560 | { | 
|  | 1561 | struct iucv_path_severed *ips = (void *) data; | 
|  | 1562 | struct iucv_path *path = iucv_path_table[ips->ippathid]; | 
|  | 1563 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1564 | if (!path || !path->handler)	/* Already severed */ | 
|  | 1565 | return; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1566 | if (path->handler->path_severed) | 
|  | 1567 | path->handler->path_severed(path, ips->ipuser); | 
|  | 1568 | else { | 
|  | 1569 | iucv_sever_pathid(path->pathid, NULL); | 
|  | 1570 | iucv_path_table[path->pathid] = NULL; | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 1571 | list_del(&path->list); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1572 | iucv_path_free(path); | 
|  | 1573 | } | 
|  | 1574 | } | 
|  | 1575 |  | 
/**
 * iucv_path_quiesced
 * @data: Pointer to external interrupt buffer
 *
 * Process connection quiesced work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* External interrupt buffer layout for a connection quiesced (iptype 0x04)
 * event; the layout is dictated by CP, hence __packed and the res* pads. */
struct iucv_path_quiesced {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1594 |  | 
|  | 1595 | static void iucv_path_quiesced(struct iucv_irq_data *data) | 
|  | 1596 | { | 
|  | 1597 | struct iucv_path_quiesced *ipq = (void *) data; | 
|  | 1598 | struct iucv_path *path = iucv_path_table[ipq->ippathid]; | 
|  | 1599 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1600 | if (path && path->handler && path->handler->path_quiesced) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1601 | path->handler->path_quiesced(path, ipq->ipuser); | 
|  | 1602 | } | 
|  | 1603 |  | 
/**
 * iucv_path_resumed
 * @data: Pointer to external interrupt buffer
 *
 * Process connection resumed work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* External interrupt buffer layout for a connection resumed (iptype 0x05)
 * event; the layout is dictated by CP, hence __packed and the res* pads. */
struct iucv_path_resumed {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1622 |  | 
|  | 1623 | static void iucv_path_resumed(struct iucv_irq_data *data) | 
|  | 1624 | { | 
|  | 1625 | struct iucv_path_resumed *ipr = (void *) data; | 
|  | 1626 | struct iucv_path *path = iucv_path_table[ipr->ippathid]; | 
|  | 1627 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1628 | if (path && path->handler && path->handler->path_resumed) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1629 | path->handler->path_resumed(path, ipr->ipuser); | 
|  | 1630 | } | 
|  | 1631 |  | 
/**
 * iucv_message_complete
 * @data: Pointer to external interrupt buffer
 *
 * Process message complete work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* External interrupt buffer layout for a message complete (iptype 0x06/0x07)
 * event; the layout is dictated by CP, hence __packed and the res* pads. */
struct iucv_message_complete {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u32 ipmsgid;
	u32 ipaudit;
	u8  iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res;
	u32 ipbfln2f;
	u8  ippollfg;
	u8  res2[3];
} __packed;
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1653 |  | 
|  | 1654 | static void iucv_message_complete(struct iucv_irq_data *data) | 
|  | 1655 | { | 
|  | 1656 | struct iucv_message_complete *imc = (void *) data; | 
|  | 1657 | struct iucv_path *path = iucv_path_table[imc->ippathid]; | 
|  | 1658 | struct iucv_message msg; | 
|  | 1659 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1660 | if (path && path->handler && path->handler->message_complete) { | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1661 | msg.flags = imc->ipflags1; | 
|  | 1662 | msg.id = imc->ipmsgid; | 
|  | 1663 | msg.audit = imc->ipaudit; | 
|  | 1664 | memcpy(msg.rmmsg, imc->iprmmsg, 8); | 
|  | 1665 | msg.class = imc->ipsrccls; | 
|  | 1666 | msg.tag = imc->ipmsgtag; | 
|  | 1667 | msg.length = imc->ipbfln2f; | 
|  | 1668 | path->handler->message_complete(path, &msg); | 
|  | 1669 | } | 
|  | 1670 | } | 
|  | 1671 |  | 
/**
 * iucv_message_pending
 * @data: Pointer to external interrupt buffer
 *
 * Process message pending work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* External interrupt buffer layout for a message pending (iptype 0x08/0x09)
 * event; the layout is dictated by CP, hence __packed and the res* pads.
 * The two unions overlay the in-parameter-list message bytes with the
 * buffer-length word, depending on IUCV_IPRMDATA in ipflags1. */
struct iucv_message_pending {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u32 ipmsgid;
	u32 iptrgcls;
	union {
		u32 iprmmsg1_u32;
		u8  iprmmsg1[4];
	} ln1msg1;
	union {
		u32 ipbfln1f;
		u8  iprmmsg2[4];
	} ln1msg2;
	u32 res1[3];
	u32 ipbfln2f;
	u8  ippollfg;
	u8  res2[3];
} __packed;
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1698 |  | 
|  | 1699 | static void iucv_message_pending(struct iucv_irq_data *data) | 
|  | 1700 | { | 
|  | 1701 | struct iucv_message_pending *imp = (void *) data; | 
|  | 1702 | struct iucv_path *path = iucv_path_table[imp->ippathid]; | 
|  | 1703 | struct iucv_message msg; | 
|  | 1704 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1705 | if (path && path->handler && path->handler->message_pending) { | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1706 | msg.flags = imp->ipflags1; | 
|  | 1707 | msg.id = imp->ipmsgid; | 
|  | 1708 | msg.class = imp->iptrgcls; | 
|  | 1709 | if (imp->ipflags1 & IUCV_IPRMDATA) { | 
|  | 1710 | memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); | 
|  | 1711 | msg.length = 8; | 
|  | 1712 | } else | 
|  | 1713 | msg.length = imp->ln1msg2.ipbfln1f; | 
|  | 1714 | msg.reply_size = imp->ipbfln2f; | 
|  | 1715 | path->handler->message_pending(path, &msg); | 
|  | 1716 | } | 
|  | 1717 | } | 
|  | 1718 |  | 
/**
 * iucv_tasklet_fn:
 *
 * This tasklet loops over the queue of irq buffers created by
 * iucv_external_interrupt, calls the appropriate action handler
 * and then frees the buffer.
 */
static void iucv_tasklet_fn(unsigned long ignored)
{
	typedef void iucv_irq_fn(struct iucv_irq_data *);
	/* Dispatch table indexed by interrupt type; 0x01 (path pending)
	 * is routed to iucv_work_fn() by iucv_external_interrupt(). */
	static iucv_irq_fn *irq_fn[] = {
		[0x02] = iucv_path_complete,
		[0x03] = iucv_path_severed,
		[0x04] = iucv_path_quiesced,
		[0x05] = iucv_path_resumed,
		[0x06] = iucv_message_complete,
		[0x07] = iucv_message_complete,
		[0x08] = iucv_message_pending,
		[0x09] = iucv_message_pending,
	};
	LIST_HEAD(task_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	if (!spin_trylock(&iucv_table_lock)) {
		/* Lock is contended; retry on the next tasklet run. */
		tasklet_schedule(&iucv_tasklet);
		return;
	}
	iucv_active_cpu = smp_processor_id();

	/* Move the pending entries to a private list so the queue lock
	 * is not held while the handlers run. */
	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_task_queue, &task_queue);
	spin_unlock_irq(&iucv_queue_lock);

	list_for_each_entry_safe(p, n, &task_queue, list) {
		list_del_init(&p->list);
		irq_fn[p->data.iptype](&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock(&iucv_table_lock);
}
|  | 1762 |  | 
/**
 * iucv_work_fn:
 *
 * This work function loops over the queue of path pending irq blocks
 * created by iucv_external_interrupt, calls the appropriate action
 * handler and then frees the buffer.
 */
static void iucv_work_fn(struct work_struct *work)
{
	LIST_HEAD(work_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	spin_lock_bh(&iucv_table_lock);
	iucv_active_cpu = smp_processor_id();

	/* Move the pending entries to a private list so the queue lock
	 * is not held while the handlers run. */
	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_work_queue, &work_queue);
	spin_unlock_irq(&iucv_queue_lock);

	iucv_cleanup_queue();
	list_for_each_entry_safe(p, n, &work_queue, list) {
		list_del_init(&p->list);
		iucv_path_pending(&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock_bh(&iucv_table_lock);
}
|  | 1793 |  | 
/**
 * iucv_external_interrupt
 * @code: irq code
 *
 * Handles external interrupts coming in from CP.
 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
 */
static void iucv_external_interrupt(u16 code)
{
	struct iucv_irq_data *p;
	struct iucv_irq_list *work;

	/* CP stored the interrupt data in this CPU's declared buffer. */
	p = iucv_irq_data[smp_processor_id()];
	if (p->ippathid >= iucv_max_pathid) {
		/* Pathid out of range: complain once, then refuse it. */
		WARN_ON(p->ippathid >= iucv_max_pathid);
		iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
		return;
	}
	BUG_ON(p->iptype  < 0x01 || p->iptype > 0x09);
	/* Copy the data out of the per-cpu buffer into a queue entry. */
	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
	if (!work) {
		pr_warning("iucv_external_interrupt: out of memory\n");
		return;
	}
	memcpy(&work->data, p, sizeof(work->data));
	spin_lock(&iucv_queue_lock);
	if (p->iptype == 0x01) {
		/* Path pending interrupt. Handled in process context by
		 * iucv_work_fn(); everything else goes to the tasklet. */
		list_add_tail(&work->list, &iucv_work_queue);
		schedule_work(&iucv_work);
	} else {
		/* The other interrupts. */
		list_add_tail(&work->list, &iucv_task_queue);
		tasklet_schedule(&iucv_tasklet);
	}
	spin_unlock(&iucv_queue_lock);
}
|  | 1831 |  | 
| Ursula Braun | 672e405 | 2009-06-16 10:30:42 +0200 | [diff] [blame] | 1832 | static int iucv_pm_prepare(struct device *dev) | 
|  | 1833 | { | 
|  | 1834 | int rc = 0; | 
|  | 1835 |  | 
|  | 1836 | #ifdef CONFIG_PM_DEBUG | 
|  | 1837 | printk(KERN_INFO "iucv_pm_prepare\n"); | 
|  | 1838 | #endif | 
|  | 1839 | if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) | 
|  | 1840 | rc = dev->driver->pm->prepare(dev); | 
|  | 1841 | return rc; | 
|  | 1842 | } | 
|  | 1843 |  | 
|  | 1844 | static void iucv_pm_complete(struct device *dev) | 
|  | 1845 | { | 
|  | 1846 | #ifdef CONFIG_PM_DEBUG | 
|  | 1847 | printk(KERN_INFO "iucv_pm_complete\n"); | 
|  | 1848 | #endif | 
|  | 1849 | if (dev->driver && dev->driver->pm && dev->driver->pm->complete) | 
|  | 1850 | dev->driver->pm->complete(dev); | 
|  | 1851 | } | 
|  | 1852 |  | 
|  | 1853 | /** | 
|  | 1854 | * iucv_path_table_empty() - determine if iucv path table is empty | 
|  | 1855 | * | 
|  | 1856 | * Returns 0 if there are still iucv pathes defined | 
|  | 1857 | *	   1 if there are no iucv pathes defined | 
|  | 1858 | */ | 
|  | 1859 | int iucv_path_table_empty(void) | 
|  | 1860 | { | 
|  | 1861 | int i; | 
|  | 1862 |  | 
|  | 1863 | for (i = 0; i < iucv_max_pathid; i++) { | 
|  | 1864 | if (iucv_path_table[i]) | 
|  | 1865 | return 0; | 
|  | 1866 | } | 
|  | 1867 | return 1; | 
|  | 1868 | } | 
|  | 1869 |  | 
|  | 1870 | /** | 
|  | 1871 | * iucv_pm_freeze() - Freeze PM callback | 
|  | 1872 | * @dev:	iucv-based device | 
|  | 1873 | * | 
|  | 1874 | * disable iucv interrupts | 
|  | 1875 | * invoke callback function of the iucv-based driver | 
|  | 1876 | * shut down iucv, if no iucv-pathes are established anymore | 
|  | 1877 | */ | 
|  | 1878 | static int iucv_pm_freeze(struct device *dev) | 
|  | 1879 | { | 
|  | 1880 | int cpu; | 
| Ursula Braun | b7c2aec | 2009-11-12 21:46:27 +0000 | [diff] [blame] | 1881 | struct iucv_irq_list *p, *n; | 
| Ursula Braun | 672e405 | 2009-06-16 10:30:42 +0200 | [diff] [blame] | 1882 | int rc = 0; | 
|  | 1883 |  | 
|  | 1884 | #ifdef CONFIG_PM_DEBUG | 
|  | 1885 | printk(KERN_WARNING "iucv_pm_freeze\n"); | 
|  | 1886 | #endif | 
| Ursula Braun | b7c2aec | 2009-11-12 21:46:27 +0000 | [diff] [blame] | 1887 | if (iucv_pm_state != IUCV_PM_FREEZING) { | 
|  | 1888 | for_each_cpu_mask_nr(cpu, iucv_irq_cpumask) | 
|  | 1889 | smp_call_function_single(cpu, iucv_block_cpu_almost, | 
|  | 1890 | NULL, 1); | 
|  | 1891 | cancel_work_sync(&iucv_work); | 
|  | 1892 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) { | 
|  | 1893 | list_del_init(&p->list); | 
|  | 1894 | iucv_sever_pathid(p->data.ippathid, | 
|  | 1895 | iucv_error_no_listener); | 
|  | 1896 | kfree(p); | 
|  | 1897 | } | 
|  | 1898 | } | 
| Ursula Braun | 4c89d86 | 2009-09-16 04:37:22 +0000 | [diff] [blame] | 1899 | iucv_pm_state = IUCV_PM_FREEZING; | 
| Ursula Braun | 672e405 | 2009-06-16 10:30:42 +0200 | [diff] [blame] | 1900 | if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) | 
|  | 1901 | rc = dev->driver->pm->freeze(dev); | 
|  | 1902 | if (iucv_path_table_empty()) | 
|  | 1903 | iucv_disable(); | 
|  | 1904 | return rc; | 
|  | 1905 | } | 
|  | 1906 |  | 
|  | 1907 | /** | 
|  | 1908 | * iucv_pm_thaw() - Thaw PM callback | 
|  | 1909 | * @dev:	iucv-based device | 
|  | 1910 | * | 
|  | 1911 | * make iucv ready for use again: allocate path table, declare interrupt buffers | 
|  | 1912 | *				  and enable iucv interrupts | 
|  | 1913 | * invoke callback function of the iucv-based driver | 
|  | 1914 | */ | 
|  | 1915 | static int iucv_pm_thaw(struct device *dev) | 
|  | 1916 | { | 
|  | 1917 | int rc = 0; | 
|  | 1918 |  | 
|  | 1919 | #ifdef CONFIG_PM_DEBUG | 
|  | 1920 | printk(KERN_WARNING "iucv_pm_thaw\n"); | 
|  | 1921 | #endif | 
| Ursula Braun | 4c89d86 | 2009-09-16 04:37:22 +0000 | [diff] [blame] | 1922 | iucv_pm_state = IUCV_PM_THAWING; | 
| Ursula Braun | 672e405 | 2009-06-16 10:30:42 +0200 | [diff] [blame] | 1923 | if (!iucv_path_table) { | 
|  | 1924 | rc = iucv_enable(); | 
|  | 1925 | if (rc) | 
|  | 1926 | goto out; | 
|  | 1927 | } | 
|  | 1928 | if (cpus_empty(iucv_irq_cpumask)) { | 
|  | 1929 | if (iucv_nonsmp_handler) | 
|  | 1930 | /* enable interrupts on one cpu */ | 
|  | 1931 | iucv_allow_cpu(NULL); | 
|  | 1932 | else | 
|  | 1933 | /* enable interrupts on all cpus */ | 
|  | 1934 | iucv_setmask_mp(); | 
|  | 1935 | } | 
|  | 1936 | if (dev->driver && dev->driver->pm && dev->driver->pm->thaw) | 
|  | 1937 | rc = dev->driver->pm->thaw(dev); | 
|  | 1938 | out: | 
|  | 1939 | return rc; | 
|  | 1940 | } | 
|  | 1941 |  | 
|  | 1942 | /** | 
|  | 1943 | * iucv_pm_restore() - Restore PM callback | 
|  | 1944 | * @dev:	iucv-based device | 
|  | 1945 | * | 
|  | 1946 | * make iucv ready for use again: allocate path table, declare interrupt buffers | 
|  | 1947 | *				  and enable iucv interrupts | 
|  | 1948 | * invoke callback function of the iucv-based driver | 
|  | 1949 | */ | 
|  | 1950 | static int iucv_pm_restore(struct device *dev) | 
|  | 1951 | { | 
|  | 1952 | int rc = 0; | 
|  | 1953 |  | 
|  | 1954 | #ifdef CONFIG_PM_DEBUG | 
|  | 1955 | printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); | 
|  | 1956 | #endif | 
| Ursula Braun | 4c89d86 | 2009-09-16 04:37:22 +0000 | [diff] [blame] | 1957 | if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table) | 
|  | 1958 | pr_warning("Suspending Linux did not completely close all IUCV " | 
|  | 1959 | "connections\n"); | 
|  | 1960 | iucv_pm_state = IUCV_PM_RESTORING; | 
| Ursula Braun | 672e405 | 2009-06-16 10:30:42 +0200 | [diff] [blame] | 1961 | if (cpus_empty(iucv_irq_cpumask)) { | 
|  | 1962 | rc = iucv_query_maxconn(); | 
|  | 1963 | rc = iucv_enable(); | 
|  | 1964 | if (rc) | 
|  | 1965 | goto out; | 
|  | 1966 | } | 
|  | 1967 | if (dev->driver && dev->driver->pm && dev->driver->pm->restore) | 
|  | 1968 | rc = dev->driver->pm->restore(dev); | 
|  | 1969 | out: | 
|  | 1970 | return rc; | 
|  | 1971 | } | 
|  | 1972 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1973 | /** | 
|  | 1974 | * iucv_init | 
|  | 1975 | * | 
|  | 1976 | * Allocates and initializes various data structures. | 
|  | 1977 | */ | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1978 | static int __init iucv_init(void) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1979 | { | 
|  | 1980 | int rc; | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1981 | int cpu; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1982 |  | 
|  | 1983 | if (!MACHINE_IS_VM) { | 
|  | 1984 | rc = -EPROTONOSUPPORT; | 
|  | 1985 | goto out; | 
|  | 1986 | } | 
|  | 1987 | rc = iucv_query_maxconn(); | 
|  | 1988 | if (rc) | 
|  | 1989 | goto out; | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1990 | rc = register_external_interrupt(0x4000, iucv_external_interrupt); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1991 | if (rc) | 
|  | 1992 | goto out; | 
| Mark McLoughlin | 035da16 | 2008-12-15 12:58:29 +0000 | [diff] [blame] | 1993 | iucv_root = root_device_register("iucv"); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1994 | if (IS_ERR(iucv_root)) { | 
|  | 1995 | rc = PTR_ERR(iucv_root); | 
| Cornelia Huck | 2d7bf36 | 2008-04-10 02:12:45 -0700 | [diff] [blame] | 1996 | goto out_int; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1997 | } | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1998 |  | 
|  | 1999 | for_each_online_cpu(cpu) { | 
|  | 2000 | /* Note: GFP_DMA used to get memory below 2G */ | 
|  | 2001 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), | 
|  | 2002 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
|  | 2003 | if (!iucv_irq_data[cpu]) { | 
|  | 2004 | rc = -ENOMEM; | 
|  | 2005 | goto out_free; | 
|  | 2006 | } | 
|  | 2007 |  | 
|  | 2008 | /* Allocate parameter blocks. */ | 
|  | 2009 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), | 
|  | 2010 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
|  | 2011 | if (!iucv_param[cpu]) { | 
|  | 2012 | rc = -ENOMEM; | 
|  | 2013 | goto out_free; | 
|  | 2014 | } | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 2015 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), | 
|  | 2016 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
|  | 2017 | if (!iucv_param_irq[cpu]) { | 
|  | 2018 | rc = -ENOMEM; | 
|  | 2019 | goto out_free; | 
|  | 2020 | } | 
|  | 2021 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2022 | } | 
| Cornelia Huck | 2d7bf36 | 2008-04-10 02:12:45 -0700 | [diff] [blame] | 2023 | rc = register_hotcpu_notifier(&iucv_cpu_notifier); | 
|  | 2024 | if (rc) | 
|  | 2025 | goto out_free; | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 2026 | rc = register_reboot_notifier(&iucv_reboot_notifier); | 
|  | 2027 | if (rc) | 
|  | 2028 | goto out_cpu; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2029 | ASCEBC(iucv_error_no_listener, 16); | 
|  | 2030 | ASCEBC(iucv_error_no_memory, 16); | 
|  | 2031 | ASCEBC(iucv_error_pathid, 16); | 
|  | 2032 | iucv_available = 1; | 
| Cornelia Huck | 2d7bf36 | 2008-04-10 02:12:45 -0700 | [diff] [blame] | 2033 | rc = bus_register(&iucv_bus); | 
|  | 2034 | if (rc) | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 2035 | goto out_reboot; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2036 | return 0; | 
|  | 2037 |  | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 2038 | out_reboot: | 
|  | 2039 | unregister_reboot_notifier(&iucv_reboot_notifier); | 
| Cornelia Huck | 2d7bf36 | 2008-04-10 02:12:45 -0700 | [diff] [blame] | 2040 | out_cpu: | 
|  | 2041 | unregister_hotcpu_notifier(&iucv_cpu_notifier); | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 2042 | out_free: | 
|  | 2043 | for_each_possible_cpu(cpu) { | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 2044 | kfree(iucv_param_irq[cpu]); | 
|  | 2045 | iucv_param_irq[cpu] = NULL; | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 2046 | kfree(iucv_param[cpu]); | 
|  | 2047 | iucv_param[cpu] = NULL; | 
|  | 2048 | kfree(iucv_irq_data[cpu]); | 
|  | 2049 | iucv_irq_data[cpu] = NULL; | 
|  | 2050 | } | 
| Mark McLoughlin | 035da16 | 2008-12-15 12:58:29 +0000 | [diff] [blame] | 2051 | root_device_unregister(iucv_root); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2052 | out_int: | 
|  | 2053 | unregister_external_interrupt(0x4000, iucv_external_interrupt); | 
|  | 2054 | out: | 
|  | 2055 | return rc; | 
|  | 2056 | } | 
|  | 2057 |  | 
|  | 2058 | /** | 
|  | 2059 | * iucv_exit | 
|  | 2060 | * | 
|  | 2061 | * Frees everything allocated from iucv_init. | 
|  | 2062 | */ | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 2063 | static void __exit iucv_exit(void) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2064 | { | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 2065 | struct iucv_irq_list *p, *n; | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 2066 | int cpu; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2067 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 2068 | spin_lock_irq(&iucv_queue_lock); | 
|  | 2069 | list_for_each_entry_safe(p, n, &iucv_task_queue, list) | 
|  | 2070 | kfree(p); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2071 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) | 
|  | 2072 | kfree(p); | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 2073 | spin_unlock_irq(&iucv_queue_lock); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 2074 | unregister_reboot_notifier(&iucv_reboot_notifier); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2075 | unregister_hotcpu_notifier(&iucv_cpu_notifier); | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 2076 | for_each_possible_cpu(cpu) { | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 2077 | kfree(iucv_param_irq[cpu]); | 
|  | 2078 | iucv_param_irq[cpu] = NULL; | 
| Christoph Lameter | 70cf503 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 2079 | kfree(iucv_param[cpu]); | 
|  | 2080 | iucv_param[cpu] = NULL; | 
|  | 2081 | kfree(iucv_irq_data[cpu]); | 
|  | 2082 | iucv_irq_data[cpu] = NULL; | 
|  | 2083 | } | 
| Mark McLoughlin | 035da16 | 2008-12-15 12:58:29 +0000 | [diff] [blame] | 2084 | root_device_unregister(iucv_root); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2085 | bus_unregister(&iucv_bus); | 
|  | 2086 | unregister_external_interrupt(0x4000, iucv_external_interrupt); | 
|  | 2087 | } | 
|  | 2088 |  | 
/* Initialize at subsys level so iucv-based drivers (e.g. af_iucv) find
 * the base infrastructure ready when they load. */
subsys_initcall(iucv_init);
module_exit(iucv_exit);

MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");