| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1 | /* | 
|  | 2 | * IUCV base infrastructure. | 
|  | 3 | * | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 4 | * Copyright IBM Corp. 2001, 2009 | 
|  | 5 | * | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 6 | * Author(s): | 
|  | 7 | *    Original source: | 
|  | 8 | *	Alan Altmark (Alan_Altmark@us.ibm.com)	Sept. 2000 | 
|  | 9 | *	Xenia Tkatschow (xenia@us.ibm.com) | 
|  | 10 | *    2Gb awareness and general cleanup: | 
|  | 11 | *	Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | 
|  | 12 | *    Rewritten for af_iucv: | 
|  | 13 | *	Martin Schwidefsky <schwidefsky@de.ibm.com> | 
| Ursula Braun | 672e405 | 2009-06-16 10:30:42 +0200 | [diff] [blame] | 14 | *    PM functions: | 
|  | 15 | *	Ursula Braun (ursula.braun@de.ibm.com) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 16 | * | 
|  | 17 | * Documentation used: | 
|  | 18 | *    The original source | 
|  | 19 | *    CP Programming Service, IBM document # SC24-5760 | 
|  | 20 | * | 
|  | 21 | * This program is free software; you can redistribute it and/or modify | 
|  | 22 | * it under the terms of the GNU General Public License as published by | 
|  | 23 | * the Free Software Foundation; either version 2, or (at your option) | 
|  | 24 | * any later version. | 
|  | 25 | * | 
|  | 26 | * This program is distributed in the hope that it will be useful, | 
|  | 27 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | 28 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | 29 | * GNU General Public License for more details. | 
|  | 30 | * | 
|  | 31 | * You should have received a copy of the GNU General Public License | 
|  | 32 | * along with this program; if not, write to the Free Software | 
|  | 33 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 
|  | 34 | */ | 
|  | 35 |  | 
| Ursula Braun | 8f7c502 | 2008-12-25 13:39:47 +0100 | [diff] [blame] | 36 | #define KMSG_COMPONENT "iucv" | 
|  | 37 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 
|  | 38 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 39 | #include <linux/module.h> | 
|  | 40 | #include <linux/moduleparam.h> | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 41 | #include <linux/spinlock.h> | 
|  | 42 | #include <linux/kernel.h> | 
|  | 43 | #include <linux/slab.h> | 
|  | 44 | #include <linux/init.h> | 
|  | 45 | #include <linux/interrupt.h> | 
|  | 46 | #include <linux/list.h> | 
|  | 47 | #include <linux/errno.h> | 
|  | 48 | #include <linux/err.h> | 
|  | 49 | #include <linux/device.h> | 
|  | 50 | #include <linux/cpu.h> | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 51 | #include <linux/reboot.h> | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 52 | #include <net/iucv/iucv.h> | 
|  | 53 | #include <asm/atomic.h> | 
|  | 54 | #include <asm/ebcdic.h> | 
|  | 55 | #include <asm/io.h> | 
|  | 56 | #include <asm/s390_ext.h> | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 57 | #include <asm/smp.h> | 
|  | 58 |  | 
/*
 * FLAGS:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPSRCCLS - Indicates you have specified a source class.
 * IPTRGCLS - Indicates you have specified a target class.
 * IPFGPID  - Indicates you have specified a pathid.
 * IPFGMID  - Indicates you have specified a message ID.
 * IPNORPY  - Indicates a one-way message. No reply expected.
 * IPALL    - Indicates that all paths are affected.
 *
 * Note: IPSRCCLS and IPTRGCLS deliberately share bit 0x01; which
 * meaning applies depends on the IUCV function the flag byte is
 * passed to.
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80
|  | 76 |  | 
/*
 * Bus match callback for the iucv bus. IUCV devices are bound to their
 * drivers explicitly, never through bus-level matching, so this always
 * reports "no match".
 */
static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
	return 0;
}
|  | 81 |  | 
/* Forward declarations of the power-management callbacks (defined later
 * in this file). */
static int iucv_pm_prepare(struct device *);
static void iucv_pm_complete(struct device *);
static int iucv_pm_freeze(struct device *);
static int iucv_pm_thaw(struct device *);
static int iucv_pm_restore(struct device *);

/* Suspend/resume operations wired into the iucv bus type below. */
static struct dev_pm_ops iucv_pm_ops = {
	.prepare = iucv_pm_prepare,
	.complete = iucv_pm_complete,
	.freeze = iucv_pm_freeze,
	.thaw = iucv_pm_thaw,
	.restore = iucv_pm_restore,
};
|  | 95 |  | 
/* The iucv bus type; exported so device drivers (e.g. af_iucv, netiucv)
 * can register devices on it. */
struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
	.pm = &iucv_pm_ops,
};
EXPORT_SYMBOL(iucv_bus);

/* Root device acting as the parent of all iucv devices. */
struct device *iucv_root;
EXPORT_SYMBOL(iucv_root);
|  | 105 |  | 
/* Non-zero once the machine has been probed to support IUCV. */
static int iucv_available;

/* General IUCV interrupt structure; layout is defined by the z/VM
 * external-interrupt buffer format — do not reorder fields. */
struct iucv_irq_data {
	u16 ippathid;	/* path the interrupt belongs to */
	u8  ipflags1;	/* interrupt flags */
	u8  iptype;	/* interrupt type (0x01..0x09) */
	u32 res2[8];	/* remainder of the interrupt buffer */
};

/* List wrapper used to queue copied interrupt buffers for deferred
 * delivery via tasklet or work queue. */
struct iucv_irq_list {
	struct list_head list;
	struct iucv_irq_data data;
};

/* Per-cpu external interrupt buffers declared to CP. */
static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
/* Cpus that currently have an interrupt buffer declared. */
static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
/* Cpus that currently have iucv interrupts enabled. */
static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
|  | 124 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 125 | /* | 
|  | 126 | * Queue of interrupt buffers lock for delivery via the tasklet | 
|  | 127 | * (fast but can't call smp_call_function). | 
|  | 128 | */ | 
|  | 129 | static LIST_HEAD(iucv_task_queue); | 
|  | 130 |  | 
|  | 131 | /* | 
|  | 132 | * The tasklet for fast delivery of iucv interrupts. | 
|  | 133 | */ | 
|  | 134 | static void iucv_tasklet_fn(unsigned long); | 
|  | 135 | static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0); | 
|  | 136 |  | 
|  | 137 | /* | 
|  | 138 | * Queue of interrupt buffers for delivery via a work queue | 
|  | 139 | * (slower but can call smp_call_function). | 
|  | 140 | */ | 
|  | 141 | static LIST_HEAD(iucv_work_queue); | 
|  | 142 |  | 
|  | 143 | /* | 
|  | 144 | * The work element to deliver path pending interrupts. | 
|  | 145 | */ | 
|  | 146 | static void iucv_work_fn(struct work_struct *work); | 
|  | 147 | static DECLARE_WORK(iucv_work, iucv_work_fn); | 
|  | 148 |  | 
|  | 149 | /* | 
|  | 150 | * Spinlock protecting task and work queue. | 
|  | 151 | */ | 
|  | 152 | static DEFINE_SPINLOCK(iucv_queue_lock); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 153 |  | 
/* IUCV function codes, passed to CP in general register 0 of the
 * b2f0 call (see CP Programming Services). */
enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
	IUCV_SETCONTROLMASK = 17,
};
|  | 171 |  | 
/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC.
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: an array of iucv_path structures.
 */
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;

/*
 * iucv_table_lock: spinlock protecting iucv_handler_list and
 * iucv_path_table.
 */
static DEFINE_SPINLOCK(iucv_table_lock);

/*
 * iucv_active_cpu: contains the number of the cpu executing the tasklet
 * or the work handler. Needed for iucv_path_sever called from tasklet.
 */
static int iucv_active_cpu = -1;

/*
 * Mutex and wait queue for iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers.
 */
static int iucv_nonsmp_handler;
|  | 211 |  | 
/*
 * IUCV control data structure (IPARML). Used by iucv_path_accept,
 * iucv_path_connect, iucv_path_quiesce and iucv_path_sever.
 * All of the parameter-list structs below mirror hardware-defined
 * layouts; field order, sizes, packing and 8-byte alignment are
 * mandatory.
 */
struct iucv_cmd_control {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u16 ipmsglim;
	u16 res1;
	u8  ipvmid[8];
	u8  ipuser[16];
	u8  iptarget[8];
} __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u8  iprmmsg[8];		/* message data carried in the parmlist itself */
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u32 ipbfadr1;		/* address of the (first) data buffer */
	u32 ipbfln1f;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u8  ipaudit[3];
	u8  res1[5];
	u32 res2;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_enable_cpu.
 */
struct iucv_cmd_set_mask {
	u8  ipmask;
	u8  res1[2];
	u8  iprcode;
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

/* One view per IUCV command family over the same parameter block. */
union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for per-cpu IUCV command parameter block.
 * iucv_param is used in task context, iucv_param_irq in interrupt
 * context, so the two never clobber each other.
 */
static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 304 |  | 
/**
 * iucv_call_b2f0
 * @command: identifier of IUCV call to CP (enum iucv_command_codes).
 * @parm: pointer to a union iucv_param block (must be addressable by CP).
 *
 * Calls CP to execute IUCV commands: the command code goes in general
 * register 0, the parameter-list address in register 1, then the
 * privileged b2f0 (IUCV) instruction is issued.
 *
 * Returns the condition code of the instruction, except for condition
 * code 1, where the more specific return code CP stored in the
 * parameter list (iprcode) is returned instead.
 */
static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = command;
	reg1 = virt_to_phys(parm);
	asm volatile(
		"	.long 0xb2f01000\n"	/* IUCV instruction opcode */
		"	ipm	%0\n"		/* insert condition code */
		"	srl	%0,28\n"	/* shift cc into low bits */
		: "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
		:  "m" (*parm) : "cc");
	return (ccode == 1) ? parm->ctrl.iprcode : ccode;
}
|  | 330 |  | 
|  | 331 | /** | 
|  | 332 | * iucv_query_maxconn | 
|  | 333 | * | 
|  | 334 | * Determines the maximum number of connections that may be established. | 
|  | 335 | * | 
|  | 336 | * Returns the maximum number of connections or -EPERM is IUCV is not | 
|  | 337 | * available. | 
|  | 338 | */ | 
|  | 339 | static int iucv_query_maxconn(void) | 
|  | 340 | { | 
|  | 341 | register unsigned long reg0 asm ("0"); | 
|  | 342 | register unsigned long reg1 asm ("1"); | 
|  | 343 | void *param; | 
|  | 344 | int ccode; | 
|  | 345 |  | 
|  | 346 | param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA); | 
|  | 347 | if (!param) | 
|  | 348 | return -ENOMEM; | 
|  | 349 | reg0 = IUCV_QUERY; | 
|  | 350 | reg1 = (unsigned long) param; | 
|  | 351 | asm volatile ( | 
|  | 352 | "	.long	0xb2f01000\n" | 
|  | 353 | "	ipm	%0\n" | 
|  | 354 | "	srl	%0,28\n" | 
|  | 355 | : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); | 
|  | 356 | if (ccode == 0) | 
|  | 357 | iucv_max_pathid = reg0; | 
|  | 358 | kfree(param); | 
|  | 359 | return ccode ? -EPERM : 0; | 
|  | 360 | } | 
|  | 361 |  | 
/**
 * iucv_allow_cpu
 * @data: unused
 *
 * Allow iucv interrupts on this cpu. Must run on the cpu it targets
 * (invoked via smp_call_function_single / on_each_cpu).
 */
static void iucv_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/*
	 * Enable all iucv interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow nonpriority message pending interrupts
	 *	0x40 - Flag to allow priority message pending interrupts
	 *	0x20 - Flag to allow nonpriority message completion interrupts
	 *	0x10 - Flag to allow priority message completion interrupts
	 *	0x08 - Flag to allow IUCV control interrupts
	 */
	parm = iucv_param_irq[cpu];	/* irq-safe per-cpu parameter block */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;	/* all five flags above */
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/*
	 * Enable all iucv control interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow pending connections interrupts
	 *	0x40 - Flag to allow connection complete interrupts
	 *	0x20 - Flag to allow connection severed interrupts
	 *	0x10 - Flag to allow connection quiesced interrupts
	 *	0x08 - Flag to allow connection resumed interrupts
	 */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;	/* all five control flags above */
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
	/* Set indication that iucv interrupts are allowed for this cpu. */
	cpu_set(cpu, iucv_irq_cpumask);
}
|  | 402 |  | 
|  | 403 | /** | 
|  | 404 | * iucv_block_cpu | 
|  | 405 | * @data: unused | 
|  | 406 | * | 
|  | 407 | * Block iucv interrupts on this cpu. | 
|  | 408 | */ | 
|  | 409 | static void iucv_block_cpu(void *data) | 
|  | 410 | { | 
|  | 411 | int cpu = smp_processor_id(); | 
|  | 412 | union iucv_param *parm; | 
|  | 413 |  | 
|  | 414 | /* Disable all iucv interrupts. */ | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 415 | parm = iucv_param_irq[cpu]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 416 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 417 | iucv_call_b2f0(IUCV_SETMASK, parm); | 
|  | 418 |  | 
|  | 419 | /* Clear indication that iucv interrupts are allowed for this cpu. */ | 
|  | 420 | cpu_clear(cpu, iucv_irq_cpumask); | 
|  | 421 | } | 
|  | 422 |  | 
/**
 * iucv_block_cpu_almost
 * @data: unused
 *
 * Allow connection-severed interrupts only on this cpu; all other
 * message and control interrupts are masked off. Used by the PM
 * freeze path so severed paths can still be noticed.
 */
static void iucv_block_cpu_almost(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Allow iucv control interrupts only (mask bit 0x08). */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x08;
	iucv_call_b2f0(IUCV_SETMASK, parm);
	/* Of the control interrupts, allow connection-severed only (0x20). */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x20;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpu_clear(cpu, iucv_irq_cpumask);
}
|  | 447 |  | 
|  | 448 | /** | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 449 | * iucv_declare_cpu | 
|  | 450 | * @data: unused | 
|  | 451 | * | 
| Robert P. J. Day | 3a4fa0a | 2007-10-19 23:10:43 +0200 | [diff] [blame] | 452 | * Declare a interrupt buffer on this cpu. | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 453 | */ | 
|  | 454 | static void iucv_declare_cpu(void *data) | 
|  | 455 | { | 
|  | 456 | int cpu = smp_processor_id(); | 
|  | 457 | union iucv_param *parm; | 
|  | 458 | int rc; | 
|  | 459 |  | 
|  | 460 | if (cpu_isset(cpu, iucv_buffer_cpumask)) | 
|  | 461 | return; | 
|  | 462 |  | 
|  | 463 | /* Declare interrupt buffer. */ | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 464 | parm = iucv_param_irq[cpu]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 465 | memset(parm, 0, sizeof(union iucv_param)); | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 466 | parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 467 | rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); | 
|  | 468 | if (rc) { | 
|  | 469 | char *err = "Unknown"; | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 470 | switch (rc) { | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 471 | case 0x03: | 
|  | 472 | err = "Directory error"; | 
|  | 473 | break; | 
|  | 474 | case 0x0a: | 
|  | 475 | err = "Invalid length"; | 
|  | 476 | break; | 
|  | 477 | case 0x13: | 
|  | 478 | err = "Buffer already exists"; | 
|  | 479 | break; | 
|  | 480 | case 0x3e: | 
|  | 481 | err = "Buffer overlap"; | 
|  | 482 | break; | 
|  | 483 | case 0x5c: | 
|  | 484 | err = "Paging or storage error"; | 
|  | 485 | break; | 
|  | 486 | } | 
| Ursula Braun | 8f7c502 | 2008-12-25 13:39:47 +0100 | [diff] [blame] | 487 | pr_warning("Defining an interrupt buffer on CPU %i" | 
|  | 488 | " failed with 0x%02x (%s)\n", cpu, rc, err); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 489 | return; | 
|  | 490 | } | 
|  | 491 |  | 
|  | 492 | /* Set indication that an iucv buffer exists for this cpu. */ | 
|  | 493 | cpu_set(cpu, iucv_buffer_cpumask); | 
|  | 494 |  | 
|  | 495 | if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask)) | 
|  | 496 | /* Enable iucv interrupts on this cpu. */ | 
|  | 497 | iucv_allow_cpu(NULL); | 
|  | 498 | else | 
|  | 499 | /* Disable iucv interrupts on this cpu. */ | 
|  | 500 | iucv_block_cpu(NULL); | 
|  | 501 | } | 
|  | 502 |  | 
|  | 503 | /** | 
|  | 504 | * iucv_retrieve_cpu | 
|  | 505 | * @data: unused | 
|  | 506 | * | 
|  | 507 | * Retrieve interrupt buffer on this cpu. | 
|  | 508 | */ | 
|  | 509 | static void iucv_retrieve_cpu(void *data) | 
|  | 510 | { | 
|  | 511 | int cpu = smp_processor_id(); | 
|  | 512 | union iucv_param *parm; | 
|  | 513 |  | 
|  | 514 | if (!cpu_isset(cpu, iucv_buffer_cpumask)) | 
|  | 515 | return; | 
|  | 516 |  | 
|  | 517 | /* Block iucv interrupts. */ | 
|  | 518 | iucv_block_cpu(NULL); | 
|  | 519 |  | 
|  | 520 | /* Retrieve interrupt buffer. */ | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 521 | parm = iucv_param_irq[cpu]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 522 | iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); | 
|  | 523 |  | 
|  | 524 | /* Clear indication that an iucv buffer exists for this cpu. */ | 
|  | 525 | cpu_clear(cpu, iucv_buffer_cpumask); | 
|  | 526 | } | 
|  | 527 |  | 
/**
 * iucv_setmask_mp
 *
 * Allow iucv interrupts on all cpus that have a declared buffer.
 * (Header previously said "iucv_setmask_smp" — corrected to match
 * the function name.)
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	get_online_cpus();	/* keep the online mask stable */
	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
		    !cpu_isset(cpu, iucv_irq_cpumask))
			smp_call_function_single(cpu, iucv_allow_cpu,
						 NULL, 1);
	put_online_cpus();
}
|  | 546 |  | 
/**
 * iucv_setmask_up
 *
 * Allow iucv interrupts on a single cpu only; used when a registered
 * handler is not smp capable.
 */
static void iucv_setmask_up(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all cpus but the first in iucv_irq_cpumask. */
	cpumask = iucv_irq_cpumask;
	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
	for_each_cpu_mask_nr(cpu, cpumask)
		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}
|  | 563 |  | 
/**
 * iucv_enable
 *
 * This function makes iucv ready for use. It allocates the pathid
 * table, declares an iucv interrupt buffer and enables the iucv
 * interrupts. Called when the first user has registered an iucv
 * handler.
 *
 * Returns 0 on success, -ENOMEM if the path table cannot be
 * allocated, -EIO if no cpu could declare an interrupt buffer.
 */
static int iucv_enable(void)
{
	size_t alloc_size;
	int cpu, rc;

	/* Hold off cpu hotplug while buffers are being declared. */
	get_online_cpus();
	rc = -ENOMEM;
	/* NOTE(review): table stores iucv_path pointers but is sized by
	 * sizeof(struct iucv_path) — over-allocates; confirm intentional. */
	alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
	if (!iucv_path_table)
		goto out;
	/* Declare per cpu buffers. */
	rc = -EIO;
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
	if (cpus_empty(iucv_buffer_cpumask))
		/* No cpu could declare an iucv buffer. */
		goto out;
	put_online_cpus();
	return 0;
out:
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
	return rc;
}
|  | 598 |  | 
/**
 * iucv_disable
 *
 * This function shuts down iucv. It disables iucv interrupts, retrieves
 * the iucv interrupt buffer and frees the pathid table. Called after the
 * last user unregistered its iucv handler.
 */
static void iucv_disable(void)
{
	/* Hotplug lock keeps the set of online cpus stable while
	 * buffers are retrieved on every cpu. */
	get_online_cpus();
	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
	kfree(iucv_path_table);
	iucv_path_table = NULL;	/* signals "iucv inactive" to the notifier */
	put_online_cpus();
}
|  | 614 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 615 | static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | 
|  | 616 | unsigned long action, void *hcpu) | 
|  | 617 | { | 
|  | 618 | cpumask_t cpumask; | 
|  | 619 | long cpu = (long) hcpu; | 
|  | 620 |  | 
|  | 621 | switch (action) { | 
|  | 622 | case CPU_UP_PREPARE: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 623 | case CPU_UP_PREPARE_FROZEN: | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 624 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), | 
|  | 625 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
|  | 626 | if (!iucv_irq_data[cpu]) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 627 | return NOTIFY_BAD; | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 628 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), | 
|  | 629 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
| Akinobu Mita | d0236f8 | 2008-07-15 02:09:53 -0700 | [diff] [blame] | 630 | if (!iucv_param[cpu]) { | 
|  | 631 | kfree(iucv_irq_data[cpu]); | 
|  | 632 | iucv_irq_data[cpu] = NULL; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 633 | return NOTIFY_BAD; | 
| Akinobu Mita | d0236f8 | 2008-07-15 02:09:53 -0700 | [diff] [blame] | 634 | } | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 635 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), | 
|  | 636 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
|  | 637 | if (!iucv_param_irq[cpu]) { | 
|  | 638 | kfree(iucv_param[cpu]); | 
|  | 639 | iucv_param[cpu] = NULL; | 
|  | 640 | kfree(iucv_irq_data[cpu]); | 
|  | 641 | iucv_irq_data[cpu] = NULL; | 
|  | 642 | return NOTIFY_BAD; | 
|  | 643 | } | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 644 | break; | 
|  | 645 | case CPU_UP_CANCELED: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 646 | case CPU_UP_CANCELED_FROZEN: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 647 | case CPU_DEAD: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 648 | case CPU_DEAD_FROZEN: | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 649 | kfree(iucv_param_irq[cpu]); | 
|  | 650 | iucv_param_irq[cpu] = NULL; | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 651 | kfree(iucv_param[cpu]); | 
|  | 652 | iucv_param[cpu] = NULL; | 
|  | 653 | kfree(iucv_irq_data[cpu]); | 
|  | 654 | iucv_irq_data[cpu] = NULL; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 655 | break; | 
|  | 656 | case CPU_ONLINE: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 657 | case CPU_ONLINE_FROZEN: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 658 | case CPU_DOWN_FAILED: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 659 | case CPU_DOWN_FAILED_FROZEN: | 
| Heiko Carstens | f1d3e4d | 2009-01-05 18:09:02 -0800 | [diff] [blame] | 660 | if (!iucv_path_table) | 
|  | 661 | break; | 
| Jens Axboe | 8691e5a | 2008-06-06 11:18:06 +0200 | [diff] [blame] | 662 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 663 | break; | 
|  | 664 | case CPU_DOWN_PREPARE: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 665 | case CPU_DOWN_PREPARE_FROZEN: | 
| Heiko Carstens | f1d3e4d | 2009-01-05 18:09:02 -0800 | [diff] [blame] | 666 | if (!iucv_path_table) | 
|  | 667 | break; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 668 | cpumask = iucv_buffer_cpumask; | 
|  | 669 | cpu_clear(cpu, cpumask); | 
|  | 670 | if (cpus_empty(cpumask)) | 
|  | 671 | /* Can't offline last IUCV enabled cpu. */ | 
|  | 672 | return NOTIFY_BAD; | 
| Jens Axboe | 8691e5a | 2008-06-06 11:18:06 +0200 | [diff] [blame] | 673 | smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 674 | if (cpus_empty(iucv_irq_cpumask)) | 
| Heiko Carstens | 3bb447f | 2007-07-27 12:29:08 +0200 | [diff] [blame] | 675 | smp_call_function_single(first_cpu(iucv_buffer_cpumask), | 
| Jens Axboe | 8691e5a | 2008-06-06 11:18:06 +0200 | [diff] [blame] | 676 | iucv_allow_cpu, NULL, 1); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 677 | break; | 
|  | 678 | } | 
|  | 679 | return NOTIFY_OK; | 
|  | 680 | } | 
|  | 681 |  | 
| Heiko Carstens | f1494ed | 2008-06-09 15:49:57 -0700 | [diff] [blame] | 682 | static struct notifier_block __refdata iucv_cpu_notifier = { | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 683 | .notifier_call = iucv_cpu_notify, | 
|  | 684 | }; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 685 |  | 
|  | 686 | /** | 
|  | 687 | * iucv_sever_pathid | 
|  | 688 | * @pathid: path identification number. | 
|  | 689 | * @userdata: 16-bytes of user data. | 
|  | 690 | * | 
|  | 691 | * Sever an iucv path to free up the pathid. Used internally. | 
|  | 692 | */ | 
|  | 693 | static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) | 
|  | 694 | { | 
|  | 695 | union iucv_param *parm; | 
|  | 696 |  | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 697 | parm = iucv_param_irq[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 698 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 699 | if (userdata) | 
|  | 700 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 
|  | 701 | parm->ctrl.ippathid = pathid; | 
|  | 702 | return iucv_call_b2f0(IUCV_SEVER, parm); | 
|  | 703 | } | 
|  | 704 |  | 
|  | 705 | /** | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 706 | * __iucv_cleanup_queue | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 707 | * @dummy: unused dummy argument | 
|  | 708 | * | 
|  | 709 | * Nop function called via smp_call_function to force work items from | 
|  | 710 | * pending external iucv interrupts to the work queue. | 
|  | 711 | */ | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 712 | static void __iucv_cleanup_queue(void *dummy) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 713 | { | 
|  | 714 | } | 
|  | 715 |  | 
|  | 716 | /** | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 717 | * iucv_cleanup_queue | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 718 | * | 
|  | 719 | * Function called after a path has been severed to find all remaining | 
|  | 720 | * work items for the now stale pathid. The caller needs to hold the | 
|  | 721 | * iucv_table_lock. | 
|  | 722 | */ | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 723 | static void iucv_cleanup_queue(void) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 724 | { | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 725 | struct iucv_irq_list *p, *n; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 726 |  | 
|  | 727 | /* | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 728 | * When a path is severed, the pathid can be reused immediatly | 
|  | 729 | * on a iucv connect or a connection pending interrupt. Remove | 
|  | 730 | * all entries from the task queue that refer to a stale pathid | 
|  | 731 | * (iucv_path_table[ix] == NULL). Only then do the iucv connect | 
|  | 732 | * or deliver the connection pending interrupt. To get all the | 
|  | 733 | * pending interrupts force them to the work queue by calling | 
|  | 734 | * an empty function on all cpus. | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 735 | */ | 
| Jens Axboe | 8691e5a | 2008-06-06 11:18:06 +0200 | [diff] [blame] | 736 | smp_call_function(__iucv_cleanup_queue, NULL, 1); | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 737 | spin_lock_irq(&iucv_queue_lock); | 
|  | 738 | list_for_each_entry_safe(p, n, &iucv_task_queue, list) { | 
|  | 739 | /* Remove stale work items from the task queue. */ | 
|  | 740 | if (iucv_path_table[p->data.ippathid] == NULL) { | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 741 | list_del(&p->list); | 
|  | 742 | kfree(p); | 
|  | 743 | } | 
|  | 744 | } | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 745 | spin_unlock_irq(&iucv_queue_lock); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 746 | } | 
|  | 747 |  | 
|  | 748 | /** | 
|  | 749 | * iucv_register: | 
|  | 750 | * @handler: address of iucv handler structure | 
|  | 751 | * @smp: != 0 indicates that the handler can deal with out of order messages | 
|  | 752 | * | 
|  | 753 | * Registers a driver with IUCV. | 
|  | 754 | * | 
|  | 755 | * Returns 0 on success, -ENOMEM if the memory allocation for the pathid | 
|  | 756 | * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus. | 
|  | 757 | */ | 
|  | 758 | int iucv_register(struct iucv_handler *handler, int smp) | 
|  | 759 | { | 
|  | 760 | int rc; | 
|  | 761 |  | 
|  | 762 | if (!iucv_available) | 
|  | 763 | return -ENOSYS; | 
|  | 764 | mutex_lock(&iucv_register_mutex); | 
|  | 765 | if (!smp) | 
|  | 766 | iucv_nonsmp_handler++; | 
|  | 767 | if (list_empty(&iucv_handler_list)) { | 
|  | 768 | rc = iucv_enable(); | 
|  | 769 | if (rc) | 
|  | 770 | goto out_mutex; | 
|  | 771 | } else if (!smp && iucv_nonsmp_handler == 1) | 
|  | 772 | iucv_setmask_up(); | 
|  | 773 | INIT_LIST_HEAD(&handler->paths); | 
|  | 774 |  | 
| Ursula Braun | 435bc9d | 2008-02-07 18:06:52 -0800 | [diff] [blame] | 775 | spin_lock_bh(&iucv_table_lock); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 776 | list_add_tail(&handler->list, &iucv_handler_list); | 
| Ursula Braun | 435bc9d | 2008-02-07 18:06:52 -0800 | [diff] [blame] | 777 | spin_unlock_bh(&iucv_table_lock); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 778 | rc = 0; | 
|  | 779 | out_mutex: | 
|  | 780 | mutex_unlock(&iucv_register_mutex); | 
|  | 781 | return rc; | 
|  | 782 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 783 | EXPORT_SYMBOL(iucv_register); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 784 |  | 
|  | 785 | /** | 
|  | 786 | * iucv_unregister | 
|  | 787 | * @handler:  address of iucv handler structure | 
|  | 788 | * @smp: != 0 indicates that the handler can deal with out of order messages | 
|  | 789 | * | 
|  | 790 | * Unregister driver from IUCV. | 
|  | 791 | */ | 
|  | 792 | void iucv_unregister(struct iucv_handler *handler, int smp) | 
|  | 793 | { | 
|  | 794 | struct iucv_path *p, *n; | 
|  | 795 |  | 
|  | 796 | mutex_lock(&iucv_register_mutex); | 
|  | 797 | spin_lock_bh(&iucv_table_lock); | 
|  | 798 | /* Remove handler from the iucv_handler_list. */ | 
|  | 799 | list_del_init(&handler->list); | 
|  | 800 | /* Sever all pathids still refering to the handler. */ | 
|  | 801 | list_for_each_entry_safe(p, n, &handler->paths, list) { | 
|  | 802 | iucv_sever_pathid(p->pathid, NULL); | 
|  | 803 | iucv_path_table[p->pathid] = NULL; | 
|  | 804 | list_del(&p->list); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 805 | iucv_path_free(p); | 
|  | 806 | } | 
|  | 807 | spin_unlock_bh(&iucv_table_lock); | 
|  | 808 | if (!smp) | 
|  | 809 | iucv_nonsmp_handler--; | 
|  | 810 | if (list_empty(&iucv_handler_list)) | 
|  | 811 | iucv_disable(); | 
|  | 812 | else if (!smp && iucv_nonsmp_handler == 0) | 
|  | 813 | iucv_setmask_mp(); | 
|  | 814 | mutex_unlock(&iucv_register_mutex); | 
|  | 815 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 816 | EXPORT_SYMBOL(iucv_unregister); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 817 |  | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 818 | static int iucv_reboot_event(struct notifier_block *this, | 
|  | 819 | unsigned long event, void *ptr) | 
|  | 820 | { | 
|  | 821 | int i, rc; | 
|  | 822 |  | 
|  | 823 | get_online_cpus(); | 
|  | 824 | on_each_cpu(iucv_block_cpu, NULL, 1); | 
|  | 825 | preempt_disable(); | 
|  | 826 | for (i = 0; i < iucv_max_pathid; i++) { | 
|  | 827 | if (iucv_path_table[i]) | 
|  | 828 | rc = iucv_sever_pathid(i, NULL); | 
|  | 829 | } | 
|  | 830 | preempt_enable(); | 
|  | 831 | put_online_cpus(); | 
|  | 832 | iucv_disable(); | 
|  | 833 | return NOTIFY_DONE; | 
|  | 834 | } | 
|  | 835 |  | 
/* Reboot notifier block: tears down all iucv paths (see iucv_reboot_event). */
static struct notifier_block iucv_reboot_notifier = {
	.notifier_call = iucv_reboot_event,
};
|  | 839 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 840 | /** | 
|  | 841 | * iucv_path_accept | 
|  | 842 | * @path: address of iucv path structure | 
|  | 843 | * @handler: address of iucv handler structure | 
|  | 844 | * @userdata: 16 bytes of data reflected to the communication partner | 
|  | 845 | * @private: private data passed to interrupt handlers for this path | 
|  | 846 | * | 
|  | 847 | * This function is issued after the user received a connection pending | 
|  | 848 | * external interrupt and now wishes to complete the IUCV communication path. | 
|  | 849 | * | 
|  | 850 | * Returns the result of the CP IUCV call. | 
|  | 851 | */ | 
|  | 852 | int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, | 
|  | 853 | u8 userdata[16], void *private) | 
|  | 854 | { | 
|  | 855 | union iucv_param *parm; | 
|  | 856 | int rc; | 
|  | 857 |  | 
|  | 858 | local_bh_disable(); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 859 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | 
|  | 860 | rc = -EIO; | 
|  | 861 | goto out; | 
|  | 862 | } | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 863 | /* Prepare parameter block. */ | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 864 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 865 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 866 | parm->ctrl.ippathid = path->pathid; | 
|  | 867 | parm->ctrl.ipmsglim = path->msglim; | 
|  | 868 | if (userdata) | 
|  | 869 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 
|  | 870 | parm->ctrl.ipflags1 = path->flags; | 
|  | 871 |  | 
|  | 872 | rc = iucv_call_b2f0(IUCV_ACCEPT, parm); | 
|  | 873 | if (!rc) { | 
|  | 874 | path->private = private; | 
|  | 875 | path->msglim = parm->ctrl.ipmsglim; | 
|  | 876 | path->flags = parm->ctrl.ipflags1; | 
|  | 877 | } | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 878 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 879 | local_bh_enable(); | 
|  | 880 | return rc; | 
|  | 881 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 882 | EXPORT_SYMBOL(iucv_path_accept); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 883 |  | 
|  | 884 | /** | 
|  | 885 | * iucv_path_connect | 
|  | 886 | * @path: address of iucv path structure | 
|  | 887 | * @handler: address of iucv handler structure | 
|  | 888 | * @userid: 8-byte user identification | 
|  | 889 | * @system: 8-byte target system identification | 
|  | 890 | * @userdata: 16 bytes of data reflected to the communication partner | 
|  | 891 | * @private: private data passed to interrupt handlers for this path | 
|  | 892 | * | 
|  | 893 | * This function establishes an IUCV path. Although the connect may complete | 
|  | 894 | * successfully, you are not able to use the path until you receive an IUCV | 
|  | 895 | * Connection Complete external interrupt. | 
|  | 896 | * | 
|  | 897 | * Returns the result of the CP IUCV call. | 
|  | 898 | */ | 
|  | 899 | int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, | 
|  | 900 | u8 userid[8], u8 system[8], u8 userdata[16], | 
|  | 901 | void *private) | 
|  | 902 | { | 
|  | 903 | union iucv_param *parm; | 
|  | 904 | int rc; | 
|  | 905 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 906 | spin_lock_bh(&iucv_table_lock); | 
|  | 907 | iucv_cleanup_queue(); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 908 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | 
|  | 909 | rc = -EIO; | 
|  | 910 | goto out; | 
|  | 911 | } | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 912 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 913 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 914 | parm->ctrl.ipmsglim = path->msglim; | 
|  | 915 | parm->ctrl.ipflags1 = path->flags; | 
|  | 916 | if (userid) { | 
|  | 917 | memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid)); | 
|  | 918 | ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); | 
|  | 919 | EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); | 
|  | 920 | } | 
|  | 921 | if (system) { | 
|  | 922 | memcpy(parm->ctrl.iptarget, system, | 
|  | 923 | sizeof(parm->ctrl.iptarget)); | 
|  | 924 | ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); | 
|  | 925 | EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); | 
|  | 926 | } | 
|  | 927 | if (userdata) | 
|  | 928 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 
|  | 929 |  | 
|  | 930 | rc = iucv_call_b2f0(IUCV_CONNECT, parm); | 
|  | 931 | if (!rc) { | 
|  | 932 | if (parm->ctrl.ippathid < iucv_max_pathid) { | 
|  | 933 | path->pathid = parm->ctrl.ippathid; | 
|  | 934 | path->msglim = parm->ctrl.ipmsglim; | 
|  | 935 | path->flags = parm->ctrl.ipflags1; | 
|  | 936 | path->handler = handler; | 
|  | 937 | path->private = private; | 
|  | 938 | list_add_tail(&path->list, &handler->paths); | 
|  | 939 | iucv_path_table[path->pathid] = path; | 
|  | 940 | } else { | 
|  | 941 | iucv_sever_pathid(parm->ctrl.ippathid, | 
|  | 942 | iucv_error_pathid); | 
|  | 943 | rc = -EIO; | 
|  | 944 | } | 
|  | 945 | } | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 946 | out: | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 947 | spin_unlock_bh(&iucv_table_lock); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 948 | return rc; | 
|  | 949 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 950 | EXPORT_SYMBOL(iucv_path_connect); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 951 |  | 
|  | 952 | /** | 
|  | 953 | * iucv_path_quiesce: | 
|  | 954 | * @path: address of iucv path structure | 
|  | 955 | * @userdata: 16 bytes of data reflected to the communication partner | 
|  | 956 | * | 
|  | 957 | * This function temporarily suspends incoming messages on an IUCV path. | 
|  | 958 | * You can later reactivate the path by invoking the iucv_resume function. | 
|  | 959 | * | 
|  | 960 | * Returns the result from the CP IUCV call. | 
|  | 961 | */ | 
|  | 962 | int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]) | 
|  | 963 | { | 
|  | 964 | union iucv_param *parm; | 
|  | 965 | int rc; | 
|  | 966 |  | 
|  | 967 | local_bh_disable(); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 968 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | 
|  | 969 | rc = -EIO; | 
|  | 970 | goto out; | 
|  | 971 | } | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 972 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 973 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 974 | if (userdata) | 
|  | 975 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 
|  | 976 | parm->ctrl.ippathid = path->pathid; | 
|  | 977 | rc = iucv_call_b2f0(IUCV_QUIESCE, parm); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 978 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 979 | local_bh_enable(); | 
|  | 980 | return rc; | 
|  | 981 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 982 | EXPORT_SYMBOL(iucv_path_quiesce); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 983 |  | 
|  | 984 | /** | 
|  | 985 | * iucv_path_resume: | 
|  | 986 | * @path: address of iucv path structure | 
|  | 987 | * @userdata: 16 bytes of data reflected to the communication partner | 
|  | 988 | * | 
|  | 989 | * This function resumes incoming messages on an IUCV path that has | 
|  | 990 | * been stopped with iucv_path_quiesce. | 
|  | 991 | * | 
|  | 992 | * Returns the result from the CP IUCV call. | 
|  | 993 | */ | 
|  | 994 | int iucv_path_resume(struct iucv_path *path, u8 userdata[16]) | 
|  | 995 | { | 
|  | 996 | union iucv_param *parm; | 
|  | 997 | int rc; | 
|  | 998 |  | 
|  | 999 | local_bh_disable(); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1000 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | 
|  | 1001 | rc = -EIO; | 
|  | 1002 | goto out; | 
|  | 1003 | } | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1004 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1005 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 1006 | if (userdata) | 
|  | 1007 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 
|  | 1008 | parm->ctrl.ippathid = path->pathid; | 
|  | 1009 | rc = iucv_call_b2f0(IUCV_RESUME, parm); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1010 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1011 | local_bh_enable(); | 
|  | 1012 | return rc; | 
|  | 1013 | } | 
|  | 1014 |  | 
|  | 1015 | /** | 
|  | 1016 | * iucv_path_sever | 
|  | 1017 | * @path: address of iucv path structure | 
|  | 1018 | * @userdata: 16 bytes of data reflected to the communication partner | 
|  | 1019 | * | 
|  | 1020 | * This function terminates an IUCV path. | 
|  | 1021 | * | 
|  | 1022 | * Returns the result from the CP IUCV call. | 
|  | 1023 | */ | 
|  | 1024 | int iucv_path_sever(struct iucv_path *path, u8 userdata[16]) | 
|  | 1025 | { | 
|  | 1026 | int rc; | 
|  | 1027 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1028 | preempt_disable(); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1029 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | 
|  | 1030 | rc = -EIO; | 
|  | 1031 | goto out; | 
|  | 1032 | } | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1033 | if (iucv_active_cpu != smp_processor_id()) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1034 | spin_lock_bh(&iucv_table_lock); | 
|  | 1035 | rc = iucv_sever_pathid(path->pathid, userdata); | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 1036 | iucv_path_table[path->pathid] = NULL; | 
|  | 1037 | list_del_init(&path->list); | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1038 | if (iucv_active_cpu != smp_processor_id()) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1039 | spin_unlock_bh(&iucv_table_lock); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1040 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1041 | preempt_enable(); | 
|  | 1042 | return rc; | 
|  | 1043 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1044 | EXPORT_SYMBOL(iucv_path_sever); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1045 |  | 
|  | 1046 | /** | 
|  | 1047 | * iucv_message_purge | 
|  | 1048 | * @path: address of iucv path structure | 
|  | 1049 | * @msg: address of iucv msg structure | 
|  | 1050 | * @srccls: source class of message | 
|  | 1051 | * | 
|  | 1052 | * Cancels a message you have sent. | 
|  | 1053 | * | 
|  | 1054 | * Returns the result from the CP IUCV call. | 
|  | 1055 | */ | 
|  | 1056 | int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, | 
|  | 1057 | u32 srccls) | 
|  | 1058 | { | 
|  | 1059 | union iucv_param *parm; | 
|  | 1060 | int rc; | 
|  | 1061 |  | 
|  | 1062 | local_bh_disable(); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1063 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | 
|  | 1064 | rc = -EIO; | 
|  | 1065 | goto out; | 
|  | 1066 | } | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1067 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1068 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 1069 | parm->purge.ippathid = path->pathid; | 
|  | 1070 | parm->purge.ipmsgid = msg->id; | 
|  | 1071 | parm->purge.ipsrccls = srccls; | 
|  | 1072 | parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID; | 
|  | 1073 | rc = iucv_call_b2f0(IUCV_PURGE, parm); | 
|  | 1074 | if (!rc) { | 
|  | 1075 | msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; | 
|  | 1076 | msg->tag = parm->purge.ipmsgtag; | 
|  | 1077 | } | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1078 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1079 | local_bh_enable(); | 
|  | 1080 | return rc; | 
|  | 1081 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1082 | EXPORT_SYMBOL(iucv_message_purge); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1083 |  | 
|  | 1084 | /** | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1085 | * iucv_message_receive_iprmdata | 
|  | 1086 | * @path: address of iucv path structure | 
|  | 1087 | * @msg: address of iucv msg structure | 
|  | 1088 | * @flags: how the message is received (IUCV_IPBUFLST) | 
|  | 1089 | * @buffer: address of data buffer or address of struct iucv_array | 
|  | 1090 | * @size: length of data buffer | 
|  | 1091 | * @residual: | 
|  | 1092 | * | 
|  | 1093 | * Internal function used by iucv_message_receive and __iucv_message_receive | 
|  | 1094 | * to receive RMDATA data stored in struct iucv_message. | 
|  | 1095 | */ | 
|  | 1096 | static int iucv_message_receive_iprmdata(struct iucv_path *path, | 
|  | 1097 | struct iucv_message *msg, | 
|  | 1098 | u8 flags, void *buffer, | 
|  | 1099 | size_t size, size_t *residual) | 
|  | 1100 | { | 
|  | 1101 | struct iucv_array *array; | 
|  | 1102 | u8 *rmmsg; | 
|  | 1103 | size_t copy; | 
|  | 1104 |  | 
|  | 1105 | /* | 
|  | 1106 | * Message is 8 bytes long and has been stored to the | 
|  | 1107 | * message descriptor itself. | 
|  | 1108 | */ | 
|  | 1109 | if (residual) | 
|  | 1110 | *residual = abs(size - 8); | 
|  | 1111 | rmmsg = msg->rmmsg; | 
|  | 1112 | if (flags & IUCV_IPBUFLST) { | 
|  | 1113 | /* Copy to struct iucv_array. */ | 
|  | 1114 | size = (size < 8) ? size : 8; | 
|  | 1115 | for (array = buffer; size > 0; array++) { | 
|  | 1116 | copy = min_t(size_t, size, array->length); | 
|  | 1117 | memcpy((u8 *)(addr_t) array->address, | 
|  | 1118 | rmmsg, copy); | 
|  | 1119 | rmmsg += copy; | 
|  | 1120 | size -= copy; | 
|  | 1121 | } | 
|  | 1122 | } else { | 
|  | 1123 | /* Copy to direct buffer. */ | 
|  | 1124 | memcpy(buffer, rmmsg, min_t(size_t, size, 8)); | 
|  | 1125 | } | 
|  | 1126 | return 0; | 
|  | 1127 | } | 
|  | 1128 |  | 
|  | 1129 | /** | 
|  | 1130 | * __iucv_message_receive | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1131 | * @path: address of iucv path structure | 
|  | 1132 | * @msg: address of iucv msg structure | 
|  | 1133 | * @flags: how the message is received (IUCV_IPBUFLST) | 
|  | 1134 | * @buffer: address of data buffer or address of struct iucv_array | 
|  | 1135 | * @size: length of data buffer | 
|  | 1136 | * @residual: | 
|  | 1137 | * | 
|  | 1138 | * This function receives messages that are being sent to you over | 
|  | 1139 | * established paths. This function will deal with RMDATA messages | 
|  | 1140 | * embedded in struct iucv_message as well. | 
|  | 1141 | * | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1142 | * Locking:	no locking | 
|  | 1143 | * | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1144 | * Returns the result from the CP IUCV call. | 
|  | 1145 | */ | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1146 | int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | 
|  | 1147 | u8 flags, void *buffer, size_t size, size_t *residual) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1148 | { | 
|  | 1149 | union iucv_param *parm; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1150 | int rc; | 
|  | 1151 |  | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1152 | if (msg->flags & IUCV_IPRMDATA) | 
|  | 1153 | return iucv_message_receive_iprmdata(path, msg, flags, | 
|  | 1154 | buffer, size, residual); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1155 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | 
|  | 1156 | rc = -EIO; | 
|  | 1157 | goto out; | 
|  | 1158 | } | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1159 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1160 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 1161 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | 
|  | 1162 | parm->db.ipbfln1f = (u32) size; | 
|  | 1163 | parm->db.ipmsgid = msg->id; | 
|  | 1164 | parm->db.ippathid = path->pathid; | 
|  | 1165 | parm->db.iptrgcls = msg->class; | 
|  | 1166 | parm->db.ipflags1 = (flags | IUCV_IPFGPID | | 
|  | 1167 | IUCV_IPFGMID | IUCV_IPTRGCLS); | 
|  | 1168 | rc = iucv_call_b2f0(IUCV_RECEIVE, parm); | 
|  | 1169 | if (!rc || rc == 5) { | 
|  | 1170 | msg->flags = parm->db.ipflags1; | 
|  | 1171 | if (residual) | 
|  | 1172 | *residual = parm->db.ipbfln1f; | 
|  | 1173 | } | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1174 | out: | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1175 | return rc; | 
|  | 1176 | } | 
|  | 1177 | EXPORT_SYMBOL(__iucv_message_receive); | 
|  | 1178 |  | 
|  | 1179 | /** | 
|  | 1180 | * iucv_message_receive | 
|  | 1181 | * @path: address of iucv path structure | 
|  | 1182 | * @msg: address of iucv msg structure | 
|  | 1183 | * @flags: how the message is received (IUCV_IPBUFLST) | 
|  | 1184 | * @buffer: address of data buffer or address of struct iucv_array | 
|  | 1185 | * @size: length of data buffer | 
|  | 1186 | * @residual: | 
|  | 1187 | * | 
|  | 1188 | * This function receives messages that are being sent to you over | 
|  | 1189 | * established paths. This function will deal with RMDATA messages | 
|  | 1190 | * embedded in struct iucv_message as well. | 
|  | 1191 | * | 
|  | 1192 | * Locking:	local_bh_enable/local_bh_disable | 
|  | 1193 | * | 
|  | 1194 | * Returns the result from the CP IUCV call. | 
|  | 1195 | */ | 
|  | 1196 | int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | 
|  | 1197 | u8 flags, void *buffer, size_t size, size_t *residual) | 
|  | 1198 | { | 
|  | 1199 | int rc; | 
|  | 1200 |  | 
|  | 1201 | if (msg->flags & IUCV_IPRMDATA) | 
|  | 1202 | return iucv_message_receive_iprmdata(path, msg, flags, | 
|  | 1203 | buffer, size, residual); | 
|  | 1204 | local_bh_disable(); | 
|  | 1205 | rc = __iucv_message_receive(path, msg, flags, buffer, size, residual); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1206 | local_bh_enable(); | 
|  | 1207 | return rc; | 
|  | 1208 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1209 | EXPORT_SYMBOL(iucv_message_receive); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1210 |  | 
|  | 1211 | /** | 
|  | 1212 | * iucv_message_reject | 
|  | 1213 | * @path: address of iucv path structure | 
|  | 1214 | * @msg: address of iucv msg structure | 
|  | 1215 | * | 
|  | 1216 | * The reject function refuses a specified message. Between the time you | 
|  | 1217 | * are notified of a message and the time that you complete the message, | 
|  | 1218 | * the message may be rejected. | 
|  | 1219 | * | 
|  | 1220 | * Returns the result from the CP IUCV call. | 
|  | 1221 | */ | 
|  | 1222 | int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg) | 
|  | 1223 | { | 
|  | 1224 | union iucv_param *parm; | 
|  | 1225 | int rc; | 
|  | 1226 |  | 
|  | 1227 | local_bh_disable(); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1228 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | 
|  | 1229 | rc = -EIO; | 
|  | 1230 | goto out; | 
|  | 1231 | } | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1232 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1233 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 1234 | parm->db.ippathid = path->pathid; | 
|  | 1235 | parm->db.ipmsgid = msg->id; | 
|  | 1236 | parm->db.iptrgcls = msg->class; | 
|  | 1237 | parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID); | 
|  | 1238 | rc = iucv_call_b2f0(IUCV_REJECT, parm); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1239 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1240 | local_bh_enable(); | 
|  | 1241 | return rc; | 
|  | 1242 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1243 | EXPORT_SYMBOL(iucv_message_reject); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1244 |  | 
|  | 1245 | /** | 
|  | 1246 | * iucv_message_reply | 
|  | 1247 | * @path: address of iucv path structure | 
|  | 1248 | * @msg: address of iucv msg structure | 
|  | 1249 | * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | 
|  | 1250 | * @reply: address of reply data buffer or address of struct iucv_array | 
|  | 1251 | * @size: length of reply data buffer | 
|  | 1252 | * | 
|  | 1253 | * This function responds to the two-way messages that you receive. You | 
|  | 1254 | * must identify completely the message to which you wish to reply. ie, | 
|  | 1255 | * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into | 
|  | 1256 | * the parameter list. | 
|  | 1257 | * | 
|  | 1258 | * Returns the result from the CP IUCV call. | 
|  | 1259 | */ | 
|  | 1260 | int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, | 
|  | 1261 | u8 flags, void *reply, size_t size) | 
|  | 1262 | { | 
|  | 1263 | union iucv_param *parm; | 
|  | 1264 | int rc; | 
|  | 1265 |  | 
|  | 1266 | local_bh_disable(); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1267 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | 
|  | 1268 | rc = -EIO; | 
|  | 1269 | goto out; | 
|  | 1270 | } | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1271 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1272 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 1273 | if (flags & IUCV_IPRMDATA) { | 
|  | 1274 | parm->dpl.ippathid = path->pathid; | 
|  | 1275 | parm->dpl.ipflags1 = flags; | 
|  | 1276 | parm->dpl.ipmsgid = msg->id; | 
|  | 1277 | parm->dpl.iptrgcls = msg->class; | 
|  | 1278 | memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8)); | 
|  | 1279 | } else { | 
|  | 1280 | parm->db.ipbfadr1 = (u32)(addr_t) reply; | 
|  | 1281 | parm->db.ipbfln1f = (u32) size; | 
|  | 1282 | parm->db.ippathid = path->pathid; | 
|  | 1283 | parm->db.ipflags1 = flags; | 
|  | 1284 | parm->db.ipmsgid = msg->id; | 
|  | 1285 | parm->db.iptrgcls = msg->class; | 
|  | 1286 | } | 
|  | 1287 | rc = iucv_call_b2f0(IUCV_REPLY, parm); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1288 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1289 | local_bh_enable(); | 
|  | 1290 | return rc; | 
|  | 1291 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1292 | EXPORT_SYMBOL(iucv_message_reply); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1293 |  | 
|  | 1294 | /** | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1295 | * __iucv_message_send | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1296 | * @path: address of iucv path structure | 
|  | 1297 | * @msg: address of iucv msg structure | 
|  | 1298 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | 
|  | 1299 | * @srccls: source class of message | 
|  | 1300 | * @buffer: address of send buffer or address of struct iucv_array | 
|  | 1301 | * @size: length of send buffer | 
|  | 1302 | * | 
|  | 1303 | * This function transmits data to another application. Data to be | 
|  | 1304 | * transmitted is in a buffer and this is a one-way message and the | 
|  | 1305 | * receiver will not reply to the message. | 
|  | 1306 | * | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1307 | * Locking:	no locking | 
|  | 1308 | * | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1309 | * Returns the result from the CP IUCV call. | 
|  | 1310 | */ | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1311 | int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1312 | u8 flags, u32 srccls, void *buffer, size_t size) | 
|  | 1313 | { | 
|  | 1314 | union iucv_param *parm; | 
|  | 1315 | int rc; | 
|  | 1316 |  | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1317 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | 
|  | 1318 | rc = -EIO; | 
|  | 1319 | goto out; | 
|  | 1320 | } | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1321 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1322 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 1323 | if (flags & IUCV_IPRMDATA) { | 
|  | 1324 | /* Message of 8 bytes can be placed into the parameter list. */ | 
|  | 1325 | parm->dpl.ippathid = path->pathid; | 
|  | 1326 | parm->dpl.ipflags1 = flags | IUCV_IPNORPY; | 
|  | 1327 | parm->dpl.iptrgcls = msg->class; | 
|  | 1328 | parm->dpl.ipsrccls = srccls; | 
|  | 1329 | parm->dpl.ipmsgtag = msg->tag; | 
|  | 1330 | memcpy(parm->dpl.iprmmsg, buffer, 8); | 
|  | 1331 | } else { | 
|  | 1332 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | 
|  | 1333 | parm->db.ipbfln1f = (u32) size; | 
|  | 1334 | parm->db.ippathid = path->pathid; | 
|  | 1335 | parm->db.ipflags1 = flags | IUCV_IPNORPY; | 
|  | 1336 | parm->db.iptrgcls = msg->class; | 
|  | 1337 | parm->db.ipsrccls = srccls; | 
|  | 1338 | parm->db.ipmsgtag = msg->tag; | 
|  | 1339 | } | 
|  | 1340 | rc = iucv_call_b2f0(IUCV_SEND, parm); | 
|  | 1341 | if (!rc) | 
|  | 1342 | msg->id = parm->db.ipmsgid; | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1343 | out: | 
| Hendrik Brueckner | 91d5d45 | 2008-12-25 13:38:58 +0100 | [diff] [blame] | 1344 | return rc; | 
|  | 1345 | } | 
|  | 1346 | EXPORT_SYMBOL(__iucv_message_send); | 
|  | 1347 |  | 
|  | 1348 | /** | 
|  | 1349 | * iucv_message_send | 
|  | 1350 | * @path: address of iucv path structure | 
|  | 1351 | * @msg: address of iucv msg structure | 
|  | 1352 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | 
|  | 1353 | * @srccls: source class of message | 
|  | 1354 | * @buffer: address of send buffer or address of struct iucv_array | 
|  | 1355 | * @size: length of send buffer | 
|  | 1356 | * | 
|  | 1357 | * This function transmits data to another application. Data to be | 
|  | 1358 | * transmitted is in a buffer and this is a one-way message and the | 
|  | 1359 | * receiver will not reply to the message. | 
|  | 1360 | * | 
|  | 1361 | * Locking:	local_bh_enable/local_bh_disable | 
|  | 1362 | * | 
|  | 1363 | * Returns the result from the CP IUCV call. | 
|  | 1364 | */ | 
|  | 1365 | int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | 
|  | 1366 | u8 flags, u32 srccls, void *buffer, size_t size) | 
|  | 1367 | { | 
|  | 1368 | int rc; | 
|  | 1369 |  | 
|  | 1370 | local_bh_disable(); | 
|  | 1371 | rc = __iucv_message_send(path, msg, flags, srccls, buffer, size); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1372 | local_bh_enable(); | 
|  | 1373 | return rc; | 
|  | 1374 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1375 | EXPORT_SYMBOL(iucv_message_send); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1376 |  | 
|  | 1377 | /** | 
|  | 1378 | * iucv_message_send2way | 
|  | 1379 | * @path: address of iucv path structure | 
|  | 1380 | * @msg: address of iucv msg structure | 
|  | 1381 | * @flags: how the message is sent and the reply is received | 
|  | 1382 | *	   (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST) | 
|  | 1383 | * @srccls: source class of message | 
|  | 1384 | * @buffer: address of send buffer or address of struct iucv_array | 
|  | 1385 | * @size: length of send buffer | 
|  | 1386 | * @ansbuf: address of answer buffer or address of struct iucv_array | 
 * @asize: size of reply buffer
 * @residual: not used (the function accepts but never reads this argument)
|  | 1388 | * | 
|  | 1389 | * This function transmits data to another application. Data to be | 
|  | 1390 | * transmitted is in a buffer. The receiver of the send is expected to | 
|  | 1391 | * reply to the message and a buffer is provided into which IUCV moves | 
|  | 1392 | * the reply to this message. | 
|  | 1393 | * | 
|  | 1394 | * Returns the result from the CP IUCV call. | 
|  | 1395 | */ | 
|  | 1396 | int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, | 
|  | 1397 | u8 flags, u32 srccls, void *buffer, size_t size, | 
|  | 1398 | void *answer, size_t asize, size_t *residual) | 
|  | 1399 | { | 
|  | 1400 | union iucv_param *parm; | 
|  | 1401 | int rc; | 
|  | 1402 |  | 
|  | 1403 | local_bh_disable(); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1404 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | 
|  | 1405 | rc = -EIO; | 
|  | 1406 | goto out; | 
|  | 1407 | } | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1408 | parm = iucv_param[smp_processor_id()]; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1409 | memset(parm, 0, sizeof(union iucv_param)); | 
|  | 1410 | if (flags & IUCV_IPRMDATA) { | 
|  | 1411 | parm->dpl.ippathid = path->pathid; | 
|  | 1412 | parm->dpl.ipflags1 = path->flags;	/* priority message */ | 
|  | 1413 | parm->dpl.iptrgcls = msg->class; | 
|  | 1414 | parm->dpl.ipsrccls = srccls; | 
|  | 1415 | parm->dpl.ipmsgtag = msg->tag; | 
|  | 1416 | parm->dpl.ipbfadr2 = (u32)(addr_t) answer; | 
|  | 1417 | parm->dpl.ipbfln2f = (u32) asize; | 
|  | 1418 | memcpy(parm->dpl.iprmmsg, buffer, 8); | 
|  | 1419 | } else { | 
|  | 1420 | parm->db.ippathid = path->pathid; | 
|  | 1421 | parm->db.ipflags1 = path->flags;	/* priority message */ | 
|  | 1422 | parm->db.iptrgcls = msg->class; | 
|  | 1423 | parm->db.ipsrccls = srccls; | 
|  | 1424 | parm->db.ipmsgtag = msg->tag; | 
|  | 1425 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | 
|  | 1426 | parm->db.ipbfln1f = (u32) size; | 
|  | 1427 | parm->db.ipbfadr2 = (u32)(addr_t) answer; | 
|  | 1428 | parm->db.ipbfln2f = (u32) asize; | 
|  | 1429 | } | 
|  | 1430 | rc = iucv_call_b2f0(IUCV_SEND, parm); | 
|  | 1431 | if (!rc) | 
|  | 1432 | msg->id = parm->db.ipmsgid; | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 1433 | out: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1434 | local_bh_enable(); | 
|  | 1435 | return rc; | 
|  | 1436 | } | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1437 | EXPORT_SYMBOL(iucv_message_send2way); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1438 |  | 
|  | 1439 | /** | 
|  | 1440 | * iucv_path_pending | 
|  | 1441 | * @data: Pointer to external interrupt buffer | 
|  | 1442 | * | 
|  | 1443 | * Process connection pending work item. Called from tasklet while holding | 
|  | 1444 | * iucv_table_lock. | 
|  | 1445 | */ | 
/* Layout of the "connection pending" (type 0x01) external interrupt data. */
struct iucv_path_pending {
	u16 ippathid;	/* path id assigned by CP */
	u8  ipflags1;	/* path flags */
	u8  iptype;	/* interrupt type (0x01) */
	u16 ipmsglim;	/* message limit for the new path */
	u16 res1;	/* reserved */
	u8  ipvmid[8];	/* peer id, EBCDIC (converted via EBCASC before use) */
	u8  ipuser[16];	/* user data, passed to the path_pending handler */
	u32 res3;	/* reserved */
	u8  ippollfg;
	u8  res4[3];	/* reserved */
} __attribute__ ((packed));
|  | 1458 |  | 
static void iucv_path_pending(struct iucv_irq_data *data)
{
	struct iucv_path_pending *ipp = (void *) data;
	struct iucv_handler *handler;
	struct iucv_path *path;
	char *error;

	/* CP must not announce a pathid that is already in the table. */
	BUG_ON(iucv_path_table[ipp->ippathid]);
	/* New pathid, handler found. Create a new path struct. */
	error = iucv_error_no_memory;
	path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
	if (!path)
		goto out_sever;
	path->pathid = ipp->ippathid;
	iucv_path_table[path->pathid] = path;
	/* Convert the peer id to ASCII for the handler callbacks. */
	EBCASC(ipp->ipvmid, 8);

	/* Call registered handler until one is found that wants the path. */
	list_for_each_entry(handler, &iucv_handler_list, list) {
		if (!handler->path_pending)
			continue;
		/*
		 * Add path to handler to allow a call to iucv_path_sever
		 * inside the path_pending function. If the handler returns
		 * an error remove the path from the handler again.
		 */
		list_add(&path->list, &handler->paths);
		path->handler = handler;
		if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
			return;
		list_del(&path->list);
		path->handler = NULL;
	}
	/* No handler wanted the path. */
	iucv_path_table[path->pathid] = NULL;
	iucv_path_free(path);
	error = iucv_error_no_listener;
out_sever:
	/* Refuse the connection; the error string tells the peer why. */
	iucv_sever_pathid(ipp->ippathid, error);
}
|  | 1499 |  | 
|  | 1500 | /** | 
|  | 1501 | * iucv_path_complete | 
|  | 1502 | * @data: Pointer to external interrupt buffer | 
|  | 1503 | * | 
|  | 1504 | * Process connection complete work item. Called from tasklet while holding | 
|  | 1505 | * iucv_table_lock. | 
|  | 1506 | */ | 
/* Layout of the "connection complete" (type 0x02) external interrupt data. */
struct iucv_path_complete {
	u16 ippathid;	/* path id */
	u8  ipflags1;	/* flags CP negotiated for this path */
	u8  iptype;	/* interrupt type (0x02) */
	u16 ipmsglim;	/* message limit */
	u16 res1;	/* reserved */
	u8  res2[8];	/* reserved */
	u8  ipuser[16];	/* user data, passed to the path_complete handler */
	u32 res3;	/* reserved */
	u8  ippollfg;
	u8  res4[3];	/* reserved */
} __attribute__ ((packed));
|  | 1519 |  | 
|  | 1520 | static void iucv_path_complete(struct iucv_irq_data *data) | 
|  | 1521 | { | 
|  | 1522 | struct iucv_path_complete *ipc = (void *) data; | 
|  | 1523 | struct iucv_path *path = iucv_path_table[ipc->ippathid]; | 
|  | 1524 |  | 
| Hendrik Brueckner | b8942e3 | 2009-04-21 23:26:23 +0000 | [diff] [blame] | 1525 | if (path) | 
|  | 1526 | path->flags = ipc->ipflags1; | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1527 | if (path && path->handler && path->handler->path_complete) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1528 | path->handler->path_complete(path, ipc->ipuser); | 
|  | 1529 | } | 
|  | 1530 |  | 
|  | 1531 | /** | 
|  | 1532 | * iucv_path_severed | 
|  | 1533 | * @data: Pointer to external interrupt buffer | 
|  | 1534 | * | 
|  | 1535 | * Process connection severed work item. Called from tasklet while holding | 
|  | 1536 | * iucv_table_lock. | 
|  | 1537 | */ | 
/* Layout of the "connection severed" (type 0x03) external interrupt data. */
struct iucv_path_severed {
	u16 ippathid;	/* path id */
	u8  res1;	/* reserved */
	u8  iptype;	/* interrupt type (0x03) */
	u32 res2;	/* reserved */
	u8  res3[8];	/* reserved */
	u8  ipuser[16];	/* user data, passed to the path_severed handler */
	u32 res4;	/* reserved */
	u8  ippollfg;
	u8  res5[3];	/* reserved */
} __attribute__ ((packed));
|  | 1549 |  | 
|  | 1550 | static void iucv_path_severed(struct iucv_irq_data *data) | 
|  | 1551 | { | 
|  | 1552 | struct iucv_path_severed *ips = (void *) data; | 
|  | 1553 | struct iucv_path *path = iucv_path_table[ips->ippathid]; | 
|  | 1554 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1555 | if (!path || !path->handler)	/* Already severed */ | 
|  | 1556 | return; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1557 | if (path->handler->path_severed) | 
|  | 1558 | path->handler->path_severed(path, ips->ipuser); | 
|  | 1559 | else { | 
|  | 1560 | iucv_sever_pathid(path->pathid, NULL); | 
|  | 1561 | iucv_path_table[path->pathid] = NULL; | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 1562 | list_del(&path->list); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1563 | iucv_path_free(path); | 
|  | 1564 | } | 
|  | 1565 | } | 
|  | 1566 |  | 
|  | 1567 | /** | 
|  | 1568 | * iucv_path_quiesced | 
|  | 1569 | * @data: Pointer to external interrupt buffer | 
|  | 1570 | * | 
|  | 1571 | * Process connection quiesced work item. Called from tasklet while holding | 
|  | 1572 | * iucv_table_lock. | 
|  | 1573 | */ | 
/* Layout of the "connection quiesced" (type 0x04) external interrupt data. */
struct iucv_path_quiesced {
	u16 ippathid;	/* path id */
	u8  res1;	/* reserved */
	u8  iptype;	/* interrupt type (0x04) */
	u32 res2;	/* reserved */
	u8  res3[8];	/* reserved */
	u8  ipuser[16];	/* user data, passed to the path_quiesced handler */
	u32 res4;	/* reserved */
	u8  ippollfg;
	u8  res5[3];	/* reserved */
} __attribute__ ((packed));
|  | 1585 |  | 
|  | 1586 | static void iucv_path_quiesced(struct iucv_irq_data *data) | 
|  | 1587 | { | 
|  | 1588 | struct iucv_path_quiesced *ipq = (void *) data; | 
|  | 1589 | struct iucv_path *path = iucv_path_table[ipq->ippathid]; | 
|  | 1590 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1591 | if (path && path->handler && path->handler->path_quiesced) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1592 | path->handler->path_quiesced(path, ipq->ipuser); | 
|  | 1593 | } | 
|  | 1594 |  | 
|  | 1595 | /** | 
|  | 1596 | * iucv_path_resumed | 
|  | 1597 | * @data: Pointer to external interrupt buffer | 
|  | 1598 | * | 
|  | 1599 | * Process connection resumed work item. Called from tasklet while holding | 
|  | 1600 | * iucv_table_lock. | 
|  | 1601 | */ | 
/* Layout of the "connection resumed" (type 0x05) external interrupt data. */
struct iucv_path_resumed {
	u16 ippathid;	/* path id */
	u8  res1;	/* reserved */
	u8  iptype;	/* interrupt type (0x05) */
	u32 res2;	/* reserved */
	u8  res3[8];	/* reserved */
	u8  ipuser[16];	/* user data, passed to the path_resumed handler */
	u32 res4;	/* reserved */
	u8  ippollfg;
	u8  res5[3];	/* reserved */
} __attribute__ ((packed));
|  | 1613 |  | 
|  | 1614 | static void iucv_path_resumed(struct iucv_irq_data *data) | 
|  | 1615 | { | 
|  | 1616 | struct iucv_path_resumed *ipr = (void *) data; | 
|  | 1617 | struct iucv_path *path = iucv_path_table[ipr->ippathid]; | 
|  | 1618 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1619 | if (path && path->handler && path->handler->path_resumed) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1620 | path->handler->path_resumed(path, ipr->ipuser); | 
|  | 1621 | } | 
|  | 1622 |  | 
|  | 1623 | /** | 
|  | 1624 | * iucv_message_complete | 
|  | 1625 | * @data: Pointer to external interrupt buffer | 
|  | 1626 | * | 
|  | 1627 | * Process message complete work item. Called from tasklet while holding | 
|  | 1628 | * iucv_table_lock. | 
|  | 1629 | */ | 
/* Layout of the "message complete" (type 0x06/0x07) external interrupt data. */
struct iucv_message_complete {
	u16 ippathid;	/* path id */
	u8  ipflags1;	/* message flags */
	u8  iptype;	/* interrupt type (0x06 or 0x07) */
	u32 ipmsgid;	/* message id */
	u32 ipaudit;	/* audit information */
	u8  iprmmsg[8];	/* in-parameter-list message text */
	u32 ipsrccls;	/* source class */
	u32 ipmsgtag;	/* message tag */
	u32 res;	/* reserved */
	u32 ipbfln2f;	/* residual length, reported as msg.length */
	u8  ippollfg;
	u8  res2[3];	/* reserved */
} __attribute__ ((packed));
|  | 1644 |  | 
|  | 1645 | static void iucv_message_complete(struct iucv_irq_data *data) | 
|  | 1646 | { | 
|  | 1647 | struct iucv_message_complete *imc = (void *) data; | 
|  | 1648 | struct iucv_path *path = iucv_path_table[imc->ippathid]; | 
|  | 1649 | struct iucv_message msg; | 
|  | 1650 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1651 | if (path && path->handler && path->handler->message_complete) { | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1652 | msg.flags = imc->ipflags1; | 
|  | 1653 | msg.id = imc->ipmsgid; | 
|  | 1654 | msg.audit = imc->ipaudit; | 
|  | 1655 | memcpy(msg.rmmsg, imc->iprmmsg, 8); | 
|  | 1656 | msg.class = imc->ipsrccls; | 
|  | 1657 | msg.tag = imc->ipmsgtag; | 
|  | 1658 | msg.length = imc->ipbfln2f; | 
|  | 1659 | path->handler->message_complete(path, &msg); | 
|  | 1660 | } | 
|  | 1661 | } | 
|  | 1662 |  | 
|  | 1663 | /** | 
|  | 1664 | * iucv_message_pending | 
|  | 1665 | * @data: Pointer to external interrupt buffer | 
|  | 1666 | * | 
|  | 1667 | * Process message pending work item. Called from tasklet while holding | 
|  | 1668 | * iucv_table_lock. | 
|  | 1669 | */ | 
/* Layout of the "message pending" (type 0x08/0x09) external interrupt data. */
struct iucv_message_pending {
	u16 ippathid;	/* path id */
	u8  ipflags1;	/* message flags (IUCV_IPRMDATA selects union use) */
	u8  iptype;	/* interrupt type (0x08 or 0x09) */
	u32 ipmsgid;	/* message id */
	u32 iptrgcls;	/* target class */
	union {
		u32 iprmmsg1_u32;
		u8  iprmmsg1[4];	/* first half of in-parm message text */
	} ln1msg1;
	union {
		u32 ipbfln1f;		/* message length (non-PRMDATA case) */
		u8  iprmmsg2[4];	/* second half of in-parm message text */
	} ln1msg2;
	u32 res1[3];	/* reserved */
	u32 ipbfln2f;	/* expected reply size */
	u8  ippollfg;
	u8  res2[3];	/* reserved */
} __attribute__ ((packed));
|  | 1689 |  | 
|  | 1690 | static void iucv_message_pending(struct iucv_irq_data *data) | 
|  | 1691 | { | 
|  | 1692 | struct iucv_message_pending *imp = (void *) data; | 
|  | 1693 | struct iucv_path *path = iucv_path_table[imp->ippathid]; | 
|  | 1694 | struct iucv_message msg; | 
|  | 1695 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1696 | if (path && path->handler && path->handler->message_pending) { | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1697 | msg.flags = imp->ipflags1; | 
|  | 1698 | msg.id = imp->ipmsgid; | 
|  | 1699 | msg.class = imp->iptrgcls; | 
|  | 1700 | if (imp->ipflags1 & IUCV_IPRMDATA) { | 
|  | 1701 | memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); | 
|  | 1702 | msg.length = 8; | 
|  | 1703 | } else | 
|  | 1704 | msg.length = imp->ln1msg2.ipbfln1f; | 
|  | 1705 | msg.reply_size = imp->ipbfln2f; | 
|  | 1706 | path->handler->message_pending(path, &msg); | 
|  | 1707 | } | 
|  | 1708 | } | 
|  | 1709 |  | 
|  | 1710 | /** | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1711 | * iucv_tasklet_fn: | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1712 | * | 
|  | 1713 | * This tasklet loops over the queue of irq buffers created by | 
|  | 1714 | * iucv_external_interrupt, calls the appropriate action handler | 
|  | 1715 | * and then frees the buffer. | 
|  | 1716 | */ | 
static void iucv_tasklet_fn(unsigned long ignored)
{
	typedef void iucv_irq_fn(struct iucv_irq_data *);
	/*
	 * Dispatch table indexed by interrupt type (0x02..0x09). Type 0x01
	 * (path pending) is routed to iucv_work_fn instead; see
	 * iucv_external_interrupt.
	 */
	static iucv_irq_fn *irq_fn[] = {
		[0x02] = iucv_path_complete,
		[0x03] = iucv_path_severed,
		[0x04] = iucv_path_quiesced,
		[0x05] = iucv_path_resumed,
		[0x06] = iucv_message_complete,
		[0x07] = iucv_message_complete,
		[0x08] = iucv_message_pending,
		[0x09] = iucv_message_pending,
	};
	LIST_HEAD(task_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	if (!spin_trylock(&iucv_table_lock)) {
		/* Lock is busy: do not spin in softirq context, retry later. */
		tasklet_schedule(&iucv_tasklet);
		return;
	}
	/* Record which cpu is dispatching iucv events while we hold the lock. */
	iucv_active_cpu = smp_processor_id();

	/* Detach all queued events in one shot to minimize irq-off time. */
	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_task_queue, &task_queue);
	spin_unlock_irq(&iucv_queue_lock);

	/* Dispatch each event to its type handler and free the buffer. */
	list_for_each_entry_safe(p, n, &task_queue, list) {
		list_del_init(&p->list);
		irq_fn[p->data.iptype](&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock(&iucv_table_lock);
}
|  | 1753 |  | 
|  | 1754 | /** | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1755 | * iucv_work_fn: | 
|  | 1756 | * | 
|  | 1757 | * This work function loops over the queue of path pending irq blocks | 
|  | 1758 | * created by iucv_external_interrupt, calls the appropriate action | 
|  | 1759 | * handler and then frees the buffer. | 
|  | 1760 | */ | 
static void iucv_work_fn(struct work_struct *work)
{
	typedef void iucv_irq_fn(struct iucv_irq_data *);
	LIST_HEAD(work_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	spin_lock_bh(&iucv_table_lock);
	/* Record which cpu is dispatching iucv events while we hold the lock. */
	iucv_active_cpu = smp_processor_id();

	/* Detach all queued path-pending events in one shot. */
	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_work_queue, &work_queue);
	spin_unlock_irq(&iucv_queue_lock);

	/* NOTE(review): iucv_cleanup_queue() is defined elsewhere in this
	 * file; presumably it drops stale queued events — confirm there. */
	iucv_cleanup_queue();
	list_for_each_entry_safe(p, n, &work_queue, list) {
		list_del_init(&p->list);
		iucv_path_pending(&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock_bh(&iucv_table_lock);
}
|  | 1785 |  | 
|  | 1786 | /** | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1787 | * iucv_external_interrupt | 
|  | 1788 | * @code: irq code | 
|  | 1789 | * | 
|  | 1790 | * Handles external interrupts coming in from CP. | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 1791 | * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1792 | */ | 
static void iucv_external_interrupt(u16 code)
{
	struct iucv_irq_data *p;
	struct iucv_irq_list *work;

	/* CP stored the interrupt data in this cpu's declared buffer. */
	p = iucv_irq_data[smp_processor_id()];
	if (p->ippathid >= iucv_max_pathid) {
		/* Pathid out of range: refuse it rather than index past
		 * iucv_path_table. */
		WARN_ON(p->ippathid >= iucv_max_pathid);
		iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
		return;
	}
	BUG_ON(p->iptype  < 0x01 || p->iptype > 0x09);
	/* Copy the data out of the per-cpu buffer for deferred handling. */
	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
	if (!work) {
		/* Event is lost on OOM; nothing more we can do here. */
		pr_warning("iucv_external_interrupt: out of memory\n");
		return;
	}
	memcpy(&work->data, p, sizeof(work->data));
	spin_lock(&iucv_queue_lock);
	if (p->iptype == 0x01) {
		/* Path pending interrupt. */
		list_add_tail(&work->list, &iucv_work_queue);
		schedule_work(&iucv_work);
	} else {
		/* The other interrupts. */
		list_add_tail(&work->list, &iucv_task_queue);
		tasklet_schedule(&iucv_tasklet);
	}
	spin_unlock(&iucv_queue_lock);
}
|  | 1823 |  | 
| Ursula Braun | 672e405 | 2009-06-16 10:30:42 +0200 | [diff] [blame] | 1824 | static int iucv_pm_prepare(struct device *dev) | 
|  | 1825 | { | 
|  | 1826 | int rc = 0; | 
|  | 1827 |  | 
|  | 1828 | #ifdef CONFIG_PM_DEBUG | 
|  | 1829 | printk(KERN_INFO "iucv_pm_prepare\n"); | 
|  | 1830 | #endif | 
|  | 1831 | if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) | 
|  | 1832 | rc = dev->driver->pm->prepare(dev); | 
|  | 1833 | return rc; | 
|  | 1834 | } | 
|  | 1835 |  | 
|  | 1836 | static void iucv_pm_complete(struct device *dev) | 
|  | 1837 | { | 
|  | 1838 | #ifdef CONFIG_PM_DEBUG | 
|  | 1839 | printk(KERN_INFO "iucv_pm_complete\n"); | 
|  | 1840 | #endif | 
|  | 1841 | if (dev->driver && dev->driver->pm && dev->driver->pm->complete) | 
|  | 1842 | dev->driver->pm->complete(dev); | 
|  | 1843 | } | 
|  | 1844 |  | 
|  | 1845 | /** | 
|  | 1846 | * iucv_path_table_empty() - determine if iucv path table is empty | 
|  | 1847 | * | 
|  | 1848 | * Returns 0 if there are still iucv pathes defined | 
|  | 1849 | *	   1 if there are no iucv pathes defined | 
|  | 1850 | */ | 
|  | 1851 | int iucv_path_table_empty(void) | 
|  | 1852 | { | 
|  | 1853 | int i; | 
|  | 1854 |  | 
|  | 1855 | for (i = 0; i < iucv_max_pathid; i++) { | 
|  | 1856 | if (iucv_path_table[i]) | 
|  | 1857 | return 0; | 
|  | 1858 | } | 
|  | 1859 | return 1; | 
|  | 1860 | } | 
|  | 1861 |  | 
/**
 * iucv_pm_freeze() - Freeze PM callback
 * @dev:	iucv-based device
 *
 * disable iucv interrupts
 * invoke callback function of the iucv-based driver
 * shut down iucv, if no iucv paths are established anymore
 */
static int iucv_pm_freeze(struct device *dev)
{
	int cpu;
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_freeze\n");
#endif
	/* Stop interrupt delivery on every cpu that currently has it on.
	 * (iucv_block_cpu_almost is defined elsewhere in this file.) */
	for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
		smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1);
	/* Give the driver a chance to freeze its own state. */
	if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
		rc = dev->driver->pm->freeze(dev);
	/* Tear down iucv entirely once the last path is gone. */
	if (iucv_path_table_empty())
		iucv_disable();
	return rc;
}
|  | 1886 |  | 
/**
 * iucv_pm_thaw() - Thaw PM callback
 * @dev:	iucv-based device
 *
 * make iucv ready for use again: allocate path table, declare interrupt buffers
 *				  and enable iucv interrupts
 * invoke callback function of the iucv-based driver
 */
static int iucv_pm_thaw(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_thaw\n");
#endif
	/* Re-create the path table if freeze tore iucv down completely. */
	if (!iucv_path_table) {
		rc = iucv_enable();
		if (rc)
			goto out;
	}
	/* Re-enable interrupt delivery if no cpu currently has it on. */
	if (cpus_empty(iucv_irq_cpumask)) {
		if (iucv_nonsmp_handler)
			/* enable interrupts on one cpu */
			iucv_allow_cpu(NULL);
		else
			/* enable interrupts on all cpus */
			iucv_setmask_mp();
	}
	/* Give the driver a chance to thaw its own state. */
	if (dev->driver && dev->driver->pm && dev->driver->pm->thaw)
		rc = dev->driver->pm->thaw(dev);
out:
	return rc;
}
|  | 1920 |  | 
|  | 1921 | /** | 
|  | 1922 | * iucv_pm_restore() - Restore PM callback | 
|  | 1923 | * @dev:	iucv-based device | 
|  | 1924 | * | 
|  | 1925 | * make iucv ready for use again: allocate path table, declare interrupt buffers | 
|  | 1926 | *				  and enable iucv interrupts | 
|  | 1927 | * invoke callback function of the iucv-based driver | 
|  | 1928 | */ | 
|  | 1929 | static int iucv_pm_restore(struct device *dev) | 
|  | 1930 | { | 
|  | 1931 | int rc = 0; | 
|  | 1932 |  | 
|  | 1933 | #ifdef CONFIG_PM_DEBUG | 
|  | 1934 | printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); | 
|  | 1935 | #endif | 
|  | 1936 | if (cpus_empty(iucv_irq_cpumask)) { | 
|  | 1937 | rc = iucv_query_maxconn(); | 
|  | 1938 | rc = iucv_enable(); | 
|  | 1939 | if (rc) | 
|  | 1940 | goto out; | 
|  | 1941 | } | 
|  | 1942 | if (dev->driver && dev->driver->pm && dev->driver->pm->restore) | 
|  | 1943 | rc = dev->driver->pm->restore(dev); | 
|  | 1944 | out: | 
|  | 1945 | return rc; | 
|  | 1946 | } | 
|  | 1947 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1948 | /** | 
|  | 1949 | * iucv_init | 
|  | 1950 | * | 
|  | 1951 | * Allocates and initializes various data structures. | 
|  | 1952 | */ | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1953 | static int __init iucv_init(void) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1954 | { | 
|  | 1955 | int rc; | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1956 | int cpu; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1957 |  | 
|  | 1958 | if (!MACHINE_IS_VM) { | 
|  | 1959 | rc = -EPROTONOSUPPORT; | 
|  | 1960 | goto out; | 
|  | 1961 | } | 
|  | 1962 | rc = iucv_query_maxconn(); | 
|  | 1963 | if (rc) | 
|  | 1964 | goto out; | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 1965 | rc = register_external_interrupt(0x4000, iucv_external_interrupt); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1966 | if (rc) | 
|  | 1967 | goto out; | 
| Mark McLoughlin | 035da16 | 2008-12-15 12:58:29 +0000 | [diff] [blame] | 1968 | iucv_root = root_device_register("iucv"); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1969 | if (IS_ERR(iucv_root)) { | 
|  | 1970 | rc = PTR_ERR(iucv_root); | 
| Cornelia Huck | 2d7bf36 | 2008-04-10 02:12:45 -0700 | [diff] [blame] | 1971 | goto out_int; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1972 | } | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 1973 |  | 
|  | 1974 | for_each_online_cpu(cpu) { | 
|  | 1975 | /* Note: GFP_DMA used to get memory below 2G */ | 
|  | 1976 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), | 
|  | 1977 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
|  | 1978 | if (!iucv_irq_data[cpu]) { | 
|  | 1979 | rc = -ENOMEM; | 
|  | 1980 | goto out_free; | 
|  | 1981 | } | 
|  | 1982 |  | 
|  | 1983 | /* Allocate parameter blocks. */ | 
|  | 1984 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), | 
|  | 1985 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
|  | 1986 | if (!iucv_param[cpu]) { | 
|  | 1987 | rc = -ENOMEM; | 
|  | 1988 | goto out_free; | 
|  | 1989 | } | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 1990 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), | 
|  | 1991 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 
|  | 1992 | if (!iucv_param_irq[cpu]) { | 
|  | 1993 | rc = -ENOMEM; | 
|  | 1994 | goto out_free; | 
|  | 1995 | } | 
|  | 1996 |  | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 1997 | } | 
| Cornelia Huck | 2d7bf36 | 2008-04-10 02:12:45 -0700 | [diff] [blame] | 1998 | rc = register_hotcpu_notifier(&iucv_cpu_notifier); | 
|  | 1999 | if (rc) | 
|  | 2000 | goto out_free; | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 2001 | rc = register_reboot_notifier(&iucv_reboot_notifier); | 
|  | 2002 | if (rc) | 
|  | 2003 | goto out_cpu; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2004 | ASCEBC(iucv_error_no_listener, 16); | 
|  | 2005 | ASCEBC(iucv_error_no_memory, 16); | 
|  | 2006 | ASCEBC(iucv_error_pathid, 16); | 
|  | 2007 | iucv_available = 1; | 
| Cornelia Huck | 2d7bf36 | 2008-04-10 02:12:45 -0700 | [diff] [blame] | 2008 | rc = bus_register(&iucv_bus); | 
|  | 2009 | if (rc) | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 2010 | goto out_reboot; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2011 | return 0; | 
|  | 2012 |  | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 2013 | out_reboot: | 
|  | 2014 | unregister_reboot_notifier(&iucv_reboot_notifier); | 
| Cornelia Huck | 2d7bf36 | 2008-04-10 02:12:45 -0700 | [diff] [blame] | 2015 | out_cpu: | 
|  | 2016 | unregister_hotcpu_notifier(&iucv_cpu_notifier); | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 2017 | out_free: | 
|  | 2018 | for_each_possible_cpu(cpu) { | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 2019 | kfree(iucv_param_irq[cpu]); | 
|  | 2020 | iucv_param_irq[cpu] = NULL; | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 2021 | kfree(iucv_param[cpu]); | 
|  | 2022 | iucv_param[cpu] = NULL; | 
|  | 2023 | kfree(iucv_irq_data[cpu]); | 
|  | 2024 | iucv_irq_data[cpu] = NULL; | 
|  | 2025 | } | 
| Mark McLoughlin | 035da16 | 2008-12-15 12:58:29 +0000 | [diff] [blame] | 2026 | root_device_unregister(iucv_root); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2027 | out_int: | 
|  | 2028 | unregister_external_interrupt(0x4000, iucv_external_interrupt); | 
|  | 2029 | out: | 
|  | 2030 | return rc; | 
|  | 2031 | } | 
|  | 2032 |  | 
|  | 2033 | /** | 
|  | 2034 | * iucv_exit | 
|  | 2035 | * | 
|  | 2036 | * Frees everything allocated from iucv_init. | 
|  | 2037 | */ | 
| Heiko Carstens | da99f05 | 2007-05-04 12:23:27 -0700 | [diff] [blame] | 2038 | static void __exit iucv_exit(void) | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2039 | { | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 2040 | struct iucv_irq_list *p, *n; | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 2041 | int cpu; | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2042 |  | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 2043 | spin_lock_irq(&iucv_queue_lock); | 
|  | 2044 | list_for_each_entry_safe(p, n, &iucv_task_queue, list) | 
|  | 2045 | kfree(p); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2046 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) | 
|  | 2047 | kfree(p); | 
| Martin Schwidefsky | 04b090d | 2007-04-28 23:03:59 -0700 | [diff] [blame] | 2048 | spin_unlock_irq(&iucv_queue_lock); | 
| Ursula Braun | 6c00596 | 2009-06-16 10:30:41 +0200 | [diff] [blame] | 2049 | unregister_reboot_notifier(&iucv_reboot_notifier); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2050 | unregister_hotcpu_notifier(&iucv_cpu_notifier); | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 2051 | for_each_possible_cpu(cpu) { | 
| Ursula Braun | 42e1b4c | 2009-04-21 23:26:20 +0000 | [diff] [blame] | 2052 | kfree(iucv_param_irq[cpu]); | 
|  | 2053 | iucv_param_irq[cpu] = NULL; | 
| Christoph Lameter | 70cf5035 | 2007-11-20 11:13:38 +0100 | [diff] [blame] | 2054 | kfree(iucv_param[cpu]); | 
|  | 2055 | iucv_param[cpu] = NULL; | 
|  | 2056 | kfree(iucv_irq_data[cpu]); | 
|  | 2057 | iucv_irq_data[cpu] = NULL; | 
|  | 2058 | } | 
| Mark McLoughlin | 035da16 | 2008-12-15 12:58:29 +0000 | [diff] [blame] | 2059 | root_device_unregister(iucv_root); | 
| Martin Schwidefsky | 2356f4c | 2007-02-08 13:37:42 -0800 | [diff] [blame] | 2060 | bus_unregister(&iucv_bus); | 
|  | 2061 | unregister_external_interrupt(0x4000, iucv_external_interrupt); | 
|  | 2062 | } | 
|  | 2063 |  | 
/* Bring the iucv base up early (subsystem initcall) so that iucv-based
 * drivers find iucv_bus registered when their own initcalls run. */
subsys_initcall(iucv_init);
module_exit(iucv_exit);

MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");