/* drivers/usb/function/diag.c
| 2 | * |
| 3 | * Diag Function Device - Route DIAG frames between SMD and USB |
| 4 | * |
| 5 | * Copyright (C) 2007 Google, Inc. |
| 6 | * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. |
| 7 | * Author: Brian Swetland <swetland@google.com> |
| 8 | * |
| 9 | * This software is licensed under the terms of the GNU General Public |
| 10 | * License version 2, as published by the Free Software Foundation, and |
| 11 | * may be copied, distributed, and modified under those terms. |
| 12 | * |
| 13 | * This program is distributed in the hope that it will be useful, |
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 16 | * GNU General Public License for more details. |
| 17 | * |
| 18 | */ |
| 19 | |
| 20 | #include <linux/init.h> |
| 21 | #include <linux/module.h> |
| 22 | #include <linux/kernel.h> |
| 23 | #include <linux/platform_device.h> |
| 24 | #include <linux/workqueue.h> |
| 25 | #include <linux/err.h> |
| 26 | |
| 27 | #include <mach/msm_smd.h> |
| 28 | #include <mach/usbdiag.h> |
| 29 | |
| 30 | #include "usb_function.h" |
| 31 | |
/* usb_request->status value that indicates successful completion */
#define WRITE_COMPLETE 0
#define READ_COMPLETE 0

/* Vendor-specific interface (class/subclass/protocol all 0xFF) with two
 * bulk endpoints; bInterfaceNumber is filled in by diag_bind(). */
static struct usb_interface_descriptor intf_desc = {
	.bLength = sizeof intf_desc,
	.bDescriptorType = USB_DT_INTERFACE,
	.bNumEndpoints = 2,
	.bInterfaceClass = 0xFF,
	.bInterfaceSubClass = 0xFF,
	.bInterfaceProtocol = 0xFF,
};
| 42 | |
/* Bulk IN endpoint, high speed: 512-byte max packet.  The endpoint
 * address is patched with the allocated endpoint number in diag_bind(). */
static struct usb_endpoint_descriptor hs_bulk_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),
	.bInterval = 0,
};

/* Bulk IN endpoint, full speed: 64-byte max packet */
static struct usb_endpoint_descriptor fs_bulk_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(64),
	.bInterval = 0,
};

/* Bulk OUT endpoint, high speed: 512-byte max packet */
static struct usb_endpoint_descriptor hs_bulk_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),
	.bInterval = 0,
};

/* Bulk OUT endpoint, full speed: 64-byte max packet */
static struct usb_endpoint_descriptor fs_bulk_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(64),
	.bInterval = 0,
};
| 77 | |
/* One pooled transfer: links a usb_request to the diag char driver's
 * struct diag_request for the duration of a transfer. */
struct diag_req_entry {
	struct list_head re_entry;	/* link in dev_read/write_req_list */
	struct usb_request *usb_req;	/* underlying USB request */
	void *diag_request;		/* current struct diag_request *, if any */
};

/* Per-function state.  A single static instance (_context) exists;
 * dev_lock protects both request lists. */
struct diag_context {
	struct usb_endpoint *epout;	/* bulk OUT: host -> device */
	struct usb_endpoint *epin;	/* bulk IN: device -> host */
	spinlock_t dev_lock;
	/* linked list of idle read requests */
	struct list_head dev_read_req_list;
	/* linked list of idle write requests */
	struct list_head dev_write_req_list;
	struct diag_operations *operations;	/* char-driver callbacks */
	struct workqueue_struct *diag_wq;	/* runs usb_config_work */
	struct work_struct usb_config_work;	/* defers diag_connect() */
	unsigned configured;	/* set while a host configuration is active */
	unsigned bound;		/* set between diag_bind and diag_unbind */
	int diag_opened;	/* set once request pools are allocated */
};
| 99 | |
static struct usb_function usb_func_diag;
static struct diag_context _context;
/* forward declarations: request pool alloc/free and completion handlers */
static void diag_write_complete(struct usb_endpoint *,
		struct usb_request *);
static struct diag_req_entry *diag_alloc_req_entry(struct usb_endpoint *,
		unsigned len, gfp_t);
static void diag_free_req_entry(struct usb_endpoint *, struct diag_req_entry *);
static void diag_read_complete(struct usb_endpoint *, struct usb_request *);
| 108 | |
| 109 | |
| 110 | static void diag_unbind(void *context) |
| 111 | { |
| 112 | |
| 113 | struct diag_context *ctxt = context; |
| 114 | |
| 115 | if (!ctxt) |
| 116 | return; |
| 117 | if (!ctxt->bound) |
| 118 | return; |
| 119 | if (ctxt->epin) { |
| 120 | usb_ept_fifo_flush(ctxt->epin); |
| 121 | usb_ept_enable(ctxt->epin, 0); |
| 122 | usb_free_endpoint(ctxt->epin); |
| 123 | } |
| 124 | if (ctxt->epout) { |
| 125 | usb_ept_fifo_flush(ctxt->epout); |
| 126 | usb_ept_enable(ctxt->epout, 0); |
| 127 | usb_free_endpoint(ctxt->epout); |
| 128 | } |
| 129 | ctxt->bound = 0; |
| 130 | } |
| 131 | static void diag_bind(void *context) |
| 132 | { |
| 133 | struct diag_context *ctxt = context; |
| 134 | |
| 135 | if (!ctxt) |
| 136 | return; |
| 137 | |
| 138 | intf_desc.bInterfaceNumber = |
| 139 | usb_msm_get_next_ifc_number(&usb_func_diag); |
| 140 | |
| 141 | ctxt->epin = usb_alloc_endpoint(USB_DIR_IN); |
| 142 | if (ctxt->epin) { |
| 143 | hs_bulk_in_desc.bEndpointAddress = |
| 144 | USB_DIR_IN | ctxt->epin->num; |
| 145 | fs_bulk_in_desc.bEndpointAddress = |
| 146 | USB_DIR_IN | ctxt->epin->num; |
| 147 | } |
| 148 | |
| 149 | ctxt->epout = usb_alloc_endpoint(USB_DIR_OUT); |
| 150 | if (ctxt->epout) { |
| 151 | hs_bulk_out_desc.bEndpointAddress = |
| 152 | USB_DIR_OUT | ctxt->epout->num; |
| 153 | fs_bulk_out_desc.bEndpointAddress = |
| 154 | USB_DIR_OUT | ctxt->epout->num; |
| 155 | } |
| 156 | |
| 157 | ctxt->bound = 1; |
| 158 | } |
| 159 | static void diag_configure(int configured, void *_ctxt) |
| 160 | |
| 161 | { |
| 162 | struct diag_context *ctxt = _ctxt; |
| 163 | |
| 164 | if (!ctxt) |
| 165 | return; |
| 166 | if (configured) { |
| 167 | if (usb_msm_get_speed() == USB_SPEED_HIGH) { |
| 168 | usb_configure_endpoint(ctxt->epin, &hs_bulk_in_desc); |
| 169 | usb_configure_endpoint(ctxt->epout, &hs_bulk_out_desc); |
| 170 | } else { |
| 171 | usb_configure_endpoint(ctxt->epin, &fs_bulk_in_desc); |
| 172 | usb_configure_endpoint(ctxt->epout, &fs_bulk_out_desc); |
| 173 | } |
| 174 | usb_ept_enable(ctxt->epin, 1); |
| 175 | usb_ept_enable(ctxt->epout, 1); |
| 176 | ctxt->configured = 1; |
| 177 | queue_work(_context.diag_wq, &(_context.usb_config_work)); |
| 178 | } else { |
| 179 | /* all pending requests will be canceled */ |
| 180 | ctxt->configured = 0; |
| 181 | if (ctxt->epin) { |
| 182 | usb_ept_fifo_flush(ctxt->epin); |
| 183 | usb_ept_enable(ctxt->epin, 0); |
| 184 | } |
| 185 | if (ctxt->epout) { |
| 186 | usb_ept_fifo_flush(ctxt->epout); |
| 187 | usb_ept_enable(ctxt->epout, 0); |
| 188 | } |
| 189 | if ((ctxt->operations) && |
| 190 | (ctxt->operations->diag_disconnect)) |
| 191 | ctxt->operations->diag_disconnect(); |
| 192 | } |
| 193 | |
| 194 | } |
/* Registration record for the MSM USB function framework; each callback
 * receives .context (our single static diag_context) as its argument. */
static struct usb_function usb_func_diag = {
	.bind = diag_bind,
	.configure = diag_configure,
	.unbind = diag_unbind,


	.name = "diag",
	.context = &_context,
};
| 204 | int diag_usb_register(struct diag_operations *func) |
| 205 | { |
| 206 | struct diag_context *ctxt = &_context; |
| 207 | |
| 208 | if (func == NULL) { |
| 209 | printk(KERN_ERR "diag_usb_register:registering" |
| 210 | "diag char operations NULL\n"); |
| 211 | return -1; |
| 212 | } |
| 213 | ctxt->operations = func; |
| 214 | if (ctxt->configured == 1) |
| 215 | if ((ctxt->operations) && |
| 216 | (ctxt->operations->diag_connect)) |
| 217 | ctxt->operations->diag_connect(); |
| 218 | return 0; |
| 219 | } |
| 220 | EXPORT_SYMBOL(diag_usb_register); |
| 221 | |
| 222 | int diag_usb_unregister(void) |
| 223 | { |
| 224 | struct diag_context *ctxt = &_context; |
| 225 | |
| 226 | ctxt->operations = NULL; |
| 227 | return 0; |
| 228 | } |
| 229 | EXPORT_SYMBOL(diag_usb_unregister); |
| 230 | |
| 231 | int diag_open(int num_req) |
| 232 | { |
| 233 | struct diag_context *ctxt = &_context; |
| 234 | struct diag_req_entry *write_entry; |
| 235 | struct diag_req_entry *read_entry; |
| 236 | int i = 0; |
| 237 | |
| 238 | for (i = 0; i < num_req; i++) { |
| 239 | write_entry = diag_alloc_req_entry(ctxt->epin, 0, GFP_KERNEL); |
| 240 | if (write_entry) { |
| 241 | write_entry->usb_req->complete = diag_write_complete; |
| 242 | write_entry->usb_req->device = (void *)ctxt; |
| 243 | list_add(&write_entry->re_entry, |
| 244 | &ctxt->dev_write_req_list); |
| 245 | } else |
| 246 | goto write_error; |
| 247 | } |
| 248 | |
| 249 | for (i = 0; i < num_req ; i++) { |
| 250 | read_entry = diag_alloc_req_entry(ctxt->epout, 0 , GFP_KERNEL); |
| 251 | if (read_entry) { |
| 252 | read_entry->usb_req->complete = diag_read_complete; |
| 253 | read_entry->usb_req->device = (void *)ctxt; |
| 254 | list_add(&read_entry->re_entry , |
| 255 | &ctxt->dev_read_req_list); |
| 256 | } else |
| 257 | goto read_error; |
| 258 | } |
| 259 | ctxt->diag_opened = 1; |
| 260 | return 0; |
| 261 | read_error: |
| 262 | printk(KERN_ERR "%s:read requests allocation failure\n", __func__); |
| 263 | while (!list_empty(&ctxt->dev_read_req_list)) { |
| 264 | read_entry = list_entry(ctxt->dev_read_req_list.next, |
| 265 | struct diag_req_entry, re_entry); |
| 266 | list_del(&read_entry->re_entry); |
| 267 | diag_free_req_entry(ctxt->epout, read_entry); |
| 268 | } |
| 269 | write_error: |
| 270 | printk(KERN_ERR "%s: write requests allocation failure\n", __func__); |
| 271 | while (!list_empty(&ctxt->dev_write_req_list)) { |
| 272 | write_entry = list_entry(ctxt->dev_write_req_list.next, |
| 273 | struct diag_req_entry, re_entry); |
| 274 | list_del(&write_entry->re_entry); |
| 275 | diag_free_req_entry(ctxt->epin, write_entry); |
| 276 | } |
| 277 | ctxt->diag_opened = 0; |
| 278 | return -ENOMEM; |
| 279 | } |
| 280 | EXPORT_SYMBOL(diag_open); |
| 281 | |
| 282 | void diag_close(void) |
| 283 | { |
| 284 | struct diag_context *ctxt = &_context; |
| 285 | struct diag_req_entry *req_entry; |
| 286 | /* free write requests */ |
| 287 | |
| 288 | while (!list_empty(&ctxt->dev_write_req_list)) { |
| 289 | req_entry = list_entry(ctxt->dev_write_req_list.next, |
| 290 | struct diag_req_entry, re_entry); |
| 291 | list_del(&req_entry->re_entry); |
| 292 | diag_free_req_entry(ctxt->epin, req_entry); |
| 293 | } |
| 294 | |
| 295 | /* free read requests */ |
| 296 | while (!list_empty(&ctxt->dev_read_req_list)) { |
| 297 | req_entry = list_entry(ctxt->dev_read_req_list.next, |
| 298 | struct diag_req_entry, re_entry); |
| 299 | list_del(&req_entry->re_entry); |
| 300 | diag_free_req_entry(ctxt->epout, req_entry); |
| 301 | } |
| 302 | return; |
| 303 | } |
| 304 | EXPORT_SYMBOL(diag_close); |
| 305 | |
| 306 | static void diag_free_req_entry(struct usb_endpoint *ep, |
| 307 | struct diag_req_entry *req) |
| 308 | { |
| 309 | if (ep != NULL && req != NULL) { |
| 310 | if (req->usb_req != NULL) |
| 311 | usb_ept_free_req(ep, req->usb_req); |
| 312 | kfree(req); |
| 313 | } |
| 314 | } |
| 315 | |
| 316 | |
/*
 * Allocate a diag_req_entry together with its usb_request on @ep.
 *
 * Returns ERR_PTR(-ENOMEM) on failure, never NULL.  NOTE(review):
 * callers in this file test the result with a plain NULL check, which
 * an ERR_PTR value passes — verify callers use IS_ERR().
 */
static struct diag_req_entry *diag_alloc_req_entry(struct usb_endpoint *ep,
		unsigned len, gfp_t kmalloc_flags)
{
	struct diag_req_entry *req;

	req = kmalloc(sizeof(struct diag_req_entry), kmalloc_flags);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* @len is unused; the usb_request is allocated with length 0 */
	req->usb_req = usb_ept_alloc_req(ep , 0);
	if (req->usb_req == NULL) {
		kfree(req);
		return ERR_PTR(-ENOMEM);
	}

	/* back-pointer used by the completion handlers */
	req->usb_req->context = req;
	return req;
}
| 336 | |
| 337 | int diag_read(struct diag_request *d_req) |
| 338 | { |
| 339 | unsigned long flags; |
| 340 | struct usb_request *req = NULL; |
| 341 | struct diag_req_entry *req_entry = NULL; |
| 342 | struct diag_context *ctxt = &_context; |
| 343 | |
| 344 | |
| 345 | if (ctxt->diag_opened != 1) |
| 346 | return -EIO; |
| 347 | spin_lock_irqsave(&ctxt->dev_lock , flags); |
| 348 | if (!list_empty(&ctxt->dev_read_req_list)) { |
| 349 | req_entry = list_entry(ctxt->dev_read_req_list.next , |
| 350 | struct diag_req_entry , re_entry); |
| 351 | req_entry->diag_request = d_req; |
| 352 | req = req_entry->usb_req; |
| 353 | list_del(&req_entry->re_entry); |
| 354 | } |
| 355 | spin_unlock_irqrestore(&ctxt->dev_lock , flags); |
| 356 | if (req) { |
| 357 | req->buf = d_req->buf; |
| 358 | req->length = d_req->length; |
| 359 | req->device = ctxt; |
| 360 | if (usb_ept_queue_xfer(ctxt->epout, req)) { |
| 361 | /* If error add the link to the linked list again. */ |
| 362 | spin_lock_irqsave(&ctxt->dev_lock , flags); |
| 363 | list_add_tail(&req_entry->re_entry , |
| 364 | &ctxt->dev_read_req_list); |
| 365 | spin_unlock_irqrestore(&ctxt->dev_lock , flags); |
| 366 | printk(KERN_ERR "diag_read:can't queue the request\n"); |
| 367 | return -EIO; |
| 368 | } |
| 369 | } else { |
| 370 | printk(KERN_ERR |
| 371 | "diag_read:no requests avialable\n"); |
| 372 | return -EIO; |
| 373 | } |
| 374 | return 0; |
| 375 | } |
| 376 | EXPORT_SYMBOL(diag_read); |
| 377 | |
| 378 | int diag_write(struct diag_request *d_req) |
| 379 | { |
| 380 | unsigned long flags; |
| 381 | struct usb_request *req = NULL; |
| 382 | struct diag_req_entry *req_entry = NULL; |
| 383 | struct diag_context *ctxt = &_context; |
| 384 | |
| 385 | if (ctxt->diag_opened != 1) |
| 386 | return -EIO; |
| 387 | spin_lock_irqsave(&ctxt->dev_lock , flags); |
| 388 | if (!list_empty(&ctxt->dev_write_req_list)) { |
| 389 | req_entry = list_entry(ctxt->dev_write_req_list.next , |
| 390 | struct diag_req_entry , re_entry); |
| 391 | req_entry->diag_request = d_req; |
| 392 | req = req_entry->usb_req; |
| 393 | list_del(&req_entry->re_entry); |
| 394 | } |
| 395 | spin_unlock_irqrestore(&ctxt->dev_lock, flags); |
| 396 | if (req) { |
| 397 | req->buf = d_req->buf; |
| 398 | req->length = d_req->length; |
| 399 | req->device = ctxt; |
| 400 | if (usb_ept_queue_xfer(ctxt->epin, req)) { |
| 401 | /* If error add the link to linked list again*/ |
| 402 | spin_lock_irqsave(&ctxt->dev_lock, flags); |
| 403 | list_add_tail(&req_entry->re_entry , |
| 404 | &ctxt->dev_write_req_list); |
| 405 | spin_unlock_irqrestore(&ctxt->dev_lock, flags); |
| 406 | printk(KERN_ERR "diag_write: cannot queue" |
| 407 | " read request\n"); |
| 408 | return -EIO; |
| 409 | } |
| 410 | } else { |
| 411 | printk(KERN_ERR "diag_write: no requests available\n"); |
| 412 | return -EIO; |
| 413 | } |
| 414 | return 0; |
| 415 | } |
| 416 | EXPORT_SYMBOL(diag_write); |
| 417 | |
/*
 * Completion handler for bulk-IN transfers.
 *
 * A transfer whose length is a nonzero multiple of the endpoint's max
 * packet size is terminated by resubmitting the same request with
 * length 0 (a zero-length packet); only when that ZLP completes is the
 * request recycled and the char driver notified.  Statement order in
 * the ZLP branch matters: d_req fields are captured before the request
 * is requeued.
 */
static void diag_write_complete(struct usb_endpoint *ep ,
		struct usb_request *req)
{
	struct diag_context *ctxt = (struct diag_context *)req->device;
	struct diag_req_entry *diag_req = req->context;
	struct diag_request *d_req = (struct diag_request *)
						diag_req->diag_request;
	unsigned long flags;

	if (ctxt == NULL) {
		printk(KERN_ERR "diag_write_complete : requesting"
				"NULL device pointer\n");
		return;
	}
	if (req->status == WRITE_COMPLETE) {
		/* exact multiple of max packet -> send a ZLP so the host
		 * knows the transfer is finished */
		if ((req->length >= ep->max_pkt) &&
				((req->length % ep->max_pkt) == 0)) {
			req->length = 0;
			req->device = ctxt;
			d_req->actual = req->actual;
			d_req->status = req->status;
			/* Queue zero length packet */
			usb_ept_queue_xfer(ctxt->epin, req);
			return;
		}
		/* normal completion: recycle the request and report */
		spin_lock_irqsave(&ctxt->dev_lock, flags);
		list_add_tail(&diag_req->re_entry ,
				&ctxt->dev_write_req_list);
		/* length == 0 here means this was the ZLP completion;
		 * d_req was already filled in before the ZLP was queued */
		if (req->length != 0) {
			d_req->actual = req->actual;
			d_req->status = req->status;
		}
		spin_unlock_irqrestore(&ctxt->dev_lock , flags);
		if ((ctxt->operations) &&
			(ctxt->operations->diag_char_write_complete))
			ctxt->operations->diag_char_write_complete(
				d_req);
	} else {
		/* error completion: recycle and report the error status */
		spin_lock_irqsave(&ctxt->dev_lock, flags);
		list_add_tail(&diag_req->re_entry ,
				&ctxt->dev_write_req_list);
		d_req->actual = req->actual;
		d_req->status = req->status;
		spin_unlock_irqrestore(&ctxt->dev_lock , flags);
		if ((ctxt->operations) &&
			(ctxt->operations->diag_char_write_complete))
			ctxt->operations->diag_char_write_complete(
				d_req);
	}
}
| 469 | static void diag_read_complete(struct usb_endpoint *ep , |
| 470 | struct usb_request *req) |
| 471 | { |
| 472 | struct diag_context *ctxt = (struct diag_context *)req->device; |
| 473 | struct diag_req_entry *diag_req = req->context; |
| 474 | struct diag_request *d_req = (struct diag_request *) |
| 475 | diag_req->diag_request; |
| 476 | unsigned long flags; |
| 477 | |
| 478 | if (ctxt == NULL) { |
| 479 | printk(KERN_ERR "diag_read_complete : requesting" |
| 480 | "NULL device pointer\n"); |
| 481 | return; |
| 482 | } |
| 483 | if (req->status == READ_COMPLETE) { |
| 484 | /* normal completion*/ |
| 485 | spin_lock_irqsave(&ctxt->dev_lock, flags); |
| 486 | list_add_tail(&diag_req->re_entry , |
| 487 | &ctxt->dev_read_req_list); |
| 488 | d_req->actual = req->actual; |
| 489 | d_req->status = req->status; |
| 490 | spin_unlock_irqrestore(&ctxt->dev_lock, flags); |
| 491 | if ((ctxt->operations) && |
| 492 | (ctxt->operations->diag_char_read_complete)) |
| 493 | ctxt->operations->diag_char_read_complete( |
| 494 | d_req); |
| 495 | } else { |
| 496 | spin_lock_irqsave(&ctxt->dev_lock, flags); |
| 497 | list_add_tail(&diag_req->re_entry , |
| 498 | &ctxt->dev_read_req_list); |
| 499 | d_req->actual = req->actual; |
| 500 | d_req->status = req->status; |
| 501 | spin_unlock_irqrestore(&ctxt->dev_lock, flags); |
| 502 | if ((ctxt->operations) && |
| 503 | (ctxt->operations->diag_char_read_complete)) |
| 504 | ctxt->operations->diag_char_read_complete( |
| 505 | d_req); |
| 506 | } |
| 507 | } |
| 508 | void usb_config_work_func(struct work_struct *work) |
| 509 | { |
| 510 | struct diag_context *ctxt = &_context; |
| 511 | if ((ctxt->operations) && |
| 512 | (ctxt->operations->diag_connect)) |
| 513 | ctxt->operations->diag_connect(); |
| 514 | } |
| 515 | |
/* NULL-terminated descriptor lists handed to the function framework;
 * entries are filled in by diag_init(). */
struct usb_descriptor_header *diag_hs_descriptors[4];
struct usb_descriptor_header *diag_fs_descriptors[4];
| 518 | |
| 519 | static int __init diag_init(void) |
| 520 | { |
| 521 | int r; |
| 522 | struct diag_context *ctxt = &_context; |
| 523 | |
| 524 | diag_hs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc; |
| 525 | diag_hs_descriptors[1] = |
| 526 | (struct usb_descriptor_header *)&hs_bulk_in_desc; |
| 527 | diag_hs_descriptors[2] = |
| 528 | (struct usb_descriptor_header *)&hs_bulk_out_desc; |
| 529 | diag_hs_descriptors[3] = NULL; |
| 530 | |
| 531 | diag_fs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc; |
| 532 | diag_fs_descriptors[1] = |
| 533 | (struct usb_descriptor_header *)&fs_bulk_in_desc; |
| 534 | diag_fs_descriptors[2] = |
| 535 | (struct usb_descriptor_header *)&fs_bulk_out_desc; |
| 536 | diag_fs_descriptors[3] = NULL; |
| 537 | INIT_LIST_HEAD(&ctxt->dev_read_req_list); |
| 538 | INIT_LIST_HEAD(&ctxt->dev_write_req_list); |
| 539 | ctxt->diag_wq = create_singlethread_workqueue("diag"); |
| 540 | if (ctxt->diag_wq == NULL) |
| 541 | return -1; |
| 542 | INIT_WORK(&_context.usb_config_work , usb_config_work_func); |
| 543 | |
| 544 | usb_func_diag.hs_descriptors = diag_hs_descriptors; |
| 545 | usb_func_diag.fs_descriptors = diag_fs_descriptors; |
| 546 | spin_lock_init(&_context.dev_lock); |
| 547 | r = usb_function_register(&usb_func_diag); |
| 548 | if (r < 0) |
| 549 | destroy_workqueue(ctxt->diag_wq); |
| 550 | return r; |
| 551 | } |
| 552 | |
| 553 | module_init(diag_init); |
| 554 | static void __exit diag_exit(void) |
| 555 | { |
| 556 | struct diag_context *ctxt = &_context; |
| 557 | if (!ctxt) |
| 558 | return; |
| 559 | if (!ctxt) |
| 560 | BUG_ON(1); |
| 561 | |
| 562 | usb_function_unregister(&usb_func_diag); |
| 563 | destroy_workqueue(ctxt->diag_wq); |
| 564 | } |
| 565 | module_exit(diag_exit); |
| 566 | |
| 567 | MODULE_LICENSE("GPL v2"); |