/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
17
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090018#include <linux/gfp.h>
Karsten Keil1b2b03f2008-07-27 01:54:58 +020019#include <linux/module.h>
20#include <linux/mISDNhw.h>
21
22static void
23dchannel_bh(struct work_struct *ws)
24{
25 struct dchannel *dch = container_of(ws, struct dchannel, workq);
26 struct sk_buff *skb;
27 int err;
28
29 if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
30 while ((skb = skb_dequeue(&dch->rqueue))) {
31 if (likely(dch->dev.D.peer)) {
32 err = dch->dev.D.recv(dch->dev.D.peer, skb);
33 if (err)
34 dev_kfree_skb(skb);
35 } else
36 dev_kfree_skb(skb);
37 }
38 }
39 if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
40 if (dch->phfunc)
41 dch->phfunc(dch);
42 }
43}
44
45static void
46bchannel_bh(struct work_struct *ws)
47{
48 struct bchannel *bch = container_of(ws, struct bchannel, workq);
49 struct sk_buff *skb;
50 int err;
51
52 if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
53 while ((skb = skb_dequeue(&bch->rqueue))) {
Karsten Keil1b2b03f2008-07-27 01:54:58 +020054 bch->rcount--;
55 if (likely(bch->ch.peer)) {
56 err = bch->ch.recv(bch->ch.peer, skb);
57 if (err)
58 dev_kfree_skb(skb);
59 } else
60 dev_kfree_skb(skb);
61 }
62 }
63}
64
65int
66mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
67{
68 test_and_set_bit(FLG_HDLC, &ch->Flags);
69 ch->maxlen = maxlen;
70 ch->hw = NULL;
71 ch->rx_skb = NULL;
72 ch->tx_skb = NULL;
73 ch->tx_idx = 0;
74 ch->phfunc = phf;
75 skb_queue_head_init(&ch->squeue);
76 skb_queue_head_init(&ch->rqueue);
77 INIT_LIST_HEAD(&ch->dev.bchannels);
78 INIT_WORK(&ch->workq, dchannel_bh);
79 return 0;
80}
81EXPORT_SYMBOL(mISDN_initdchannel);
82
83int
Karsten Keil034005a2012-05-15 23:51:06 +000084mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
85 unsigned short minlen)
Karsten Keil1b2b03f2008-07-27 01:54:58 +020086{
87 ch->Flags = 0;
Karsten Keil034005a2012-05-15 23:51:06 +000088 ch->minlen = minlen;
89 ch->next_minlen = minlen;
90 ch->init_minlen = minlen;
Karsten Keil1b2b03f2008-07-27 01:54:58 +020091 ch->maxlen = maxlen;
Karsten Keil034005a2012-05-15 23:51:06 +000092 ch->next_maxlen = maxlen;
93 ch->init_maxlen = maxlen;
Karsten Keil1b2b03f2008-07-27 01:54:58 +020094 ch->hw = NULL;
95 ch->rx_skb = NULL;
96 ch->tx_skb = NULL;
97 ch->tx_idx = 0;
98 skb_queue_head_init(&ch->rqueue);
99 ch->rcount = 0;
100 ch->next_skb = NULL;
101 INIT_WORK(&ch->workq, bchannel_bh);
102 return 0;
103}
104EXPORT_SYMBOL(mISDN_initbchannel);
105
106int
107mISDN_freedchannel(struct dchannel *ch)
108{
109 if (ch->tx_skb) {
110 dev_kfree_skb(ch->tx_skb);
111 ch->tx_skb = NULL;
112 }
113 if (ch->rx_skb) {
114 dev_kfree_skb(ch->rx_skb);
115 ch->rx_skb = NULL;
116 }
117 skb_queue_purge(&ch->squeue);
118 skb_queue_purge(&ch->rqueue);
Tejun Heo0d26aa72010-12-24 15:59:07 +0100119 flush_work_sync(&ch->workq);
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200120 return 0;
121}
122EXPORT_SYMBOL(mISDN_freedchannel);
123
Karsten Keilfb286f02009-07-09 10:02:29 +0200124void
125mISDN_clear_bchannel(struct bchannel *ch)
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200126{
127 if (ch->tx_skb) {
128 dev_kfree_skb(ch->tx_skb);
129 ch->tx_skb = NULL;
130 }
Karsten Keilfb286f02009-07-09 10:02:29 +0200131 ch->tx_idx = 0;
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200132 if (ch->rx_skb) {
133 dev_kfree_skb(ch->rx_skb);
134 ch->rx_skb = NULL;
135 }
136 if (ch->next_skb) {
137 dev_kfree_skb(ch->next_skb);
138 ch->next_skb = NULL;
139 }
Karsten Keilfb286f02009-07-09 10:02:29 +0200140 test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
141 test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
142 test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
Karsten Keil034005a2012-05-15 23:51:06 +0000143 ch->minlen = ch->init_minlen;
144 ch->next_minlen = ch->init_minlen;
145 ch->maxlen = ch->init_maxlen;
146 ch->next_maxlen = ch->init_maxlen;
Karsten Keilfb286f02009-07-09 10:02:29 +0200147}
148EXPORT_SYMBOL(mISDN_clear_bchannel);
149
150int
151mISDN_freebchannel(struct bchannel *ch)
152{
153 mISDN_clear_bchannel(ch);
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200154 skb_queue_purge(&ch->rqueue);
155 ch->rcount = 0;
Tejun Heo0d26aa72010-12-24 15:59:07 +0100156 flush_work_sync(&ch->workq);
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200157 return 0;
158}
159EXPORT_SYMBOL(mISDN_freebchannel);
160
Karsten Keil034005a2012-05-15 23:51:06 +0000161int
162mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
163{
164 int ret = 0;
165
166 switch (cq->op) {
167 case MISDN_CTRL_GETOP:
168 cq->op = MISDN_CTRL_RX_BUFFER;
169 break;
170 case MISDN_CTRL_RX_BUFFER:
171 if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
172 bch->next_maxlen = cq->p2;
173 if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
174 bch->next_minlen = cq->p1;
175 /* we return the old values */
176 cq->p1 = bch->minlen;
177 cq->p2 = bch->maxlen;
178 break;
179 default:
180 pr_info("mISDN unhandled control %x operation\n", cq->op);
181 ret = -EINVAL;
182 break;
183 }
184 return ret;
185}
186EXPORT_SYMBOL(mISDN_ctrl_bchannel);
187
/*
 * Decode the LAPD address field: SAPI from bits 7..2 of octet 0 and
 * TEI from bits 7..1 of octet 1; returns sapi | (tei << 8).
 */
static inline u_int
get_sapi_tei(u_char *p)
{
	u_int sapi = p[0] >> 2;
	u_int tei = p[1] >> 1;

	return sapi | (tei << 8);
}
197
198void
199recv_Dchannel(struct dchannel *dch)
200{
201 struct mISDNhead *hh;
202
203 if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
204 dev_kfree_skb(dch->rx_skb);
205 dch->rx_skb = NULL;
206 return;
207 }
208 hh = mISDN_HEAD_P(dch->rx_skb);
209 hh->prim = PH_DATA_IND;
210 hh->id = get_sapi_tei(dch->rx_skb->data);
211 skb_queue_tail(&dch->rqueue, dch->rx_skb);
212 dch->rx_skb = NULL;
213 schedule_event(dch, FLG_RECVQUEUE);
214}
215EXPORT_SYMBOL(recv_Dchannel);
216
217void
Martin Bachem1f28fa12008-09-03 15:17:45 +0200218recv_Echannel(struct dchannel *ech, struct dchannel *dch)
219{
220 struct mISDNhead *hh;
221
222 if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
223 dev_kfree_skb(ech->rx_skb);
224 ech->rx_skb = NULL;
225 return;
226 }
227 hh = mISDN_HEAD_P(ech->rx_skb);
228 hh->prim = PH_DATA_E_IND;
229 hh->id = get_sapi_tei(ech->rx_skb->data);
230 skb_queue_tail(&dch->rqueue, ech->rx_skb);
231 ech->rx_skb = NULL;
232 schedule_event(dch, FLG_RECVQUEUE);
233}
234EXPORT_SYMBOL(recv_Echannel);
235
236void
Karsten Keil034005a2012-05-15 23:51:06 +0000237recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200238{
239 struct mISDNhead *hh;
240
Karsten Keil7206e652012-05-15 23:51:05 +0000241 /* if allocation did fail upper functions still may call us */
242 if (unlikely(!bch->rx_skb))
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200243 return;
Karsten Keil7206e652012-05-15 23:51:05 +0000244 if (unlikely(!bch->rx_skb->len)) {
245 /* we have no data to send - this may happen after recovery
246 * from overflow or too small allocation.
247 * We need to free the buffer here */
248 dev_kfree_skb(bch->rx_skb);
249 bch->rx_skb = NULL;
250 } else {
Karsten Keil034005a2012-05-15 23:51:06 +0000251 if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
252 (bch->rx_skb->len < bch->minlen) && !force)
253 return;
Karsten Keil7206e652012-05-15 23:51:05 +0000254 hh = mISDN_HEAD_P(bch->rx_skb);
255 hh->prim = PH_DATA_IND;
256 hh->id = id;
257 if (bch->rcount >= 64) {
258 printk(KERN_WARNING
259 "B%d receive queue overflow - flushing!\n",
260 bch->nr);
261 skb_queue_purge(&bch->rqueue);
262 }
263 bch->rcount++;
264 skb_queue_tail(&bch->rqueue, bch->rx_skb);
265 bch->rx_skb = NULL;
266 schedule_event(bch, FLG_RECVQUEUE);
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200267 }
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200268}
269EXPORT_SYMBOL(recv_Bchannel);
270
271void
272recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
273{
274 skb_queue_tail(&dch->rqueue, skb);
275 schedule_event(dch, FLG_RECVQUEUE);
276}
277EXPORT_SYMBOL(recv_Dchannel_skb);
278
279void
280recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
281{
282 if (bch->rcount >= 64) {
Andreas Eversberg11618492008-08-06 19:13:07 +0200283 printk(KERN_WARNING "B-channel %p receive queue overflow, "
Joe Perches475be4d2012-02-19 19:52:38 -0800284 "flushing!\n", bch);
Andreas Eversberg11618492008-08-06 19:13:07 +0200285 skb_queue_purge(&bch->rqueue);
286 bch->rcount = 0;
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200287 }
288 bch->rcount++;
289 skb_queue_tail(&bch->rqueue, skb);
290 schedule_event(bch, FLG_RECVQUEUE);
291}
292EXPORT_SYMBOL(recv_Bchannel_skb);
293
294static void
295confirm_Dsend(struct dchannel *dch)
296{
297 struct sk_buff *skb;
298
299 skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
Joe Perches475be4d2012-02-19 19:52:38 -0800300 0, NULL, GFP_ATOMIC);
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200301 if (!skb) {
302 printk(KERN_ERR "%s: no skb id %x\n", __func__,
Joe Perches475be4d2012-02-19 19:52:38 -0800303 mISDN_HEAD_ID(dch->tx_skb));
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200304 return;
305 }
306 skb_queue_tail(&dch->rqueue, skb);
307 schedule_event(dch, FLG_RECVQUEUE);
308}
309
310int
311get_next_dframe(struct dchannel *dch)
312{
313 dch->tx_idx = 0;
314 dch->tx_skb = skb_dequeue(&dch->squeue);
315 if (dch->tx_skb) {
316 confirm_Dsend(dch);
317 return 1;
318 }
319 dch->tx_skb = NULL;
320 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
321 return 0;
322}
323EXPORT_SYMBOL(get_next_dframe);
324
Karsten Keil8bfddfb2012-05-15 23:51:02 +0000325static void
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200326confirm_Bsend(struct bchannel *bch)
327{
328 struct sk_buff *skb;
329
Andreas Eversberg11618492008-08-06 19:13:07 +0200330 if (bch->rcount >= 64) {
331 printk(KERN_WARNING "B-channel %p receive queue overflow, "
Joe Perches475be4d2012-02-19 19:52:38 -0800332 "flushing!\n", bch);
Andreas Eversberg11618492008-08-06 19:13:07 +0200333 skb_queue_purge(&bch->rqueue);
334 bch->rcount = 0;
335 }
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200336 skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
Joe Perches475be4d2012-02-19 19:52:38 -0800337 0, NULL, GFP_ATOMIC);
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200338 if (!skb) {
339 printk(KERN_ERR "%s: no skb id %x\n", __func__,
Joe Perches475be4d2012-02-19 19:52:38 -0800340 mISDN_HEAD_ID(bch->tx_skb));
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200341 return;
342 }
343 bch->rcount++;
344 skb_queue_tail(&bch->rqueue, skb);
345 schedule_event(bch, FLG_RECVQUEUE);
346}
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200347
348int
349get_next_bframe(struct bchannel *bch)
350{
351 bch->tx_idx = 0;
352 if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
353 bch->tx_skb = bch->next_skb;
354 if (bch->tx_skb) {
355 bch->next_skb = NULL;
356 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
Karsten Keil8bfddfb2012-05-15 23:51:02 +0000357 /* confirm imediately to allow next data */
358 confirm_Bsend(bch);
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200359 return 1;
360 } else {
361 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
362 printk(KERN_WARNING "B TX_NEXT without skb\n");
363 }
364 }
365 bch->tx_skb = NULL;
366 test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
367 return 0;
368}
369EXPORT_SYMBOL(get_next_bframe);
370
371void
372queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
373{
374 struct mISDNhead *hh;
375
376 if (!skb) {
377 _queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
378 } else {
379 if (ch->peer) {
380 hh = mISDN_HEAD_P(skb);
381 hh->prim = pr;
382 hh->id = id;
383 if (!ch->recv(ch->peer, skb))
384 return;
385 }
386 dev_kfree_skb(skb);
387 }
388}
389EXPORT_SYMBOL(queue_ch_frame);
390
391int
392dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
393{
394 /* check oversize */
395 if (skb->len <= 0) {
396 printk(KERN_WARNING "%s: skb too small\n", __func__);
397 return -EINVAL;
398 }
399 if (skb->len > ch->maxlen) {
400 printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
Joe Perches475be4d2012-02-19 19:52:38 -0800401 __func__, skb->len, ch->maxlen);
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200402 return -EINVAL;
403 }
404 /* HW lock must be obtained */
405 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
406 skb_queue_tail(&ch->squeue, skb);
407 return 0;
408 } else {
409 /* write to fifo */
410 ch->tx_skb = skb;
411 ch->tx_idx = 0;
412 return 1;
413 }
414}
415EXPORT_SYMBOL(dchannel_senddata);
416
417int
418bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
419{
420
421 /* check oversize */
422 if (skb->len <= 0) {
423 printk(KERN_WARNING "%s: skb too small\n", __func__);
424 return -EINVAL;
425 }
426 if (skb->len > ch->maxlen) {
427 printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
Joe Perches475be4d2012-02-19 19:52:38 -0800428 __func__, skb->len, ch->maxlen);
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200429 return -EINVAL;
430 }
431 /* HW lock must be obtained */
432 /* check for pending next_skb */
433 if (ch->next_skb) {
434 printk(KERN_WARNING
Joe Perches475be4d2012-02-19 19:52:38 -0800435 "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
436 __func__, skb->len, ch->next_skb->len);
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200437 return -EBUSY;
438 }
439 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
440 test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
441 ch->next_skb = skb;
442 return 0;
443 } else {
444 /* write to fifo */
445 ch->tx_skb = skb;
446 ch->tx_idx = 0;
Karsten Keil8bfddfb2012-05-15 23:51:02 +0000447 confirm_Bsend(ch);
Karsten Keil1b2b03f2008-07-27 01:54:58 +0200448 return 1;
449 }
450}
451EXPORT_SYMBOL(bchannel_senddata);
Karsten Keil7206e652012-05-15 23:51:05 +0000452
453/* The function allocates a new receive skb on demand with a size for the
454 * requirements of the current protocol. It returns the tailroom of the
455 * receive skb or an error.
456 */
457int
458bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
459{
460 int len;
461
462 if (bch->rx_skb) {
463 len = skb_tailroom(bch->rx_skb);
464 if (len < reqlen) {
465 pr_warning("B%d no space for %d (only %d) bytes\n",
466 bch->nr, reqlen, len);
467 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
468 /* send what we have now and try a new buffer */
Karsten Keil034005a2012-05-15 23:51:06 +0000469 recv_Bchannel(bch, 0, true);
Karsten Keil7206e652012-05-15 23:51:05 +0000470 } else {
471 /* on HDLC we have to drop too big frames */
472 return -EMSGSIZE;
473 }
474 } else {
475 return len;
476 }
477 }
Karsten Keil034005a2012-05-15 23:51:06 +0000478 /* update current min/max length first */
479 if (unlikely(bch->maxlen != bch->next_maxlen))
480 bch->maxlen = bch->next_maxlen;
481 if (unlikely(bch->minlen != bch->next_minlen))
482 bch->minlen = bch->next_minlen;
Karsten Keil7206e652012-05-15 23:51:05 +0000483 if (unlikely(reqlen > bch->maxlen))
484 return -EMSGSIZE;
Karsten Keil034005a2012-05-15 23:51:06 +0000485 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
486 if (reqlen >= bch->minlen) {
487 len = reqlen;
488 } else {
489 len = 2 * bch->minlen;
490 if (len > bch->maxlen)
491 len = bch->maxlen;
492 }
493 } else {
494 /* with HDLC we do not know the length yet */
Karsten Keil7206e652012-05-15 23:51:05 +0000495 len = bch->maxlen;
Karsten Keil034005a2012-05-15 23:51:06 +0000496 }
Karsten Keil7206e652012-05-15 23:51:05 +0000497 bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
498 if (!bch->rx_skb) {
499 pr_warning("B%d receive no memory for %d bytes\n",
500 bch->nr, len);
501 len = -ENOMEM;
502 }
503 return len;
504}
505EXPORT_SYMBOL(bchannel_get_rxbuf);