blob: 799deda2dcc1e03d723fbd7543c38c7208964240 [file] [log] [blame]
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
13#include <linux/slab.h>
14#include <linux/wait.h>
15#include <linux/sched.h>
16#include <linux/jiffies.h>
17#include <linux/uaccess.h>
18#include <linux/atomic.h>
19#include <linux/bitops.h>
20#include <mach/qdsp6v2/rtac.h>
21#include <mach/qdsp6v2/audio_acdb.h>
22#include <sound/apr_audio.h>
23#include <sound/q6afe.h>
24#include <mach/qdsp6v2/audio_dev_ctl.h>
25
26#define TIMEOUT_MS 1000
27#define AUDIO_RX 0x0
28#define AUDIO_TX 0x1
29#define ASM_MAX_SESSION 0x8 /* To do: define in a header */
30#define RESET_COPP_ID 99
31#define INVALID_COPP_ID 0xFF
32
/* Per-driver state for the ADM (Audio Device Matrix) service.
 * One entry per AFE port; all commands share a single waitqueue and
 * use copp_stat[] as the per-port "ack received" flag.
 */
struct adm_ctl {
	void *apr;				/* APR handle to the ADSP ADM service */
	atomic_t copp_id[AFE_MAX_PORTS];	/* COPP id per port (RESET_COPP_ID if none) */
	atomic_t copp_cnt[AFE_MAX_PORTS];	/* open refcount per port */
	atomic_t copp_stat[AFE_MAX_PORTS];	/* 1 when the last cmd for this port was acked */
	unsigned long sessions[AFE_MAX_PORTS];	/* bitmap of ASM sessions routed to this port */
	wait_queue_head_t wait;			/* shared waitqueue for all command acks */
};

static struct adm_ctl this_adm;
43
44static int32_t adm_callback(struct apr_client_data *data, void *priv)
45{
46 uint32_t *payload;
47 int i, index;
48 payload = data->payload;
49
50 if (data->opcode == RESET_EVENTS) {
51 pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n",
52 data->reset_event, data->reset_proc,
53 this_adm.apr);
54 if (this_adm.apr) {
55 apr_reset(this_adm.apr);
56 for (i = 0; i < AFE_MAX_PORTS; i++) {
57 atomic_set(&this_adm.copp_id[i],
58 RESET_COPP_ID);
59 atomic_set(&this_adm.copp_cnt[i], 0);
60 atomic_set(&this_adm.copp_stat[i], 0);
61 }
62 this_adm.apr = NULL;
63 }
64 return 0;
65 }
66
67 pr_debug("%s: code = 0x%x %x %x size = %d\n", __func__,
68 data->opcode, payload[0], payload[1],
69 data->payload_size);
70
71 if (data->payload_size) {
72 index = afe_get_port_index(data->token);
73 pr_debug("%s: Port ID %d, index %d\n", __func__,
74 data->token, index);
75
76 if (data->opcode == APR_BASIC_RSP_RESULT) {
77 pr_debug("APR_BASIC_RSP_RESULT\n");
78 switch (payload[0]) {
79 case ADM_CMD_SET_PARAMS:
80#ifdef CONFIG_MSM8X60_RTAC
81 if (rtac_make_adm_callback(payload,
82 data->payload_size))
83 break;
84#endif
85 case ADM_CMD_COPP_CLOSE:
86 case ADM_CMD_MEMORY_MAP:
87 case ADM_CMD_MEMORY_UNMAP:
88 case ADM_CMD_MEMORY_MAP_REGIONS:
89 case ADM_CMD_MEMORY_UNMAP_REGIONS:
90 case ADM_CMD_MATRIX_MAP_ROUTINGS:
91 pr_debug("ADM_CMD_MATRIX_MAP_ROUTINGS\n");
92 atomic_set(&this_adm.copp_stat[index], 1);
93 wake_up(&this_adm.wait);
94 break;
95 default:
96 pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
97 payload[0]);
98 break;
99 }
100 return 0;
101 }
102
103 switch (data->opcode) {
104 case ADM_CMDRSP_COPP_OPEN: {
105 struct adm_copp_open_respond *open = data->payload;
106 if (open->copp_id == INVALID_COPP_ID) {
107 pr_err("%s: invalid coppid rxed %d\n",
108 __func__, open->copp_id);
109 atomic_set(&this_adm.copp_stat[index], 1);
110 wake_up(&this_adm.wait);
111 break;
112 }
113 atomic_set(&this_adm.copp_id[index], open->copp_id);
114 atomic_set(&this_adm.copp_stat[index], 1);
115 pr_debug("%s: coppid rxed=%d\n", __func__,
116 open->copp_id);
117 wake_up(&this_adm.wait);
118 }
119 break;
120#ifdef CONFIG_MSM8X60_RTAC
121 case ADM_CMDRSP_GET_PARAMS:
Swaminathan Sathappan88163a72011-08-01 16:01:14 -0700122 pr_debug("%s: ADM_CMDRSP_GET_PARAMS\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700123 rtac_make_adm_callback(payload,
124 data->payload_size);
125 break;
126#endif
127 default:
128 pr_err("%s: Unknown cmd:0x%x\n", __func__,
129 data->opcode);
130 break;
131 }
132 }
133 return 0;
134}
135
136void send_cal(int port_id, struct acdb_cal_block *aud_cal)
137{
138 s32 result;
139 struct adm_set_params_command adm_params;
140 int index = afe_get_port_index(port_id);
141
142 pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index);
143
144 if (!aud_cal || aud_cal->cal_size == 0) {
145 pr_err("%s: No calibration data to send!\n", __func__);
146 goto done;
147 }
148
149 adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
150 APR_HDR_LEN(20), APR_PKT_VER);
151 adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
152 sizeof(adm_params));
153 adm_params.hdr.src_svc = APR_SVC_ADM;
154 adm_params.hdr.src_domain = APR_DOMAIN_APPS;
155 adm_params.hdr.src_port = port_id;
156 adm_params.hdr.dest_svc = APR_SVC_ADM;
157 adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
158 adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
159 adm_params.hdr.token = port_id;
160 adm_params.hdr.opcode = ADM_CMD_SET_PARAMS;
161 adm_params.payload = aud_cal->cal_paddr;
162 adm_params.payload_size = aud_cal->cal_size;
163
164 atomic_set(&this_adm.copp_stat[index], 0);
165 pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
166 __func__, adm_params.payload, adm_params.payload_size);
167 result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
168 if (result < 0) {
169 pr_err("%s: Set params failed port = %d payload = 0x%x\n",
170 __func__, port_id, aud_cal->cal_paddr);
171 goto done;
172 }
173 /* Wait for the callback */
174 result = wait_event_timeout(this_adm.wait,
175 atomic_read(&this_adm.copp_stat[index]),
176 msecs_to_jiffies(TIMEOUT_MS));
177 if (!result)
178 pr_err("%s: Set params timed out port = %d, payload = 0x%x\n",
179 __func__, port_id, aud_cal->cal_paddr);
180done:
181 return;
182}
183
184void send_adm_cal(int port_id, int path)
185{
186 s32 acdb_path;
187 struct acdb_cal_block aud_cal;
188
189 pr_debug("%s\n", __func__);
190
191 /* Maps audio_dev_ctrl path definition to ACDB definition */
192 acdb_path = path - 1;
193 if ((acdb_path >= NUM_AUDPROC_BUFFERS) ||
194 (acdb_path < 0)) {
195 pr_err("%s: Path is not RX or TX, path = %d\n",
196 __func__, path);
197 goto done;
198 }
199
200 pr_debug("%s: Sending audproc cal\n", __func__);
201 get_audproc_cal(acdb_path, &aud_cal);
202 send_cal(port_id, &aud_cal);
203
204 pr_debug("%s: Sending audvol cal\n", __func__);
205 get_audvol_cal(acdb_path, &aud_cal);
206 send_cal(port_id, &aud_cal);
207done:
208 return;
209}
210
211/* This function issues routing command of ASM stream
212 * to ADM mixer associated with a particular AFE port
213 */
214int adm_cmd_map(int port_id, int session_id)
215{
216 struct adm_routings_command route;
217 int ret = 0;
218 int index = afe_get_port_index(port_id);
219
220 pr_debug("%s: port %x session %x\n", __func__, port_id, session_id);
221
222 if (!atomic_read(&this_adm.copp_cnt[index]))
223 return 0;
224
225 route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
226 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
227 route.hdr.pkt_size = sizeof(route);
228 route.hdr.src_svc = 0;
229 route.hdr.src_domain = APR_DOMAIN_APPS;
230 route.hdr.src_port = port_id;
231 route.hdr.dest_svc = APR_SVC_ADM;
232 route.hdr.dest_domain = APR_DOMAIN_ADSP;
233 route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
234 route.hdr.token = port_id;
235 route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
236 route.num_sessions = 1;
237 route.session[0].id = session_id;
238 route.session[0].num_copps = 1;
239 route.session[0].copp_id[0] =
240 atomic_read(&this_adm.copp_id[index]);
241
242 /* This rule can change */
243 if ((port_id & 0x1))
244 route.path = AUDIO_TX;
245 else
246 route.path = AUDIO_RX;
247
248 atomic_set(&this_adm.copp_stat[index], 0);
249
250 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
251 if (ret < 0) {
252 pr_err("%s: ADM routing for port %d failed\n",
253 __func__, port_id);
254 ret = -EINVAL;
255 goto fail_cmd;
256 }
257 ret = wait_event_timeout(this_adm.wait,
258 atomic_read(&this_adm.copp_stat[index]),
259 msecs_to_jiffies(TIMEOUT_MS));
260 if (!ret) {
261 pr_err("%s: ADM cmd Route failed for port %d\n",
262 __func__, port_id);
263 ret = -EINVAL;
264 }
265
Ben Rombergera8733902011-08-11 16:23:54 -0700266 /* have to convert path to dev ctrl standard */
267 send_adm_cal(port_id, (route.path + 1));
Ben Romberger974a40d2011-07-18 15:08:21 -0700268#ifdef CONFIG_MSM8X60_RTAC
269 rtac_add_adm_device(port_id, atomic_read(&this_adm.copp_id[index]),
270 (route.path + 1), session_id);
271#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272fail_cmd:
273 return ret;
274}
275
276/* This function establish routing of ASM stream to a particular
277 * ADM mixer that is routed to a particular hardware port
278 * session id must be in range of 0 ~ 31.
279 */
280int adm_route_session(int port_id, uint session_id, int set)
281{
282 int rc = 0;
283 int index;
284
285 pr_debug("%s: port %x session %x set %x\n", __func__,
286 port_id, session_id, set);
287
288 index = afe_get_port_index(port_id);
289
290 if (index >= AFE_MAX_PORTS) {
291 pr_err("%s port idi[%d] out of limit[%d]\n", __func__,
292 port_id, AFE_MAX_PORTS);
293 return -ENODEV;
294 }
295
296 if (set) {
297 set_bit(session_id, &this_adm.sessions[index]);
298 rc = adm_cmd_map(port_id, session_id); /* not thread safe */
299 } else /* Not sure how to deroute yet */
300 clear_bit(session_id, &this_adm.sessions[index]);
301
302 return rc;
303}
304
305/* This function instantiates a mixer in QDSP6 audio path for
306 * given audio hardware port. Topology should be made part
307 * of audio calibration
308 */
309int adm_open_mixer(int port_id, int path, int rate,
310 int channel_mode, int topology) {
311 struct adm_copp_open_command open;
312 int ret = 0;
313 u32 i;
314 int index;
315
316 pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
317 port_id, path, rate, channel_mode);
318
319 if (afe_validate_port(port_id) < 0) {
320 pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
321 return -ENODEV;
322 }
323
324 index = afe_get_port_index(port_id);
325 if (this_adm.apr == NULL) {
326 this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
327 0xFFFFFFFF, &this_adm);
328 if (this_adm.apr == NULL) {
329 pr_err("%s: Unable to register ADM\n", __func__);
330 ret = -ENODEV;
331 return ret;
332 }
Ben Romberger974a40d2011-07-18 15:08:21 -0700333#ifdef CONFIG_MSM8X60_RTAC
334 rtac_set_adm_handle(this_adm.apr);
335#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700336 }
337
338 if (atomic_read(&this_adm.copp_cnt[index]) == 0) {
339
340 open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
341 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
342 open.hdr.pkt_size = sizeof(open);
343 open.hdr.src_svc = APR_SVC_ADM;
344 open.hdr.src_domain = APR_DOMAIN_APPS;
345 open.hdr.src_port = port_id;
346 open.hdr.dest_svc = APR_SVC_ADM;
347 open.hdr.dest_domain = APR_DOMAIN_ADSP;
348 open.hdr.dest_port = port_id;
349 open.hdr.token = port_id;
350 open.hdr.opcode = ADM_CMD_COPP_OPEN;
351
352 open.mode = path;
353 open.endpoint_id1 = port_id;
354 open.endpoint_id2 = 0xFFFF;
355
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700356 /* convert path to acdb path */
Ben Romberger974a40d2011-07-18 15:08:21 -0700357 if (path == ADM_PATH_PLAYBACK)
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700358 open.topology_id = get_adm_rx_topology();
Jay Wang4fa2ee42011-07-18 00:21:22 -0700359 else {
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700360 open.topology_id = get_adm_tx_topology();
Jay Wang4fa2ee42011-07-18 00:21:22 -0700361 if ((open.topology_id ==
362 VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
363 (open.topology_id ==
364 VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
365 rate = 16000;
366 }
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700367
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700368 if (open.topology_id == 0)
369 open.topology_id = topology;
370
371 open.channel_config = channel_mode & 0x00FF;
372 open.rate = rate;
373
374 pr_debug("%s: channel_config=%d port_id=%d rate=%d\
375 topology_id=0x%X\n", __func__, open.channel_config,\
376 open.endpoint_id1, open.rate,\
377 open.topology_id);
378
379 atomic_set(&this_adm.copp_stat[index], 0);
380
381 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
382 if (ret < 0) {
383 pr_err("%s:ADM enable for port %d failed\n",
384 __func__, port_id);
385 ret = -EINVAL;
386 goto fail_cmd;
387 }
388 /* Wait for the callback with copp id */
389 ret = wait_event_timeout(this_adm.wait,
390 atomic_read(&this_adm.copp_stat[index]),
391 msecs_to_jiffies(TIMEOUT_MS));
392 if (!ret) {
393 pr_err("%s ADM open failed for port %d\n", __func__,
394 port_id);
395 ret = -EINVAL;
396 goto fail_cmd;
397 }
398 }
399 atomic_inc(&this_adm.copp_cnt[index]);
400
401 /* Set up routing for cached session */
402 for (i = find_first_bit(&this_adm.sessions[index], ASM_MAX_SESSION);
403 i < ASM_MAX_SESSION; i = find_next_bit(&this_adm.sessions[index],
404 ASM_MAX_SESSION, i + 1))
405 adm_cmd_map(port_id, i); /* Not thread safe */
406
407fail_cmd:
408 return ret;
409}
410
/* Open (or reference-count) a COPP on the given AFE port.
 * Topology is taken from ACDB calibration when available, otherwise
 * the caller-supplied value.  Returns 0 on success or a negative
 * errno.  Unlike adm_open_mixer(), this does not re-map cached ASM
 * sessions.
 */
int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
{
	struct adm_copp_open_command open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
				port_id, path, rate, channel_mode);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	/* Lazily register with APR on first use. */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
#ifdef CONFIG_MSM8X60_RTAC
		rtac_set_adm_handle(this_adm.apr);
#endif
	}


	/* Create a COPP if port id are not enabled */
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_COPP_OPEN;

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;

		/* convert path to acdb path */
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			/* ECNS / Fluence TX topologies only run at 16 kHz */
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		/* Fall back to the caller's topology when ACDB has none. */
		if (open.topology_id == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d\
			topology_id=0x%X\n", __func__, open.channel_config,\
			open.endpoint_id1, open.rate,\
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}
511
512int adm_matrix_map(int session_id, int path, int num_copps,
513 unsigned int *port_id, int copp_id)
514{
515 struct adm_routings_command route;
516 int ret = 0, i = 0;
517 /* Assumes port_ids have already been validated during adm_open */
518 int index = afe_get_port_index(copp_id);
519
520 pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
521 __func__, session_id, path, num_copps, port_id[0]);
522
523 route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
524 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
525 route.hdr.pkt_size = sizeof(route);
526 route.hdr.src_svc = 0;
527 route.hdr.src_domain = APR_DOMAIN_APPS;
528 route.hdr.src_port = copp_id;
529 route.hdr.dest_svc = APR_SVC_ADM;
530 route.hdr.dest_domain = APR_DOMAIN_ADSP;
531 route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
532 route.hdr.token = copp_id;
533 route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
534 route.num_sessions = 1;
535 route.session[0].id = session_id;
536 route.session[0].num_copps = num_copps;
537
538 for (i = 0; i < num_copps; i++) {
539 int tmp;
540 tmp = afe_get_port_index(port_id[i]);
541
542 pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i,
543 port_id[i], tmp);
544
545 route.session[0].copp_id[i] =
546 atomic_read(&this_adm.copp_id[tmp]);
547 }
548 if (num_copps % 2)
549 route.session[0].copp_id[i] = 0;
550
551 switch (path) {
552 case 0x1:
553 route.path = AUDIO_RX;
554 break;
555 case 0x2:
556 case 0x3:
557 route.path = AUDIO_TX;
558 break;
559 default:
560 pr_err("%s: Wrong path set[%d]\n", __func__, path);
561 break;
562 }
563 atomic_set(&this_adm.copp_stat[index], 0);
564
565 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
566 if (ret < 0) {
567 pr_err("%s: ADM routing for port %d failed\n",
568 __func__, port_id[0]);
569 ret = -EINVAL;
570 goto fail_cmd;
571 }
572 ret = wait_event_timeout(this_adm.wait,
573 atomic_read(&this_adm.copp_stat[index]),
574 msecs_to_jiffies(TIMEOUT_MS));
575 if (!ret) {
576 pr_err("%s: ADM cmd Route failed for port %d\n",
577 __func__, port_id[0]);
578 ret = -EINVAL;
579 goto fail_cmd;
580 }
581
582 for (i = 0; i < num_copps; i++)
583 send_adm_cal(port_id[i], path);
584
Ben Romberger974a40d2011-07-18 15:08:21 -0700585#ifdef CONFIG_MSM8X60_RTAC
586 for (i = 0; i < num_copps; i++)
587 rtac_add_adm_device(port_id[i], atomic_read(&this_adm.copp_id
588 [afe_get_port_index(port_id[i])]),
589 path, session_id);
590#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700591 return 0;
592
593fail_cmd:
594
595 return ret;
596}
597
598int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
599 uint32_t *bufsz, uint32_t bufcnt)
600{
601 struct adm_cmd_memory_map_regions *mmap_regions = NULL;
602 struct adm_memory_map_regions *mregions = NULL;
603 void *mmap_region_cmd = NULL;
604 void *payload = NULL;
605 int ret = 0;
606 int i = 0;
607 int cmd_size = 0;
608
609 pr_info("%s\n", __func__);
610 if (this_adm.apr == NULL) {
611 this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
612 0xFFFFFFFF, &this_adm);
613 if (this_adm.apr == NULL) {
614 pr_err("%s: Unable to register ADM\n", __func__);
615 ret = -ENODEV;
616 return ret;
617 }
618#ifdef CONFIG_MSM8X60_RTAC
619 rtac_set_adm_handle(this_adm.apr);
620#endif
621 }
622
623 cmd_size = sizeof(struct adm_cmd_memory_map_regions)
624 + sizeof(struct adm_memory_map_regions) * bufcnt;
625
626 mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
627 if (!mmap_region_cmd) {
628 pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
629 return -ENOMEM;
630 }
631 mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd;
632 mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
633 APR_HDR_LEN(APR_HDR_SIZE),
634 APR_PKT_VER);
635 mmap_regions->hdr.pkt_size = cmd_size;
636 mmap_regions->hdr.src_port = 0;
637 mmap_regions->hdr.dest_port = 0;
638 mmap_regions->hdr.token = 0;
639 mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS;
640 mmap_regions->mempool_id = mempool_id & 0x00ff;
641 mmap_regions->nregions = bufcnt & 0x00ff;
642 pr_debug("%s: map_regions->nregions = %d\n", __func__,
643 mmap_regions->nregions);
644 payload = ((u8 *) mmap_region_cmd +
645 sizeof(struct adm_cmd_memory_map_regions));
646 mregions = (struct adm_memory_map_regions *)payload;
647
648 for (i = 0; i < bufcnt; i++) {
649 mregions->phys = buf_add[i];
650 mregions->buf_size = bufsz[i];
651 ++mregions;
652 }
653
654 atomic_set(&this_adm.copp_stat[0], 0);
655 ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
656 if (ret < 0) {
657 pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
658 mmap_regions->hdr.opcode, ret);
659 ret = -EINVAL;
660 goto fail_cmd;
661 }
662
663 ret = wait_event_timeout(this_adm.wait,
664 atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
665 if (!ret) {
666 pr_err("%s: timeout. waited for memory_map\n", __func__);
667 ret = -EINVAL;
668 goto fail_cmd;
669 }
670fail_cmd:
671 kfree(mmap_region_cmd);
672 return ret;
673}
674
675int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz,
676 uint32_t bufcnt)
677{
678 struct adm_cmd_memory_unmap_regions *unmap_regions = NULL;
679 struct adm_memory_unmap_regions *mregions = NULL;
680 void *unmap_region_cmd = NULL;
681 void *payload = NULL;
682 int ret = 0;
683 int i = 0;
684 int cmd_size = 0;
685
686 pr_info("%s\n", __func__);
687
688 if (this_adm.apr == NULL) {
689 pr_err("%s APR handle NULL\n", __func__);
690 return -EINVAL;
691 }
692
693 cmd_size = sizeof(struct adm_cmd_memory_unmap_regions)
694 + sizeof(struct adm_memory_unmap_regions) * bufcnt;
695
696 unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
697 if (!unmap_region_cmd) {
698 pr_err("%s: allocate unmap_region_cmd failed\n", __func__);
699 return -ENOMEM;
700 }
701 unmap_regions = (struct adm_cmd_memory_unmap_regions *)
702 unmap_region_cmd;
703 unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
704 APR_HDR_LEN(APR_HDR_SIZE),
705 APR_PKT_VER);
706 unmap_regions->hdr.pkt_size = cmd_size;
707 unmap_regions->hdr.src_port = 0;
708 unmap_regions->hdr.dest_port = 0;
709 unmap_regions->hdr.token = 0;
710 unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS;
711 unmap_regions->nregions = bufcnt & 0x00ff;
712 unmap_regions->reserved = 0;
713 pr_debug("%s: unmap_regions->nregions = %d\n", __func__,
714 unmap_regions->nregions);
715 payload = ((u8 *) unmap_region_cmd +
716 sizeof(struct adm_cmd_memory_unmap_regions));
717 mregions = (struct adm_memory_unmap_regions *)payload;
718
719 for (i = 0; i < bufcnt; i++) {
720 mregions->phys = buf_add[i];
721 ++mregions;
722 }
723 atomic_set(&this_adm.copp_stat[0], 0);
724 ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd);
725 if (ret < 0) {
726 pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
727 unmap_regions->hdr.opcode, ret);
728 ret = -EINVAL;
729 goto fail_cmd;
730 }
731
732 ret = wait_event_timeout(this_adm.wait,
733 atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
734 if (!ret) {
735 pr_err("%s: timeout. waited for memory_unmap\n", __func__);
736 ret = -EINVAL;
737 goto fail_cmd;
738 }
739fail_cmd:
740 kfree(unmap_region_cmd);
741 return ret;
742}
743
744#ifdef CONFIG_MSM8X60_RTAC
Ben Romberger974a40d2011-07-18 15:08:21 -0700745int adm_get_copp_id(int port_index)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700746{
747 pr_debug("%s\n", __func__);
748
Ben Romberger974a40d2011-07-18 15:08:21 -0700749 if (port_index < 0) {
750 pr_err("%s: invalid port_id = %d\n", __func__, port_index);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700751 return -EINVAL;
752 }
753
Ben Romberger974a40d2011-07-18 15:08:21 -0700754 return atomic_read(&this_adm.copp_id[port_index]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700755}
756#endif
757
758int adm_close(int port_id)
759{
760 struct apr_hdr close;
761
762 int ret = 0;
763 int index = afe_get_port_index(port_id);
Bharath Ramachandramurthy51a86212011-07-29 12:43:43 -0700764 if (afe_validate_port(port_id) < 0)
765 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700766
767 pr_info("%s port_id=%d index %d\n", __func__, port_id, index);
768
769 if (!(atomic_read(&this_adm.copp_cnt[index]))) {
770 pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id);
771
772 goto fail_cmd;
773 }
774 atomic_dec(&this_adm.copp_cnt[index]);
775 if (!(atomic_read(&this_adm.copp_cnt[index]))) {
776
777 close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
778 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
779 close.pkt_size = sizeof(close);
780 close.src_svc = APR_SVC_ADM;
781 close.src_domain = APR_DOMAIN_APPS;
782 close.src_port = port_id;
783 close.dest_svc = APR_SVC_ADM;
784 close.dest_domain = APR_DOMAIN_ADSP;
785 close.dest_port = atomic_read(&this_adm.copp_id[index]);
786 close.token = port_id;
787 close.opcode = ADM_CMD_COPP_CLOSE;
788
789 atomic_set(&this_adm.copp_id[index], RESET_COPP_ID);
790 atomic_set(&this_adm.copp_stat[index], 0);
791
792
793 pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
794 __func__,
795 atomic_read(&this_adm.copp_id[index]),
796 port_id, index,
797 atomic_read(&this_adm.copp_cnt[index]));
798
799 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
800 if (ret < 0) {
801 pr_err("%s ADM close failed\n", __func__);
802 ret = -EINVAL;
803 goto fail_cmd;
804 }
805
806 ret = wait_event_timeout(this_adm.wait,
807 atomic_read(&this_adm.copp_stat[index]),
808 msecs_to_jiffies(TIMEOUT_MS));
809 if (!ret) {
810 pr_err("%s: ADM cmd Route failed for port %d\n",
811 __func__, port_id);
812 ret = -EINVAL;
813 goto fail_cmd;
814 }
815
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700816 }
817
818fail_cmd:
819 return ret;
820}
821
822static int __init adm_init(void)
823{
824 int i = 0;
825 init_waitqueue_head(&this_adm.wait);
826 this_adm.apr = NULL;
827
828 for (i = 0; i < AFE_MAX_PORTS; i++) {
829 atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
830 atomic_set(&this_adm.copp_cnt[i], 0);
831 atomic_set(&this_adm.copp_stat[i], 0);
832 }
833 return 0;
834}
835
836device_initcall(adm_init);