blob: 5c1dfd8fa2c65320bd1f24ba2d69d534cde4f962 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/wait.h>
15#include <linux/sched.h>
16#include <linux/jiffies.h>
17#include <linux/uaccess.h>
18#include <linux/atomic.h>
19#include <linux/bitops.h>
Ben Rombergerfce8f512011-07-18 16:46:09 -070020
21#include <mach/qdsp6v2/audio_dev_ctl.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022#include <mach/qdsp6v2/audio_acdb.h>
Ben Rombergerfce8f512011-07-18 16:46:09 -070023#include <mach/qdsp6v2/rtac.h>
24
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#include <sound/apr_audio.h>
26#include <sound/q6afe.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027
28#define TIMEOUT_MS 1000
29#define AUDIO_RX 0x0
30#define AUDIO_TX 0x1
31#define ASM_MAX_SESSION 0x8 /* To do: define in a header */
32#define RESET_COPP_ID 99
33#define INVALID_COPP_ID 0xFF
34
/* Global ADM driver state; per-port arrays are indexed by the value of
 * afe_get_port_index() for the AFE port. */
struct adm_ctl {
	void *apr;		/* APR handle from apr_register(); NULL until first open */
	atomic_t copp_id[AFE_MAX_PORTS];	/* COPP id per port; RESET_COPP_ID when closed */
	atomic_t copp_cnt[AFE_MAX_PORTS];	/* open reference count per port */
	atomic_t copp_stat[AFE_MAX_PORTS];	/* command-done flag, set to 1 by adm_callback() */
	unsigned long sessions[AFE_MAX_PORTS];	/* bitmap of ASM sessions routed to each port */
	wait_queue_head_t wait;	/* single waitqueue shared by all command completions */
};

static struct adm_ctl this_adm;
45
46static int32_t adm_callback(struct apr_client_data *data, void *priv)
47{
48 uint32_t *payload;
49 int i, index;
50 payload = data->payload;
51
52 if (data->opcode == RESET_EVENTS) {
53 pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n",
54 data->reset_event, data->reset_proc,
55 this_adm.apr);
56 if (this_adm.apr) {
57 apr_reset(this_adm.apr);
58 for (i = 0; i < AFE_MAX_PORTS; i++) {
59 atomic_set(&this_adm.copp_id[i],
60 RESET_COPP_ID);
61 atomic_set(&this_adm.copp_cnt[i], 0);
62 atomic_set(&this_adm.copp_stat[i], 0);
63 }
64 this_adm.apr = NULL;
65 }
66 return 0;
67 }
68
69 pr_debug("%s: code = 0x%x %x %x size = %d\n", __func__,
70 data->opcode, payload[0], payload[1],
71 data->payload_size);
72
73 if (data->payload_size) {
74 index = afe_get_port_index(data->token);
75 pr_debug("%s: Port ID %d, index %d\n", __func__,
76 data->token, index);
77
78 if (data->opcode == APR_BASIC_RSP_RESULT) {
79 pr_debug("APR_BASIC_RSP_RESULT\n");
80 switch (payload[0]) {
81 case ADM_CMD_SET_PARAMS:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070082 if (rtac_make_adm_callback(payload,
83 data->payload_size))
84 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085 case ADM_CMD_COPP_CLOSE:
86 case ADM_CMD_MEMORY_MAP:
87 case ADM_CMD_MEMORY_UNMAP:
88 case ADM_CMD_MEMORY_MAP_REGIONS:
89 case ADM_CMD_MEMORY_UNMAP_REGIONS:
90 case ADM_CMD_MATRIX_MAP_ROUTINGS:
91 pr_debug("ADM_CMD_MATRIX_MAP_ROUTINGS\n");
92 atomic_set(&this_adm.copp_stat[index], 1);
93 wake_up(&this_adm.wait);
94 break;
95 default:
96 pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
97 payload[0]);
98 break;
99 }
100 return 0;
101 }
102
103 switch (data->opcode) {
104 case ADM_CMDRSP_COPP_OPEN: {
105 struct adm_copp_open_respond *open = data->payload;
106 if (open->copp_id == INVALID_COPP_ID) {
107 pr_err("%s: invalid coppid rxed %d\n",
108 __func__, open->copp_id);
109 atomic_set(&this_adm.copp_stat[index], 1);
110 wake_up(&this_adm.wait);
111 break;
112 }
113 atomic_set(&this_adm.copp_id[index], open->copp_id);
114 atomic_set(&this_adm.copp_stat[index], 1);
115 pr_debug("%s: coppid rxed=%d\n", __func__,
116 open->copp_id);
117 wake_up(&this_adm.wait);
118 }
119 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700120 case ADM_CMDRSP_GET_PARAMS:
Swaminathan Sathappan88163a72011-08-01 16:01:14 -0700121 pr_debug("%s: ADM_CMDRSP_GET_PARAMS\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700122 rtac_make_adm_callback(payload,
123 data->payload_size);
124 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700125 default:
126 pr_err("%s: Unknown cmd:0x%x\n", __func__,
127 data->opcode);
128 break;
129 }
130 }
131 return 0;
132}
133
134void send_cal(int port_id, struct acdb_cal_block *aud_cal)
135{
136 s32 result;
137 struct adm_set_params_command adm_params;
138 int index = afe_get_port_index(port_id);
139
140 pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index);
141
142 if (!aud_cal || aud_cal->cal_size == 0) {
143 pr_err("%s: No calibration data to send!\n", __func__);
144 goto done;
145 }
146
147 adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
148 APR_HDR_LEN(20), APR_PKT_VER);
149 adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
150 sizeof(adm_params));
151 adm_params.hdr.src_svc = APR_SVC_ADM;
152 adm_params.hdr.src_domain = APR_DOMAIN_APPS;
153 adm_params.hdr.src_port = port_id;
154 adm_params.hdr.dest_svc = APR_SVC_ADM;
155 adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
156 adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
157 adm_params.hdr.token = port_id;
158 adm_params.hdr.opcode = ADM_CMD_SET_PARAMS;
159 adm_params.payload = aud_cal->cal_paddr;
160 adm_params.payload_size = aud_cal->cal_size;
161
162 atomic_set(&this_adm.copp_stat[index], 0);
163 pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
164 __func__, adm_params.payload, adm_params.payload_size);
165 result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
166 if (result < 0) {
167 pr_err("%s: Set params failed port = %d payload = 0x%x\n",
168 __func__, port_id, aud_cal->cal_paddr);
169 goto done;
170 }
171 /* Wait for the callback */
172 result = wait_event_timeout(this_adm.wait,
173 atomic_read(&this_adm.copp_stat[index]),
174 msecs_to_jiffies(TIMEOUT_MS));
175 if (!result)
176 pr_err("%s: Set params timed out port = %d, payload = 0x%x\n",
177 __func__, port_id, aud_cal->cal_paddr);
178done:
179 return;
180}
181
182void send_adm_cal(int port_id, int path)
183{
184 s32 acdb_path;
185 struct acdb_cal_block aud_cal;
186
187 pr_debug("%s\n", __func__);
188
189 /* Maps audio_dev_ctrl path definition to ACDB definition */
190 acdb_path = path - 1;
191 if ((acdb_path >= NUM_AUDPROC_BUFFERS) ||
192 (acdb_path < 0)) {
193 pr_err("%s: Path is not RX or TX, path = %d\n",
194 __func__, path);
195 goto done;
196 }
197
198 pr_debug("%s: Sending audproc cal\n", __func__);
199 get_audproc_cal(acdb_path, &aud_cal);
200 send_cal(port_id, &aud_cal);
201
202 pr_debug("%s: Sending audvol cal\n", __func__);
203 get_audvol_cal(acdb_path, &aud_cal);
204 send_cal(port_id, &aud_cal);
205done:
206 return;
207}
208
209/* This function issues routing command of ASM stream
210 * to ADM mixer associated with a particular AFE port
211 */
212int adm_cmd_map(int port_id, int session_id)
213{
214 struct adm_routings_command route;
215 int ret = 0;
216 int index = afe_get_port_index(port_id);
217
218 pr_debug("%s: port %x session %x\n", __func__, port_id, session_id);
219
220 if (!atomic_read(&this_adm.copp_cnt[index]))
221 return 0;
222
223 route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
224 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
225 route.hdr.pkt_size = sizeof(route);
226 route.hdr.src_svc = 0;
227 route.hdr.src_domain = APR_DOMAIN_APPS;
228 route.hdr.src_port = port_id;
229 route.hdr.dest_svc = APR_SVC_ADM;
230 route.hdr.dest_domain = APR_DOMAIN_ADSP;
231 route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
232 route.hdr.token = port_id;
233 route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
234 route.num_sessions = 1;
235 route.session[0].id = session_id;
236 route.session[0].num_copps = 1;
237 route.session[0].copp_id[0] =
238 atomic_read(&this_adm.copp_id[index]);
239
240 /* This rule can change */
241 if ((port_id & 0x1))
242 route.path = AUDIO_TX;
243 else
244 route.path = AUDIO_RX;
245
246 atomic_set(&this_adm.copp_stat[index], 0);
247
248 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
249 if (ret < 0) {
250 pr_err("%s: ADM routing for port %d failed\n",
251 __func__, port_id);
252 ret = -EINVAL;
253 goto fail_cmd;
254 }
255 ret = wait_event_timeout(this_adm.wait,
256 atomic_read(&this_adm.copp_stat[index]),
257 msecs_to_jiffies(TIMEOUT_MS));
258 if (!ret) {
259 pr_err("%s: ADM cmd Route failed for port %d\n",
260 __func__, port_id);
261 ret = -EINVAL;
262 }
263
Ben Rombergera8733902011-08-11 16:23:54 -0700264 /* have to convert path to dev ctrl standard */
265 send_adm_cal(port_id, (route.path + 1));
Ben Romberger974a40d2011-07-18 15:08:21 -0700266 rtac_add_adm_device(port_id, atomic_read(&this_adm.copp_id[index]),
267 (route.path + 1), session_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700268fail_cmd:
269 return ret;
270}
271
272/* This function establish routing of ASM stream to a particular
273 * ADM mixer that is routed to a particular hardware port
274 * session id must be in range of 0 ~ 31.
275 */
276int adm_route_session(int port_id, uint session_id, int set)
277{
278 int rc = 0;
279 int index;
280
281 pr_debug("%s: port %x session %x set %x\n", __func__,
282 port_id, session_id, set);
283
284 index = afe_get_port_index(port_id);
285
286 if (index >= AFE_MAX_PORTS) {
287 pr_err("%s port idi[%d] out of limit[%d]\n", __func__,
288 port_id, AFE_MAX_PORTS);
289 return -ENODEV;
290 }
291
292 if (set) {
293 set_bit(session_id, &this_adm.sessions[index]);
294 rc = adm_cmd_map(port_id, session_id); /* not thread safe */
295 } else /* Not sure how to deroute yet */
296 clear_bit(session_id, &this_adm.sessions[index]);
297
298 return rc;
299}
300
301/* This function instantiates a mixer in QDSP6 audio path for
302 * given audio hardware port. Topology should be made part
303 * of audio calibration
304 */
305int adm_open_mixer(int port_id, int path, int rate,
306 int channel_mode, int topology) {
307 struct adm_copp_open_command open;
308 int ret = 0;
309 u32 i;
310 int index;
311
312 pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
313 port_id, path, rate, channel_mode);
314
315 if (afe_validate_port(port_id) < 0) {
316 pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
317 return -ENODEV;
318 }
319
320 index = afe_get_port_index(port_id);
321 if (this_adm.apr == NULL) {
322 this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
323 0xFFFFFFFF, &this_adm);
324 if (this_adm.apr == NULL) {
325 pr_err("%s: Unable to register ADM\n", __func__);
326 ret = -ENODEV;
327 return ret;
328 }
Ben Romberger974a40d2011-07-18 15:08:21 -0700329 rtac_set_adm_handle(this_adm.apr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700330 }
331
332 if (atomic_read(&this_adm.copp_cnt[index]) == 0) {
333
334 open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
335 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
336 open.hdr.pkt_size = sizeof(open);
337 open.hdr.src_svc = APR_SVC_ADM;
338 open.hdr.src_domain = APR_DOMAIN_APPS;
339 open.hdr.src_port = port_id;
340 open.hdr.dest_svc = APR_SVC_ADM;
341 open.hdr.dest_domain = APR_DOMAIN_ADSP;
342 open.hdr.dest_port = port_id;
343 open.hdr.token = port_id;
344 open.hdr.opcode = ADM_CMD_COPP_OPEN;
345
346 open.mode = path;
347 open.endpoint_id1 = port_id;
348 open.endpoint_id2 = 0xFFFF;
349
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700350 /* convert path to acdb path */
Ben Romberger974a40d2011-07-18 15:08:21 -0700351 if (path == ADM_PATH_PLAYBACK)
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700352 open.topology_id = get_adm_rx_topology();
Jay Wang4fa2ee42011-07-18 00:21:22 -0700353 else {
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700354 open.topology_id = get_adm_tx_topology();
Jay Wang4fa2ee42011-07-18 00:21:22 -0700355 if ((open.topology_id ==
356 VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
357 (open.topology_id ==
358 VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
359 rate = 16000;
360 }
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700361
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700362 if (open.topology_id == 0)
363 open.topology_id = topology;
364
365 open.channel_config = channel_mode & 0x00FF;
366 open.rate = rate;
367
368 pr_debug("%s: channel_config=%d port_id=%d rate=%d\
369 topology_id=0x%X\n", __func__, open.channel_config,\
370 open.endpoint_id1, open.rate,\
371 open.topology_id);
372
373 atomic_set(&this_adm.copp_stat[index], 0);
374
375 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
376 if (ret < 0) {
377 pr_err("%s:ADM enable for port %d failed\n",
378 __func__, port_id);
379 ret = -EINVAL;
380 goto fail_cmd;
381 }
382 /* Wait for the callback with copp id */
383 ret = wait_event_timeout(this_adm.wait,
384 atomic_read(&this_adm.copp_stat[index]),
385 msecs_to_jiffies(TIMEOUT_MS));
386 if (!ret) {
387 pr_err("%s ADM open failed for port %d\n", __func__,
388 port_id);
389 ret = -EINVAL;
390 goto fail_cmd;
391 }
392 }
393 atomic_inc(&this_adm.copp_cnt[index]);
394
395 /* Set up routing for cached session */
396 for (i = find_first_bit(&this_adm.sessions[index], ASM_MAX_SESSION);
397 i < ASM_MAX_SESSION; i = find_next_bit(&this_adm.sessions[index],
398 ASM_MAX_SESSION, i + 1))
399 adm_cmd_map(port_id, i); /* Not thread safe */
400
401fail_cmd:
402 return ret;
403}
404
/*
 * adm_open() - open (or reference-count) the COPP on an AFE port.
 *
 * Registers the ADM APR handle on first use, opens a COPP with
 * ADM_CMD_COPP_OPEN only when this port's reference count is zero, and
 * bumps the count on success.  The topology comes from ACDB (RX or TX
 * depending on @path), falling back to @topology when ACDB has none.
 * Returns 0 on success, negative error code on failure.
 */
int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
{
	struct adm_copp_open_command open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
			port_id, path, rate, channel_mode);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	/* Lazily register the ADM APR handle on first use */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}


	/* Create a COPP if port id are not enabled */
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_COPP_OPEN;

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;

		/* convert path to acdb path */
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			/* ECNS/Fluence TX topologies only run at 16 kHz */
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		/* No ACDB override: fall back to the caller's topology */
		if (open.topology_id == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d\
			topology_id=0x%X\n", __func__, open.channel_config,\
			open.endpoint_id1, open.rate,\
			open.topology_id);

		/* Clear the done flag before the send; the callback sets it */
		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}
503
504int adm_matrix_map(int session_id, int path, int num_copps,
505 unsigned int *port_id, int copp_id)
506{
507 struct adm_routings_command route;
508 int ret = 0, i = 0;
509 /* Assumes port_ids have already been validated during adm_open */
510 int index = afe_get_port_index(copp_id);
511
512 pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
513 __func__, session_id, path, num_copps, port_id[0]);
514
515 route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
516 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
517 route.hdr.pkt_size = sizeof(route);
518 route.hdr.src_svc = 0;
519 route.hdr.src_domain = APR_DOMAIN_APPS;
520 route.hdr.src_port = copp_id;
521 route.hdr.dest_svc = APR_SVC_ADM;
522 route.hdr.dest_domain = APR_DOMAIN_ADSP;
523 route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
524 route.hdr.token = copp_id;
525 route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
526 route.num_sessions = 1;
527 route.session[0].id = session_id;
528 route.session[0].num_copps = num_copps;
529
530 for (i = 0; i < num_copps; i++) {
531 int tmp;
532 tmp = afe_get_port_index(port_id[i]);
533
534 pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i,
535 port_id[i], tmp);
536
537 route.session[0].copp_id[i] =
538 atomic_read(&this_adm.copp_id[tmp]);
539 }
540 if (num_copps % 2)
541 route.session[0].copp_id[i] = 0;
542
543 switch (path) {
544 case 0x1:
545 route.path = AUDIO_RX;
546 break;
547 case 0x2:
548 case 0x3:
549 route.path = AUDIO_TX;
550 break;
551 default:
552 pr_err("%s: Wrong path set[%d]\n", __func__, path);
553 break;
554 }
555 atomic_set(&this_adm.copp_stat[index], 0);
556
557 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
558 if (ret < 0) {
559 pr_err("%s: ADM routing for port %d failed\n",
560 __func__, port_id[0]);
561 ret = -EINVAL;
562 goto fail_cmd;
563 }
564 ret = wait_event_timeout(this_adm.wait,
565 atomic_read(&this_adm.copp_stat[index]),
566 msecs_to_jiffies(TIMEOUT_MS));
567 if (!ret) {
568 pr_err("%s: ADM cmd Route failed for port %d\n",
569 __func__, port_id[0]);
570 ret = -EINVAL;
571 goto fail_cmd;
572 }
573
574 for (i = 0; i < num_copps; i++)
575 send_adm_cal(port_id[i], path);
576
Ben Romberger974a40d2011-07-18 15:08:21 -0700577 for (i = 0; i < num_copps; i++)
578 rtac_add_adm_device(port_id[i], atomic_read(&this_adm.copp_id
579 [afe_get_port_index(port_id[i])]),
580 path, session_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700581 return 0;
582
583fail_cmd:
584
585 return ret;
586}
587
/*
 * adm_memory_map_regions() - map shared-memory regions with the DSP.
 *
 * Allocates a variable-length ADM_CMD_MEMORY_MAP_REGIONS packet (header
 * plus @bufcnt region descriptors), sends it, and waits up to 5 seconds
 * for the ack.  @buf_add/@bufsz give the physical address and size of
 * each region.  Note: completion is signalled through copp_stat[0]
 * regardless of port.  Returns 0 on success, negative error on failure.
 */
int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
				uint32_t *bufsz, uint32_t bufcnt)
{
	struct adm_cmd_memory_map_regions *mmap_regions = NULL;
	struct adm_memory_map_regions *mregions = NULL;
	void *mmap_region_cmd = NULL;
	void *payload = NULL;
	int ret = 0;
	int i = 0;
	int cmd_size = 0;

	pr_info("%s\n", __func__);
	/* Lazily register the ADM APR handle on first use */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	/* Command is a fixed header followed by bufcnt region entries */
	cmd_size = sizeof(struct adm_cmd_memory_map_regions)
			+ sizeof(struct adm_memory_map_regions) * bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
						APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS;
	mmap_regions->mempool_id = mempool_id & 0x00ff;
	mmap_regions->nregions = bufcnt & 0x00ff;
	pr_debug("%s: map_regions->nregions = %d\n", __func__,
				mmap_regions->nregions);
	/* Region descriptors start right after the fixed header */
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct adm_cmd_memory_map_regions));
	mregions = (struct adm_memory_map_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		mregions->buf_size = bufsz[i];
		++mregions;
	}

	/* Clear the done flag before the send; the callback sets it */
	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
662
663int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz,
664 uint32_t bufcnt)
665{
666 struct adm_cmd_memory_unmap_regions *unmap_regions = NULL;
667 struct adm_memory_unmap_regions *mregions = NULL;
668 void *unmap_region_cmd = NULL;
669 void *payload = NULL;
670 int ret = 0;
671 int i = 0;
672 int cmd_size = 0;
673
674 pr_info("%s\n", __func__);
675
676 if (this_adm.apr == NULL) {
677 pr_err("%s APR handle NULL\n", __func__);
678 return -EINVAL;
679 }
680
681 cmd_size = sizeof(struct adm_cmd_memory_unmap_regions)
682 + sizeof(struct adm_memory_unmap_regions) * bufcnt;
683
684 unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
685 if (!unmap_region_cmd) {
686 pr_err("%s: allocate unmap_region_cmd failed\n", __func__);
687 return -ENOMEM;
688 }
689 unmap_regions = (struct adm_cmd_memory_unmap_regions *)
690 unmap_region_cmd;
691 unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
692 APR_HDR_LEN(APR_HDR_SIZE),
693 APR_PKT_VER);
694 unmap_regions->hdr.pkt_size = cmd_size;
695 unmap_regions->hdr.src_port = 0;
696 unmap_regions->hdr.dest_port = 0;
697 unmap_regions->hdr.token = 0;
698 unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS;
699 unmap_regions->nregions = bufcnt & 0x00ff;
700 unmap_regions->reserved = 0;
701 pr_debug("%s: unmap_regions->nregions = %d\n", __func__,
702 unmap_regions->nregions);
703 payload = ((u8 *) unmap_region_cmd +
704 sizeof(struct adm_cmd_memory_unmap_regions));
705 mregions = (struct adm_memory_unmap_regions *)payload;
706
707 for (i = 0; i < bufcnt; i++) {
708 mregions->phys = buf_add[i];
709 ++mregions;
710 }
711 atomic_set(&this_adm.copp_stat[0], 0);
712 ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd);
713 if (ret < 0) {
714 pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
715 unmap_regions->hdr.opcode, ret);
716 ret = -EINVAL;
717 goto fail_cmd;
718 }
719
720 ret = wait_event_timeout(this_adm.wait,
721 atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
722 if (!ret) {
723 pr_err("%s: timeout. waited for memory_unmap\n", __func__);
724 ret = -EINVAL;
725 goto fail_cmd;
726 }
727fail_cmd:
728 kfree(unmap_region_cmd);
729 return ret;
730}
731
Ben Romberger974a40d2011-07-18 15:08:21 -0700732int adm_get_copp_id(int port_index)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700733{
734 pr_debug("%s\n", __func__);
735
Ben Romberger974a40d2011-07-18 15:08:21 -0700736 if (port_index < 0) {
737 pr_err("%s: invalid port_id = %d\n", __func__, port_index);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700738 return -EINVAL;
739 }
740
Ben Romberger974a40d2011-07-18 15:08:21 -0700741 return atomic_read(&this_adm.copp_id[port_index]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700742}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700743
744int adm_close(int port_id)
745{
746 struct apr_hdr close;
747
748 int ret = 0;
749 int index = afe_get_port_index(port_id);
Bharath Ramachandramurthy51a86212011-07-29 12:43:43 -0700750 if (afe_validate_port(port_id) < 0)
751 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700752
753 pr_info("%s port_id=%d index %d\n", __func__, port_id, index);
754
755 if (!(atomic_read(&this_adm.copp_cnt[index]))) {
756 pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id);
757
758 goto fail_cmd;
759 }
760 atomic_dec(&this_adm.copp_cnt[index]);
761 if (!(atomic_read(&this_adm.copp_cnt[index]))) {
762
763 close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
764 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
765 close.pkt_size = sizeof(close);
766 close.src_svc = APR_SVC_ADM;
767 close.src_domain = APR_DOMAIN_APPS;
768 close.src_port = port_id;
769 close.dest_svc = APR_SVC_ADM;
770 close.dest_domain = APR_DOMAIN_ADSP;
771 close.dest_port = atomic_read(&this_adm.copp_id[index]);
772 close.token = port_id;
773 close.opcode = ADM_CMD_COPP_CLOSE;
774
775 atomic_set(&this_adm.copp_id[index], RESET_COPP_ID);
776 atomic_set(&this_adm.copp_stat[index], 0);
777
778
779 pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
780 __func__,
781 atomic_read(&this_adm.copp_id[index]),
782 port_id, index,
783 atomic_read(&this_adm.copp_cnt[index]));
784
785 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
786 if (ret < 0) {
787 pr_err("%s ADM close failed\n", __func__);
788 ret = -EINVAL;
789 goto fail_cmd;
790 }
791
792 ret = wait_event_timeout(this_adm.wait,
793 atomic_read(&this_adm.copp_stat[index]),
794 msecs_to_jiffies(TIMEOUT_MS));
795 if (!ret) {
796 pr_err("%s: ADM cmd Route failed for port %d\n",
797 __func__, port_id);
798 ret = -EINVAL;
799 goto fail_cmd;
800 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700801 }
802
803fail_cmd:
804 return ret;
805}
806
807static int __init adm_init(void)
808{
809 int i = 0;
810 init_waitqueue_head(&this_adm.wait);
811 this_adm.apr = NULL;
812
813 for (i = 0; i < AFE_MAX_PORTS; i++) {
814 atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
815 atomic_set(&this_adm.copp_cnt[i], 0);
816 atomic_set(&this_adm.copp_stat[i], 0);
817 }
818 return 0;
819}
820
821device_initcall(adm_init);