blob: baac49842df16a4c28c0fe0c416f5cfa354d61a4 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/wait.h>
15#include <linux/sched.h>
16#include <linux/jiffies.h>
17#include <linux/uaccess.h>
18#include <linux/atomic.h>
19#include <linux/bitops.h>
Ben Rombergerfce8f512011-07-18 16:46:09 -070020
21#include <mach/qdsp6v2/audio_dev_ctl.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022#include <mach/qdsp6v2/audio_acdb.h>
Ben Rombergerfce8f512011-07-18 16:46:09 -070023#include <mach/qdsp6v2/rtac.h>
24
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#include <sound/apr_audio.h>
26#include <sound/q6afe.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027
28#define TIMEOUT_MS 1000
29#define AUDIO_RX 0x0
30#define AUDIO_TX 0x1
31#define ASM_MAX_SESSION 0x8 /* To do: define in a header */
32#define RESET_COPP_ID 99
33#define INVALID_COPP_ID 0xFF
34
/* Driver-global ADM state; per-port slots are indexed by
 * afe_get_port_index(). A single wait queue is shared by all
 * pending commands, with copp_stat[] as the per-port done flag. */
struct adm_ctl {
	void *apr;				/* APR handle from apr_register(); NULL until first open */
	atomic_t copp_id[AFE_MAX_PORTS];	/* COPP id from ADM_CMDRSP_COPP_OPEN; RESET_COPP_ID when closed */
	atomic_t copp_cnt[AFE_MAX_PORTS];	/* open reference count per port */
	atomic_t copp_stat[AFE_MAX_PORTS];	/* command-complete flag set by adm_callback() */
	unsigned long sessions[AFE_MAX_PORTS];	/* bitmap of ASM session ids cached for this port */
	wait_queue_head_t wait;			/* waiters blocked on copp_stat[] transitions */
};
43
44static struct adm_ctl this_adm;
45
46static int32_t adm_callback(struct apr_client_data *data, void *priv)
47{
48 uint32_t *payload;
49 int i, index;
50 payload = data->payload;
51
52 if (data->opcode == RESET_EVENTS) {
53 pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n",
54 data->reset_event, data->reset_proc,
55 this_adm.apr);
56 if (this_adm.apr) {
57 apr_reset(this_adm.apr);
58 for (i = 0; i < AFE_MAX_PORTS; i++) {
59 atomic_set(&this_adm.copp_id[i],
60 RESET_COPP_ID);
61 atomic_set(&this_adm.copp_cnt[i], 0);
62 atomic_set(&this_adm.copp_stat[i], 0);
63 }
64 this_adm.apr = NULL;
65 }
66 return 0;
67 }
68
69 pr_debug("%s: code = 0x%x %x %x size = %d\n", __func__,
70 data->opcode, payload[0], payload[1],
71 data->payload_size);
72
73 if (data->payload_size) {
74 index = afe_get_port_index(data->token);
75 pr_debug("%s: Port ID %d, index %d\n", __func__,
76 data->token, index);
77
78 if (data->opcode == APR_BASIC_RSP_RESULT) {
79 pr_debug("APR_BASIC_RSP_RESULT\n");
80 switch (payload[0]) {
81 case ADM_CMD_SET_PARAMS:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070082 if (rtac_make_adm_callback(payload,
83 data->payload_size))
84 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085 case ADM_CMD_COPP_CLOSE:
86 case ADM_CMD_MEMORY_MAP:
87 case ADM_CMD_MEMORY_UNMAP:
88 case ADM_CMD_MEMORY_MAP_REGIONS:
89 case ADM_CMD_MEMORY_UNMAP_REGIONS:
90 case ADM_CMD_MATRIX_MAP_ROUTINGS:
91 pr_debug("ADM_CMD_MATRIX_MAP_ROUTINGS\n");
92 atomic_set(&this_adm.copp_stat[index], 1);
93 wake_up(&this_adm.wait);
94 break;
95 default:
96 pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
97 payload[0]);
98 break;
99 }
100 return 0;
101 }
102
103 switch (data->opcode) {
104 case ADM_CMDRSP_COPP_OPEN: {
105 struct adm_copp_open_respond *open = data->payload;
106 if (open->copp_id == INVALID_COPP_ID) {
107 pr_err("%s: invalid coppid rxed %d\n",
108 __func__, open->copp_id);
109 atomic_set(&this_adm.copp_stat[index], 1);
110 wake_up(&this_adm.wait);
111 break;
112 }
113 atomic_set(&this_adm.copp_id[index], open->copp_id);
114 atomic_set(&this_adm.copp_stat[index], 1);
115 pr_debug("%s: coppid rxed=%d\n", __func__,
116 open->copp_id);
117 wake_up(&this_adm.wait);
118 }
119 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700120 case ADM_CMDRSP_GET_PARAMS:
Swaminathan Sathappan88163a72011-08-01 16:01:14 -0700121 pr_debug("%s: ADM_CMDRSP_GET_PARAMS\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700122 rtac_make_adm_callback(payload,
123 data->payload_size);
124 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700125 default:
126 pr_err("%s: Unknown cmd:0x%x\n", __func__,
127 data->opcode);
128 break;
129 }
130 }
131 return 0;
132}
133
/*
 * Push one ACDB calibration block to the COPP open on port_id via
 * ADM_CMD_SET_PARAMS and block (up to TIMEOUT_MS) for the ack.
 * Fire-and-forget: errors and timeouts are only logged.
 * aud_cal->cal_paddr is passed as-is in the command payload field —
 * presumably a physical/shared-memory address the DSP can read;
 * TODO confirm against the ACDB driver.
 */
void send_cal(int port_id, struct acdb_cal_block *aud_cal)
{
	s32 result;
	struct adm_set_params_command adm_params;
	int index = afe_get_port_index(port_id);

	pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index);

	/* Nothing to do when no calibration was loaded for this path */
	if (!aud_cal || aud_cal->cal_size == 0) {
		pr_err("%s: No calibration data to send!\n", __func__);
		goto done;
	}

	/* NOTE(review): header length is a literal 20 here while the rest
	 * of the file uses APR_HDR_LEN(APR_HDR_SIZE) — verify intentional. */
	adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		sizeof(adm_params));
	adm_params.hdr.src_svc = APR_SVC_ADM;
	adm_params.hdr.src_domain = APR_DOMAIN_APPS;
	adm_params.hdr.src_port = port_id;
	adm_params.hdr.dest_svc = APR_SVC_ADM;
	adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
	/* destination is the COPP opened earlier on this port */
	adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_params.hdr.token = port_id;
	adm_params.hdr.opcode = ADM_CMD_SET_PARAMS;
	adm_params.payload = aud_cal->cal_paddr;
	adm_params.payload_size = aud_cal->cal_size;

	/* arm the done flag that adm_callback() will set on the ack */
	atomic_set(&this_adm.copp_stat[index], 0);
	pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
		__func__, adm_params.payload, adm_params.payload_size);
	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
	if (result < 0) {
		pr_err("%s: Set params failed port = %d payload = 0x%x\n",
			__func__, port_id, aud_cal->cal_paddr);
		goto done;
	}
	/* Wait for the callback */
	result = wait_event_timeout(this_adm.wait,
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result)
		pr_err("%s: Set params timed out port = %d, payload = 0x%x\n",
			__func__, port_id, aud_cal->cal_paddr);
done:
	return;
}
181
182void send_adm_cal(int port_id, int path)
183{
184 s32 acdb_path;
185 struct acdb_cal_block aud_cal;
186
187 pr_debug("%s\n", __func__);
188
189 /* Maps audio_dev_ctrl path definition to ACDB definition */
190 acdb_path = path - 1;
191 if ((acdb_path >= NUM_AUDPROC_BUFFERS) ||
192 (acdb_path < 0)) {
193 pr_err("%s: Path is not RX or TX, path = %d\n",
194 __func__, path);
195 goto done;
196 }
197
198 pr_debug("%s: Sending audproc cal\n", __func__);
199 get_audproc_cal(acdb_path, &aud_cal);
200 send_cal(port_id, &aud_cal);
201
202 pr_debug("%s: Sending audvol cal\n", __func__);
203 get_audvol_cal(acdb_path, &aud_cal);
204 send_cal(port_id, &aud_cal);
205done:
206 return;
207}
208
209/* This function issues routing command of ASM stream
210 * to ADM mixer associated with a particular AFE port
211 */
212int adm_cmd_map(int port_id, int session_id)
213{
214 struct adm_routings_command route;
215 int ret = 0;
216 int index = afe_get_port_index(port_id);
217
218 pr_debug("%s: port %x session %x\n", __func__, port_id, session_id);
219
220 if (!atomic_read(&this_adm.copp_cnt[index]))
221 return 0;
222
223 route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
224 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
225 route.hdr.pkt_size = sizeof(route);
226 route.hdr.src_svc = 0;
227 route.hdr.src_domain = APR_DOMAIN_APPS;
228 route.hdr.src_port = port_id;
229 route.hdr.dest_svc = APR_SVC_ADM;
230 route.hdr.dest_domain = APR_DOMAIN_ADSP;
231 route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
232 route.hdr.token = port_id;
233 route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
234 route.num_sessions = 1;
235 route.session[0].id = session_id;
236 route.session[0].num_copps = 1;
237 route.session[0].copp_id[0] =
238 atomic_read(&this_adm.copp_id[index]);
239
240 /* This rule can change */
241 if ((port_id & 0x1))
242 route.path = AUDIO_TX;
243 else
244 route.path = AUDIO_RX;
245
246 atomic_set(&this_adm.copp_stat[index], 0);
247
248 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
249 if (ret < 0) {
250 pr_err("%s: ADM routing for port %d failed\n",
251 __func__, port_id);
252 ret = -EINVAL;
253 goto fail_cmd;
254 }
255 ret = wait_event_timeout(this_adm.wait,
256 atomic_read(&this_adm.copp_stat[index]),
257 msecs_to_jiffies(TIMEOUT_MS));
258 if (!ret) {
259 pr_err("%s: ADM cmd Route failed for port %d\n",
260 __func__, port_id);
261 ret = -EINVAL;
262 }
263
Ben Rombergera8733902011-08-11 16:23:54 -0700264 /* have to convert path to dev ctrl standard */
265 send_adm_cal(port_id, (route.path + 1));
Ben Romberger974a40d2011-07-18 15:08:21 -0700266 rtac_add_adm_device(port_id, atomic_read(&this_adm.copp_id[index]),
267 (route.path + 1), session_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700268fail_cmd:
269 return ret;
270}
271
272/* This function establish routing of ASM stream to a particular
273 * ADM mixer that is routed to a particular hardware port
274 * session id must be in range of 0 ~ 31.
275 */
276int adm_route_session(int port_id, uint session_id, int set)
277{
278 int rc = 0;
279 int index;
280
281 pr_debug("%s: port %x session %x set %x\n", __func__,
282 port_id, session_id, set);
283
Laxminath Kasam32657ec2011-08-01 19:26:57 +0530284 port_id = afe_convert_virtual_to_portid(port_id);
285
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700286 index = afe_get_port_index(port_id);
287
288 if (index >= AFE_MAX_PORTS) {
289 pr_err("%s port idi[%d] out of limit[%d]\n", __func__,
290 port_id, AFE_MAX_PORTS);
291 return -ENODEV;
292 }
293
294 if (set) {
295 set_bit(session_id, &this_adm.sessions[index]);
296 rc = adm_cmd_map(port_id, session_id); /* not thread safe */
297 } else /* Not sure how to deroute yet */
298 clear_bit(session_id, &this_adm.sessions[index]);
299
300 return rc;
301}
302
303/* This function instantiates a mixer in QDSP6 audio path for
304 * given audio hardware port. Topology should be made part
305 * of audio calibration
306 */
307int adm_open_mixer(int port_id, int path, int rate,
308 int channel_mode, int topology) {
309 struct adm_copp_open_command open;
310 int ret = 0;
311 u32 i;
312 int index;
313
314 pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
315 port_id, path, rate, channel_mode);
316
Laxminath Kasam32657ec2011-08-01 19:26:57 +0530317 port_id = afe_convert_virtual_to_portid(port_id);
318
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700319 if (afe_validate_port(port_id) < 0) {
320 pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
321 return -ENODEV;
322 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700323 index = afe_get_port_index(port_id);
324 if (this_adm.apr == NULL) {
325 this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
326 0xFFFFFFFF, &this_adm);
327 if (this_adm.apr == NULL) {
328 pr_err("%s: Unable to register ADM\n", __func__);
329 ret = -ENODEV;
330 return ret;
331 }
Ben Romberger974a40d2011-07-18 15:08:21 -0700332 rtac_set_adm_handle(this_adm.apr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700333 }
334
335 if (atomic_read(&this_adm.copp_cnt[index]) == 0) {
336
337 open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
338 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
339 open.hdr.pkt_size = sizeof(open);
340 open.hdr.src_svc = APR_SVC_ADM;
341 open.hdr.src_domain = APR_DOMAIN_APPS;
342 open.hdr.src_port = port_id;
343 open.hdr.dest_svc = APR_SVC_ADM;
344 open.hdr.dest_domain = APR_DOMAIN_ADSP;
345 open.hdr.dest_port = port_id;
346 open.hdr.token = port_id;
347 open.hdr.opcode = ADM_CMD_COPP_OPEN;
348
349 open.mode = path;
350 open.endpoint_id1 = port_id;
351 open.endpoint_id2 = 0xFFFF;
352
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700353 /* convert path to acdb path */
Ben Romberger974a40d2011-07-18 15:08:21 -0700354 if (path == ADM_PATH_PLAYBACK)
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700355 open.topology_id = get_adm_rx_topology();
Jay Wang4fa2ee42011-07-18 00:21:22 -0700356 else {
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700357 open.topology_id = get_adm_tx_topology();
Jay Wang4fa2ee42011-07-18 00:21:22 -0700358 if ((open.topology_id ==
359 VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
360 (open.topology_id ==
361 VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
362 rate = 16000;
363 }
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700364
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700365 if (open.topology_id == 0)
366 open.topology_id = topology;
367
368 open.channel_config = channel_mode & 0x00FF;
369 open.rate = rate;
370
371 pr_debug("%s: channel_config=%d port_id=%d rate=%d\
372 topology_id=0x%X\n", __func__, open.channel_config,\
373 open.endpoint_id1, open.rate,\
374 open.topology_id);
375
376 atomic_set(&this_adm.copp_stat[index], 0);
377
378 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
379 if (ret < 0) {
380 pr_err("%s:ADM enable for port %d failed\n",
381 __func__, port_id);
382 ret = -EINVAL;
383 goto fail_cmd;
384 }
385 /* Wait for the callback with copp id */
386 ret = wait_event_timeout(this_adm.wait,
387 atomic_read(&this_adm.copp_stat[index]),
388 msecs_to_jiffies(TIMEOUT_MS));
389 if (!ret) {
390 pr_err("%s ADM open failed for port %d\n", __func__,
391 port_id);
392 ret = -EINVAL;
393 goto fail_cmd;
394 }
395 }
396 atomic_inc(&this_adm.copp_cnt[index]);
397
398 /* Set up routing for cached session */
399 for (i = find_first_bit(&this_adm.sessions[index], ASM_MAX_SESSION);
400 i < ASM_MAX_SESSION; i = find_next_bit(&this_adm.sessions[index],
401 ASM_MAX_SESSION, i + 1))
402 adm_cmd_map(port_id, i); /* Not thread safe */
403
404fail_cmd:
405 return ret;
406}
407
/*
 * Open (or reference-count) a COPP on the given AFE port; routing to
 * ASM sessions is done separately via adm_matrix_map().
 * Topology comes from ACDB calibration when configured, otherwise the
 * caller-supplied 'topology' is used.
 * Returns 0 on success, -ENODEV for a bad port or failed APR
 * registration, -EINVAL on command failure/timeout.
 */
int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
{
	struct adm_copp_open_command open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
				port_id, path, rate, channel_mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	/* Lazily register with APR on first use; handle shared with RTAC */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}


	/* Create a COPP if port id are not enabled */
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_COPP_OPEN;

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;

		/* convert path to acdb path */
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			/* ECNS/Fluence TX topologies force 16 kHz —
			 * presumably a DSP requirement; confirm with ADSP docs */
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		/* no ACDB topology configured: fall back to caller's */
		if (open.topology_id == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d\
			topology_id=0x%X\n", __func__, open.channel_config,\
			open.endpoint_id1, open.rate,\
			open.topology_id);

		/* arm the done flag adm_callback() sets on COPP_OPEN ack */
		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}
508
509int adm_matrix_map(int session_id, int path, int num_copps,
510 unsigned int *port_id, int copp_id)
511{
512 struct adm_routings_command route;
513 int ret = 0, i = 0;
514 /* Assumes port_ids have already been validated during adm_open */
515 int index = afe_get_port_index(copp_id);
516
517 pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
518 __func__, session_id, path, num_copps, port_id[0]);
519
520 route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
521 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
522 route.hdr.pkt_size = sizeof(route);
523 route.hdr.src_svc = 0;
524 route.hdr.src_domain = APR_DOMAIN_APPS;
525 route.hdr.src_port = copp_id;
526 route.hdr.dest_svc = APR_SVC_ADM;
527 route.hdr.dest_domain = APR_DOMAIN_ADSP;
528 route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
529 route.hdr.token = copp_id;
530 route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
531 route.num_sessions = 1;
532 route.session[0].id = session_id;
533 route.session[0].num_copps = num_copps;
534
535 for (i = 0; i < num_copps; i++) {
536 int tmp;
Laxminath Kasam32657ec2011-08-01 19:26:57 +0530537 port_id[i] = afe_convert_virtual_to_portid(port_id[i]);
538
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700539 tmp = afe_get_port_index(port_id[i]);
540
541 pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i,
542 port_id[i], tmp);
543
544 route.session[0].copp_id[i] =
545 atomic_read(&this_adm.copp_id[tmp]);
546 }
547 if (num_copps % 2)
548 route.session[0].copp_id[i] = 0;
549
550 switch (path) {
551 case 0x1:
552 route.path = AUDIO_RX;
553 break;
554 case 0x2:
555 case 0x3:
556 route.path = AUDIO_TX;
557 break;
558 default:
559 pr_err("%s: Wrong path set[%d]\n", __func__, path);
560 break;
561 }
562 atomic_set(&this_adm.copp_stat[index], 0);
563
564 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
565 if (ret < 0) {
566 pr_err("%s: ADM routing for port %d failed\n",
567 __func__, port_id[0]);
568 ret = -EINVAL;
569 goto fail_cmd;
570 }
571 ret = wait_event_timeout(this_adm.wait,
572 atomic_read(&this_adm.copp_stat[index]),
573 msecs_to_jiffies(TIMEOUT_MS));
574 if (!ret) {
575 pr_err("%s: ADM cmd Route failed for port %d\n",
576 __func__, port_id[0]);
577 ret = -EINVAL;
578 goto fail_cmd;
579 }
580
581 for (i = 0; i < num_copps; i++)
582 send_adm_cal(port_id[i], path);
583
Ben Romberger974a40d2011-07-18 15:08:21 -0700584 for (i = 0; i < num_copps; i++)
585 rtac_add_adm_device(port_id[i], atomic_read(&this_adm.copp_id
586 [afe_get_port_index(port_id[i])]),
587 path, session_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700588 return 0;
589
590fail_cmd:
591
592 return ret;
593}
594
595int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
596 uint32_t *bufsz, uint32_t bufcnt)
597{
598 struct adm_cmd_memory_map_regions *mmap_regions = NULL;
599 struct adm_memory_map_regions *mregions = NULL;
600 void *mmap_region_cmd = NULL;
601 void *payload = NULL;
602 int ret = 0;
603 int i = 0;
604 int cmd_size = 0;
605
606 pr_info("%s\n", __func__);
607 if (this_adm.apr == NULL) {
608 this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
609 0xFFFFFFFF, &this_adm);
610 if (this_adm.apr == NULL) {
611 pr_err("%s: Unable to register ADM\n", __func__);
612 ret = -ENODEV;
613 return ret;
614 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700615 rtac_set_adm_handle(this_adm.apr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700616 }
617
618 cmd_size = sizeof(struct adm_cmd_memory_map_regions)
619 + sizeof(struct adm_memory_map_regions) * bufcnt;
620
621 mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
622 if (!mmap_region_cmd) {
623 pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
624 return -ENOMEM;
625 }
626 mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd;
627 mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
628 APR_HDR_LEN(APR_HDR_SIZE),
629 APR_PKT_VER);
630 mmap_regions->hdr.pkt_size = cmd_size;
631 mmap_regions->hdr.src_port = 0;
632 mmap_regions->hdr.dest_port = 0;
633 mmap_regions->hdr.token = 0;
634 mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS;
635 mmap_regions->mempool_id = mempool_id & 0x00ff;
636 mmap_regions->nregions = bufcnt & 0x00ff;
637 pr_debug("%s: map_regions->nregions = %d\n", __func__,
638 mmap_regions->nregions);
639 payload = ((u8 *) mmap_region_cmd +
640 sizeof(struct adm_cmd_memory_map_regions));
641 mregions = (struct adm_memory_map_regions *)payload;
642
643 for (i = 0; i < bufcnt; i++) {
644 mregions->phys = buf_add[i];
645 mregions->buf_size = bufsz[i];
646 ++mregions;
647 }
648
649 atomic_set(&this_adm.copp_stat[0], 0);
650 ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
651 if (ret < 0) {
652 pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
653 mmap_regions->hdr.opcode, ret);
654 ret = -EINVAL;
655 goto fail_cmd;
656 }
657
658 ret = wait_event_timeout(this_adm.wait,
659 atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
660 if (!ret) {
661 pr_err("%s: timeout. waited for memory_map\n", __func__);
662 ret = -EINVAL;
663 goto fail_cmd;
664 }
665fail_cmd:
666 kfree(mmap_region_cmd);
667 return ret;
668}
669
670int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz,
671 uint32_t bufcnt)
672{
673 struct adm_cmd_memory_unmap_regions *unmap_regions = NULL;
674 struct adm_memory_unmap_regions *mregions = NULL;
675 void *unmap_region_cmd = NULL;
676 void *payload = NULL;
677 int ret = 0;
678 int i = 0;
679 int cmd_size = 0;
680
681 pr_info("%s\n", __func__);
682
683 if (this_adm.apr == NULL) {
684 pr_err("%s APR handle NULL\n", __func__);
685 return -EINVAL;
686 }
687
688 cmd_size = sizeof(struct adm_cmd_memory_unmap_regions)
689 + sizeof(struct adm_memory_unmap_regions) * bufcnt;
690
691 unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
692 if (!unmap_region_cmd) {
693 pr_err("%s: allocate unmap_region_cmd failed\n", __func__);
694 return -ENOMEM;
695 }
696 unmap_regions = (struct adm_cmd_memory_unmap_regions *)
697 unmap_region_cmd;
698 unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
699 APR_HDR_LEN(APR_HDR_SIZE),
700 APR_PKT_VER);
701 unmap_regions->hdr.pkt_size = cmd_size;
702 unmap_regions->hdr.src_port = 0;
703 unmap_regions->hdr.dest_port = 0;
704 unmap_regions->hdr.token = 0;
705 unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS;
706 unmap_regions->nregions = bufcnt & 0x00ff;
707 unmap_regions->reserved = 0;
708 pr_debug("%s: unmap_regions->nregions = %d\n", __func__,
709 unmap_regions->nregions);
710 payload = ((u8 *) unmap_region_cmd +
711 sizeof(struct adm_cmd_memory_unmap_regions));
712 mregions = (struct adm_memory_unmap_regions *)payload;
713
714 for (i = 0; i < bufcnt; i++) {
715 mregions->phys = buf_add[i];
716 ++mregions;
717 }
718 atomic_set(&this_adm.copp_stat[0], 0);
719 ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd);
720 if (ret < 0) {
721 pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
722 unmap_regions->hdr.opcode, ret);
723 ret = -EINVAL;
724 goto fail_cmd;
725 }
726
727 ret = wait_event_timeout(this_adm.wait,
728 atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
729 if (!ret) {
730 pr_err("%s: timeout. waited for memory_unmap\n", __func__);
731 ret = -EINVAL;
732 goto fail_cmd;
733 }
734fail_cmd:
735 kfree(unmap_region_cmd);
736 return ret;
737}
738
Ben Romberger974a40d2011-07-18 15:08:21 -0700739int adm_get_copp_id(int port_index)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700740{
741 pr_debug("%s\n", __func__);
742
Ben Romberger974a40d2011-07-18 15:08:21 -0700743 if (port_index < 0) {
744 pr_err("%s: invalid port_id = %d\n", __func__, port_index);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700745 return -EINVAL;
746 }
747
Ben Romberger974a40d2011-07-18 15:08:21 -0700748 return atomic_read(&this_adm.copp_id[port_index]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700749}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700750
751int adm_close(int port_id)
752{
753 struct apr_hdr close;
754
755 int ret = 0;
Laxminath Kasam32657ec2011-08-01 19:26:57 +0530756 int index = 0;
757
758 port_id = afe_convert_virtual_to_portid(port_id);
759
760 index = afe_get_port_index(port_id);
Bharath Ramachandramurthy51a86212011-07-29 12:43:43 -0700761 if (afe_validate_port(port_id) < 0)
762 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700763
764 pr_info("%s port_id=%d index %d\n", __func__, port_id, index);
765
766 if (!(atomic_read(&this_adm.copp_cnt[index]))) {
767 pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id);
768
769 goto fail_cmd;
770 }
771 atomic_dec(&this_adm.copp_cnt[index]);
772 if (!(atomic_read(&this_adm.copp_cnt[index]))) {
773
774 close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
775 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
776 close.pkt_size = sizeof(close);
777 close.src_svc = APR_SVC_ADM;
778 close.src_domain = APR_DOMAIN_APPS;
779 close.src_port = port_id;
780 close.dest_svc = APR_SVC_ADM;
781 close.dest_domain = APR_DOMAIN_ADSP;
782 close.dest_port = atomic_read(&this_adm.copp_id[index]);
783 close.token = port_id;
784 close.opcode = ADM_CMD_COPP_CLOSE;
785
786 atomic_set(&this_adm.copp_id[index], RESET_COPP_ID);
787 atomic_set(&this_adm.copp_stat[index], 0);
788
789
790 pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
791 __func__,
792 atomic_read(&this_adm.copp_id[index]),
793 port_id, index,
794 atomic_read(&this_adm.copp_cnt[index]));
795
796 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
797 if (ret < 0) {
798 pr_err("%s ADM close failed\n", __func__);
799 ret = -EINVAL;
800 goto fail_cmd;
801 }
802
803 ret = wait_event_timeout(this_adm.wait,
804 atomic_read(&this_adm.copp_stat[index]),
805 msecs_to_jiffies(TIMEOUT_MS));
806 if (!ret) {
807 pr_err("%s: ADM cmd Route failed for port %d\n",
808 __func__, port_id);
809 ret = -EINVAL;
810 goto fail_cmd;
811 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700812 }
813
814fail_cmd:
815 return ret;
816}
817
818static int __init adm_init(void)
819{
820 int i = 0;
821 init_waitqueue_head(&this_adm.wait);
822 this_adm.apr = NULL;
823
824 for (i = 0; i < AFE_MAX_PORTS; i++) {
825 atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
826 atomic_set(&this_adm.copp_cnt[i], 0);
827 atomic_set(&this_adm.copp_stat[i], 0);
828 }
829 return 0;
830}
831
832device_initcall(adm_init);