/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
12
13#include <linux/slab.h>
14#include <linux/wait.h>
15#include <linux/sched.h>
16#include <linux/jiffies.h>
17#include <linux/uaccess.h>
18#include <linux/atomic.h>
Ben Rombergerfce8f512011-07-18 16:46:09 -070019
20#include <mach/qdsp6v2/audio_dev_ctl.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021#include <mach/qdsp6v2/audio_acdb.h>
Ben Rombergerfce8f512011-07-18 16:46:09 -070022#include <mach/qdsp6v2/rtac.h>
23
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <sound/apr_audio.h>
25#include <sound/q6afe.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026
27#define TIMEOUT_MS 1000
28#define AUDIO_RX 0x0
29#define AUDIO_TX 0x1
Patrick Laicf999112011-08-23 11:27:20 -070030
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070031#define ASM_MAX_SESSION 0x8 /* To do: define in a header */
32#define RESET_COPP_ID 99
33#define INVALID_COPP_ID 0xFF
34
/* Driver-wide ADM state, shared by every AFE port. */
struct adm_ctl {
	void *apr;				/* APR handle; NULL until first adm_open()
						 * registers, cleared again on RESET_EVENTS */
	atomic_t copp_id[AFE_MAX_PORTS];	/* COPP id per port index; RESET_COPP_ID
						 * when no COPP is open on that port */
	atomic_t copp_cnt[AFE_MAX_PORTS];	/* open refcount per port index */
	atomic_t copp_stat[AFE_MAX_PORTS];	/* command-done flag: cleared before a
						 * send, set by adm_callback() on ack */
	wait_queue_head_t wait;			/* woken by adm_callback() when copp_stat
						 * flips; waited on with a timeout */
};

static struct adm_ctl this_adm;
44
/*
 * adm_callback() - APR response/event handler for the ADM service.
 * @data: APR message (opcode, token, payload); token carries the port id
 *        set by the sender.
 * @priv: registration cookie (&this_adm), unused here.
 *
 * Runs in APR driver context.  On command acks it sets the per-port
 * copp_stat flag and wakes threads sleeping in wait_event_timeout().
 * Always returns 0.
 */
static int32_t adm_callback(struct apr_client_data *data, void *priv)
{
	uint32_t *payload;
	int i, index;
	payload = data->payload;

	if (data->opcode == RESET_EVENTS) {
		/* DSP restart: drop the APR handle and reset all per-port
		 * bookkeeping so the next adm_open() re-registers. */
		pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n",
				data->reset_event, data->reset_proc,
				this_adm.apr);
		if (this_adm.apr) {
			apr_reset(this_adm.apr);
			for (i = 0; i < AFE_MAX_PORTS; i++) {
				atomic_set(&this_adm.copp_id[i],
							RESET_COPP_ID);
				atomic_set(&this_adm.copp_cnt[i], 0);
				atomic_set(&this_adm.copp_stat[i], 0);
			}
			this_adm.apr = NULL;
		}
		return 0;
	}

	pr_debug("%s: code = 0x%x %x %x size = %d\n", __func__,
			data->opcode, payload[0], payload[1],
			data->payload_size);

	if (data->payload_size) {
		/* token was loaded with the port id by the sender */
		index = afe_get_port_index(data->token);
		pr_debug("%s: Port ID %d, index %d\n", __func__,
			data->token, index);

		if (data->opcode == APR_BASIC_RSP_RESULT) {
			pr_debug("APR_BASIC_RSP_RESULT\n");
			switch (payload[0]) {
			case ADM_CMD_SET_PARAMS:
				/* RTAC consumes its own SET_PARAMS acks; when
				 * it does, no local waiter needs waking. */
				if (rtac_make_adm_callback(payload,
						data->payload_size))
					break;
				/* fallthrough */
			case ADM_CMD_COPP_CLOSE:
			case ADM_CMD_MEMORY_MAP:
			case ADM_CMD_MEMORY_UNMAP:
			case ADM_CMD_MEMORY_MAP_REGIONS:
			case ADM_CMD_MEMORY_UNMAP_REGIONS:
			case ADM_CMD_MATRIX_MAP_ROUTINGS:
				pr_debug("ADM_CMD_MATRIX_MAP_ROUTINGS\n");
				/* ack received: release the waiter */
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait);
				break;
			default:
				pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
								payload[0]);
				break;
			}
			return 0;
		}

		switch (data->opcode) {
		case ADM_CMDRSP_COPP_OPEN: {
			struct adm_copp_open_respond *open = data->payload;
			/* DSP rejects the open with INVALID_COPP_ID; still
			 * wake the waiter so adm_open() can time-out check. */
			if (open->copp_id == INVALID_COPP_ID) {
				pr_err("%s: invalid coppid rxed %d\n",
					__func__, open->copp_id);
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait);
				break;
			}
			atomic_set(&this_adm.copp_id[index], open->copp_id);
			atomic_set(&this_adm.copp_stat[index], 1);
			pr_debug("%s: coppid rxed=%d\n", __func__,
							open->copp_id);
			wake_up(&this_adm.wait);
			}
			break;
		case ADM_CMDRSP_GET_PARAMS:
			pr_debug("%s: ADM_CMDRSP_GET_PARAMS\n", __func__);
			rtac_make_adm_callback(payload,
				data->payload_size);
			break;
		default:
			pr_err("%s: Unknown cmd:0x%x\n", __func__,
							data->opcode);
			break;
		}
	}
	return 0;
}
132
Ben Rombergerdcab5472011-12-08 19:20:12 -0800133int send_adm_cal_block(int port_id, struct acdb_cal_block *aud_cal)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700134{
Ben Rombergerdcab5472011-12-08 19:20:12 -0800135 s32 result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700136 struct adm_set_params_command adm_params;
137 int index = afe_get_port_index(port_id);
138
139 pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index);
140
141 if (!aud_cal || aud_cal->cal_size == 0) {
Ben Rombergerdcab5472011-12-08 19:20:12 -0800142 pr_debug("%s: No ADM cal to send for port_id = %d!\n",
143 __func__, port_id);
144 result = -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700145 goto done;
146 }
147
148 adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
149 APR_HDR_LEN(20), APR_PKT_VER);
150 adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
151 sizeof(adm_params));
152 adm_params.hdr.src_svc = APR_SVC_ADM;
153 adm_params.hdr.src_domain = APR_DOMAIN_APPS;
154 adm_params.hdr.src_port = port_id;
155 adm_params.hdr.dest_svc = APR_SVC_ADM;
156 adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
157 adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
158 adm_params.hdr.token = port_id;
159 adm_params.hdr.opcode = ADM_CMD_SET_PARAMS;
160 adm_params.payload = aud_cal->cal_paddr;
161 adm_params.payload_size = aud_cal->cal_size;
162
163 atomic_set(&this_adm.copp_stat[index], 0);
164 pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
165 __func__, adm_params.payload, adm_params.payload_size);
166 result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
167 if (result < 0) {
168 pr_err("%s: Set params failed port = %d payload = 0x%x\n",
169 __func__, port_id, aud_cal->cal_paddr);
Ben Rombergerdcab5472011-12-08 19:20:12 -0800170 result = -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700171 goto done;
172 }
173 /* Wait for the callback */
174 result = wait_event_timeout(this_adm.wait,
175 atomic_read(&this_adm.copp_stat[index]),
176 msecs_to_jiffies(TIMEOUT_MS));
Ben Rombergerdcab5472011-12-08 19:20:12 -0800177 if (!result) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 pr_err("%s: Set params timed out port = %d, payload = 0x%x\n",
179 __func__, port_id, aud_cal->cal_paddr);
Ben Rombergerdcab5472011-12-08 19:20:12 -0800180 result = -EINVAL;
181 goto done;
182 }
183
184 result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700185done:
Ben Rombergerdcab5472011-12-08 19:20:12 -0800186 return result;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700187}
188
189void send_adm_cal(int port_id, int path)
190{
191 s32 acdb_path;
192 struct acdb_cal_block aud_cal;
193
194 pr_debug("%s\n", __func__);
195
196 /* Maps audio_dev_ctrl path definition to ACDB definition */
197 acdb_path = path - 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700198
199 pr_debug("%s: Sending audproc cal\n", __func__);
200 get_audproc_cal(acdb_path, &aud_cal);
Ben Rombergerdcab5472011-12-08 19:20:12 -0800201 if (!send_adm_cal_block(port_id, &aud_cal))
202 pr_info("%s: Audproc cal sent for port id: %d, path %d\n",
203 __func__, port_id, acdb_path);
204 else
205 pr_info("%s: Audproc cal not sent for port id: %d, path %d\n",
206 __func__, port_id, acdb_path);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207
208 pr_debug("%s: Sending audvol cal\n", __func__);
209 get_audvol_cal(acdb_path, &aud_cal);
Ben Rombergerdcab5472011-12-08 19:20:12 -0800210 if (!send_adm_cal_block(port_id, &aud_cal))
211 pr_info("%s: Audvol cal sent for port id: %d, path %d\n",
212 __func__, port_id, acdb_path);
213 else
214 pr_info("%s: Audvol cal not sent for port id: %d, path %d\n",
215 __func__, port_id, acdb_path);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700216}
217
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
219{
220 struct adm_copp_open_command open;
221 int ret = 0;
222 int index;
223
224 pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
225 port_id, path, rate, channel_mode);
226
Laxminath Kasam32657ec2011-08-01 19:26:57 +0530227 port_id = afe_convert_virtual_to_portid(port_id);
228
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700229 if (afe_validate_port(port_id) < 0) {
230 pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
231 return -ENODEV;
232 }
233
234 index = afe_get_port_index(port_id);
235 pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
236
237 if (this_adm.apr == NULL) {
238 this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
239 0xFFFFFFFF, &this_adm);
240 if (this_adm.apr == NULL) {
241 pr_err("%s: Unable to register ADM\n", __func__);
242 ret = -ENODEV;
243 return ret;
244 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700245 rtac_set_adm_handle(this_adm.apr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700246 }
247
248
249 /* Create a COPP if port id are not enabled */
250 if (atomic_read(&this_adm.copp_cnt[index]) == 0) {
251
252 open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
253 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
254 open.hdr.pkt_size = sizeof(open);
255 open.hdr.src_svc = APR_SVC_ADM;
256 open.hdr.src_domain = APR_DOMAIN_APPS;
257 open.hdr.src_port = port_id;
258 open.hdr.dest_svc = APR_SVC_ADM;
259 open.hdr.dest_domain = APR_DOMAIN_ADSP;
260 open.hdr.dest_port = port_id;
261 open.hdr.token = port_id;
262 open.hdr.opcode = ADM_CMD_COPP_OPEN;
263
264 open.mode = path;
265 open.endpoint_id1 = port_id;
266 open.endpoint_id2 = 0xFFFF;
267
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700268 /* convert path to acdb path */
Ben Romberger974a40d2011-07-18 15:08:21 -0700269 if (path == ADM_PATH_PLAYBACK)
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700270 open.topology_id = get_adm_rx_topology();
Jay Wang4fa2ee42011-07-18 00:21:22 -0700271 else {
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700272 open.topology_id = get_adm_tx_topology();
Jay Wang4fa2ee42011-07-18 00:21:22 -0700273 if ((open.topology_id ==
274 VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
275 (open.topology_id ==
Jayasena Sangaraboina0fc197d2011-12-09 13:20:33 -0800276 VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
277 (open.topology_id ==
278 VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY))
Jay Wang4fa2ee42011-07-18 00:21:22 -0700279 rate = 16000;
280 }
Ben Rombergerc49b85d2011-07-15 18:55:34 -0700281
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700282 if (open.topology_id == 0)
283 open.topology_id = topology;
284
285 open.channel_config = channel_mode & 0x00FF;
286 open.rate = rate;
287
288 pr_debug("%s: channel_config=%d port_id=%d rate=%d\
289 topology_id=0x%X\n", __func__, open.channel_config,\
290 open.endpoint_id1, open.rate,\
291 open.topology_id);
292
293 atomic_set(&this_adm.copp_stat[index], 0);
294
295 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
296 if (ret < 0) {
297 pr_err("%s:ADM enable for port %d failed\n",
298 __func__, port_id);
299 ret = -EINVAL;
300 goto fail_cmd;
301 }
302 /* Wait for the callback with copp id */
303 ret = wait_event_timeout(this_adm.wait,
304 atomic_read(&this_adm.copp_stat[index]),
305 msecs_to_jiffies(TIMEOUT_MS));
306 if (!ret) {
307 pr_err("%s ADM open failed for port %d\n", __func__,
308 port_id);
309 ret = -EINVAL;
310 goto fail_cmd;
311 }
312 }
313 atomic_inc(&this_adm.copp_cnt[index]);
314 return 0;
315
316fail_cmd:
317
318 return ret;
319}
320
321int adm_matrix_map(int session_id, int path, int num_copps,
322 unsigned int *port_id, int copp_id)
323{
324 struct adm_routings_command route;
325 int ret = 0, i = 0;
326 /* Assumes port_ids have already been validated during adm_open */
327 int index = afe_get_port_index(copp_id);
328
329 pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
330 __func__, session_id, path, num_copps, port_id[0]);
331
332 route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
333 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
334 route.hdr.pkt_size = sizeof(route);
335 route.hdr.src_svc = 0;
336 route.hdr.src_domain = APR_DOMAIN_APPS;
337 route.hdr.src_port = copp_id;
338 route.hdr.dest_svc = APR_SVC_ADM;
339 route.hdr.dest_domain = APR_DOMAIN_ADSP;
340 route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
341 route.hdr.token = copp_id;
342 route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
343 route.num_sessions = 1;
344 route.session[0].id = session_id;
345 route.session[0].num_copps = num_copps;
346
347 for (i = 0; i < num_copps; i++) {
348 int tmp;
Laxminath Kasam32657ec2011-08-01 19:26:57 +0530349 port_id[i] = afe_convert_virtual_to_portid(port_id[i]);
350
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700351 tmp = afe_get_port_index(port_id[i]);
352
353 pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i,
354 port_id[i], tmp);
355
356 route.session[0].copp_id[i] =
357 atomic_read(&this_adm.copp_id[tmp]);
358 }
359 if (num_copps % 2)
360 route.session[0].copp_id[i] = 0;
361
362 switch (path) {
363 case 0x1:
364 route.path = AUDIO_RX;
365 break;
366 case 0x2:
367 case 0x3:
368 route.path = AUDIO_TX;
369 break;
370 default:
371 pr_err("%s: Wrong path set[%d]\n", __func__, path);
372 break;
373 }
374 atomic_set(&this_adm.copp_stat[index], 0);
375
376 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
377 if (ret < 0) {
378 pr_err("%s: ADM routing for port %d failed\n",
379 __func__, port_id[0]);
380 ret = -EINVAL;
381 goto fail_cmd;
382 }
383 ret = wait_event_timeout(this_adm.wait,
384 atomic_read(&this_adm.copp_stat[index]),
385 msecs_to_jiffies(TIMEOUT_MS));
386 if (!ret) {
387 pr_err("%s: ADM cmd Route failed for port %d\n",
388 __func__, port_id[0]);
389 ret = -EINVAL;
390 goto fail_cmd;
391 }
392
393 for (i = 0; i < num_copps; i++)
394 send_adm_cal(port_id[i], path);
395
Ben Romberger974a40d2011-07-18 15:08:21 -0700396 for (i = 0; i < num_copps; i++)
397 rtac_add_adm_device(port_id[i], atomic_read(&this_adm.copp_id
398 [afe_get_port_index(port_id[i])]),
399 path, session_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700400 return 0;
401
402fail_cmd:
403
404 return ret;
405}
406
/*
 * adm_memory_map_regions() - map @bufcnt physical buffers into the ADM
 * service's address space via ADM_CMD_MEMORY_MAP_REGIONS.
 * @buf_add:    array of physical addresses, one per region.
 * @mempool_id: memory pool id (low byte used).
 * @bufsz:      array of region sizes, parallel to @buf_add.
 * @bufcnt:     number of regions (low byte used in the command).
 *
 * Blocks up to 5 s for the DSP ack on copp_stat[0].  Returns 0 on
 * success, -ENODEV/-ENOMEM/-EINVAL on failure.
 */
int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
				uint32_t *bufsz, uint32_t bufcnt)
{
	struct  adm_cmd_memory_map_regions *mmap_regions = NULL;
	struct  adm_memory_map_regions *mregions = NULL;
	void    *mmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;

	pr_info("%s\n", __func__);
	/* lazily register with APR on first use; RTAC shares the handle */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	/* one variable-length command: header + bufcnt region descriptors */
	cmd_size = sizeof(struct adm_cmd_memory_map_regions)
			+ sizeof(struct adm_memory_map_regions) * bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
								APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS;
	mmap_regions->mempool_id = mempool_id & 0x00ff;
	mmap_regions->nregions = bufcnt & 0x00ff;
	pr_debug("%s: map_regions->nregions = %d\n", __func__,
				mmap_regions->nregions);
	/* region descriptors start right after the fixed header */
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct adm_cmd_memory_map_regions));
	mregions = (struct adm_memory_map_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		mregions->buf_size = bufsz[i];
		++mregions;
	}

	/* token is 0, so the callback wakes us via copp_stat[0] */
	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
481
/*
 * adm_memory_unmap_regions() - unmap previously mapped regions via
 * ADM_CMD_MEMORY_UNMAP_REGIONS.
 * @buf_add: array of physical addresses to unmap.
 * @bufsz:   sizes array; unused by the unmap command but kept for
 *           signature symmetry with adm_memory_map_regions().
 * @bufcnt:  number of regions (low byte used in the command).
 *
 * Blocks up to 5 s for the DSP ack on copp_stat[0].  Returns 0 on
 * success, -EINVAL/-ENOMEM on failure.
 */
int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz,
						uint32_t bufcnt)
{
	struct  adm_cmd_memory_unmap_regions *unmap_regions = NULL;
	struct  adm_memory_unmap_regions *mregions = NULL;
	void    *unmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;

	pr_info("%s\n", __func__);

	/* unlike map, unmap requires an already-registered APR handle */
	if (this_adm.apr == NULL) {
		pr_err("%s APR handle NULL\n", __func__);
		return -EINVAL;
	}

	/* one variable-length command: header + bufcnt region descriptors */
	cmd_size = sizeof(struct adm_cmd_memory_unmap_regions)
			+ sizeof(struct adm_memory_unmap_regions) * bufcnt;

	unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!unmap_region_cmd) {
		pr_err("%s: allocate unmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	unmap_regions = (struct adm_cmd_memory_unmap_regions *)
						unmap_region_cmd;
	unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
							APR_PKT_VER);
	unmap_regions->hdr.pkt_size = cmd_size;
	unmap_regions->hdr.src_port = 0;
	unmap_regions->hdr.dest_port = 0;
	unmap_regions->hdr.token = 0;
	unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS;
	unmap_regions->nregions = bufcnt & 0x00ff;
	unmap_regions->reserved = 0;
	pr_debug("%s: unmap_regions->nregions = %d\n", __func__,
				unmap_regions->nregions);
	/* region descriptors start right after the fixed header */
	payload = ((u8 *) unmap_region_cmd +
			sizeof(struct adm_cmd_memory_unmap_regions));
	mregions = (struct adm_memory_unmap_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		++mregions;
	}
	/* token is 0, so the callback wakes us via copp_stat[0] */
	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
				unmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_unmap\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(unmap_region_cmd);
	return ret;
}
550
Ben Romberger974a40d2011-07-18 15:08:21 -0700551int adm_get_copp_id(int port_index)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700552{
553 pr_debug("%s\n", __func__);
554
Ben Romberger974a40d2011-07-18 15:08:21 -0700555 if (port_index < 0) {
556 pr_err("%s: invalid port_id = %d\n", __func__, port_index);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700557 return -EINVAL;
558 }
559
Ben Romberger974a40d2011-07-18 15:08:21 -0700560 return atomic_read(&this_adm.copp_id[port_index]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700561}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700562
/*
 * adm_close() - drop one reference to the COPP on @port_id and, when the
 * refcount hits zero, send ADM_CMD_COPP_CLOSE and wait for the ack.
 *
 * Returns 0 on success (including when only the refcount was dropped,
 * and when the count was already 0 — the error is logged but 0 is
 * returned); -EINVAL on bad port, send failure, or ack timeout.
 */
int adm_close(int port_id)
{
	struct apr_hdr close;

	int ret = 0;
	int index = 0;

	port_id = afe_convert_virtual_to_portid(port_id);

	index = afe_get_port_index(port_id);
	if (afe_validate_port(port_id) < 0)
		return -EINVAL;

	pr_info("%s port_id=%d index %d\n", __func__, port_id, index);

	if (!(atomic_read(&this_adm.copp_cnt[index]))) {
		/* nothing open on this port; ret stays 0 */
		pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id);

		goto fail_cmd;
	}
	atomic_dec(&this_adm.copp_cnt[index]);
	/* last reference gone: actually close the COPP on the DSP */
	if (!(atomic_read(&this_adm.copp_cnt[index]))) {

		close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		close.pkt_size = sizeof(close);
		close.src_svc = APR_SVC_ADM;
		close.src_domain = APR_DOMAIN_APPS;
		close.src_port = port_id;
		close.dest_svc = APR_SVC_ADM;
		close.dest_domain = APR_DOMAIN_ADSP;
		close.dest_port = atomic_read(&this_adm.copp_id[index]);
		close.token = port_id;	/* echoed back in the callback */
		close.opcode = ADM_CMD_COPP_CLOSE;

		/* invalidate local id before the DSP round-trip */
		atomic_set(&this_adm.copp_id[index], RESET_COPP_ID);
		atomic_set(&this_adm.copp_stat[index], 0);


		pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
				__func__,
				atomic_read(&this_adm.copp_id[index]),
				port_id, index,
				atomic_read(&this_adm.copp_cnt[index]));

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
		if (ret < 0) {
			pr_err("%s ADM close failed\n", __func__);
			ret = -EINVAL;
			goto fail_cmd;
		}

		ret = wait_event_timeout(this_adm.wait,
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s: ADM cmd Route failed for port %d\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}

		rtac_remove_adm_device(port_id);
	}

fail_cmd:
	return ret;
}
631
632static int __init adm_init(void)
633{
634 int i = 0;
635 init_waitqueue_head(&this_adm.wait);
636 this_adm.apr = NULL;
637
638 for (i = 0; i < AFE_MAX_PORTS; i++) {
639 atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
640 atomic_set(&this_adm.copp_cnt[i], 0);
641 atomic_set(&this_adm.copp_stat[i], 0);
642 }
643 return 0;
644}
645
646device_initcall(adm_init);