blob: 2710fbb3e8afad1e1eda58b41a95241acc462869 [file] [log] [blame]
Ben Romberger48fabc32012-01-06 17:39:39 -08001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/wait.h>
15#include <linux/sched.h>
16#include <linux/jiffies.h>
17#include <linux/uaccess.h>
18#include <linux/atomic.h>
Ben Rombergerfce8f512011-07-18 16:46:09 -070019
20#include <mach/qdsp6v2/audio_dev_ctl.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021#include <mach/qdsp6v2/audio_acdb.h>
Ben Rombergerfce8f512011-07-18 16:46:09 -070022#include <mach/qdsp6v2/rtac.h>
23
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <sound/apr_audio.h>
25#include <sound/q6afe.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026
27#define TIMEOUT_MS 1000
28#define AUDIO_RX 0x0
29#define AUDIO_TX 0x1
Patrick Laicf999112011-08-23 11:27:20 -070030
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070031#define ASM_MAX_SESSION 0x8 /* To do: define in a header */
32#define RESET_COPP_ID 99
33#define INVALID_COPP_ID 0xFF
34
/* Driver-wide ADM state: one shared APR handle plus per-port COPP
 * bookkeeping, indexed by AFE port index (0..AFE_MAX_PORTS-1). */
struct adm_ctl {
	void *apr;				/* APR handle to ADSP ADM service */
	atomic_t copp_id[AFE_MAX_PORTS];	/* COPP id from COPP_OPEN response */
	atomic_t copp_cnt[AFE_MAX_PORTS];	/* open refcount per port */
	atomic_t copp_stat[AFE_MAX_PORTS];	/* 1 = response received for port */
	wait_queue_head_t wait;			/* commands sleep here for acks */
};
42
/* Calibration blocks currently mapped to the ADSP, per ACDB path;
 * compared against fresh ACDB data to decide when to remap. */
static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
static struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];

/* Singleton driver state (initialized in adm_init). */
static struct adm_ctl this_adm;
47
48static int32_t adm_callback(struct apr_client_data *data, void *priv)
49{
50 uint32_t *payload;
51 int i, index;
52 payload = data->payload;
53
54 if (data->opcode == RESET_EVENTS) {
55 pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n",
56 data->reset_event, data->reset_proc,
57 this_adm.apr);
58 if (this_adm.apr) {
59 apr_reset(this_adm.apr);
60 for (i = 0; i < AFE_MAX_PORTS; i++) {
61 atomic_set(&this_adm.copp_id[i],
62 RESET_COPP_ID);
63 atomic_set(&this_adm.copp_cnt[i], 0);
64 atomic_set(&this_adm.copp_stat[i], 0);
65 }
66 this_adm.apr = NULL;
67 }
68 return 0;
69 }
70
71 pr_debug("%s: code = 0x%x %x %x size = %d\n", __func__,
72 data->opcode, payload[0], payload[1],
73 data->payload_size);
74
75 if (data->payload_size) {
76 index = afe_get_port_index(data->token);
77 pr_debug("%s: Port ID %d, index %d\n", __func__,
78 data->token, index);
79
80 if (data->opcode == APR_BASIC_RSP_RESULT) {
81 pr_debug("APR_BASIC_RSP_RESULT\n");
82 switch (payload[0]) {
83 case ADM_CMD_SET_PARAMS:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070084 if (rtac_make_adm_callback(payload,
85 data->payload_size))
86 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070087 case ADM_CMD_COPP_CLOSE:
88 case ADM_CMD_MEMORY_MAP:
89 case ADM_CMD_MEMORY_UNMAP:
90 case ADM_CMD_MEMORY_MAP_REGIONS:
91 case ADM_CMD_MEMORY_UNMAP_REGIONS:
92 case ADM_CMD_MATRIX_MAP_ROUTINGS:
93 pr_debug("ADM_CMD_MATRIX_MAP_ROUTINGS\n");
94 atomic_set(&this_adm.copp_stat[index], 1);
95 wake_up(&this_adm.wait);
96 break;
97 default:
98 pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
99 payload[0]);
100 break;
101 }
102 return 0;
103 }
104
105 switch (data->opcode) {
Kiran Kandi5e809b02012-01-31 00:24:33 -0800106 case ADM_CMDRSP_COPP_OPEN:
107 case ADM_CMDRSP_MULTI_CHANNEL_COPP_OPEN: {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700108 struct adm_copp_open_respond *open = data->payload;
109 if (open->copp_id == INVALID_COPP_ID) {
110 pr_err("%s: invalid coppid rxed %d\n",
111 __func__, open->copp_id);
112 atomic_set(&this_adm.copp_stat[index], 1);
113 wake_up(&this_adm.wait);
114 break;
115 }
116 atomic_set(&this_adm.copp_id[index], open->copp_id);
117 atomic_set(&this_adm.copp_stat[index], 1);
118 pr_debug("%s: coppid rxed=%d\n", __func__,
119 open->copp_id);
120 wake_up(&this_adm.wait);
121 }
122 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700123 case ADM_CMDRSP_GET_PARAMS:
Swaminathan Sathappan88163a72011-08-01 16:01:14 -0700124 pr_debug("%s: ADM_CMDRSP_GET_PARAMS\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700125 rtac_make_adm_callback(payload,
126 data->payload_size);
127 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700128 default:
129 pr_err("%s: Unknown cmd:0x%x\n", __func__,
130 data->opcode);
131 break;
132 }
133 }
134 return 0;
135}
136
Ben Romberger48fabc32012-01-06 17:39:39 -0800137static int send_adm_cal_block(int port_id, struct acdb_cal_block *aud_cal)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700138{
Ben Rombergerdcab5472011-12-08 19:20:12 -0800139 s32 result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700140 struct adm_set_params_command adm_params;
141 int index = afe_get_port_index(port_id);
142
143 pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index);
144
145 if (!aud_cal || aud_cal->cal_size == 0) {
Ben Rombergerdcab5472011-12-08 19:20:12 -0800146 pr_debug("%s: No ADM cal to send for port_id = %d!\n",
147 __func__, port_id);
148 result = -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149 goto done;
150 }
151
152 adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
153 APR_HDR_LEN(20), APR_PKT_VER);
154 adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
155 sizeof(adm_params));
156 adm_params.hdr.src_svc = APR_SVC_ADM;
157 adm_params.hdr.src_domain = APR_DOMAIN_APPS;
158 adm_params.hdr.src_port = port_id;
159 adm_params.hdr.dest_svc = APR_SVC_ADM;
160 adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
161 adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
162 adm_params.hdr.token = port_id;
163 adm_params.hdr.opcode = ADM_CMD_SET_PARAMS;
164 adm_params.payload = aud_cal->cal_paddr;
165 adm_params.payload_size = aud_cal->cal_size;
166
167 atomic_set(&this_adm.copp_stat[index], 0);
168 pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
169 __func__, adm_params.payload, adm_params.payload_size);
170 result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
171 if (result < 0) {
172 pr_err("%s: Set params failed port = %d payload = 0x%x\n",
173 __func__, port_id, aud_cal->cal_paddr);
Ben Rombergerdcab5472011-12-08 19:20:12 -0800174 result = -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700175 goto done;
176 }
177 /* Wait for the callback */
178 result = wait_event_timeout(this_adm.wait,
179 atomic_read(&this_adm.copp_stat[index]),
180 msecs_to_jiffies(TIMEOUT_MS));
Ben Rombergerdcab5472011-12-08 19:20:12 -0800181 if (!result) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700182 pr_err("%s: Set params timed out port = %d, payload = 0x%x\n",
183 __func__, port_id, aud_cal->cal_paddr);
Ben Rombergerdcab5472011-12-08 19:20:12 -0800184 result = -EINVAL;
185 goto done;
186 }
187
188 result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700189done:
Ben Rombergerdcab5472011-12-08 19:20:12 -0800190 return result;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700191}
192
/*
 * send_adm_cal() - fetch, map (if needed) and send audproc and audvol
 * calibration for one port.
 *
 * @port_id: AFE port to calibrate.
 * @path: audio_dev_ctrl path id; mapped to ACDB path by subtracting 1
 *        (assumes path >= 1 — TODO confirm callers never pass 0).
 *
 * Best-effort: failures are logged, not returned.  The previously
 * mapped buffer for a path is unmapped only when the ACDB hands back a
 * different physical address, so repeat sends reuse the mapping.
 */
static void send_adm_cal(int port_id, int path)
{
	int result = 0;
	s32 acdb_path;
	struct acdb_cal_block aud_cal;

	pr_debug("%s\n", __func__);

	/* Maps audio_dev_ctrl path definition to ACDB definition */
	acdb_path = path - 1;

	pr_debug("%s: Sending audproc cal\n", __func__);
	get_audproc_cal(acdb_path, &aud_cal);

	/* map & cache buffers used */
	if ((mem_addr_audproc[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
		(aud_cal.cal_size > 0)) {
		/* A different buffer is now in use: drop the old mapping */
		if (mem_addr_audproc[acdb_path].cal_paddr != 0)
			adm_memory_unmap_regions(
				&mem_addr_audproc[acdb_path].cal_paddr,
				&mem_addr_audproc[acdb_path].cal_size, 1);

		result = adm_memory_map_regions(&aud_cal.cal_paddr, 0,
						&aud_cal.cal_size, 1);
		if (result < 0)
			pr_err("ADM audproc mmap did not work! path = %d, "
				"addr = 0x%x, size = %d\n", acdb_path,
				aud_cal.cal_paddr, aud_cal.cal_size);
		else
			mem_addr_audproc[acdb_path] = aud_cal;
	}

	if (!send_adm_cal_block(port_id, &aud_cal))
		pr_debug("%s: Audproc cal sent for port id: %d, path %d\n",
			__func__, port_id, acdb_path);
	else
		pr_debug("%s: Audproc cal not sent for port id: %d, path %d\n",
			__func__, port_id, acdb_path);

	pr_debug("%s: Sending audvol cal\n", __func__);
	get_audvol_cal(acdb_path, &aud_cal);

	/* map & cache buffers used (same scheme as audproc above) */
	if ((mem_addr_audvol[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
		(aud_cal.cal_size > 0)) {
		if (mem_addr_audvol[acdb_path].cal_paddr != 0)
			adm_memory_unmap_regions(
				&mem_addr_audvol[acdb_path].cal_paddr,
				&mem_addr_audvol[acdb_path].cal_size, 1);

		result = adm_memory_map_regions(&aud_cal.cal_paddr, 0,
						&aud_cal.cal_size, 1);
		if (result < 0)
			pr_err("ADM audvol mmap did not work! path = %d, "
				"addr = 0x%x, size = %d\n", acdb_path,
				aud_cal.cal_paddr, aud_cal.cal_size);
		else
			mem_addr_audvol[acdb_path] = aud_cal;
	}

	if (!send_adm_cal_block(port_id, &aud_cal))
		pr_debug("%s: Audvol cal sent for port id: %d, path %d\n",
			__func__, port_id, acdb_path);
	else
		pr_debug("%s: Audvol cal not sent for port id: %d, path %d\n",
			__func__, port_id, acdb_path);
}
260
/*
 * adm_open() - open (or take another reference on) a COPP for a port.
 *
 * @port_id: AFE port (virtual ids converted internally).
 * @path: ADM_PATH_* direction, also sent as the COPP mode.
 * @rate: sample rate; forced to 16000 for ECNS/Fluence TX topologies.
 * @channel_mode: channel count (low byte used).
 * @topology: fallback topology when ACDB provides none.
 *
 * A COPP_OPEN command is only sent when this port's refcount is 0;
 * otherwise the existing COPP is reused and only the count increments.
 * Returns 0 on success, -ENODEV on bad port or APR registration
 * failure, -EINVAL on send failure or ack timeout.
 */
int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
{
	struct adm_copp_open_command open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
				port_id, path, rate, channel_mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	/* Lazily register with APR on first use. */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}


	/* Create a COPP if port id are not enabled */
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		/* token echoes back in adm_callback to find this port */
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_COPP_OPEN;

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;

		/* convert path to acdb path */
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			/* ECNS/Fluence TX topologies require 16 kHz */
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		/* ACDB has no topology for this path: use caller's */
		if (open.topology_id == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate  = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d\
			topology_id=0x%X\n", __func__, open.channel_config,\
			open.endpoint_id1, open.rate,\
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}
363
Kiran Kandi5e809b02012-01-31 00:24:33 -0800364
/*
 * adm_multi_ch_copp_open() - open a multi-channel COPP for a port.
 *
 * Same refcounting scheme as adm_open(), but sends
 * ADM_CMD_MULTI_CHANNEL_COPP_OPEN with an explicit speaker/channel
 * mapping.  Only 1, 2 or 6 channels are supported.
 *
 * Returns 0 on success, -ENODEV on bad port or APR registration
 * failure, -EINVAL on unsupported channel count, send failure or
 * ack timeout.
 */
int adm_multi_ch_copp_open(int port_id, int path, int rate, int channel_mode,
				int topology)
{
	struct adm_multi_ch_copp_open_command open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d channel :%d\n", __func__,
				port_id, path, rate, channel_mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	/* Lazily register with APR on first use. */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	/* Create a COPP if port id are not enabled */
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);

		open.hdr.pkt_size =
			sizeof(struct adm_multi_ch_copp_open_command);
		open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN;
		/* Clear all 8 mapping slots before filling. */
		memset(open.dev_channel_mapping, 0, 8);

		/* Fixed speaker layouts for mono, stereo and 5.1. */
		if (channel_mode == 1)	{
			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
		} else if (channel_mode == 2) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
		} else if (channel_mode == 6) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
		} else {
			pr_err("%s invalid num_chan %d\n", __func__,
					channel_mode);
			return -EINVAL;
		}


		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		/* token echoes back in adm_callback to find this port */
		open.hdr.token = port_id;

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;

		/* convert path to acdb path */
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			/* ECNS/Fluence TX topologies require 16 kHz */
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		/* ACDB has no topology for this path: use caller's */
		if (open.topology_id == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate  = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d"
			" topology_id=0x%X\n", __func__, open.channel_config,
			open.endpoint_id1, open.rate,
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}
490
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700491int adm_matrix_map(int session_id, int path, int num_copps,
492 unsigned int *port_id, int copp_id)
493{
494 struct adm_routings_command route;
495 int ret = 0, i = 0;
496 /* Assumes port_ids have already been validated during adm_open */
497 int index = afe_get_port_index(copp_id);
498
499 pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
500 __func__, session_id, path, num_copps, port_id[0]);
501
502 route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
503 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
504 route.hdr.pkt_size = sizeof(route);
505 route.hdr.src_svc = 0;
506 route.hdr.src_domain = APR_DOMAIN_APPS;
507 route.hdr.src_port = copp_id;
508 route.hdr.dest_svc = APR_SVC_ADM;
509 route.hdr.dest_domain = APR_DOMAIN_ADSP;
510 route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
511 route.hdr.token = copp_id;
512 route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
513 route.num_sessions = 1;
514 route.session[0].id = session_id;
515 route.session[0].num_copps = num_copps;
516
517 for (i = 0; i < num_copps; i++) {
518 int tmp;
Laxminath Kasam32657ec2011-08-01 19:26:57 +0530519 port_id[i] = afe_convert_virtual_to_portid(port_id[i]);
520
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700521 tmp = afe_get_port_index(port_id[i]);
522
523 pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i,
524 port_id[i], tmp);
525
526 route.session[0].copp_id[i] =
527 atomic_read(&this_adm.copp_id[tmp]);
528 }
529 if (num_copps % 2)
530 route.session[0].copp_id[i] = 0;
531
532 switch (path) {
533 case 0x1:
534 route.path = AUDIO_RX;
535 break;
536 case 0x2:
537 case 0x3:
538 route.path = AUDIO_TX;
539 break;
540 default:
541 pr_err("%s: Wrong path set[%d]\n", __func__, path);
542 break;
543 }
544 atomic_set(&this_adm.copp_stat[index], 0);
545
546 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
547 if (ret < 0) {
548 pr_err("%s: ADM routing for port %d failed\n",
549 __func__, port_id[0]);
550 ret = -EINVAL;
551 goto fail_cmd;
552 }
553 ret = wait_event_timeout(this_adm.wait,
554 atomic_read(&this_adm.copp_stat[index]),
555 msecs_to_jiffies(TIMEOUT_MS));
556 if (!ret) {
557 pr_err("%s: ADM cmd Route failed for port %d\n",
558 __func__, port_id[0]);
559 ret = -EINVAL;
560 goto fail_cmd;
561 }
562
563 for (i = 0; i < num_copps; i++)
564 send_adm_cal(port_id[i], path);
565
Ben Romberger974a40d2011-07-18 15:08:21 -0700566 for (i = 0; i < num_copps; i++)
567 rtac_add_adm_device(port_id[i], atomic_read(&this_adm.copp_id
568 [afe_get_port_index(port_id[i])]),
569 path, session_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700570 return 0;
571
572fail_cmd:
573
574 return ret;
575}
576
/*
 * adm_memory_map_regions() - map physical buffers into the ADSP via
 * ADM_CMD_MEMORY_MAP_REGIONS and wait for the ack.
 *
 * @buf_add: array of @bufcnt physical addresses.
 * @mempool_id: ADSP memory pool (low byte used).
 * @bufsz: array of @bufcnt buffer sizes.
 * @bufcnt: number of regions (low byte used).
 *
 * Uses copp_stat[0] for the ack handshake, so concurrent map/unmap
 * calls would race — NOTE(review): callers appear to serialize this;
 * confirm before adding new call sites.
 *
 * Returns 0 on success, -ENODEV/-ENOMEM/-EINVAL on failure.
 */
int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
				uint32_t *bufsz, uint32_t bufcnt)
{
	struct  adm_cmd_memory_map_regions *mmap_regions = NULL;
	struct  adm_memory_map_regions *mregions = NULL;
	void    *mmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;

	pr_debug("%s\n", __func__);
	/* Lazily register with APR on first use. */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	/* One header plus one region descriptor per buffer. */
	cmd_size = sizeof(struct adm_cmd_memory_map_regions)
			+ sizeof(struct adm_memory_map_regions) * bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
								APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS;
	mmap_regions->mempool_id = mempool_id & 0x00ff;
	mmap_regions->nregions = bufcnt & 0x00ff;
	pr_debug("%s: map_regions->nregions = %d\n", __func__,
				mmap_regions->nregions);
	/* Region descriptors follow the fixed header. */
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct adm_cmd_memory_map_regions));
	mregions = (struct adm_memory_map_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		mregions->buf_size = bufsz[i];
		++mregions;
	}

	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
651
/*
 * adm_memory_unmap_regions() - unmap previously mapped buffers from
 * the ADSP via ADM_CMD_MEMORY_UNMAP_REGIONS and wait for the ack.
 *
 * @buf_add: array of @bufcnt physical addresses to unmap.
 * @bufsz: sizes (unused by the command; kept for interface symmetry).
 * @bufcnt: number of regions (low byte used).
 *
 * Uses copp_stat[0] for the ack handshake, same caveat as
 * adm_memory_map_regions().  Requires an existing APR handle.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM on failure.
 */
int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz,
						uint32_t bufcnt)
{
	struct  adm_cmd_memory_unmap_regions *unmap_regions = NULL;
	struct  adm_memory_unmap_regions *mregions = NULL;
	void    *unmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;

	pr_debug("%s\n", __func__);

	if (this_adm.apr == NULL) {
		pr_err("%s APR handle NULL\n", __func__);
		return -EINVAL;
	}

	/* One header plus one region descriptor per buffer. */
	cmd_size = sizeof(struct adm_cmd_memory_unmap_regions)
			+ sizeof(struct adm_memory_unmap_regions) * bufcnt;

	unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!unmap_region_cmd) {
		pr_err("%s: allocate unmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	unmap_regions = (struct adm_cmd_memory_unmap_regions *)
						unmap_region_cmd;
	unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
							APR_PKT_VER);
	unmap_regions->hdr.pkt_size = cmd_size;
	unmap_regions->hdr.src_port = 0;
	unmap_regions->hdr.dest_port = 0;
	unmap_regions->hdr.token = 0;
	unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS;
	unmap_regions->nregions = bufcnt & 0x00ff;
	unmap_regions->reserved = 0;
	pr_debug("%s: unmap_regions->nregions = %d\n", __func__,
				unmap_regions->nregions);
	/* Region descriptors follow the fixed header. */
	payload = ((u8 *) unmap_region_cmd +
			sizeof(struct adm_cmd_memory_unmap_regions));
	mregions = (struct adm_memory_unmap_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		++mregions;
	}
	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
				unmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_unmap\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(unmap_region_cmd);
	return ret;
}
720
Ben Romberger974a40d2011-07-18 15:08:21 -0700721int adm_get_copp_id(int port_index)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700722{
723 pr_debug("%s\n", __func__);
724
Ben Romberger974a40d2011-07-18 15:08:21 -0700725 if (port_index < 0) {
726 pr_err("%s: invalid port_id = %d\n", __func__, port_index);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700727 return -EINVAL;
728 }
729
Ben Romberger974a40d2011-07-18 15:08:21 -0700730 return atomic_read(&this_adm.copp_id[port_index]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700731}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700732
733int adm_close(int port_id)
734{
735 struct apr_hdr close;
736
737 int ret = 0;
Laxminath Kasam32657ec2011-08-01 19:26:57 +0530738 int index = 0;
739
740 port_id = afe_convert_virtual_to_portid(port_id);
741
742 index = afe_get_port_index(port_id);
Bharath Ramachandramurthy51a86212011-07-29 12:43:43 -0700743 if (afe_validate_port(port_id) < 0)
744 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700745
Jeff Ohlstein293b91f2011-12-16 13:22:46 -0800746 pr_debug("%s port_id=%d index %d\n", __func__, port_id, index);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700747
748 if (!(atomic_read(&this_adm.copp_cnt[index]))) {
749 pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id);
750
751 goto fail_cmd;
752 }
753 atomic_dec(&this_adm.copp_cnt[index]);
754 if (!(atomic_read(&this_adm.copp_cnt[index]))) {
755
756 close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
757 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
758 close.pkt_size = sizeof(close);
759 close.src_svc = APR_SVC_ADM;
760 close.src_domain = APR_DOMAIN_APPS;
761 close.src_port = port_id;
762 close.dest_svc = APR_SVC_ADM;
763 close.dest_domain = APR_DOMAIN_ADSP;
764 close.dest_port = atomic_read(&this_adm.copp_id[index]);
765 close.token = port_id;
766 close.opcode = ADM_CMD_COPP_CLOSE;
767
768 atomic_set(&this_adm.copp_id[index], RESET_COPP_ID);
769 atomic_set(&this_adm.copp_stat[index], 0);
770
771
772 pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
773 __func__,
774 atomic_read(&this_adm.copp_id[index]),
775 port_id, index,
776 atomic_read(&this_adm.copp_cnt[index]));
777
778 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
779 if (ret < 0) {
780 pr_err("%s ADM close failed\n", __func__);
781 ret = -EINVAL;
782 goto fail_cmd;
783 }
784
785 ret = wait_event_timeout(this_adm.wait,
786 atomic_read(&this_adm.copp_stat[index]),
787 msecs_to_jiffies(TIMEOUT_MS));
788 if (!ret) {
789 pr_err("%s: ADM cmd Route failed for port %d\n",
790 __func__, port_id);
791 ret = -EINVAL;
792 goto fail_cmd;
793 }
Ben Romberger93d4d2d2011-10-19 23:04:02 -0700794
795 rtac_remove_adm_device(port_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700796 }
797
798fail_cmd:
799 return ret;
800}
801
802static int __init adm_init(void)
803{
804 int i = 0;
805 init_waitqueue_head(&this_adm.wait);
806 this_adm.apr = NULL;
807
808 for (i = 0; i < AFE_MAX_PORTS; i++) {
809 atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
810 atomic_set(&this_adm.copp_cnt[i], 0);
811 atomic_set(&this_adm.copp_stat[i], 0);
812 }
813 return 0;
814}
815
816device_initcall(adm_init);