/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <mach/qdsp6v2/audio_dev_ctl.h>
#include <mach/qdsp6v2/audio_acdb.h>
#include <mach/qdsp6v2/rtac.h>

#include <sound/apr_audio.h>
#include <sound/q6afe.h>

#define TIMEOUT_MS 1000
#define AUDIO_RX 0x0
#define AUDIO_TX 0x1

#define ASM_MAX_SESSION 0x8 /* To do: define in a header */
#define RESET_COPP_ID 99
#define INVALID_COPP_ID 0xFF

struct adm_ctl {
	void *apr;
	atomic_t copp_id[AFE_MAX_PORTS];
	atomic_t copp_cnt[AFE_MAX_PORTS];
	atomic_t copp_stat[AFE_MAX_PORTS];
	wait_queue_head_t wait;
};

static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
static struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];

static struct adm_ctl this_adm;

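/*
 * All ADM commands below follow the same pattern: clear copp_stat for the
 * port index, send an APR packet to the ADSP, then wait_event_timeout()
 * until adm_callback() acknowledges the command. The callback records the
 * COPP id returned by a COPP open and wakes the waiter by setting
 * copp_stat. On RESET_EVENTS the APR handle and all per-port state are
 * cleared so subsequent opens re-register with the ADSP.
 */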
static int32_t adm_callback(struct apr_client_data *data, void *priv)
{
	uint32_t *payload;
	int i, index;
	payload = data->payload;

	if (data->opcode == RESET_EVENTS) {
		pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n",
				data->reset_event, data->reset_proc,
				this_adm.apr);
		if (this_adm.apr) {
			apr_reset(this_adm.apr);
			for (i = 0; i < AFE_MAX_PORTS; i++) {
				atomic_set(&this_adm.copp_id[i],
					RESET_COPP_ID);
				atomic_set(&this_adm.copp_cnt[i], 0);
				atomic_set(&this_adm.copp_stat[i], 0);
			}
			this_adm.apr = NULL;
		}
		return 0;
	}

	pr_debug("%s: code = 0x%x %x %x size = %d\n", __func__,
			data->opcode, payload[0], payload[1],
			data->payload_size);

	if (data->payload_size) {
		index = afe_get_port_index(data->token);
		pr_debug("%s: Port ID %d, index %d\n", __func__,
			data->token, index);
		if (index < 0 || index >= AFE_MAX_PORTS) {
			pr_err("%s: invalid port idx %d token %d\n",
					__func__, index, data->token);
			return 0;
		}
		if (data->opcode == APR_BASIC_RSP_RESULT) {
			pr_debug("APR_BASIC_RSP_RESULT id %x\n", payload[0]);
			switch (payload[0]) {
			case ADM_CMD_SET_PARAMS:
				if (rtac_make_adm_callback(payload,
						data->payload_size))
					break;
			case ADM_CMD_COPP_CLOSE:
			case ADM_CMD_MEMORY_MAP:
			case ADM_CMD_MEMORY_UNMAP:
			case ADM_CMD_MEMORY_MAP_REGIONS:
			case ADM_CMD_MEMORY_UNMAP_REGIONS:
			case ADM_CMD_MATRIX_MAP_ROUTINGS:
			case ADM_CMD_CONNECT_AFE_PORT:
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait);
				break;
			default:
				pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
						payload[0]);
				break;
			}
			return 0;
		}

		switch (data->opcode) {
		case ADM_CMDRSP_COPP_OPEN:
		case ADM_CMDRSP_MULTI_CHANNEL_COPP_OPEN: {
			struct adm_copp_open_respond *open = data->payload;
			if (open->copp_id == INVALID_COPP_ID) {
				pr_err("%s: invalid coppid rxed %d\n",
					__func__, open->copp_id);
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait);
				break;
			}
			atomic_set(&this_adm.copp_id[index], open->copp_id);
			atomic_set(&this_adm.copp_stat[index], 1);
			pr_debug("%s: coppid rxed=%d\n", __func__,
				open->copp_id);
			wake_up(&this_adm.wait);
			}
			break;
		case ADM_CMDRSP_GET_PARAMS:
			pr_debug("%s: ADM_CMDRSP_GET_PARAMS\n", __func__);
			rtac_make_adm_callback(payload,
				data->payload_size);
			break;
		default:
			pr_err("%s: Unknown cmd:0x%x\n", __func__,
				data->opcode);
			break;
		}
	}
	return 0;
}

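/*
 * Push one ACDB calibration block to the COPP on the given port with
 * ADM_CMD_SET_PARAMS. The payload is passed by physical address, so the
 * block must already be memory-mapped to the ADSP (see send_adm_cal()).
 */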
static int send_adm_cal_block(int port_id, struct acdb_cal_block *aud_cal)
{
	s32 result = 0;
	struct adm_set_params_command adm_params;
	int index = afe_get_port_index(port_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d portid %d\n",
				__func__, index, port_id);
		return 0;
	}

	pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index);

	if (!aud_cal || aud_cal->cal_size == 0) {
		pr_debug("%s: No ADM cal to send for port_id = %d!\n",
			__func__, port_id);
		result = -EINVAL;
		goto done;
	}

	adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		sizeof(adm_params));
	adm_params.hdr.src_svc = APR_SVC_ADM;
	adm_params.hdr.src_domain = APR_DOMAIN_APPS;
	adm_params.hdr.src_port = port_id;
	adm_params.hdr.dest_svc = APR_SVC_ADM;
	adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_params.hdr.token = port_id;
	adm_params.hdr.opcode = ADM_CMD_SET_PARAMS;
	adm_params.payload = aud_cal->cal_paddr;
	adm_params.payload_size = aud_cal->cal_size;

	atomic_set(&this_adm.copp_stat[index], 0);
	pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
		__func__, adm_params.payload, adm_params.payload_size);
	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
	if (result < 0) {
		pr_err("%s: Set params failed port = %d payload = 0x%x\n",
			__func__, port_id, aud_cal->cal_paddr);
		result = -EINVAL;
		goto done;
	}
	/* Wait for the callback */
	result = wait_event_timeout(this_adm.wait,
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: Set params timed out port = %d, payload = 0x%x\n",
			__func__, port_id, aud_cal->cal_paddr);
		result = -EINVAL;
		goto done;
	}

	result = 0;
done:
	return result;
}

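/*
 * Fetch the audproc and audvol calibration for the ACDB path and send both
 * to the COPP. The physical calibration buffers are mapped to the ADSP
 * lazily and the mapping is cached in mem_addr_audproc/mem_addr_audvol; a
 * buffer is only remapped when its address changes or the new block is
 * larger than the cached mapping.
 */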
static void send_adm_cal(int port_id, int path)
{
	int result = 0;
	s32 acdb_path;
	struct acdb_cal_block aud_cal;

	pr_debug("%s\n", __func__);

	/* Maps audio_dev_ctrl path definition to ACDB definition */
	acdb_path = path - 1;

	pr_debug("%s: Sending audproc cal\n", __func__);
	get_audproc_cal(acdb_path, &aud_cal);

	/* map & cache buffers used */
	if (((mem_addr_audproc[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
		(aud_cal.cal_size > 0)) ||
		(aud_cal.cal_size > mem_addr_audproc[acdb_path].cal_size)) {

		if (mem_addr_audproc[acdb_path].cal_paddr != 0)
			adm_memory_unmap_regions(
				&mem_addr_audproc[acdb_path].cal_paddr,
				&mem_addr_audproc[acdb_path].cal_size, 1);

		result = adm_memory_map_regions(&aud_cal.cal_paddr, 0,
						&aud_cal.cal_size, 1);
		if (result < 0)
			pr_err("ADM audproc mmap did not work! path = %d, "
				"addr = 0x%x, size = %d\n", acdb_path,
				aud_cal.cal_paddr, aud_cal.cal_size);
		else
			mem_addr_audproc[acdb_path] = aud_cal;
	}

	if (!send_adm_cal_block(port_id, &aud_cal))
		pr_debug("%s: Audproc cal sent for port id: %d, path %d\n",
			__func__, port_id, acdb_path);
	else
		pr_debug("%s: Audproc cal not sent for port id: %d, path %d\n",
			__func__, port_id, acdb_path);

	pr_debug("%s: Sending audvol cal\n", __func__);
	get_audvol_cal(acdb_path, &aud_cal);

	/* map & cache buffers used */
	if (((mem_addr_audvol[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
		(aud_cal.cal_size > 0)) ||
		(aud_cal.cal_size > mem_addr_audvol[acdb_path].cal_size)) {
		if (mem_addr_audvol[acdb_path].cal_paddr != 0)
			adm_memory_unmap_regions(
				&mem_addr_audvol[acdb_path].cal_paddr,
				&mem_addr_audvol[acdb_path].cal_size, 1);

		result = adm_memory_map_regions(&aud_cal.cal_paddr, 0,
						&aud_cal.cal_size, 1);
		if (result < 0)
			pr_err("ADM audvol mmap did not work! path = %d, "
				"addr = 0x%x, size = %d\n", acdb_path,
				aud_cal.cal_paddr, aud_cal.cal_size);
		else
			mem_addr_audvol[acdb_path] = aud_cal;
	}

	if (!send_adm_cal_block(port_id, &aud_cal))
		pr_debug("%s: Audvol cal sent for port id: %d, path %d\n",
			__func__, port_id, acdb_path);
	else
		pr_debug("%s: Audvol cal not sent for port id: %d, path %d\n",
			__func__, port_id, acdb_path);
}

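/*
 * Connect an ASM session to an AFE port with ADM_CMD_CONNECT_AFE_PORT.
 * Registers the ADM APR handle on first use and increments the per-port
 * COPP reference count once the ADSP acknowledges the command.
 */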
int adm_connect_afe_port(int mode, int session_id, int port_id)
{
	struct adm_cmd_connect_afe_port cmd;
	int ret = 0;
	int index;

	pr_debug("%s: port %d session id:%d mode:%d\n", __func__,
				port_id, session_id, mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port id[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}
	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	cmd.hdr.pkt_size = sizeof(cmd);
	cmd.hdr.src_svc = APR_SVC_ADM;
	cmd.hdr.src_domain = APR_DOMAIN_APPS;
	cmd.hdr.src_port = port_id;
	cmd.hdr.dest_svc = APR_SVC_ADM;
	cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
	cmd.hdr.dest_port = port_id;
	cmd.hdr.token = port_id;
	cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT;

	cmd.mode = mode;
	cmd.session_id = session_id;
	cmd.afe_port_id = port_id;

	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
	if (ret < 0) {
		pr_err("%s:ADM enable for port %d failed\n",
					__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* Wait for the callback with copp id */
	ret = wait_event_timeout(this_adm.wait,
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s ADM connect AFE failed for port %d\n", __func__,
							port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}

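/*
 * Open (or reference) a COPP on an AFE port. A new ADM_CMD_COPP_OPEN is
 * only issued when the port has no active COPP; otherwise the existing
 * COPP is reused and only the reference count is incremented. The topology
 * comes from the ACDB (get_adm_rx_topology()/get_adm_tx_topology()) and
 * falls back to the caller-supplied topology when the ACDB has none.
 *
 * Illustrative call sequence only (the real callers live outside this
 * file; port and topology values below are hypothetical placeholders):
 *
 *	unsigned int port = some_afe_rx_port_id;
 *	adm_open(port, ADM_PATH_PLAYBACK, 48000, 2, some_copp_topology);
 *	adm_matrix_map(session_id, ADM_PATH_PLAYBACK, 1, &port, port);
 *	...stream runs...
 *	adm_close(port);
 */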
int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
{
	struct adm_copp_open_command open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
				port_id, path, rate, channel_mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port id[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}


	/* Create a COPP if the port is not already enabled */
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_COPP_OPEN;

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;

		/* convert path to acdb path */
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		if (open.topology_id == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d "
			"topology_id=0x%X\n", __func__, open.channel_config,
			open.endpoint_id1, open.rate,
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}

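/*
 * Multi-channel variant of adm_open(): opens the COPP with
 * ADM_CMD_MULTI_CHANNEL_COPP_OPEN and fills in an explicit speaker
 * position map for 1, 2, 4 or 6 channels; any other channel count is
 * rejected with -EINVAL.
 */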
int adm_multi_ch_copp_open(int port_id, int path, int rate, int channel_mode,
				int topology)
{
	struct adm_multi_ch_copp_open_command open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d channel :%d\n", __func__,
				port_id, path, rate, channel_mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port id[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	/* Create a COPP if the port is not already enabled */
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);

		open.hdr.pkt_size =
			sizeof(struct adm_multi_ch_copp_open_command);
		open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN;
		memset(open.dev_channel_mapping, 0, 8);

		if (channel_mode == 1) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
		} else if (channel_mode == 2) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
		} else if (channel_mode == 4) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_RB;
			open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
		} else if (channel_mode == 6) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
		} else {
			pr_err("%s invalid num_chan %d\n", __func__,
					channel_mode);
			return -EINVAL;
		}


		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;

		/* convert path to acdb path */
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		if (open.topology_id == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d"
			" topology_id=0x%X\n", __func__, open.channel_config,
			open.endpoint_id1, open.rate,
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}

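/*
 * Route an ASM session to one or more opened COPPs with
 * ADM_CMD_MATRIX_MAP_ROUTINGS, then push the ACDB calibration for each
 * routed port and register the devices with RTAC. Assumes every port in
 * port_id[] was already validated and opened by adm_open().
 */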
int adm_matrix_map(int session_id, int path, int num_copps,
			unsigned int *port_id, int copp_id)
{
	struct adm_routings_command route;
	int ret = 0, i = 0;
	/* Assumes port_ids have already been validated during adm_open */
	int index = afe_get_port_index(copp_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d token %d\n",
					__func__, index, copp_id);
		return 0;
	}

	pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
		 __func__, session_id, path, num_copps, port_id[0]);

	route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	route.hdr.pkt_size = sizeof(route);
	route.hdr.src_svc = 0;
	route.hdr.src_domain = APR_DOMAIN_APPS;
	route.hdr.src_port = copp_id;
	route.hdr.dest_svc = APR_SVC_ADM;
	route.hdr.dest_domain = APR_DOMAIN_ADSP;
	route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	route.hdr.token = copp_id;
	route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
	route.num_sessions = 1;
	route.session[0].id = session_id;
	route.session[0].num_copps = num_copps;

	for (i = 0; i < num_copps; i++) {
		int tmp;
		port_id[i] = afe_convert_virtual_to_portid(port_id[i]);

		tmp = afe_get_port_index(port_id[i]);

		pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i,
			port_id[i], tmp);

		if (tmp >= 0 && tmp < AFE_MAX_PORTS)
			route.session[0].copp_id[i] =
					atomic_read(&this_adm.copp_id[tmp]);
	}
	if (num_copps % 2)
		route.session[0].copp_id[i] = 0;

	switch (path) {
	case 0x1:
		route.path = AUDIO_RX;
		break;
	case 0x2:
	case 0x3:
		route.path = AUDIO_TX;
		break;
	default:
		pr_err("%s: Wrong path set[%d]\n", __func__, path);
		break;
	}
	atomic_set(&this_adm.copp_stat[index], 0);

	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
	if (ret < 0) {
		pr_err("%s: ADM routing for port %d failed\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_adm.wait,
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: ADM cmd Route failed for port %d\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}

	for (i = 0; i < num_copps; i++)
		send_adm_cal(port_id[i], path);

	for (i = 0; i < num_copps; i++)
		rtac_add_adm_device(port_id[i], atomic_read(&this_adm.copp_id
			[afe_get_port_index(port_id[i])]),
			path, session_id);
	return 0;

fail_cmd:

	return ret;
}

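/*
 * Map bufcnt physical buffers into the ADSP address space with
 * ADM_CMD_MEMORY_MAP_REGIONS. The command packet is built in a temporary
 * kzalloc'd buffer (header followed by one region descriptor per buffer)
 * and the call blocks for up to 5 seconds waiting for the acknowledgement.
 */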
int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
				uint32_t *bufsz, uint32_t bufcnt)
{
	struct adm_cmd_memory_map_regions *mmap_regions = NULL;
	struct adm_memory_map_regions *mregions = NULL;
	void *mmap_region_cmd = NULL;
	void *payload = NULL;
	int ret = 0;
	int i = 0;
	int cmd_size = 0;

	pr_debug("%s\n", __func__);
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	cmd_size = sizeof(struct adm_cmd_memory_map_regions)
			+ sizeof(struct adm_memory_map_regions) * bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
						APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS;
	mmap_regions->mempool_id = mempool_id & 0x00ff;
	mmap_regions->nregions = bufcnt & 0x00ff;
	pr_debug("%s: map_regions->nregions = %d\n", __func__,
				mmap_regions->nregions);
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct adm_cmd_memory_map_regions));
	mregions = (struct adm_memory_map_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		mregions->buf_size = bufsz[i];
		++mregions;
	}

	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}

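/*
 * Counterpart of adm_memory_map_regions(): releases previously mapped
 * regions with ADM_CMD_MEMORY_UNMAP_REGIONS. Requires the ADM APR handle
 * to be registered already.
 */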
int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz,
						uint32_t bufcnt)
{
	struct adm_cmd_memory_unmap_regions *unmap_regions = NULL;
	struct adm_memory_unmap_regions *mregions = NULL;
	void *unmap_region_cmd = NULL;
	void *payload = NULL;
	int ret = 0;
	int i = 0;
	int cmd_size = 0;

	pr_debug("%s\n", __func__);

	if (this_adm.apr == NULL) {
		pr_err("%s APR handle NULL\n", __func__);
		return -EINVAL;
	}

	cmd_size = sizeof(struct adm_cmd_memory_unmap_regions)
			+ sizeof(struct adm_memory_unmap_regions) * bufcnt;

	unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!unmap_region_cmd) {
		pr_err("%s: allocate unmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	unmap_regions = (struct adm_cmd_memory_unmap_regions *)
						unmap_region_cmd;
	unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
						APR_PKT_VER);
	unmap_regions->hdr.pkt_size = cmd_size;
	unmap_regions->hdr.src_port = 0;
	unmap_regions->hdr.dest_port = 0;
	unmap_regions->hdr.token = 0;
	unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS;
	unmap_regions->nregions = bufcnt & 0x00ff;
	unmap_regions->reserved = 0;
	pr_debug("%s: unmap_regions->nregions = %d\n", __func__,
				unmap_regions->nregions);
	payload = ((u8 *) unmap_region_cmd +
			sizeof(struct adm_cmd_memory_unmap_regions));
	mregions = (struct adm_memory_unmap_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		++mregions;
	}
	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: unmap_regions op[0x%x]rc[%d]\n", __func__,
					unmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_unmap\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(unmap_region_cmd);
	return ret;
}

int adm_get_copp_id(int port_index)
{
	pr_debug("%s\n", __func__);

	if (port_index < 0) {
		pr_err("%s: invalid port_index = %d\n", __func__, port_index);
		return -EINVAL;
	}

	return atomic_read(&this_adm.copp_id[port_index]);
}

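/*
 * Drop one reference on the port's COPP. The ADM_CMD_COPP_CLOSE command is
 * only sent (and the RTAC device removed) when the reference count reaches
 * zero, so shared COPPs stay open until their last user calls adm_close().
 */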
int adm_close(int port_id)
{
	struct apr_hdr close;

	int ret = 0;
	int index = 0;

	port_id = afe_convert_virtual_to_portid(port_id);

	index = afe_get_port_index(port_id);
	if (afe_validate_port(port_id) < 0)
		return -EINVAL;

	pr_debug("%s port_id=%d index %d\n", __func__, port_id, index);

	if (!(atomic_read(&this_adm.copp_cnt[index]))) {
		pr_err("%s: copp count for port[%d] is 0\n", __func__,
								port_id);

		goto fail_cmd;
	}
	atomic_dec(&this_adm.copp_cnt[index]);
	if (!(atomic_read(&this_adm.copp_cnt[index]))) {

		close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		close.pkt_size = sizeof(close);
		close.src_svc = APR_SVC_ADM;
		close.src_domain = APR_DOMAIN_APPS;
		close.src_port = port_id;
		close.dest_svc = APR_SVC_ADM;
		close.dest_domain = APR_DOMAIN_ADSP;
		close.dest_port = atomic_read(&this_adm.copp_id[index]);
		close.token = port_id;
		close.opcode = ADM_CMD_COPP_CLOSE;

		atomic_set(&this_adm.copp_id[index], RESET_COPP_ID);
		atomic_set(&this_adm.copp_stat[index], 0);


		pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
				__func__,
				atomic_read(&this_adm.copp_id[index]),
				port_id, index,
				atomic_read(&this_adm.copp_cnt[index]));

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
		if (ret < 0) {
			pr_err("%s ADM close failed\n", __func__);
			ret = -EINVAL;
			goto fail_cmd;
		}

		ret = wait_event_timeout(this_adm.wait,
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s: ADM close timed out for port %d\n",
					__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}

		rtac_remove_adm_device(port_id);
	}

fail_cmd:
	return ret;
}

static int __init adm_init(void)
{
	int i = 0;
	init_waitqueue_head(&this_adm.wait);
	this_adm.apr = NULL;

	for (i = 0; i < AFE_MAX_PORTS; i++) {
		atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
		atomic_set(&this_adm.copp_cnt[i], 0);
		atomic_set(&this_adm.copp_stat[i], 0);
	}
	return 0;
}

device_initcall(adm_init);