blob: 177e1d824558af544eae4779c65031581b2d0786 [file] [log] [blame]
Ben Romberger48fabc32012-01-06 17:39:39 -08001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/wait.h>
15#include <linux/sched.h>
16#include <linux/jiffies.h>
17#include <linux/uaccess.h>
18#include <linux/atomic.h>
Ben Rombergerfce8f512011-07-18 16:46:09 -070019
20#include <mach/qdsp6v2/audio_dev_ctl.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021#include <mach/qdsp6v2/audio_acdb.h>
Ben Rombergerfce8f512011-07-18 16:46:09 -070022#include <mach/qdsp6v2/rtac.h>
23
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <sound/apr_audio.h>
25#include <sound/q6afe.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026
#define TIMEOUT_MS 1000		/* ms to wait for an APR command ack */
#define AUDIO_RX 0x0		/* matrix routing path: playback */
#define AUDIO_TX 0x1		/* matrix routing path: capture */

#define ASM_MAX_SESSION 0x8 /* To do: define in a header */
#define RESET_COPP_ID 99	/* copp_id value meaning "no COPP open" */
#define INVALID_COPP_ID 0xFF	/* DSP's "open failed" COPP id */

/*
 * Driver-global ADM state, one slot per AFE port (indexed by
 * afe_get_port_index()).  copp_stat[] is the done-flag each waiter
 * polls via wait_event_timeout(); the APR callback sets it to 1.
 */
struct adm_ctl {
	void *apr;			/* APR handle; NULL until registered */
	atomic_t copp_id[AFE_MAX_PORTS];	/* COPP id assigned by the ADSP */
	atomic_t copp_cnt[AFE_MAX_PORTS];	/* open refcount per port */
	atomic_t copp_stat[AFE_MAX_PORTS];	/* command-completed flag */
	wait_queue_head_t wait;		/* shared waitqueue for all ports */
};

/* Cache of calibration buffers already memory-mapped on the ADSP,
 * indexed by ACDB path; avoids re-mapping the same physical buffer. */
static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
static struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];

static struct adm_ctl this_adm;
47
/*
 * APR callback for every ADM command response and event.
 *
 * For command acks (APR_BASIC_RSP_RESULT) and COPP-open responses it
 * records the result for the port indexed by data->token and wakes the
 * thread blocked in wait_event_timeout() on this_adm.wait.  On an APR
 * reset event all per-port state is cleared and the handle dropped.
 *
 * Returns 0 always (APR callbacks' return value is not used for errors
 * here).
 */
static int32_t adm_callback(struct apr_client_data *data, void *priv)
{
	uint32_t *payload;
	int i, index;
	payload = data->payload;

	if (data->opcode == RESET_EVENTS) {
		pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n",
				data->reset_event, data->reset_proc,
				this_adm.apr);
		if (this_adm.apr) {
			apr_reset(this_adm.apr);
			/* Invalidate every port's COPP bookkeeping */
			for (i = 0; i < AFE_MAX_PORTS; i++) {
				atomic_set(&this_adm.copp_id[i],
					RESET_COPP_ID);
				atomic_set(&this_adm.copp_cnt[i], 0);
				atomic_set(&this_adm.copp_stat[i], 0);
			}
			this_adm.apr = NULL;
		}
		return 0;
	}

	pr_debug("%s: code = 0x%x %x %x size = %d\n", __func__,
			data->opcode, payload[0], payload[1],
			data->payload_size);

	if (data->payload_size) {
		/* token was set to the port id when the command was sent */
		index = afe_get_port_index(data->token);
		pr_debug("%s: Port ID %d, index %d\n", __func__,
			data->token, index);

		if (data->opcode == APR_BASIC_RSP_RESULT) {
			pr_debug("APR_BASIC_RSP_RESULT\n");
			switch (payload[0]) {
			case ADM_CMD_SET_PARAMS:
				/* If RTAC claimed this ack, the RTAC waiter
				 * is woken instead; otherwise fall through
				 * and complete the normal waiter below.
				 * (intentional fallthrough) */
				if (rtac_make_adm_callback(payload,
						data->payload_size))
					break;
			case ADM_CMD_COPP_CLOSE:
			case ADM_CMD_MEMORY_MAP:
			case ADM_CMD_MEMORY_UNMAP:
			case ADM_CMD_MEMORY_MAP_REGIONS:
			case ADM_CMD_MEMORY_UNMAP_REGIONS:
			case ADM_CMD_MATRIX_MAP_ROUTINGS:
				pr_debug("ADM_CMD_MATRIX_MAP_ROUTINGS\n");
				/* Mark command done and wake the sender */
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait);
				break;
			default:
				pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
								payload[0]);
				break;
			}
			return 0;
		}

		switch (data->opcode) {
		case ADM_CMDRSP_COPP_OPEN: {
			struct adm_copp_open_respond *open = data->payload;
			if (open->copp_id == INVALID_COPP_ID) {
				/* Open failed on the DSP side: wake the
				 * waiter but leave copp_id untouched */
				pr_err("%s: invalid coppid rxed %d\n",
					__func__, open->copp_id);
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait);
				break;
			}
			atomic_set(&this_adm.copp_id[index], open->copp_id);
			atomic_set(&this_adm.copp_stat[index], 1);
			pr_debug("%s: coppid rxed=%d\n", __func__,
							open->copp_id);
			wake_up(&this_adm.wait);
			}
			break;
		case ADM_CMDRSP_GET_PARAMS:
			pr_debug("%s: ADM_CMDRSP_GET_PARAMS\n", __func__);
			rtac_make_adm_callback(payload,
				data->payload_size);
			break;
		default:
			pr_err("%s: Unknown cmd:0x%x\n", __func__,
							data->opcode);
			break;
		}
	}
	return 0;
}
135
Ben Romberger48fabc32012-01-06 17:39:39 -0800136static int send_adm_cal_block(int port_id, struct acdb_cal_block *aud_cal)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700137{
Ben Rombergerdcab5472011-12-08 19:20:12 -0800138 s32 result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700139 struct adm_set_params_command adm_params;
140 int index = afe_get_port_index(port_id);
141
142 pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index);
143
144 if (!aud_cal || aud_cal->cal_size == 0) {
Ben Rombergerdcab5472011-12-08 19:20:12 -0800145 pr_debug("%s: No ADM cal to send for port_id = %d!\n",
146 __func__, port_id);
147 result = -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700148 goto done;
149 }
150
151 adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
152 APR_HDR_LEN(20), APR_PKT_VER);
153 adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
154 sizeof(adm_params));
155 adm_params.hdr.src_svc = APR_SVC_ADM;
156 adm_params.hdr.src_domain = APR_DOMAIN_APPS;
157 adm_params.hdr.src_port = port_id;
158 adm_params.hdr.dest_svc = APR_SVC_ADM;
159 adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
160 adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
161 adm_params.hdr.token = port_id;
162 adm_params.hdr.opcode = ADM_CMD_SET_PARAMS;
163 adm_params.payload = aud_cal->cal_paddr;
164 adm_params.payload_size = aud_cal->cal_size;
165
166 atomic_set(&this_adm.copp_stat[index], 0);
167 pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
168 __func__, adm_params.payload, adm_params.payload_size);
169 result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
170 if (result < 0) {
171 pr_err("%s: Set params failed port = %d payload = 0x%x\n",
172 __func__, port_id, aud_cal->cal_paddr);
Ben Rombergerdcab5472011-12-08 19:20:12 -0800173 result = -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700174 goto done;
175 }
176 /* Wait for the callback */
177 result = wait_event_timeout(this_adm.wait,
178 atomic_read(&this_adm.copp_stat[index]),
179 msecs_to_jiffies(TIMEOUT_MS));
Ben Rombergerdcab5472011-12-08 19:20:12 -0800180 if (!result) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700181 pr_err("%s: Set params timed out port = %d, payload = 0x%x\n",
182 __func__, port_id, aud_cal->cal_paddr);
Ben Rombergerdcab5472011-12-08 19:20:12 -0800183 result = -EINVAL;
184 goto done;
185 }
186
187 result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700188done:
Ben Rombergerdcab5472011-12-08 19:20:12 -0800189 return result;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700190}
191
Ben Romberger48fabc32012-01-06 17:39:39 -0800192static void send_adm_cal(int port_id, int path)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700193{
Ben Romberger48fabc32012-01-06 17:39:39 -0800194 int result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700195 s32 acdb_path;
196 struct acdb_cal_block aud_cal;
197
198 pr_debug("%s\n", __func__);
199
200 /* Maps audio_dev_ctrl path definition to ACDB definition */
201 acdb_path = path - 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202
203 pr_debug("%s: Sending audproc cal\n", __func__);
204 get_audproc_cal(acdb_path, &aud_cal);
Ben Romberger48fabc32012-01-06 17:39:39 -0800205
206 /* map & cache buffers used */
207 if ((mem_addr_audproc[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
208 (aud_cal.cal_size > 0)) {
209 if (mem_addr_audproc[acdb_path].cal_paddr != 0)
210 adm_memory_unmap_regions(
211 &mem_addr_audproc[acdb_path].cal_paddr,
212 &mem_addr_audproc[acdb_path].cal_size, 1);
213
214 result = adm_memory_map_regions(&aud_cal.cal_paddr, 0,
215 &aud_cal.cal_size, 1);
216 if (result < 0)
217 pr_err("ADM audproc mmap did not work! path = %d, "
218 "addr = 0x%x, size = %d\n", acdb_path,
219 aud_cal.cal_paddr, aud_cal.cal_size);
220 else
221 mem_addr_audproc[acdb_path] = aud_cal;
222 }
223
Ben Rombergerdcab5472011-12-08 19:20:12 -0800224 if (!send_adm_cal_block(port_id, &aud_cal))
Ben Romberger48fabc32012-01-06 17:39:39 -0800225 pr_debug("%s: Audproc cal sent for port id: %d, path %d\n",
Ben Rombergerdcab5472011-12-08 19:20:12 -0800226 __func__, port_id, acdb_path);
227 else
Ben Romberger48fabc32012-01-06 17:39:39 -0800228 pr_debug("%s: Audproc cal not sent for port id: %d, path %d\n",
Ben Rombergerdcab5472011-12-08 19:20:12 -0800229 __func__, port_id, acdb_path);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700230
231 pr_debug("%s: Sending audvol cal\n", __func__);
232 get_audvol_cal(acdb_path, &aud_cal);
Ben Romberger48fabc32012-01-06 17:39:39 -0800233
234 /* map & cache buffers used */
235 if ((mem_addr_audvol[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
236 (aud_cal.cal_size > 0)) {
237 if (mem_addr_audvol[acdb_path].cal_paddr != 0)
238 adm_memory_unmap_regions(
239 &mem_addr_audvol[acdb_path].cal_paddr,
240 &mem_addr_audvol[acdb_path].cal_size, 1);
241
242 result = adm_memory_map_regions(&aud_cal.cal_paddr, 0,
243 &aud_cal.cal_size, 1);
244 if (result < 0)
245 pr_err("ADM audvol mmap did not work! path = %d, "
246 "addr = 0x%x, size = %d\n", acdb_path,
247 aud_cal.cal_paddr, aud_cal.cal_size);
248 else
249 mem_addr_audvol[acdb_path] = aud_cal;
250 }
251
Ben Rombergerdcab5472011-12-08 19:20:12 -0800252 if (!send_adm_cal_block(port_id, &aud_cal))
Ben Romberger48fabc32012-01-06 17:39:39 -0800253 pr_debug("%s: Audvol cal sent for port id: %d, path %d\n",
Ben Rombergerdcab5472011-12-08 19:20:12 -0800254 __func__, port_id, acdb_path);
255 else
Ben Romberger48fabc32012-01-06 17:39:39 -0800256 pr_debug("%s: Audvol cal not sent for port id: %d, path %d\n",
Ben Rombergerdcab5472011-12-08 19:20:12 -0800257 __func__, port_id, acdb_path);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700258}
259
/*
 * Open (or reference) a COPP on @port_id.
 *
 * Registers the APR handle on first use.  A COPP_OPEN command is only
 * sent when this port's refcount is zero; otherwise the existing COPP
 * is shared and only the refcount is bumped.
 *
 * @path:	 ADM path (ADM_PATH_PLAYBACK or capture) - selects the
 *		 RX/TX ACDB topology.
 * @rate:	 sample rate; forced to 16000 for the ECNS/Fluence TX
 *		 topologies, which the DSP runs at 16 kHz.
 * @topology:	 fallback topology id when the ACDB has none (0).
 *
 * Returns 0 on success, -ENODEV for a bad port or failed APR
 * registration, -EINVAL on send failure or ack timeout.
 */
int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
{
	struct adm_copp_open_command open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
				port_id, path, rate, channel_mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	/* Lazily register with APR on first open */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}


	/* Create a COPP if port id are not enabled */
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;	/* callback routes the ack by token */
		open.hdr.opcode = ADM_CMD_COPP_OPEN;

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;	/* no second endpoint */

		/* convert path to acdb path */
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			/* ECNS/Fluence TX topologies only run at 16 kHz */
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		/* No ACDB topology configured: use the caller's default */
		if (open.topology_id  == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate  = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d\
			topology_id=0x%X\n", __func__, open.channel_config,\
			open.endpoint_id1, open.rate,\
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}
362
363int adm_matrix_map(int session_id, int path, int num_copps,
364 unsigned int *port_id, int copp_id)
365{
366 struct adm_routings_command route;
367 int ret = 0, i = 0;
368 /* Assumes port_ids have already been validated during adm_open */
369 int index = afe_get_port_index(copp_id);
370
371 pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
372 __func__, session_id, path, num_copps, port_id[0]);
373
374 route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
375 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
376 route.hdr.pkt_size = sizeof(route);
377 route.hdr.src_svc = 0;
378 route.hdr.src_domain = APR_DOMAIN_APPS;
379 route.hdr.src_port = copp_id;
380 route.hdr.dest_svc = APR_SVC_ADM;
381 route.hdr.dest_domain = APR_DOMAIN_ADSP;
382 route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
383 route.hdr.token = copp_id;
384 route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
385 route.num_sessions = 1;
386 route.session[0].id = session_id;
387 route.session[0].num_copps = num_copps;
388
389 for (i = 0; i < num_copps; i++) {
390 int tmp;
Laxminath Kasam32657ec2011-08-01 19:26:57 +0530391 port_id[i] = afe_convert_virtual_to_portid(port_id[i]);
392
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700393 tmp = afe_get_port_index(port_id[i]);
394
395 pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i,
396 port_id[i], tmp);
397
398 route.session[0].copp_id[i] =
399 atomic_read(&this_adm.copp_id[tmp]);
400 }
401 if (num_copps % 2)
402 route.session[0].copp_id[i] = 0;
403
404 switch (path) {
405 case 0x1:
406 route.path = AUDIO_RX;
407 break;
408 case 0x2:
409 case 0x3:
410 route.path = AUDIO_TX;
411 break;
412 default:
413 pr_err("%s: Wrong path set[%d]\n", __func__, path);
414 break;
415 }
416 atomic_set(&this_adm.copp_stat[index], 0);
417
418 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
419 if (ret < 0) {
420 pr_err("%s: ADM routing for port %d failed\n",
421 __func__, port_id[0]);
422 ret = -EINVAL;
423 goto fail_cmd;
424 }
425 ret = wait_event_timeout(this_adm.wait,
426 atomic_read(&this_adm.copp_stat[index]),
427 msecs_to_jiffies(TIMEOUT_MS));
428 if (!ret) {
429 pr_err("%s: ADM cmd Route failed for port %d\n",
430 __func__, port_id[0]);
431 ret = -EINVAL;
432 goto fail_cmd;
433 }
434
435 for (i = 0; i < num_copps; i++)
436 send_adm_cal(port_id[i], path);
437
Ben Romberger974a40d2011-07-18 15:08:21 -0700438 for (i = 0; i < num_copps; i++)
439 rtac_add_adm_device(port_id[i], atomic_read(&this_adm.copp_id
440 [afe_get_port_index(port_id[i])]),
441 path, session_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700442 return 0;
443
444fail_cmd:
445
446 return ret;
447}
448
/*
 * Map @bufcnt physical buffers on the ADSP for ADM use
 * (ADM_CMD_MEMORY_MAP_REGIONS) and wait up to 5 s for the ack.
 *
 * @buf_add:	array of physical addresses, one per buffer.
 * @mempool_id:	DSP memory pool id (low byte used).
 * @bufsz:	array of buffer sizes.
 *
 * Uses copp_stat[0] as the completion flag regardless of port.
 * Note: on success this returns the positive value left over from
 * wait_event_timeout() (remaining jiffies), not 0; callers only test
 * for < 0.  Returns -ENODEV/-ENOMEM/-EINVAL on failure.
 */
int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
				uint32_t *bufsz, uint32_t bufcnt)
{
	struct  adm_cmd_memory_map_regions *mmap_regions = NULL;
	struct  adm_memory_map_regions *mregions = NULL;
	void    *mmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;

	pr_debug("%s\n", __func__);
	/* Lazily register with APR on first use */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	/* One command header followed by bufcnt region descriptors */
	cmd_size = sizeof(struct adm_cmd_memory_map_regions)
			+ sizeof(struct adm_memory_map_regions) * bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
								APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS;
	mmap_regions->mempool_id = mempool_id & 0x00ff;
	mmap_regions->nregions = bufcnt & 0x00ff;
	pr_debug("%s: map_regions->nregions = %d\n", __func__,
				mmap_regions->nregions);
	/* Region descriptors start right after the command struct */
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct adm_cmd_memory_map_regions));
	mregions = (struct adm_memory_map_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		mregions->buf_size = bufsz[i];
		++mregions;
	}

	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
523
524int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz,
525 uint32_t bufcnt)
526{
527 struct adm_cmd_memory_unmap_regions *unmap_regions = NULL;
528 struct adm_memory_unmap_regions *mregions = NULL;
529 void *unmap_region_cmd = NULL;
530 void *payload = NULL;
531 int ret = 0;
532 int i = 0;
533 int cmd_size = 0;
534
Ben Rombergerb7603232011-11-23 17:16:27 -0800535 pr_debug("%s\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700536
537 if (this_adm.apr == NULL) {
538 pr_err("%s APR handle NULL\n", __func__);
539 return -EINVAL;
540 }
541
542 cmd_size = sizeof(struct adm_cmd_memory_unmap_regions)
543 + sizeof(struct adm_memory_unmap_regions) * bufcnt;
544
545 unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
546 if (!unmap_region_cmd) {
547 pr_err("%s: allocate unmap_region_cmd failed\n", __func__);
548 return -ENOMEM;
549 }
550 unmap_regions = (struct adm_cmd_memory_unmap_regions *)
551 unmap_region_cmd;
552 unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
553 APR_HDR_LEN(APR_HDR_SIZE),
554 APR_PKT_VER);
555 unmap_regions->hdr.pkt_size = cmd_size;
556 unmap_regions->hdr.src_port = 0;
557 unmap_regions->hdr.dest_port = 0;
558 unmap_regions->hdr.token = 0;
559 unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS;
560 unmap_regions->nregions = bufcnt & 0x00ff;
561 unmap_regions->reserved = 0;
562 pr_debug("%s: unmap_regions->nregions = %d\n", __func__,
563 unmap_regions->nregions);
564 payload = ((u8 *) unmap_region_cmd +
565 sizeof(struct adm_cmd_memory_unmap_regions));
566 mregions = (struct adm_memory_unmap_regions *)payload;
567
568 for (i = 0; i < bufcnt; i++) {
569 mregions->phys = buf_add[i];
570 ++mregions;
571 }
572 atomic_set(&this_adm.copp_stat[0], 0);
573 ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd);
574 if (ret < 0) {
575 pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
576 unmap_regions->hdr.opcode, ret);
577 ret = -EINVAL;
578 goto fail_cmd;
579 }
580
581 ret = wait_event_timeout(this_adm.wait,
582 atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
583 if (!ret) {
584 pr_err("%s: timeout. waited for memory_unmap\n", __func__);
585 ret = -EINVAL;
586 goto fail_cmd;
587 }
588fail_cmd:
589 kfree(unmap_region_cmd);
590 return ret;
591}
592
Ben Romberger974a40d2011-07-18 15:08:21 -0700593int adm_get_copp_id(int port_index)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700594{
595 pr_debug("%s\n", __func__);
596
Ben Romberger974a40d2011-07-18 15:08:21 -0700597 if (port_index < 0) {
598 pr_err("%s: invalid port_id = %d\n", __func__, port_index);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700599 return -EINVAL;
600 }
601
Ben Romberger974a40d2011-07-18 15:08:21 -0700602 return atomic_read(&this_adm.copp_id[port_index]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700603}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700604
605int adm_close(int port_id)
606{
607 struct apr_hdr close;
608
609 int ret = 0;
Laxminath Kasam32657ec2011-08-01 19:26:57 +0530610 int index = 0;
611
612 port_id = afe_convert_virtual_to_portid(port_id);
613
614 index = afe_get_port_index(port_id);
Bharath Ramachandramurthy51a86212011-07-29 12:43:43 -0700615 if (afe_validate_port(port_id) < 0)
616 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700617
Jeff Ohlstein293b91f2011-12-16 13:22:46 -0800618 pr_debug("%s port_id=%d index %d\n", __func__, port_id, index);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700619
620 if (!(atomic_read(&this_adm.copp_cnt[index]))) {
621 pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id);
622
623 goto fail_cmd;
624 }
625 atomic_dec(&this_adm.copp_cnt[index]);
626 if (!(atomic_read(&this_adm.copp_cnt[index]))) {
627
628 close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
629 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
630 close.pkt_size = sizeof(close);
631 close.src_svc = APR_SVC_ADM;
632 close.src_domain = APR_DOMAIN_APPS;
633 close.src_port = port_id;
634 close.dest_svc = APR_SVC_ADM;
635 close.dest_domain = APR_DOMAIN_ADSP;
636 close.dest_port = atomic_read(&this_adm.copp_id[index]);
637 close.token = port_id;
638 close.opcode = ADM_CMD_COPP_CLOSE;
639
640 atomic_set(&this_adm.copp_id[index], RESET_COPP_ID);
641 atomic_set(&this_adm.copp_stat[index], 0);
642
643
644 pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
645 __func__,
646 atomic_read(&this_adm.copp_id[index]),
647 port_id, index,
648 atomic_read(&this_adm.copp_cnt[index]));
649
650 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
651 if (ret < 0) {
652 pr_err("%s ADM close failed\n", __func__);
653 ret = -EINVAL;
654 goto fail_cmd;
655 }
656
657 ret = wait_event_timeout(this_adm.wait,
658 atomic_read(&this_adm.copp_stat[index]),
659 msecs_to_jiffies(TIMEOUT_MS));
660 if (!ret) {
661 pr_err("%s: ADM cmd Route failed for port %d\n",
662 __func__, port_id);
663 ret = -EINVAL;
664 goto fail_cmd;
665 }
Ben Romberger93d4d2d2011-10-19 23:04:02 -0700666
667 rtac_remove_adm_device(port_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700668 }
669
670fail_cmd:
671 return ret;
672}
673
674static int __init adm_init(void)
675{
676 int i = 0;
677 init_waitqueue_head(&this_adm.wait);
678 this_adm.apr = NULL;
679
680 for (i = 0; i < AFE_MAX_PORTS; i++) {
681 atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
682 atomic_set(&this_adm.copp_cnt[i], 0);
683 atomic_set(&this_adm.copp_stat[i], 0);
684 }
685 return 0;
686}
687
688device_initcall(adm_init);