/* arch/arm/mach-msm/smd.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
 * Author: Brian Swetland <swetland@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/termios.h>
#include <linux/ctype.h>
#include <linux/remote_spinlock.h>
#include <linux/uaccess.h>
#include <mach/msm_smd.h>
#include <mach/msm_iomap.h>
#include <mach/system.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>

#include "smd_private.h"
#include "proc_comm.h"
#include "modem_notifier.h"

#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
	|| defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX)
#define CONFIG_QDSP6 1
#endif

#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960)
#define CONFIG_DSPS 1
#endif

#if defined(CONFIG_ARCH_MSM8960)
#define CONFIG_WCNSS 1
#define CONFIG_DSPS_SMSM 1
#endif

#define MODULE_NAME "msm_smd"
#define SMEM_VERSION 0x000B
#define SMD_VERSION 0x00020000

uint32_t SMSM_NUM_ENTRIES = 8;
uint32_t SMSM_NUM_HOSTS = 3;

enum {
	MSM_SMD_DEBUG = 1U << 0,
	MSM_SMSM_DEBUG = 1U << 1,
	MSM_SMD_INFO = 1U << 2,
	MSM_SMSM_INFO = 1U << 3,
};

struct smsm_shared_info {
	uint32_t *state;
	uint32_t *intr_mask;
	uint32_t *intr_mux;
};

static struct smsm_shared_info smsm_info;

struct smsm_size_info_type {
	uint32_t num_hosts;
	uint32_t num_entries;
	uint32_t reserved0;
	uint32_t reserved1;
};

struct smsm_state_cb_info {
	struct list_head cb_list;
	uint32_t mask;
	void *data;
	void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
};

struct smsm_state_info {
	struct list_head callbacks;
	uint32_t last_value;
};

#define SMSM_STATE_ADDR(entry)           (smsm_info.state + entry)
#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
					  entry * SMSM_NUM_HOSTS + host)
#define SMSM_INTR_MUX_ADDR(entry)        (smsm_info.intr_mux + entry)

/* Internal definitions which are not exported in some targets */
enum {
	SMSM_APPS_DEM_I = 3,
};

static int msm_smd_debug_mask;
module_param_named(debug_mask, msm_smd_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(CONFIG_MSM_SMD_DEBUG)
#define SMD_DBG(x...) do {				\
		if (msm_smd_debug_mask & MSM_SMD_DEBUG)	\
			printk(KERN_DEBUG x);		\
	} while (0)

#define SMSM_DBG(x...) do {				\
		if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
			printk(KERN_DEBUG x);		\
	} while (0)

#define SMD_INFO(x...) do {				\
		if (msm_smd_debug_mask & MSM_SMD_INFO)	\
			printk(KERN_INFO x);		\
	} while (0)

#define SMSM_INFO(x...) do {				\
		if (msm_smd_debug_mask & MSM_SMSM_INFO)	\
			printk(KERN_INFO x);		\
	} while (0)
#else
#define SMD_DBG(x...) do { } while (0)
#define SMSM_DBG(x...) do { } while (0)
#define SMD_INFO(x...) do { } while (0)
#define SMSM_INFO(x...) do { } while (0)
#endif

static unsigned last_heap_free = 0xffffffff;

static inline void smd_write_intr(unsigned int val,
				const void __iomem *addr);

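/*
 * Per-target definitions of the outgoing interrupt triggers.  Each
 * MSM_TRIG_* macro pokes the register that raises an interrupt on the
 * named remote processor; an empty definition means that processor is
 * not present (or is not signalled this way) on the given target.
 */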
#if defined(CONFIG_ARCH_MSM7X30)
#define MSM_TRIG_A2M_SMD_INT     \
			(smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMD_INT    \
			(smd_write_intr(1 << 8, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2M_SMSM_INT    \
			(smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMSM_INT   \
			(smd_write_intr(1 << 8, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2DSPS_SMD_INT
#define MSM_TRIG_A2DSPS_SMSM_INT
#define MSM_TRIG_A2WCNSS_SMD_INT
#define MSM_TRIG_A2WCNSS_SMSM_INT
#elif defined(CONFIG_ARCH_MSM8X60)
#define MSM_TRIG_A2M_SMD_INT     \
			(smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMD_INT    \
			(smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2M_SMSM_INT    \
			(smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMSM_INT   \
			(smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2DSPS_SMD_INT  \
			(smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
#define MSM_TRIG_A2DSPS_SMSM_INT
#define MSM_TRIG_A2WCNSS_SMD_INT
#define MSM_TRIG_A2WCNSS_SMSM_INT
#elif defined(CONFIG_ARCH_MSM8960)
#define MSM_TRIG_A2M_SMD_INT     \
			(smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMD_INT    \
			(smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2M_SMSM_INT    \
			(smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMSM_INT   \
			(smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2DSPS_SMD_INT  \
			(smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
#define MSM_TRIG_A2DSPS_SMSM_INT \
			(smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4094))
#define MSM_TRIG_A2WCNSS_SMD_INT  \
			(smd_write_intr(1 << 25, MSM_APCS_GCC_BASE + 0x8))
#define MSM_TRIG_A2WCNSS_SMSM_INT \
			(smd_write_intr(1 << 23, MSM_APCS_GCC_BASE + 0x8))
#elif defined(CONFIG_ARCH_FSM9XXX)
#define MSM_TRIG_A2Q6_SMD_INT	\
			(smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2Q6_SMSM_INT	\
			(smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2M_SMD_INT	\
			(smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2M_SMSM_INT	\
			(smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2DSPS_SMD_INT
#define MSM_TRIG_A2DSPS_SMSM_INT
#define MSM_TRIG_A2WCNSS_SMD_INT
#define MSM_TRIG_A2WCNSS_SMSM_INT
#else
#define MSM_TRIG_A2M_SMD_INT     \
			(smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
#define MSM_TRIG_A2Q6_SMD_INT    \
			(smd_write_intr(1, MSM_CSR_BASE + 0x400 + (8) * 4))
#define MSM_TRIG_A2M_SMSM_INT    \
			(smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
#define MSM_TRIG_A2Q6_SMSM_INT   \
			(smd_write_intr(1, MSM_CSR_BASE + 0x400 + (8) * 4))
#define MSM_TRIG_A2DSPS_SMD_INT
#define MSM_TRIG_A2DSPS_SMSM_INT
#define MSM_TRIG_A2WCNSS_SMD_INT
#define MSM_TRIG_A2WCNSS_SMSM_INT
#endif

#define SMD_LOOPBACK_CID 100

static LIST_HEAD(smd_ch_list_loopback);
static irqreturn_t smsm_irq_handler(int irq, void *data);
static void smd_fake_irq_handler(unsigned long arg);

static void notify_smsm_cb_clients_worker(struct work_struct *work);
static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
static DEFINE_MUTEX(smsm_lock);
static struct smsm_state_info *smsm_states;
static int spinlocks_initialized;

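/*
 * smd_write_intr() is the single point through which outgoing interrupts
 * are raised; the barrier ensures any preceding shared-memory updates are
 * visible to the remote processor before it is interrupted.
 */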
static inline void smd_write_intr(unsigned int val,
				const void __iomem *addr)
{
	wmb();
	__raw_writel(val, addr);
}

#ifdef CONFIG_WCNSS
static inline void wakeup_v1_riva(void)
{
	/*
	 * workaround hack for RIVA v1 hardware bug:
	 * trigger GPIO 40 to wake up RIVA from power collapse;
	 * not to be sent to customers
	 */
	__raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
	__raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
	/* end workaround */
}
#else
static inline void wakeup_v1_riva(void) {}
#endif

static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocols don't use smsm_intr_mask,
	   but still communicate with the modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		MSM_TRIG_A2M_SMSM_INT;

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
#if !defined(CONFIG_ARCH_MSM8X60) && !defined(CONFIG_ARCH_MSM8960)
		uint32_t mux_val;

		if (smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
#endif
		MSM_TRIG_A2Q6_SMSM_INT;
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		wakeup_v1_riva();
		MSM_TRIG_A2WCNSS_SMSM_INT;
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		MSM_TRIG_A2DSPS_SMSM_INT;
	}

	schedule_work(&smsm_cb_work);
}

static inline void notify_modem_smd(void)
{
	MSM_TRIG_A2M_SMD_INT;
}

static inline void notify_dsp_smd(void)
{
	MSM_TRIG_A2Q6_SMD_INT;
}

static inline void notify_dsps_smd(void)
{
	MSM_TRIG_A2DSPS_SMD_INT;
}

static inline void notify_wcnss_smd(void)
{
	wakeup_v1_riva();
	MSM_TRIG_A2WCNSS_SMD_INT;
}

void smd_diag(void)
{
	char *x;
	int size;

	x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
	if (x != 0) {
		x[SZ_DIAG_ERR_MSG - 1] = 0;
		SMD_INFO("smem: DIAG '%s'\n", x);
	}

	x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
	if (x != 0) {
		x[size - 1] = 0;
		pr_err("smem: CRASH LOG\n'%s'\n", x);
	}
}


static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}

int smsm_check_for_modem_crash(void)
{
	/* if the modem's not ready yet, we have to hope for the best */
	if (!smsm_info.state)
		return 0;

	if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
		handle_modem_crash();
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(smsm_check_for_modem_crash);

/* the spinlock is used to synchronize between the
 * irq handler and code that mutates the channel
 * list or fiddles with channel state
 */
static DEFINE_SPINLOCK(smd_lock);
DEFINE_SPINLOCK(smem_lock);

/* the mutex is used during open() and close()
 * operations to avoid races while creating or
 * destroying smd_channel structures
 */
static DEFINE_MUTEX(smd_creation_mutex);

static int smd_initialized;

struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};

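/*
 * Protocol v1 embeds both half-channels and their data FIFOs in a single
 * shared structure (see smd_alloc_v1()), while v2 carries only the two
 * half-channel control blocks and keeps the FIFOs in a separate SMEM item
 * that is split in half for TX and RX (see smd_alloc_v2()).
 */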
struct smd_channel {
	volatile struct smd_half_channel *send;
	volatile struct smd_half_channel *recv;
	unsigned char *send_data;
	unsigned char *recv_data;
	unsigned fifo_size;
	unsigned fifo_mask;
	struct list_head ch_list;

	unsigned current_packet;
	unsigned n;
	void *priv;
	void (*notify)(void *priv, unsigned flags);

	int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
	int (*write)(smd_channel_t *ch, const void *data, int len,
			int user_buf);
	int (*read_avail)(smd_channel_t *ch);
	int (*write_avail)(smd_channel_t *ch);
	int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
			int user_buf);

	void (*update_state)(smd_channel_t *ch);
	unsigned last_state;
	void (*notify_other_cpu)(void);

	char name[20];
	struct platform_device pdev;
	unsigned type;

	int pending_pkt_sz;

	char is_pkt_ch;
};

struct edge_to_pid {
	uint32_t local_pid;
	uint32_t remote_pid;
};

/*
 * SMD Processor ID's.
 *
 * For all processors that have both SMSM and SMD clients,
 * the SMSM Processor ID and the SMD Processor ID will
 * be the same.  In cases where a processor only supports
 * SMD, the entry will only exist in this enum.
 */
enum {
	SMD_APPS = SMSM_APPS,
	SMD_MODEM = SMSM_MODEM,
	SMD_Q6 = SMSM_Q6,
	SMD_WCNSS = SMSM_WCNSS,
	SMD_DSPS = SMSM_DSPS,
	SMD_MODEM_Q6_FW,
};

/**
 * Maps edge type to local and remote processor ID's.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMSM_APPS, SMSM_MODEM},
	[SMD_APPS_QDSP] = {SMSM_APPS, SMSM_Q6},
	[SMD_MODEM_QDSP] = {SMSM_MODEM, SMSM_Q6},
	[SMD_APPS_DSPS] = {SMSM_APPS, SMSM_DSPS},
	[SMD_MODEM_DSPS] = {SMSM_MODEM, SMSM_DSPS},
	[SMD_QDSP_DSPS] = {SMSM_Q6, SMSM_DSPS},
	[SMD_APPS_WCNSS] = {SMSM_APPS, SMSM_WCNSS},
	[SMD_MODEM_WCNSS] = {SMSM_MODEM, SMSM_WCNSS},
	[SMD_QDSP_WCNSS] = {SMSM_Q6, SMSM_WCNSS},
	[SMD_DSPS_WCNSS] = {SMSM_DSPS, SMSM_WCNSS},
	[SMD_APPS_Q6FW] = {SMSM_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMSM_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMSM_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMSM_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMSM_WCNSS, SMD_MODEM_Q6_FW},
};

struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};

static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);

static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);

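/*
 * Walk the shared channel allocation table and create local smd_channel
 * entries for any newly allocated channels that involve the apps
 * processor.  Runs from probe_work whenever the SMEM heap grows.
 */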
static void smd_channel_probe_worker(struct work_struct *work)
{
	struct smd_alloc_elm *shared;
	unsigned n;
	uint32_t type;

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);

	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	mutex_lock(&smd_probe_lock);
	for (n = 0; n < 64; n++) {
		if (smd_ch_allocated[n])
			continue;

		/* channel should be allocated only if APPS
		   processor is involved */
		type = SMD_CHANNEL_TYPE(shared[n].type);
		if ((type != SMD_APPS_MODEM) && (type != SMD_APPS_QDSP) &&
		    (type != SMD_APPS_DSPS) && (type != SMD_APPS_WCNSS))
			continue;
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		if (!smd_alloc_channel(&shared[n]))
			smd_ch_allocated[n] = 1;
		else
			SMD_INFO("Probe skipping ch %d, not allocated\n", n);
	}
	mutex_unlock(&smd_probe_lock);
}

/**
 * Lookup processor ID and determine if it belongs to the provided edge
 * type.
 *
 * @shared2:   Pointer to v2 shared channel structure
 * @type:      Edge type
 * @pid:       Processor ID of processor on edge
 * @local_ch:  Channel that belongs to processor @pid
 * @remote_ch: Other side of the edge containing @pid
 *
 * Returns 0 for not on edge, 1 for found on edge
 */
static int pid_is_on_edge(struct smd_shared_v2 *shared2,
		uint32_t type, uint32_t pid,
		struct smd_half_channel **local_ch,
		struct smd_half_channel **remote_ch
		)
{
	int ret = 0;
	struct edge_to_pid *edge;

	*local_ch = 0;
	*remote_ch = 0;

	if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
		return 0;

	edge = &edge_to_pids[type];
	if (edge->local_pid != edge->remote_pid) {
		if (pid == edge->local_pid) {
			*local_ch = &shared2->ch0;
			*remote_ch = &shared2->ch1;
			ret = 1;
		} else if (pid == edge->remote_pid) {
			*local_ch = &shared2->ch1;
			*remote_ch = &shared2->ch0;
			ret = 1;
		}
	}

	return ret;
}


static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	struct smd_shared_v2 *shared2;
	uint32_t type;
	struct smd_half_channel *local_ch;
	struct smd_half_channel *remote_ch;

	for (n = 0; n < SMD_CHANNELS; n++) {
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch) ||
		    (pid == SMSM_MODEM &&
		     pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
			     &local_ch, &remote_ch))) {

			/* force remote state for processor being restarted */
			if (local_ch->state != SMD_SS_CLOSED) {
				local_ch->state = new_state;
				local_ch->fDSR = 0;
				local_ch->fCTS = 0;
				local_ch->fCD = 0;
				local_ch->fSTATE = 1;
			}
		}
	}
}

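/*
 * Called when a remote processor restarts: release any SMEM spinlocks
 * still held by that processor, clear its SMSM state, and walk every
 * channel on its edges through CLOSING and then CLOSED so that local
 * clients see the remote side go away.
 */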
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned n;
	uint32_t *smem_lock;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	smem_lock = smem_alloc(SMEM_SPINLOCK_ARRAY, 8 * sizeof(uint32_t));
	if (smem_lock) {
		SMD_DBG("%s: releasing locks\n", __func__);
		for (n = 0; n < 8; n++) {
			uint32_t pid = readl_relaxed(smem_lock);
			if (pid == (restart_pid + 1))
				writel_relaxed(0, smem_lock);
			smem_lock++;
		}
	}

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* clear apps SMSM to restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM)
			writel_relaxed(0, SMSM_STATE_ADDR(SMSM_APPS));

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		MSM_TRIG_A2M_SMSM_INT;
		MSM_TRIG_A2Q6_SMSM_INT;
		MSM_TRIG_A2DSPS_SMSM_INT;
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}

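/*
 * FIFO accounting: each half-channel exports head/tail indices into a
 * power-of-two sized ring, so (head - tail) & fifo_mask is the number of
 * bytes queued.  The write side keeps one byte unused (see
 * ch_write_buffer()) so a full ring is never confused with an empty one.
 */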
/* how many bytes are available for reading */
static int smd_stream_read_avail(struct smd_channel *ch)
{
	return (ch->recv->head - ch->recv->tail) & ch->fifo_mask;
}

/* how many bytes we are free to write */
static int smd_stream_write_avail(struct smd_channel *ch)
{
	return ch->fifo_mask -
		((ch->send->head - ch->send->tail) & ch->fifo_mask);
}

static int smd_packet_read_avail(struct smd_channel *ch)
{
	if (ch->current_packet) {
		int n = smd_stream_read_avail(ch);
		if (n > ch->current_packet)
			n = ch->current_packet;
		return n;
	} else {
		return 0;
	}
}

static int smd_packet_write_avail(struct smd_channel *ch)
{
	int n = smd_stream_write_avail(ch);
	return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
}

static int ch_is_open(struct smd_channel *ch)
{
	return (ch->recv->state == SMD_SS_OPENED ||
		ch->recv->state == SMD_SS_FLUSHING)
		&& (ch->send->state == SMD_SS_OPENED);
}

/* provide a pointer and length to readable data in the fifo */
static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->recv->head;
	unsigned tail = ch->recv->tail;
	*ptr = (void *) (ch->recv_data + tail);

	if (tail <= head)
		return head - tail;
	else
		return ch->fifo_size - tail;
}

static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->recv->fBLOCKREADINTR;
}

/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->recv->tail = (ch->recv->tail + count) & ch->fifo_mask;
	wmb();
	ch->send->fTAIL = 1;
}

/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}

static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}

static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		ch->current_packet = hdr[0];
	}
}

/* provide a pointer and length to next free space in the fifo */
static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->send->head;
	unsigned tail = ch->send->tail;
	*ptr = (void *) (ch->send_data + head);

	if (head < tail) {
		return tail - head - 1;
	} else {
		if (tail == 0)
			return ch->fifo_size - head - 1;
		else
			return ch->fifo_size - head;
	}
}

/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->send->head = (ch->send->head + count) & ch->fifo_mask;
	wmb();
	ch->send->fHEAD = 1;
}

static void ch_set_state(struct smd_channel *ch, unsigned n)
{
	if (n == SMD_SS_OPENED) {
		ch->send->fDSR = 1;
		ch->send->fCTS = 1;
		ch->send->fCD = 1;
	} else {
		ch->send->fDSR = 0;
		ch->send->fCTS = 0;
		ch->send->fCD = 0;
	}
	ch->send->state = n;
	ch->send->fSTATE = 1;
	ch->notify_other_cpu();
}

static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}

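/*
 * React to a state change reported by the remote side: advance our own
 * half-channel state to match and deliver OPEN/CLOSE events to the
 * client's notify callback.
 */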
static void smd_state_change(struct smd_channel *ch,
			     unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		if (ch->send->state == SMD_SS_CLOSING ||
		    ch->send->state == SMD_SS_CLOSED) {
			ch->recv->tail = 0;
			ch->send->head = 0;
			ch->send->fBLOCKREADINTR = 0;
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		if (ch->send->state == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		if (ch->send->state == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		if (ch->send->state == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
					&finalize_channel_close_work);
		}
		break;
	}
}

static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->recv->fSTATE)
			ch->recv->fSTATE = 0;
		tmp = ch->recv->state;
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}

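/*
 * ch_flags bits gathered below: 1 = remote wrote data (fHEAD), 2 = remote
 * consumed data we sent (fTAIL), 4 = remote changed channel state
 * (fSTATE).  A DATA event is delivered for any of them; a STATUS event is
 * delivered for an fSTATE flag that did not produce a full state change.
 */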
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			if (ch->recv->fHEAD) {
				ch->recv->fHEAD = 0;
				ch_flags |= 1;
			}
			if (ch->recv->fTAIL) {
				ch->recv->fTAIL = 0;
				ch_flags |= 2;
			}
			if (ch->recv->fSTATE) {
				ch->recv->fSTATE = 0;
				ch_flags |= 4;
			}
		}
		tmp = ch->recv->state;
		if (tmp != ch->last_state) {
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags) {
			ch->update_state(ch);
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		if (ch_flags & 0x4 && !state_change)
			ch->notify(ch->priv, SMD_EVENT_STATUS);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}

static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}

#if defined(CONFIG_QDSP6)
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
#endif

#if defined(CONFIG_DSPS)
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
#endif

#if defined(CONFIG_WCNSS)
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
#endif

static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);

static inline int smd_need_int(struct smd_channel *ch)
{
	if (ch_is_open(ch)) {
		if (ch->recv->fHEAD || ch->recv->fTAIL || ch->recv->fSTATE)
			return 1;
		if (ch->recv->state != ch->last_state)
			return 1;
	}
	return 0;
}

void smd_sleep_exit(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	int need_int = 0;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();

	if (need_int) {
		SMD_DBG("smd_sleep_exit need interrupt\n");
		tasklet_schedule(&smd_fake_irq_tasklet);
	}
}
EXPORT_SYMBOL(smd_sleep_exit);

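/*
 * Decide whether a channel uses the packet protocol: the allocation
 * entry's transfer type is authoritative (1 = streaming, 2 = packet);
 * a type of 0 falls back to name- and cid-based heuristics for legacy
 * entries.
 */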
static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
{
	if (SMD_XFER_TYPE(alloc_elm->type) == 1)
		return 0;
	else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
		return 1;

	/* for cases where xfer type is 0 */
	if (!strncmp(alloc_elm->name, "DAL", 3))
		return 0;

	/* for cases where xfer type is 0 */
	if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
		return 0;

	if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
		return 1;
	else
		return 0;
}

static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
				int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch))
			break;
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
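
/*
 * Packet channels prepend a 5-word header to each packet; hdr[0] is the
 * payload length in bytes and the remaining words are currently written
 * as zero.  smd_packet_read_avail()/smd_packet_read() use the same header
 * to track how much of the current packet is still pending.
 */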

static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
				int user_buf)
{
	int ret;
	unsigned hdr[5];

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;

	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}

	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}

static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
{
	int r;

	if (len < 0)
		return -EINVAL;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	return r;
}

static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
{
	unsigned long flags;
	int r;

	if (len < 0)
		return -EINVAL;

	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	spin_lock_irqsave(&smd_lock, flags);
	ch->current_packet -= r;
	update_packet_state(ch);
	spin_unlock_irqrestore(&smd_lock, flags);

	return r;
}

static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
					int user_buf)
{
	int r;

	if (len < 0)
		return -EINVAL;

	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	ch->current_packet -= r;
	update_packet_state(ch);

	return r;
}

static int smd_alloc_v2(struct smd_channel *ch)
{
	struct smd_shared_v2 *shared2;
	void *buffer;
	unsigned buffer_sz;

	shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n, sizeof(*shared2));
	if (!shared2) {
		SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
		return -1;
	}
	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -1;
	}

	/* buffer must be a power-of-two size */
	if (buffer_sz & (buffer_sz - 1))
		return -1;

	buffer_sz /= 2;
	ch->send = &shared2->ch0;
	ch->recv = &shared2->ch1;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;
	return 0;
}

static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -1;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	return 0;
}

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;

	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	ch->fifo_mask = ch->fifo_size - 1;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else
		ch->notify_other_cpu = notify_wcnss_smd;

	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		 ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}

static inline void notify_loopback_smd(void)
{
	unsigned long flags;
	struct smd_channel *ch;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
		ch->notify(ch->priv, SMD_EVENT_DATA);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}

static int smd_alloc_loopback_channel(void)
{
	static struct smd_half_channel smd_loopback_ctl;
	static char smd_loopback_data[SMD_BUF_SIZE];
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("%s: out of memory\n", __func__);
		return -1;
	}
	ch->n = SMD_LOOPBACK_CID;

	ch->send = &smd_loopback_ctl;
	ch->recv = &smd_loopback_ctl;
	ch->send_data = smd_loopback_data;
	ch->recv_data = smd_loopback_data;
	ch->fifo_size = SMD_BUF_SIZE;

	ch->fifo_mask = ch->fifo_size - 1;
	ch->type = SMD_LOOPBACK_TYPE;
	ch->notify_other_cpu = notify_loopback_smd;

	ch->read = smd_stream_read;
	ch->write = smd_stream_write;
	ch->read_avail = smd_stream_read_avail;
	ch->write_avail = smd_stream_write_avail;
	ch->update_state = update_stream_state;
	ch->read_from_cb = smd_stream_read;

	memset(ch->name, 0, 20);
	memcpy(ch->name, "local_loopback", 14);

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	return 0;
}

static void do_nothing_notify(void *priv, unsigned flags)
{
}

static void finalize_channel_close_fn(struct work_struct *work)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
		list_del(&ch->ch_list);
		spin_unlock_irqrestore(&smd_lock, flags);
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
		ch->notify = do_nothing_notify;
		spin_lock_irqsave(&smd_lock, flags);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}

struct smd_channel *smd_get_channel(const char *name, uint32_t type)
{
	struct smd_channel *ch;

	mutex_lock(&smd_creation_mutex);
	list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
		if (!strcmp(name, ch->name) &&
			(type == ch->type)) {
			list_del(&ch->ch_list);
			mutex_unlock(&smd_creation_mutex);
			return ch;
		}
	}
	mutex_unlock(&smd_creation_mutex);

	return NULL;
}

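/*
 * Open a named channel on a specific edge.  The channel must already have
 * been created from the allocation table; it is moved from the closed
 * list onto the per-edge list and, except for loopback, the local state
 * machine is kicked into SMD_SS_OPENING.  The notify callback runs from
 * IRQ/tasklet context.
 */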
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch)
		return -ENODEV;

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->send->state = SMD_SS_OPENED;
		ch->send->fDSR = 1;
		ch->send->fCTS = 1;
		ch->send->fCD = 1;
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);


int smd_open(const char *name, smd_channel_t **_ch,
	     void *priv, void (*notify)(void *, unsigned))
{
	return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
				      notify);
}
EXPORT_SYMBOL(smd_open);

int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		ch->send->fDSR = 0;
		ch->send->fCTS = 0;
		ch->send->fCD = 0;
		ch->send->state = SMD_SS_CLOSED;
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->recv->state == SMD_SS_OPENED) {
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);

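/*
 * Segmented packet write API: smd_write_start() reserves a packet of a
 * given total length and writes its header, smd_write_segment() then
 * writes the payload in one or more pieces, and smd_write_end() verifies
 * that the full length was supplied.  While a segmented write is in
 * progress, plain smd_write() on the same channel returns -EBUSY.
 *
 * Illustrative sketch only (placeholder names, error handling omitted):
 *
 *	if (!smd_write_start(my_ch, total_len)) {
 *		smd_write_segment(my_ch, hdr_buf, hdr_len, 0);
 *		smd_write_segment(my_ch, payload, total_len - hdr_len, 0);
 *		smd_write_end(my_ch);
 *	}
 */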
1557int smd_write_start(smd_channel_t *ch, int len)
1558{
1559 int ret;
1560 unsigned hdr[5];
1561
1562 if (!ch) {
1563 pr_err("%s: Invalid channel specified\n", __func__);
1564 return -ENODEV;
1565 }
1566 if (!ch->is_pkt_ch) {
1567 pr_err("%s: non-packet channel specified\n", __func__);
1568 return -EACCES;
1569 }
1570 if (len < 1) {
1571 pr_err("%s: invalid length: %d\n", __func__, len);
1572 return -EINVAL;
1573 }
1574
1575 if (ch->pending_pkt_sz) {
1576 pr_err("%s: packet of size: %d in progress\n", __func__,
1577 ch->pending_pkt_sz);
1578 return -EBUSY;
1579 }
1580 ch->pending_pkt_sz = len;
1581
1582 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
1583 ch->pending_pkt_sz = 0;
1584 SMD_DBG("%s: no space to write packet header\n", __func__);
1585 return -EAGAIN;
1586 }
1587
1588 hdr[0] = len;
1589 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1590
1592 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1593 if (ret < 0 || ret != sizeof(hdr)) {
1594 ch->pending_pkt_sz = 0;
1595 pr_err("%s: packet header failed to write\n", __func__);
1596 return -EPERM;
1597 }
1598 return 0;
1599}
1600EXPORT_SYMBOL(smd_write_start);
1601
1602int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1603{
1604 int bytes_written;
1605
1606 if (!ch) {
1607 pr_err("%s: Invalid channel specified\n", __func__);
1608 return -ENODEV;
1609 }
1610 if (len < 1) {
1611 pr_err("%s: invalid length: %d\n", __func__, len);
1612 return -EINVAL;
1613 }
1614
1615 if (!ch->pending_pkt_sz) {
1616 pr_err("%s: no transaction in progress\n", __func__);
1617 return -ENOEXEC;
1618 }
1619 if (len > ch->pending_pkt_sz) {
1620 pr_err("%s: segment of size %d would exceed the pending packet length\n",
1621 __func__, len);
1622 return -EINVAL;
1623 }
1624
1625 bytes_written = smd_stream_write(ch, data, len, user_buf);
1626
1627 ch->pending_pkt_sz -= bytes_written;
1628
1629 return bytes_written;
1630}
1631EXPORT_SYMBOL(smd_write_segment);
1632
1633int smd_write_end(smd_channel_t *ch)
1634{
1635
1636 if (!ch) {
1637 pr_err("%s: Invalid channel specified\n", __func__);
1638 return -ENODEV;
1639 }
1640 if (ch->pending_pkt_sz) {
1641 pr_err("%s: current packet not completely written\n", __func__);
1642 return -E2BIG;
1643 }
1644
1645 return 0;
1646}
1647EXPORT_SYMBOL(smd_write_end);
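/*
 * Illustrative sketch (not part of this driver) of the segmented packet
 * write sequence above: reserve the packet with smd_write_start(), push
 * the payload with smd_write_segment() until the pending size is consumed,
 * then seal it with smd_write_end(). send_packet() is invented for the
 * example; a real client would wait for write space rather than spin when
 * smd_write_segment() reports no progress.
 *
 *	static int send_packet(smd_channel_t *ch, void *buf, int len)
 *	{
 *		char *p = buf;
 *		int done = 0;
 *		int rc = smd_write_start(ch, len);
 *
 *		if (rc < 0)
 *			return rc;	// e.g. -EAGAIN: no room for header
 *		while (done < len) {
 *			rc = smd_write_segment(ch, p + done, len - done, 0);
 *			if (rc < 0)
 *				return rc;
 *			done += rc;
 *		}
 *		return smd_write_end(ch);
 *	}
 */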
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001648
1649int smd_read(smd_channel_t *ch, void *data, int len)
1650{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001651 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001652}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001653EXPORT_SYMBOL(smd_read);
1654
1655int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
1656{
1657 return ch->read(ch, data, len, 1);
1658}
1659EXPORT_SYMBOL(smd_read_user_buffer);
1660
1661int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
1662{
1663 return ch->read_from_cb(ch, data, len, 0);
1664}
1665EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001666
1667int smd_write(smd_channel_t *ch, const void *data, int len)
1668{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001669 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001670}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001671EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001672
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001673int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08001674{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001675 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08001676}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001677EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08001678
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001679int smd_read_avail(smd_channel_t *ch)
1680{
1681 return ch->read_avail(ch);
1682}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001683EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001684
1685int smd_write_avail(smd_channel_t *ch)
1686{
1687 return ch->write_avail(ch);
1688}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001689EXPORT_SYMBOL(smd_write_avail);
1690
1691void smd_enable_read_intr(smd_channel_t *ch)
1692{
1693 if (ch)
1694 ch->send->fBLOCKREADINTR = 0;
1695}
1696EXPORT_SYMBOL(smd_enable_read_intr);
1697
1698void smd_disable_read_intr(smd_channel_t *ch)
1699{
1700 if (ch)
1701 ch->send->fBLOCKREADINTR = 1;
1702}
1703EXPORT_SYMBOL(smd_disable_read_intr);
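/*
 * Illustrative sketch (not part of this driver): a client that drains the
 * channel in a tight loop can set fBLOCKREADINTR via the helpers above so
 * the remote side does not raise a receive interrupt for every write, then
 * restore interrupts before going back to sleep. poll_quietly() is invented
 * for the example.
 *
 *	static void poll_quietly(smd_channel_t *ch, void *buf, int len)
 *	{
 *		smd_disable_read_intr(ch);
 *		while (smd_read_avail(ch) >= len)
 *			smd_read(ch, buf, len);
 *		smd_enable_read_intr(ch);
 *	}
 */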
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001704
1705int smd_wait_until_readable(smd_channel_t *ch, int bytes)
1706{
1707 return -1;
1708}
1709
1710int smd_wait_until_writable(smd_channel_t *ch, int bytes)
1711{
1712 return -1;
1713}
1714
1715int smd_cur_packet_size(smd_channel_t *ch)
1716{
1717 return ch->current_packet;
1718}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001719EXPORT_SYMBOL(smd_cur_packet_size);
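/*
 * Illustrative sketch (not part of this driver): draining one packet from a
 * packet-mode channel. smd_cur_packet_size() reports the size of the packet
 * currently being received and smd_read_avail() how much of it has arrived,
 * so a client typically waits until the whole packet is present before
 * reading. drain_one_packet() is invented for the example.
 *
 *	static int drain_one_packet(smd_channel_t *ch, void *buf, int buf_len)
 *	{
 *		int pkt = smd_cur_packet_size(ch);
 *
 *		if (pkt <= 0 || pkt > buf_len)
 *			return -EINVAL;
 *		if (smd_read_avail(ch) < pkt)
 *			return -EAGAIN;	// packet not fully arrived yet
 *		return smd_read(ch, buf, pkt);
 *	}
 */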
1720
1721int smd_tiocmget(smd_channel_t *ch)
1722{
1723 return (ch->recv->fDSR ? TIOCM_DSR : 0) |
1724 (ch->recv->fCTS ? TIOCM_CTS : 0) |
1725 (ch->recv->fCD ? TIOCM_CD : 0) |
1726 (ch->recv->fRI ? TIOCM_RI : 0) |
1727 (ch->send->fCTS ? TIOCM_RTS : 0) |
1728 (ch->send->fDSR ? TIOCM_DTR : 0);
1729}
1730EXPORT_SYMBOL(smd_tiocmget);
1731
Vamsi Krishnacb12a102011-08-17 15:18:26 -07001732/* This API must be called with smd_lock held. */
1733int
1734smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001735{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001736 if (set & TIOCM_DTR)
1737 ch->send->fDSR = 1;
1738
1739 if (set & TIOCM_RTS)
1740 ch->send->fCTS = 1;
1741
1742 if (clear & TIOCM_DTR)
1743 ch->send->fDSR = 0;
1744
1745 if (clear & TIOCM_RTS)
1746 ch->send->fCTS = 0;
1747
1748 ch->send->fSTATE = 1;
1749 barrier();
1750 ch->notify_other_cpu();
Vamsi Krishnacb12a102011-08-17 15:18:26 -07001751
1752 return 0;
1753}
1754EXPORT_SYMBOL(smd_tiocmset_from_cb);
1755
1756int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
1757{
1758 unsigned long flags;
1759
1760 spin_lock_irqsave(&smd_lock, flags);
1761 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001762 spin_unlock_irqrestore(&smd_lock, flags);
1763
1764 return 0;
1765}
1766EXPORT_SYMBOL(smd_tiocmset);
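/*
 * Illustrative sketch (not part of this driver): driving the modem-control
 * signals with the TIOCM helpers above. Asserting TIOCM_DTR/TIOCM_RTS sets
 * the local fDSR/fCTS flags seen by the remote side, while smd_tiocmget()
 * reports both the remote flags and the locally asserted ones.
 * assert_ready() is invented for the example.
 *
 *	static void assert_ready(smd_channel_t *ch)
 *	{
 *		smd_tiocmset(ch, TIOCM_DTR | TIOCM_RTS, 0);
 *		if (!(smd_tiocmget(ch) & TIOCM_DSR))
 *			pr_debug("remote side has not raised DSR yet\n");
 *	}
 */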
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001767
1768
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001769/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001770
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001771/* smem_alloc returns the pointer to smem item if it is already allocated.
1772 * Otherwise, it returns NULL.
1773 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001774void *smem_alloc(unsigned id, unsigned size)
1775{
1776 return smem_find(id, size);
1777}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001778EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001779
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001780#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
1781static remote_spinlock_t remote_spinlock;
1782
1783/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
1784 * it allocates it and then returns the pointer to it.
1785 */
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05301786void *smem_alloc2(unsigned id, unsigned size_in)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001787{
1788 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1789 struct smem_heap_entry *toc = shared->heap_toc;
1790 unsigned long flags;
1791 void *ret = NULL;
1792
1793 if (!shared->heap_info.initialized) {
1794 pr_err("%s: smem heap info not initialized\n", __func__);
1795 return NULL;
1796 }
1797
1798 if (id >= SMEM_NUM_ITEMS)
1799 return NULL;
1800
1801 size_in = ALIGN(size_in, 8);
1802 remote_spin_lock_irqsave(&remote_spinlock, flags);
1803 if (toc[id].allocated) {
1804 SMD_DBG("%s: %u already allocated\n", __func__, id);
1805 if (size_in != toc[id].size)
1806 pr_err("%s: wrong size %u (expected %u)\n",
1807 __func__, toc[id].size, size_in);
1808 else
1809 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
1810 } else if (id > SMEM_FIXED_ITEM_LAST) {
1811 SMD_DBG("%s: allocating %u\n", __func__, id);
1812 if (shared->heap_info.heap_remaining >= size_in) {
1813 toc[id].offset = shared->heap_info.free_offset;
1814 toc[id].size = size_in;
1815 wmb();
1816 toc[id].allocated = 1;
1817
1818 shared->heap_info.free_offset += size_in;
1819 shared->heap_info.heap_remaining -= size_in;
1820 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
1821 } else
1822 pr_err("%s: not enough memory %u (required %u)\n",
1823 __func__, shared->heap_info.heap_remaining,
1824 size_in);
1825 }
1826 wmb();
1827 remote_spin_unlock_irqrestore(&remote_spinlock, flags);
1828 return ret;
1829}
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05301830EXPORT_SYMBOL(smem_alloc2);
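/*
 * Illustrative sketch (not part of this driver): claiming a dynamic smem
 * item with smem_alloc2(). SMEM_ID_VENDOR0 is assumed to be one of the
 * dynamic item IDs above SMEM_FIXED_ITEM_LAST, and struct vendor_shared is
 * invented for the example; the 8-byte alignment applied above means the
 * item may end up slightly larger than requested.
 *
 *	struct vendor_shared {
 *		uint32_t magic;
 *		uint32_t flags;
 *	};
 *
 *	static struct vendor_shared *get_vendor_area(void)
 *	{
 *		return smem_alloc2(SMEM_ID_VENDOR0,
 *				sizeof(struct vendor_shared));
 *	}
 */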
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001831
1832void *smem_get_entry(unsigned id, unsigned *size)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001833{
1834 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1835 struct smem_heap_entry *toc = shared->heap_toc;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05301836 int use_spinlocks = spinlocks_initialized;
1837 void *ret = 0;
1838 unsigned long flags = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001839
1840 if (id >= SMEM_NUM_ITEMS)
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05301841 return ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001842
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05301843 if (use_spinlocks)
1844 remote_spin_lock_irqsave(&remote_spinlock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001845 /* toc is in device memory and cannot be speculatively accessed */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001846 if (toc[id].allocated) {
1847 *size = toc[id].size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001848 barrier();
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05301849 ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001850 } else {
1851 *size = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001852 }
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05301853 if (use_spinlocks)
1854 remote_spin_unlock_irqrestore(&remote_spinlock, flags);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001855
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05301856 return ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001857}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001858EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001859
1860void *smem_find(unsigned id, unsigned size_in)
1861{
1862 unsigned size;
1863 void *ptr;
1864
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001865 ptr = smem_get_entry(id, &size);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001866 if (!ptr)
1867 return 0;
1868
1869 size_in = ALIGN(size_in, 8);
1870 if (size_in != size) {
1871 pr_err("smem_find(%d, %d): wrong size %d\n",
1872 id, size_in, size);
1873 return 0;
1874 }
1875
1876 return ptr;
1877}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001878EXPORT_SYMBOL(smem_find);
1879
1880static int smsm_cb_init(void)
1881{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001882 struct smsm_state_info *state_info;
1883 int n;
1884 int ret = 0;
1885
1886 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
1887 GFP_KERNEL);
1888
1889 if (!smsm_states) {
1890 pr_err("%s: SMSM init failed\n", __func__);
1891 return -ENOMEM;
1892 }
1893
Eric Holmbergc8002902011-09-16 13:55:57 -06001894 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001895 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
1896 state_info = &smsm_states[n];
1897 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
1898 INIT_LIST_HEAD(&state_info->callbacks);
1899 }
Eric Holmbergc8002902011-09-16 13:55:57 -06001900 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001901
1902 return ret;
1903}
1904
1905static int smsm_init(void)
1906{
1907 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1908 int i;
1909 struct smsm_size_info_type *smsm_size_info;
1910
1911 i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
1912 if (i) {
1913 pr_err("%s: remote spinlock init failed %d\n", __func__, i);
1914 return i;
1915 }
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05301916 spinlocks_initialized = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001917
1918 smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
1919 sizeof(struct smsm_size_info_type));
1920 if (smsm_size_info) {
1921 SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
1922 SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
1923 }
1924
1925 if (!smsm_info.state) {
1926 smsm_info.state = smem_alloc2(ID_SHARED_STATE,
1927 SMSM_NUM_ENTRIES *
1928 sizeof(uint32_t));
1929
1930 if (smsm_info.state) {
1931 __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
1932 if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
1933 __raw_writel(0, \
1934 SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
1935 }
1936 }
1937
1938 if (!smsm_info.intr_mask) {
1939 smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
1940 SMSM_NUM_ENTRIES *
1941 SMSM_NUM_HOSTS *
1942 sizeof(uint32_t));
1943
1944 if (smsm_info.intr_mask)
1945 for (i = 0; i < SMSM_NUM_ENTRIES; i++)
1946 __raw_writel(0xffffffff,
1947 SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
1948 }
1949
1950 if (!smsm_info.intr_mux)
1951 smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
1952 SMSM_NUM_INTR_MUX *
1953 sizeof(uint32_t));
1954
1955 i = smsm_cb_init();
1956 if (i)
1957 return i;
1958
1959 wmb();
1960 return 0;
1961}
1962
1963void smsm_reset_modem(unsigned mode)
1964{
1965 if (mode == SMSM_SYSTEM_DOWNLOAD) {
1966 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
1967 } else if (mode == SMSM_MODEM_WAIT) {
1968 mode = SMSM_RESET | SMSM_MODEM_WAIT;
1969 } else { /* reset_mode is SMSM_RESET or default */
1970 mode = SMSM_RESET;
1971 }
1972
1973 smsm_change_state(SMSM_APPS_STATE, mode, mode);
1974}
1975EXPORT_SYMBOL(smsm_reset_modem);
1976
1977void smsm_reset_modem_cont(void)
1978{
1979 unsigned long flags;
1980 uint32_t state;
1981
1982 if (!smsm_info.state)
1983 return;
1984
1985 spin_lock_irqsave(&smem_lock, flags);
1986 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
1987 & ~SMSM_MODEM_WAIT;
1988 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
1989 wmb();
1990 spin_unlock_irqrestore(&smem_lock, flags);
1991}
1992EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001993
1994static irqreturn_t smsm_irq_handler(int irq, void *data)
1995{
1996 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001997
1998#if !defined(CONFIG_ARCH_MSM8X60)
1999 uint32_t mux_val;
2000 static uint32_t prev_smem_q6_apps_smsm;
2001
2002 if (irq == INT_ADSP_A11_SMSM) {
2003 if (!smsm_info.intr_mux)
2004 return IRQ_HANDLED;
2005 mux_val = __raw_readl(SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
2006 if (mux_val != prev_smem_q6_apps_smsm)
2007 prev_smem_q6_apps_smsm = mux_val;
2008 return IRQ_HANDLED;
2009 }
2010#else
2011 if (irq == INT_ADSP_A11_SMSM)
2012 return IRQ_HANDLED;
2013#endif
2014
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002015
2016 spin_lock_irqsave(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002017 if (!smsm_info.state) {
2018 SMSM_INFO("<SM NO STATE>\n");
2019 } else {
2020 unsigned old_apps, apps;
2021 unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002022
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002023 old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002024
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002025 SMSM_DBG("<SM %08x %08x>\n", apps, modm);
2026 if (apps & SMSM_RESET) {
2027 /* If we get an interrupt and the apps SMSM_RESET
2028 bit is already set, the modem is acking the
2029 app's reset ack. */
Angshuman Sarkaread67bd2011-09-21 20:13:12 +05302030 if (!cpu_is_msm8960())
2031 apps &= ~SMSM_RESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002032 /* Issue a fake irq to handle any
2033 * smd state changes during reset
2034 */
2035 smd_fake_irq_handler(0);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002036
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002037 /* queue modem restart notify chain */
2038 modem_queue_start_reset_notify();
2039
2040 } else if (modm & SMSM_RESET) {
Angshuman Sarkaread67bd2011-09-21 20:13:12 +05302041 if (!cpu_is_msm8960())
2042 apps |= SMSM_RESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002043
2044 pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
2045 modem_queue_start_reset_notify();
2046
2047 } else if (modm & SMSM_INIT) {
2048 if (!(apps & SMSM_INIT)) {
2049 apps |= SMSM_INIT;
2050 modem_queue_smsm_init_notify();
2051 }
2052
2053 if (modm & SMSM_SMDINIT)
2054 apps |= SMSM_SMDINIT;
2055 if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
2056 (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
2057 apps |= SMSM_RUN;
2058 } else if (modm & SMSM_SYSTEM_DOWNLOAD) {
2059 pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
2060 modem_queue_start_reset_notify();
2061 }
2062
2063 if (old_apps != apps) {
2064 SMSM_DBG("<SM %08x NOTIFY>\n", apps);
2065 __raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2066 do_smd_probe();
2067 notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
2068 }
2069
2070 schedule_work(&smsm_cb_work);
2071 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002072 spin_unlock_irqrestore(&smem_lock, flags);
2073 return IRQ_HANDLED;
2074}
2075
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002076int smsm_change_intr_mask(uint32_t smsm_entry,
2077 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002078{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002079 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002080 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002081
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002082 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2083 pr_err("smsm_change_state: Invalid entry %d\n",
2084 smsm_entry);
2085 return -EINVAL;
2086 }
2087
2088 if (!smsm_info.intr_mask) {
2089 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002090 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002091 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002092
2093 spin_lock_irqsave(&smem_lock, flags);
2094
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002095 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2096 new_mask = (old_mask & ~clear_mask) | set_mask;
2097 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002098
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002099 wmb();
2100 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002101
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002102 return 0;
2103}
2104EXPORT_SYMBOL(smsm_change_intr_mask);
2105
2106int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2107{
2108 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2109 pr_err("smsm_change_state: Invalid entry %d\n",
2110 smsm_entry);
2111 return -EINVAL;
2112 }
2113
2114 if (!smsm_info.intr_mask) {
2115 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2116 return -EIO;
2117 }
2118
2119 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2120 return 0;
2121}
2122EXPORT_SYMBOL(smsm_get_intr_mask);
2123
2124int smsm_change_state(uint32_t smsm_entry,
2125 uint32_t clear_mask, uint32_t set_mask)
2126{
2127 unsigned long flags;
2128 uint32_t old_state, new_state;
2129
2130 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2131 pr_err("smsm_change_state: Invalid entry %d",
2132 smsm_entry);
2133 return -EINVAL;
2134 }
2135
2136 if (!smsm_info.state) {
2137 pr_err("smsm_change_state <SM NO STATE>\n");
2138 return -EIO;
2139 }
2140 spin_lock_irqsave(&smem_lock, flags);
2141
2142 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2143 new_state = (old_state & ~clear_mask) | set_mask;
2144 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2145 SMSM_DBG("smsm_change_state %x\n", new_state);
2146 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002147
2148 spin_unlock_irqrestore(&smem_lock, flags);
2149
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002150 return 0;
2151}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002152EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002153
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002154uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002155{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002156 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002157
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002158 /* needs interface change to return error code */
2159 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2160 pr_err("smsm_change_state: Invalid entry %d",
2161 smsm_entry);
2162 return 0;
2163 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002164
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165 if (!smsm_info.state) {
2166 pr_err("smsm_get_state <SM NO STATE>\n");
2167 } else {
2168 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2169 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002170
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002171 return rv;
2172}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002173EXPORT_SYMBOL(smsm_get_state);
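/*
 * Illustrative sketch (not part of this driver): publishing an apps state
 * bit and sampling the modem's entry with the accessors above, using the
 * existing SMSM_APPS_STATE/SMSM_MODEM_STATE entries and the SMSM_RUN bit.
 * announce_run() is invented for the example.
 *
 *	static void announce_run(void)
 *	{
 *		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_RUN);
 *		if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_RUN)
 *			pr_info("modem already reports SMSM_RUN\n");
 *	}
 */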
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002174
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002175/**
2176 * Performs SMSM callback client notification.
2177 */
2178void notify_smsm_cb_clients_worker(struct work_struct *work)
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002179{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002180 struct smsm_state_cb_info *cb_info;
2181 struct smsm_state_info *state_info;
2182 int n;
2183 uint32_t new_state;
2184 uint32_t state_changes;
Brian Swetland03e00cd2009-07-01 17:58:37 -07002185
Eric Holmbergc8002902011-09-16 13:55:57 -06002186 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002187
2188 if (!smsm_states) {
2189 /* smsm not yet initialized */
Eric Holmbergc8002902011-09-16 13:55:57 -06002190 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002191 return;
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002192 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002193
2194 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2195 state_info = &smsm_states[n];
2196 new_state = __raw_readl(SMSM_STATE_ADDR(n));
2197
2198 if (new_state != state_info->last_value) {
2199 state_changes = state_info->last_value ^ new_state;
2200
2201 list_for_each_entry(cb_info,
2202 &state_info->callbacks, cb_list) {
2203
2204 if (cb_info->mask & state_changes)
2205 cb_info->notify(cb_info->data,
2206 state_info->last_value,
2207 new_state);
2208 }
2209 state_info->last_value = new_state;
2210 }
2211 }
2212
Eric Holmbergc8002902011-09-16 13:55:57 -06002213 mutex_unlock(&smsm_lock);
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002214}
2215
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002216
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002217/**
2218 * Registers callback for SMSM state notifications when the specified
2219 * bits change.
2220 *
2221 * @smsm_entry Processor entry to register against
2222 * @mask Bits whose changes should trigger the callback
2223 * @notify Notification function to register
2224 * @data Opaque data passed in to callback
2225 *
2226 * @returns Status code
2227 * <0 error code
2228 * 0 inserted new entry
2229 * 1 updated mask of existing entry
2230 */
2231int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
2232 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002233{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002234 struct smsm_state_cb_info *cb_info;
2235 struct smsm_state_cb_info *cb_found = 0;
2236 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002237
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002238 if (smsm_entry >= SMSM_NUM_ENTRIES)
2239 return -EINVAL;
2240
Eric Holmbergc8002902011-09-16 13:55:57 -06002241 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002242
2243 if (!smsm_states) {
2244 /* smsm not yet initialized */
2245 ret = -ENODEV;
2246 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002247 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002248
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002249 list_for_each_entry(cb_info,
2250 &smsm_states[smsm_entry].callbacks, cb_list) {
2251 if ((cb_info->notify == notify) &&
2252 (cb_info->data == data)) {
2253 cb_info->mask |= mask;
2254 cb_found = cb_info;
2255 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002256 break;
2257 }
2258 }
2259
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002260 if (!cb_found) {
2261 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
2262 GFP_ATOMIC);
2263 if (!cb_info) {
2264 ret = -ENOMEM;
2265 goto cleanup;
2266 }
2267
2268 cb_info->mask = mask;
2269 cb_info->notify = notify;
2270 cb_info->data = data;
2271 INIT_LIST_HEAD(&cb_info->cb_list);
2272 list_add_tail(&cb_info->cb_list,
2273 &smsm_states[smsm_entry].callbacks);
2274 }
2275
2276cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06002277 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002278 return ret;
2279}
2280EXPORT_SYMBOL(smsm_state_cb_register);
2281
2282
2283/**
2284 * Deregisters for SMSM state notifications for the specified bits.
2285 *
2286 * @smsm_entry Processor entry to deregister
2287 * @mask Bits to deregister (if result is 0, callback is removed)
2288 * @notify Notification function to deregister
2289 * @data Opaque data passed in to callback
2290 *
2291 * @returns Status code
2292 * <0 error code
2293 * 0 not found
2294 * 1 updated mask
2295 * 2 removed callback
2296 */
2297int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
2298 void (*notify)(void *, uint32_t, uint32_t), void *data)
2299{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002300 struct smsm_state_cb_info *cb_info;
2301 int ret = 0;
2302
2303 if (smsm_entry >= SMSM_NUM_ENTRIES)
2304 return -EINVAL;
2305
Eric Holmbergc8002902011-09-16 13:55:57 -06002306 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002307
2308 if (!smsm_states) {
2309 /* smsm not yet initialized */
Eric Holmbergc8002902011-09-16 13:55:57 -06002310 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002311 return -ENODEV;
2312 }
2313
2314 list_for_each_entry(cb_info,
2315 &smsm_states[smsm_entry].callbacks, cb_list) {
2316 if ((cb_info->notify == notify) &&
2317 (cb_info->data == data)) {
2318 cb_info->mask &= ~mask;
2319 ret = 1;
2320 if (!cb_info->mask) {
2321 /* no mask bits set, remove callback */
2322 list_del(&cb_info->cb_list);
2323 kfree(cb_info);
2324 ret = 2;
2325 }
2326 break;
2327 }
2328 }
2329
Eric Holmbergc8002902011-09-16 13:55:57 -06002330 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002331 return ret;
2332}
2333EXPORT_SYMBOL(smsm_state_cb_deregister);
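/*
 * Illustrative sketch (not part of this driver): watching the modem's
 * SMSM_RESET bit through the callback interface above and removing the
 * callback again on teardown. reset_watch() and the ctx pointer are
 * invented for the example.
 *
 *	static void reset_watch(void *data, uint32_t old_state,
 *			uint32_t new_state)
 *	{
 *		if (new_state & SMSM_RESET)
 *			pr_err("modem signalled SMSM_RESET\n");
 *	}
 *
 *	static int watch_modem_reset(void *ctx)
 *	{
 *		return smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
 *				reset_watch, ctx);
 *	}
 *
 *	static void unwatch_modem_reset(void *ctx)
 *	{
 *		smsm_state_cb_deregister(SMSM_MODEM_STATE, SMSM_RESET,
 *				reset_watch, ctx);
 *	}
 */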
2334
2335
2336int smd_core_init(void)
2337{
2338 int r;
2339 unsigned long flags = IRQF_TRIGGER_RISING;
2340 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002341
Brian Swetland37521a32009-07-01 18:30:47 -07002342 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002343 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002344 if (r < 0)
2345 return r;
2346 r = enable_irq_wake(INT_A9_M2A_0);
2347 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002348 pr_err("smd_core_init: "
2349 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002350
2351 r = request_irq(INT_A9_M2A_5, smsm_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002352 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002353 if (r < 0) {
2354 free_irq(INT_A9_M2A_0, 0);
2355 return r;
2356 }
2357 r = enable_irq_wake(INT_A9_M2A_5);
2358 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002359 pr_err("smd_core_init: "
2360 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002361
Brian Swetland37521a32009-07-01 18:30:47 -07002362#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002363#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
2364 flags |= IRQF_SHARED;
2365#endif
Brian Swetland37521a32009-07-01 18:30:47 -07002366 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002367 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07002368 if (r < 0) {
2369 free_irq(INT_A9_M2A_0, 0);
2370 free_irq(INT_A9_M2A_5, 0);
2371 return r;
2372 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002373
2374 r = request_irq(INT_ADSP_A11_SMSM, smsm_irq_handler,
2375 flags, "smsm_dev", smsm_irq_handler);
2376 if (r < 0) {
2377 free_irq(INT_A9_M2A_0, 0);
2378 free_irq(INT_A9_M2A_5, 0);
2379 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
2380 return r;
2381 }
2382
2383 r = enable_irq_wake(INT_ADSP_A11);
2384 if (r < 0)
2385 pr_err("smd_core_init: "
2386 "enable_irq_wake failed for INT_ADSP_A11\n");
2387
2388#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
2389 r = enable_irq_wake(INT_ADSP_A11_SMSM);
2390 if (r < 0)
2391 pr_err("smd_core_init: enable_irq_wake "
2392 "failed for INT_ADSP_A11_SMSM\n");
2393#endif
2394 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07002395#endif
2396
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002397#if defined(CONFIG_DSPS)
2398 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
2399 flags, "smd_dev", smd_dsps_irq_handler);
2400 if (r < 0) {
2401 free_irq(INT_A9_M2A_0, 0);
2402 free_irq(INT_A9_M2A_5, 0);
2403 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
2404 free_irq(INT_ADSP_A11_SMSM, smsm_irq_handler);
2405 return r;
2406 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002407
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002408 r = enable_irq_wake(INT_DSPS_A11);
2409 if (r < 0)
2410 pr_err("smd_core_init: "
2411 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002412#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002413
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002414#if defined(CONFIG_WCNSS)
2415 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
2416 flags, "smd_dev", smd_wcnss_irq_handler);
2417 if (r < 0) {
2418 free_irq(INT_A9_M2A_0, 0);
2419 free_irq(INT_A9_M2A_5, 0);
2420 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
2421 free_irq(INT_ADSP_A11_SMSM, smsm_irq_handler);
2422 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
2423 return r;
2424 }
2425
2426 r = enable_irq_wake(INT_WCNSS_A11);
2427 if (r < 0)
2428 pr_err("smd_core_init: "
2429 "enable_irq_wake failed for INT_WCNSS_A11\n");
2430
2431 r = request_irq(INT_WCNSS_A11_SMSM, smsm_irq_handler,
2432 flags, "smsm_dev", smsm_irq_handler);
2433 if (r < 0) {
2434 free_irq(INT_A9_M2A_0, 0);
2435 free_irq(INT_A9_M2A_5, 0);
2436 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
2437 free_irq(INT_ADSP_A11_SMSM, smsm_irq_handler);
2438 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
2439 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
2440 return r;
2441 }
2442
2443 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
2444 if (r < 0)
2445 pr_err("smd_core_init: "
2446 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
2447#endif
2448
Jeff Hugo6a8057c2011-08-16 13:47:12 -06002449#if defined(CONFIG_DSPS_SMSM)
2450 r = request_irq(INT_DSPS_A11_SMSM, smsm_irq_handler,
2451 flags, "smsm_dev", smsm_irq_handler);
2452 if (r < 0) {
2453 free_irq(INT_A9_M2A_0, 0);
2454 free_irq(INT_A9_M2A_5, 0);
2455 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
2456 free_irq(INT_ADSP_A11_SMSM, smsm_irq_handler);
2457 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
2458 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
2459 free_irq(INT_WCNSS_A11_SMSM, smsm_irq_handler);
2460 return r;
2461 }
2462
2463 r = enable_irq_wake(INT_DSPS_A11_SMSM);
2464 if (r < 0)
2465 pr_err("smd_core_init: "
2466 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
2467#endif
2468
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002469 /* we may have missed a signal while booting -- fake
2470 * an interrupt to make sure we process any existing
2471 * state
2472 */
2473 smsm_irq_handler(0, 0);
2474
2475 SMD_INFO("smd_core_init() done\n");
2476
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002477 return 0;
2478}
2479
Gregory Bean4416e9e2010-07-28 10:22:12 -07002480static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002481{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002482 SMD_INFO("smd probe\n");
Daniel Walker0aec66d2010-03-18 12:31:08 -07002483
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002484 INIT_WORK(&probe_work, smd_channel_probe_worker);
2485
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002486 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
2487 if (!channel_close_wq) {
2488 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
2489 return -ENOMEM;
2490 }
2491
2492 if (smsm_init()) {
2493 pr_err("smsm_init() failed\n");
2494 return -1;
2495 }
2496
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002497 if (smd_core_init()) {
2498 pr_err("smd_core_init() failed\n");
2499 return -1;
2500 }
2501
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002502 smd_initialized = 1;
2503
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002504 smd_alloc_loopback_channel();
2505
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002506 return 0;
2507}
2508
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002509static int restart_notifier_cb(struct notifier_block *this,
2510 unsigned long code,
2511 void *data);
2512
2513static struct restart_notifier_block restart_notifiers[] = {
2514 {SMSM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
2515 {SMSM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
2516};
2517
2518static int restart_notifier_cb(struct notifier_block *this,
2519 unsigned long code,
2520 void *data)
2521{
2522 if (code == SUBSYS_AFTER_SHUTDOWN) {
2523 struct restart_notifier_block *notifier;
2524
2525 notifier = container_of(this,
2526 struct restart_notifier_block, nb);
2527 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
2528 __func__, notifier->processor,
2529 notifier->name);
2530
2531 smd_channel_reset(notifier->processor);
2532 }
2533
2534 return NOTIFY_DONE;
2535}
2536
2537static __init int modem_restart_late_init(void)
2538{
2539 int i;
2540 void *handle;
2541 struct restart_notifier_block *nb;
2542
2543 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
2544 nb = &restart_notifiers[i];
2545 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
2546 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
2547 __func__, nb->name, handle);
2548 }
2549 return 0;
2550}
2551late_initcall(modem_restart_late_init);
2552
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002553static struct platform_driver msm_smd_driver = {
2554 .probe = msm_smd_probe,
2555 .driver = {
2556 .name = MODULE_NAME,
2557 .owner = THIS_MODULE,
2558 },
2559};
2560
2561static int __init msm_smd_init(void)
2562{
2563 return platform_driver_register(&msm_smd_driver);
2564}
2565
2566module_init(msm_smd_init);
2567
2568MODULE_DESCRIPTION("MSM Shared Memory Core");
2569MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
2570MODULE_LICENSE("GPL");