blob: c502bce0a01a40396bb446aeb47c676f9cf2dd8d [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070037#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070038#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070039#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070040#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053041#include <mach/socinfo.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070042
43#include "smd_private.h"
44#include "proc_comm.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070046
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060048 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060049 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070050#define CONFIG_QDSP6 1
51#endif
52
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060053#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
54 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070055#define CONFIG_DSPS 1
56#endif
57
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060058#if defined(CONFIG_ARCH_MSM8960) \
59 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070060#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060061#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070063
64#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065#define SMEM_VERSION 0x000B
66#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070067#define SMSM_SNAPSHOT_CNT 64
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070068
69uint32_t SMSM_NUM_ENTRIES = 8;
70uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070071
72enum {
73 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070074 MSM_SMSM_DEBUG = 1U << 1,
75 MSM_SMD_INFO = 1U << 2,
76 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070077 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070078};
79
80struct smsm_shared_info {
81 uint32_t *state;
82 uint32_t *intr_mask;
83 uint32_t *intr_mux;
84};
85
86static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060087static struct kfifo smsm_snapshot_fifo;
88static struct wake_lock smsm_snapshot_wakelock;
89static int smsm_snapshot_count;
90static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070091
92struct smsm_size_info_type {
93 uint32_t num_hosts;
94 uint32_t num_entries;
95 uint32_t reserved0;
96 uint32_t reserved1;
97};
98
99struct smsm_state_cb_info {
100 struct list_head cb_list;
101 uint32_t mask;
102 void *data;
103 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
104};
105
106struct smsm_state_info {
107 struct list_head callbacks;
108 uint32_t last_value;
109};
110
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530111struct interrupt_config_item {
112 /* must be initialized */
113 irqreturn_t (*irq_handler)(int req, void *data);
114 /* outgoing interrupt config (set from platform data) */
115 uint32_t out_bit_pos;
116 void __iomem *out_base;
117 uint32_t out_offset;
118};
119
120struct interrupt_config {
121 struct interrupt_config_item smd;
122 struct interrupt_config_item smsm;
123};
124
125static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700126static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530127static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700128static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530129static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700130static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530131static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700132static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530133static irqreturn_t smsm_irq_handler(int irq, void *data);
134
135static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
136 [SMD_MODEM] = {
137 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700138 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530139 },
140 [SMD_Q6] = {
141 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700142 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530143 },
144 [SMD_DSPS] = {
145 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700146 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530147 },
148 [SMD_WCNSS] = {
149 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700150 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530151 },
152};
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700153struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530154
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
156#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
157 entry * SMSM_NUM_HOSTS + host)
158#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
159
160/* Internal definitions which are not exported in some targets */
161enum {
162 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700163};
164
165static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700166module_param_named(debug_mask, msm_smd_debug_mask,
167 int, S_IRUGO | S_IWUSR | S_IWGRP);
168
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700169#if defined(CONFIG_MSM_SMD_DEBUG)
170#define SMD_DBG(x...) do { \
171 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
172 printk(KERN_DEBUG x); \
173 } while (0)
174
175#define SMSM_DBG(x...) do { \
176 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
177 printk(KERN_DEBUG x); \
178 } while (0)
179
180#define SMD_INFO(x...) do { \
181 if (msm_smd_debug_mask & MSM_SMD_INFO) \
182 printk(KERN_INFO x); \
183 } while (0)
184
185#define SMSM_INFO(x...) do { \
186 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
187 printk(KERN_INFO x); \
188 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700189#define SMx_POWER_INFO(x...) do { \
190 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
191 printk(KERN_INFO x); \
192 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700193#else
194#define SMD_DBG(x...) do { } while (0)
195#define SMSM_DBG(x...) do { } while (0)
196#define SMD_INFO(x...) do { } while (0)
197#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700198#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700199#endif
200
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700201static unsigned last_heap_free = 0xffffffff;
202
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700203static inline void smd_write_intr(unsigned int val,
204 const void __iomem *addr);
205
206#if defined(CONFIG_ARCH_MSM7X30)
207#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530208 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700209#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530210 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700211#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530212 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700213#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530214 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700215#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600216#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217#define MSM_TRIG_A2WCNSS_SMD_INT
218#define MSM_TRIG_A2WCNSS_SMSM_INT
219#elif defined(CONFIG_ARCH_MSM8X60)
220#define MSM_TRIG_A2M_SMD_INT \
221 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
222#define MSM_TRIG_A2Q6_SMD_INT \
223 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
224#define MSM_TRIG_A2M_SMSM_INT \
225 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
226#define MSM_TRIG_A2Q6_SMSM_INT \
227 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
228#define MSM_TRIG_A2DSPS_SMD_INT \
229 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600230#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700231#define MSM_TRIG_A2WCNSS_SMD_INT
232#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600233#elif defined(CONFIG_ARCH_MSM9615)
234#define MSM_TRIG_A2M_SMD_INT \
235 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
236#define MSM_TRIG_A2Q6_SMD_INT \
237 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
238#define MSM_TRIG_A2M_SMSM_INT \
239 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
240#define MSM_TRIG_A2Q6_SMSM_INT \
241 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
242#define MSM_TRIG_A2DSPS_SMD_INT
243#define MSM_TRIG_A2DSPS_SMSM_INT
244#define MSM_TRIG_A2WCNSS_SMD_INT
245#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700246#elif defined(CONFIG_ARCH_FSM9XXX)
247#define MSM_TRIG_A2Q6_SMD_INT \
248 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
249#define MSM_TRIG_A2Q6_SMSM_INT \
250 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
251#define MSM_TRIG_A2M_SMD_INT \
252 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
253#define MSM_TRIG_A2M_SMSM_INT \
254 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
255#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600256#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700257#define MSM_TRIG_A2WCNSS_SMD_INT
258#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700259#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260#define MSM_TRIG_A2M_SMD_INT \
261 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700262#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700263#define MSM_TRIG_A2M_SMSM_INT \
264 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700265#define MSM_TRIG_A2Q6_SMSM_INT
266#define MSM_TRIG_A2DSPS_SMD_INT
267#define MSM_TRIG_A2DSPS_SMSM_INT
268#define MSM_TRIG_A2WCNSS_SMD_INT
269#define MSM_TRIG_A2WCNSS_SMSM_INT
270#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
271#define MSM_TRIG_A2M_SMD_INT \
272 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
273#define MSM_TRIG_A2Q6_SMD_INT
274#define MSM_TRIG_A2M_SMSM_INT \
275 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
276#define MSM_TRIG_A2Q6_SMSM_INT
277#define MSM_TRIG_A2DSPS_SMD_INT
278#define MSM_TRIG_A2DSPS_SMSM_INT
279#define MSM_TRIG_A2WCNSS_SMD_INT
280#define MSM_TRIG_A2WCNSS_SMSM_INT
281#else /* use platform device / device tree configuration */
282#define MSM_TRIG_A2M_SMD_INT
283#define MSM_TRIG_A2Q6_SMD_INT
284#define MSM_TRIG_A2M_SMSM_INT
285#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700286#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600287#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288#define MSM_TRIG_A2WCNSS_SMD_INT
289#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700290#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700291
Jeff Hugoee40b152012-02-09 17:39:47 -0700292/*
293 * stub out legacy macros if they are not being used so that the legacy
294 * code compiles even though it is not used
295 *
296 * these definitions should not be used in active code and will cause
297 * an early failure
298 */
299#ifndef INT_A9_M2A_0
300#define INT_A9_M2A_0 -1
301#endif
302#ifndef INT_A9_M2A_5
303#define INT_A9_M2A_5 -1
304#endif
305#ifndef INT_ADSP_A11
306#define INT_ADSP_A11 -1
307#endif
308#ifndef INT_ADSP_A11_SMSM
309#define INT_ADSP_A11_SMSM -1
310#endif
311#ifndef INT_DSPS_A11
312#define INT_DSPS_A11 -1
313#endif
314#ifndef INT_DSPS_A11_SMSM
315#define INT_DSPS_A11_SMSM -1
316#endif
317#ifndef INT_WCNSS_A11
318#define INT_WCNSS_A11 -1
319#endif
320#ifndef INT_WCNSS_A11_SMSM
321#define INT_WCNSS_A11_SMSM -1
322#endif
323
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700324#define SMD_LOOPBACK_CID 100
325
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600326#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
327static remote_spinlock_t remote_spinlock;
328
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700329static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700330static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -0700331static void smsm_cb_snapshot(void);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700332
333static void notify_smsm_cb_clients_worker(struct work_struct *work);
334static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600335static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700336static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530337static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600338static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
339static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
340static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700341
342static inline void smd_write_intr(unsigned int val,
343 const void __iomem *addr)
344{
345 wmb();
346 __raw_writel(val, addr);
347}
348
#ifdef CONFIG_WCNSS
/*
 * Wake the RIVA (WCNSS) subsystem before signalling it.
 *
 * Workaround hack for a RIVA v1 hardware bug: toggle GPIO 40 (via a
 * direct TLMM register write) to wake RIVA from power collapse.
 * Applied only when the SoC major version is 1; later silicon does not
 * need it.  Not to be sent to customers.
 */
static inline void wakeup_v1_riva(void)
{
	if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
		/* NOTE(review): 0x1284 is presumably the GPIO 40 TLMM
		 * control/output register -- confirm against the TLMM map */
		__raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
		__raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
	}
	/* end workaround */
}
#else
/* No RIVA on this target -- nothing to wake. */
static inline void wakeup_v1_riva(void) {}
#endif
366
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530367static inline void notify_modem_smd(void)
368{
369 static const struct interrupt_config_item *intr
370 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700371 if (intr->out_base) {
372 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530373 smd_write_intr(intr->out_bit_pos,
374 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700375 } else {
376 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530377 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700378 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530379}
380
381static inline void notify_dsp_smd(void)
382{
383 static const struct interrupt_config_item *intr
384 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700385 if (intr->out_base) {
386 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530387 smd_write_intr(intr->out_bit_pos,
388 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700389 } else {
390 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530391 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700392 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530393}
394
395static inline void notify_dsps_smd(void)
396{
397 static const struct interrupt_config_item *intr
398 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700399 if (intr->out_base) {
400 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530401 smd_write_intr(intr->out_bit_pos,
402 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700403 } else {
404 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530405 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700406 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530407}
408
409static inline void notify_wcnss_smd(void)
410{
411 static const struct interrupt_config_item *intr
412 = &private_intr_config[SMD_WCNSS].smd;
413 wakeup_v1_riva();
414
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700415 if (intr->out_base) {
416 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530417 smd_write_intr(intr->out_bit_pos,
418 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700419 } else {
420 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530421 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700422 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530423}
424
425static inline void notify_modem_smsm(void)
426{
427 static const struct interrupt_config_item *intr
428 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700429 if (intr->out_base) {
430 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530431 smd_write_intr(intr->out_bit_pos,
432 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700433 } else {
434 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530435 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700436 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530437}
438
439static inline void notify_dsp_smsm(void)
440{
441 static const struct interrupt_config_item *intr
442 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700443 if (intr->out_base) {
444 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530445 smd_write_intr(intr->out_bit_pos,
446 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700447 } else {
448 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530449 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700450 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530451}
452
453static inline void notify_dsps_smsm(void)
454{
455 static const struct interrupt_config_item *intr
456 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700457 if (intr->out_base) {
458 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530459 smd_write_intr(intr->out_bit_pos,
460 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700461 } else {
462 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530463 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700464 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530465}
466
467static inline void notify_wcnss_smsm(void)
468{
469 static const struct interrupt_config_item *intr
470 = &private_intr_config[SMD_WCNSS].smsm;
471 wakeup_v1_riva();
472
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700473 if (intr->out_base) {
474 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530475 smd_write_intr(intr->out_bit_pos,
476 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700477 } else {
478 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530479 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700480 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530481}
482
/*
 * Fan out an SMSM state change to every remote processor that has
 * subscribed to any bit in @notify_mask, then snapshot the new state
 * for local callback delivery.
 *
 * @smsm_entry:  index of the SMSM state entry that changed
 * @notify_mask: bits of that entry which changed
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		/* 8x50 signals Q6 through a shared-memory mux counter
		 * rather than (only) a direct interrupt: bump the
		 * counter before triggering. */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/* queue the new state for local SMSM callback clients */
	smsm_cb_snapshot();
}
521
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700522void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700523{
524 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700525 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700526
527 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
528 if (x != 0) {
529 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700530 SMD_INFO("smem: DIAG '%s'\n", x);
531 }
532
533 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
534 if (x != 0) {
535 x[size - 1] = 0;
536 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700537 }
538}
539
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700540
/*
 * React to a detected modem crash: log it, dump the shared-memory
 * diagnostic buffers, then spin forever -- the modem or the watchdog
 * is expected to reset the whole system.  This function never returns.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
555
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700556int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700557{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700558 /* if the modem's not ready yet, we have to hope for the best */
559 if (!smsm_info.state)
560 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700561
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700562 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700563 handle_modem_crash();
564 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700565 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700566 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700567}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700568EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700569
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700570/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700571 * irq handler and code that mutates the channel
572 * list or fiddles with channel state
573 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700574static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700575DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700576
577/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700578 * operations to avoid races while creating or
579 * destroying smd_channel structures
580 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700581static DEFINE_MUTEX(smd_creation_mutex);
582
583static int smd_initialized;
584
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700585struct smd_shared_v1 {
586 struct smd_half_channel ch0;
587 unsigned char data0[SMD_BUF_SIZE];
588 struct smd_half_channel ch1;
589 unsigned char data1[SMD_BUF_SIZE];
590};
591
592struct smd_shared_v2 {
593 struct smd_half_channel ch0;
594 struct smd_half_channel ch1;
595};
596
/*
 * Per-channel state for one SMD logical channel.  The half-channel
 * pointers and data FIFOs live in shared memory; everything else is
 * local bookkeeping.  The function pointers select the streaming vs
 * packet implementation chosen at allocation time.
 */
struct smd_channel {
	volatile struct smd_half_channel *send;	/* our TX half (shared mem) */
	volatile struct smd_half_channel *recv;	/* remote's TX half (shared mem) */
	unsigned char *send_data;		/* TX FIFO in shared memory */
	unsigned char *recv_data;		/* RX FIFO in shared memory */
	unsigned fifo_size;			/* FIFO size in bytes */
	unsigned fifo_mask;			/* fifo_size - 1; assumes power of 2 */
	struct list_head ch_list;		/* link on the per-edge channel list */

	unsigned current_packet;		/* bytes left in packet being read */
	unsigned n;				/* channel id from the alloc table */
	void *priv;				/* client cookie passed to notify() */
	void (*notify)(void *priv, unsigned flags);	/* client event callback */

	/* read/write ops; user_buf selects copy_{to,from}_user vs memcpy */
	int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
	int (*write)(smd_channel_t *ch, const void *data, int len,
			int user_buf);
	int (*read_avail)(smd_channel_t *ch);
	int (*write_avail)(smd_channel_t *ch);
	int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
			int user_buf);

	void (*update_state)(smd_channel_t *ch);	/* refresh cached state */
	unsigned last_state;			/* last observed remote state */
	void (*notify_other_cpu)(void);		/* raise irq toward the remote */

	char name[20];				/* channel name from alloc table */
	struct platform_device pdev;		/* device registered for probing */
	unsigned type;				/* edge type (SMD_APPS_MODEM, ...) */

	int pending_pkt_sz;			/* header-announced size of next pkt */

	char is_pkt_ch;				/* nonzero for packet-mode channels */
};
631
632struct edge_to_pid {
633 uint32_t local_pid;
634 uint32_t remote_pid;
Eric Holmberg17992c12012-02-29 12:54:44 -0700635 char subsys_name[SMD_MAX_CH_NAME_LEN];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700636};
637
/**
 * Maps edge type to local and remote processor ID's.
 *
 * Entries with a subsys_name tie the remote processor to a restart
 * subsystem for smd_edge_to_subsystem()/smd_pid_to_subsystem();
 * entries without one (empty string from zero-init) yield NULL from
 * those lookups -- presumably edges not involving the apps processor
 * need no subsystem mapping (TODO confirm).
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
};
658
659struct restart_notifier_block {
660 unsigned processor;
661 char *name;
662 struct notifier_block nb;
663};
664
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -0600665static int disable_smsm_reset_handshake;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700666static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};
667
668static LIST_HEAD(smd_ch_closed_list);
669static LIST_HEAD(smd_ch_closing_list);
670static LIST_HEAD(smd_ch_to_close_list);
671static LIST_HEAD(smd_ch_list_modem);
672static LIST_HEAD(smd_ch_list_dsp);
673static LIST_HEAD(smd_ch_list_dsps);
674static LIST_HEAD(smd_ch_list_wcnss);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700675
676static unsigned char smd_ch_allocated[64];
677static struct work_struct probe_work;
678
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700679static void finalize_channel_close_fn(struct work_struct *work);
680static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
681static struct workqueue_struct *channel_close_wq;
682
683static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);
684
685/* on smp systems, the probe might get called from multiple cores,
686 hence use a lock */
687static DEFINE_MUTEX(smd_probe_lock);
688
689static void smd_channel_probe_worker(struct work_struct *work)
690{
691 struct smd_alloc_elm *shared;
692 unsigned n;
693 uint32_t type;
694
695 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
696
697 if (!shared) {
698 pr_err("%s: allocation table not initialized\n", __func__);
699 return;
700 }
701
702 mutex_lock(&smd_probe_lock);
703 for (n = 0; n < 64; n++) {
704 if (smd_ch_allocated[n])
705 continue;
706
707 /* channel should be allocated only if APPS
708 processor is involved */
709 type = SMD_CHANNEL_TYPE(shared[n].type);
710 if ((type != SMD_APPS_MODEM) && (type != SMD_APPS_QDSP) &&
711 (type != SMD_APPS_DSPS) && (type != SMD_APPS_WCNSS))
712 continue;
713 if (!shared[n].ref_count)
714 continue;
715 if (!shared[n].name[0])
716 continue;
717
718 if (!smd_alloc_channel(&shared[n]))
719 smd_ch_allocated[n] = 1;
720 else
721 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
722 }
723 mutex_unlock(&smd_probe_lock);
724}
725
726/**
727 * Lookup processor ID and determine if it belongs to the proved edge
728 * type.
729 *
730 * @shared2: Pointer to v2 shared channel structure
731 * @type: Edge type
732 * @pid: Processor ID of processor on edge
733 * @local_ch: Channel that belongs to processor @pid
734 * @remote_ch: Other side of edge contained @pid
735 *
736 * Returns 0 for not on edge, 1 for found on edge
737 */
738static int pid_is_on_edge(struct smd_shared_v2 *shared2,
739 uint32_t type, uint32_t pid,
740 struct smd_half_channel **local_ch,
741 struct smd_half_channel **remote_ch
742 )
743{
744 int ret = 0;
745 struct edge_to_pid *edge;
746
747 *local_ch = 0;
748 *remote_ch = 0;
749
750 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
751 return 0;
752
753 edge = &edge_to_pids[type];
754 if (edge->local_pid != edge->remote_pid) {
755 if (pid == edge->local_pid) {
756 *local_ch = &shared2->ch0;
757 *remote_ch = &shared2->ch1;
758 ret = 1;
759 } else if (pid == edge->remote_pid) {
760 *local_ch = &shared2->ch1;
761 *remote_ch = &shared2->ch0;
762 ret = 1;
763 }
764 }
765
766 return ret;
767}
768
Eric Holmberg17992c12012-02-29 12:54:44 -0700769/*
770 * Returns a pointer to the subsystem name or NULL if no
771 * subsystem name is available.
772 *
773 * @type - Edge definition
774 */
775const char *smd_edge_to_subsystem(uint32_t type)
776{
777 const char *subsys = NULL;
778
779 if (type < ARRAY_SIZE(edge_to_pids)) {
780 subsys = edge_to_pids[type].subsys_name;
781 if (subsys[0] == 0x0)
782 subsys = NULL;
783 }
784 return subsys;
785}
786EXPORT_SYMBOL(smd_edge_to_subsystem);
787
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700788/*
789 * Returns a pointer to the subsystem name given the
790 * remote processor ID.
791 *
792 * @pid Remote processor ID
793 * @returns Pointer to subsystem name or NULL if not found
794 */
795const char *smd_pid_to_subsystem(uint32_t pid)
796{
797 const char *subsys = NULL;
798 int i;
799
800 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
801 if (pid == edge_to_pids[i].remote_pid &&
802 edge_to_pids[i].subsys_name[0] != 0x0
803 ) {
804 subsys = edge_to_pids[i].subsys_name;
805 break;
806 }
807 }
808
809 return subsys;
810}
811EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700812
Eric Holmberg2a563c32011-10-05 14:51:43 -0600813static void smd_reset_edge(struct smd_half_channel *ch, unsigned new_state)
814{
815 if (ch->state != SMD_SS_CLOSED) {
816 ch->state = new_state;
817 ch->fDSR = 0;
818 ch->fCTS = 0;
819 ch->fCD = 0;
820 ch->fSTATE = 1;
821 }
822}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700823
/*
 * Walk the channel allocation table and force the half-channels owned
 * by processor @pid into @new_state.
 *
 * @shared: channel allocation table (SMD_CHANNELS entries)
 * @new_state: state to force (smd_channel_reset() uses SMD_SS_CLOSING,
 *             then SMD_SS_CLOSED)
 * @pid: processor ID of the restarting subsystem
 *
 * Called by smd_channel_reset() with smd_probe_lock and smd_lock held.
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	struct smd_shared_v2 *shared2;
	uint32_t type;
	struct smd_half_channel *local_ch;
	struct smd_half_channel *remote_ch;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip table slots that were never activated or named */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		/* smem_alloc returns the existing entry if present */
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);
	}
}
857
858
/*
 * Reset all SMD/SMSM state shared with a restarting processor.
 *
 * @restart_pid: processor ID of the subsystem being restarted
 *
 * Sequence: release any remote spinlocks the dead processor may have
 * held, clear and re-handshake its SMSM state, then walk every SMD
 * edge it sits on twice - first forcing the half-channels to CLOSING,
 * then to CLOSED - notifying all processors after each pass so local
 * clients observe a normal close.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors; smsm_irq_handler(0, 0) replays
		 * the state locally before the remote doorbells ring */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors; mb() makes the state writes visible
	 * before the interrupts fire */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
926
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700927/* how many bytes are available for reading */
928static int smd_stream_read_avail(struct smd_channel *ch)
929{
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700930 return (ch->recv->head - ch->recv->tail) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700931}
932
933/* how many bytes we are free to write */
934static int smd_stream_write_avail(struct smd_channel *ch)
935{
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700936 return ch->fifo_mask -
937 ((ch->send->head - ch->send->tail) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700938}
939
940static int smd_packet_read_avail(struct smd_channel *ch)
941{
942 if (ch->current_packet) {
943 int n = smd_stream_read_avail(ch);
944 if (n > ch->current_packet)
945 n = ch->current_packet;
946 return n;
947 } else {
948 return 0;
949 }
950}
951
952static int smd_packet_write_avail(struct smd_channel *ch)
953{
954 int n = smd_stream_write_avail(ch);
955 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
956}
957
958static int ch_is_open(struct smd_channel *ch)
959{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700960 return (ch->recv->state == SMD_SS_OPENED ||
961 ch->recv->state == SMD_SS_FLUSHING)
962 && (ch->send->state == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700963}
964
965/* provide a pointer and length to readable data in the fifo */
966static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
967{
968 unsigned head = ch->recv->head;
969 unsigned tail = ch->recv->tail;
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700970 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700971
972 if (tail <= head)
973 return head - tail;
974 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700975 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700976}
977
/* nonzero when the remote side asked us not to interrupt it after reads */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->recv->fBLOCKREADINTR;
}
982
/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->recv->tail = (ch->recv->tail + count) & ch->fifo_mask;
	wmb(); /* publish the new tail before raising fTAIL for the remote */
	ch->send->fTAIL = 1;
}
991
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * Copies to user space when user_buf is set, kernel memory otherwise.
 * Returns the number of bytes consumed from the fifo.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	/* consume the fifo in contiguous runs until len bytes are
	 * copied or the fifo drains */
	while (len > 0) {
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				/* NOTE(review): on a partial copy_to_user the
				 * fifo is still advanced by n below, so the
				 * uncopied bytes are dropped - confirm callers
				 * tolerate this */
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1032
/* per-read bookkeeping hook; a no-op for stream channels */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1037
/*
 * If we are between packets, pull the next packet header off the
 * stream and latch its payload length into ch->current_packet.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		/* hdr[0] carries the payload length; the loop re-runs
		 * so zero-length packets are skipped */
		ch->current_packet = hdr[0];
	}
}
1057
/* provide a pointer and length to next free space in the fifo */
static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->send->head;
	unsigned tail = ch->send->tail;
	*ptr = (void *) (ch->send_data + head);

	if (head < tail) {
		/* one byte is always left unused so a full fifo can be
		 * told apart from an empty one */
		return tail - head - 1;
	} else {
		if (tail == 0)
			return ch->fifo_size - head - 1;
		else
			return ch->fifo_size - head;
	}
}
1074
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->send->head = (ch->send->head + count) & ch->fifo_mask;
	wmb(); /* publish the new head before raising fHEAD for the remote */
	ch->send->fHEAD = 1;
}
1085
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001086static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001087{
1088 if (n == SMD_SS_OPENED) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001089 ch->send->fDSR = 1;
1090 ch->send->fCTS = 1;
1091 ch->send->fCD = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001092 } else {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001093 ch->send->fDSR = 0;
1094 ch->send->fCTS = 0;
1095 ch->send->fCD = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001096 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001097 ch->send->state = n;
1098 ch->send->fSTATE = 1;
1099 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001100}
1101
1102static void do_smd_probe(void)
1103{
1104 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1105 if (shared->heap_info.free_offset != last_heap_free) {
1106 last_heap_free = shared->heap_info.free_offset;
1107 schedule_work(&probe_work);
1108 }
1109}
1110
/*
 * React to a remote half-channel state transition (last -> next) by
 * advancing our local send-side state machine to match and notifying
 * the client.  Both callers hold smd_lock.
 */
static void smd_state_change(struct smd_channel *ch,
		unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote is (re)opening: reset fifo indices and follow */
		if (ch->send->state == SMD_SS_CLOSING ||
		    ch->send->state == SMD_SS_CLOSED) {
			ch->recv->tail = 0;
			ch->send->head = 0;
			ch->send->fBLOCKREADINTR = 0;
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* handshake complete: report OPEN to the client */
		if (ch->send->state == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed first: drop partial packet state and
		 * report CLOSE to the client */
		if (ch->send->state == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides closed: hand off to the close workqueue */
		if (ch->send->state == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
				&smd_ch_to_close_list);
			queue_work(channel_close_wq,
				&finalize_channel_close_work);
		}
		break;
	}
}
1156
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001157static void handle_smd_irq_closing_list(void)
1158{
1159 unsigned long flags;
1160 struct smd_channel *ch;
1161 struct smd_channel *index;
1162 unsigned tmp;
1163
1164 spin_lock_irqsave(&smd_lock, flags);
1165 list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
1166 if (ch->recv->fSTATE)
1167 ch->recv->fSTATE = 0;
1168 tmp = ch->recv->state;
1169 if (tmp != ch->last_state)
1170 smd_state_change(ch, ch->last_state, tmp);
1171 }
1172 spin_unlock_irqrestore(&smd_lock, flags);
1173}
1174
/*
 * Service every open channel on one edge list after an incoming SMD
 * interrupt (or a fake poll): collect-and-clear the remote event
 * flags, run any state transition, then deliver DATA/STATUS events.
 *
 * @list: channel list for the edge (e.g. smd_ch_list_modem)
 * @notify: doorbell function for the remote processor on that edge
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* ch_flags bits: 1 = fHEAD (new data),
			 * 2 = fTAIL (space freed), 4 = fSTATE (state) */
			if (ch->recv->fHEAD) {
				ch->recv->fHEAD = 0;
				ch_flags |= 1;
			}
			if (ch->recv->fTAIL) {
				ch->recv->fTAIL = 0;
				ch_flags |= 2;
			}
			if (ch->recv->fSTATE) {
				ch->recv->fSTATE = 0;
				ch_flags |= 4;
			}
		}
		tmp = ch->recv->state;
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* a pure state-flag event with no actual transition is
		 * reported as a STATUS change only */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1225
/* IRQ raised by the modem: service the apps<->modem SMD edge */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1234
/* IRQ raised by the LPASS/Q6 DSP: service the apps<->qdsp SMD edge */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001243
/* IRQ raised by the sensors DSPS: service the apps<->dsps SMD edge */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001252
/* IRQ raised by WCNSS: service the apps<->wcnss SMD edge */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1261
/* tasklet body: poll every edge as if its interrupt had fired */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
}
1270
1271static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1272
Brian Swetland37521a32009-07-01 18:30:47 -07001273static inline int smd_need_int(struct smd_channel *ch)
1274{
1275 if (ch_is_open(ch)) {
1276 if (ch->recv->fHEAD || ch->recv->fTAIL || ch->recv->fSTATE)
1277 return 1;
1278 if (ch->recv->state != ch->last_state)
1279 return 1;
1280 }
1281 return 0;
1282}
1283
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001284void smd_sleep_exit(void)
1285{
1286 unsigned long flags;
1287 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001288 int need_int = 0;
1289
1290 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001291 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1292 if (smd_need_int(ch)) {
1293 need_int = 1;
1294 break;
1295 }
1296 }
1297 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1298 if (smd_need_int(ch)) {
1299 need_int = 1;
1300 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001301 }
1302 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001303 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1304 if (smd_need_int(ch)) {
1305 need_int = 1;
1306 break;
1307 }
1308 }
1309 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1310 if (smd_need_int(ch)) {
1311 need_int = 1;
1312 break;
1313 }
1314 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001315 spin_unlock_irqrestore(&smd_lock, flags);
1316 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001317
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001318 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001319 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001320 tasklet_schedule(&smd_fake_irq_tasklet);
1321 }
1322}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001323EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001324
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001325static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001326{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001327 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1328 return 0;
1329 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001330 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001331
1332 /* for cases where xfer type is 0 */
1333 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001334 return 0;
1335
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001336 /* for cases where xfer type is 0 */
1337 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1338 return 0;
1339
1340 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001341 return 1;
1342 else
1343 return 0;
1344}
1345
/*
 * Queue up to len bytes on a stream channel, copying from user space
 * when user_buf is set.  Returns the number of bytes actually queued
 * (may be short if the fifo fills), 0 for len == 0, or -EINVAL.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		/* channel dropped mid-write: report zero bytes written */
		if (!ch_is_open(ch)) {
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			/* NOTE(review): a partial copy_from_user still
			 * advances the write pointer by xfer below, so
			 * uninitialized bytes get queued - confirm this
			 * is acceptable to consumers */
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* ring the remote doorbell only if something was queued */
	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1391
/*
 * Write one complete packet: a 5-word header (hdr[0] = payload
 * length) followed by the payload.  Fails with -ENOMEM unless the
 * whole packet fits in the fifo up front, so the two stream writes
 * below normally cannot come up short.
 */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	int ret;
	unsigned hdr[5];

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		/* NOTE(review): a short header write should only occur if
		 * the channel closed mid-write; the stream framing is then
		 * unrecoverable anyway */
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}


	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}
1428
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001429static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001430{
1431 int r;
1432
1433 if (len < 0)
1434 return -EINVAL;
1435
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001436 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001437 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001438 if (!read_intr_blocked(ch))
1439 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001440
1441 return r;
1442}
1443
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001444static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001445{
1446 unsigned long flags;
1447 int r;
1448
1449 if (len < 0)
1450 return -EINVAL;
1451
1452 if (len > ch->current_packet)
1453 len = ch->current_packet;
1454
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001455 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001456 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001457 if (!read_intr_blocked(ch))
1458 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001459
1460 spin_lock_irqsave(&smd_lock, flags);
1461 ch->current_packet -= r;
1462 update_packet_state(ch);
1463 spin_unlock_irqrestore(&smd_lock, flags);
1464
1465 return r;
1466}
1467
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001468static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1469 int user_buf)
1470{
1471 int r;
1472
1473 if (len < 0)
1474 return -EINVAL;
1475
1476 if (len > ch->current_packet)
1477 len = ch->current_packet;
1478
1479 r = ch_read(ch, data, len, user_buf);
1480 if (r > 0)
1481 if (!read_intr_blocked(ch))
1482 ch->notify_other_cpu();
1483
1484 ch->current_packet -= r;
1485 update_packet_state(ch);
1486
1487 return r;
1488}
1489
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301490#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001491static int smd_alloc_v2(struct smd_channel *ch)
1492{
1493 struct smd_shared_v2 *shared2;
1494 void *buffer;
1495 unsigned buffer_sz;
1496
1497 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n, sizeof(*shared2));
1498 if (!shared2) {
1499 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301500 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001501 }
1502 buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
1503 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301504 SMD_INFO("smem_get_entry failed\n");
1505 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001506 }
1507
1508 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301509 if (buffer_sz & (buffer_sz - 1)) {
1510 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1511 return -EINVAL;
1512 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001513 buffer_sz /= 2;
1514 ch->send = &shared2->ch0;
1515 ch->recv = &shared2->ch1;
1516 ch->send_data = buffer;
1517 ch->recv_data = buffer + buffer_sz;
1518 ch->fifo_size = buffer_sz;
1519 return 0;
1520}
1521
/* v1 shared layout is not supported when PKG3/PKG4 is enabled */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}
1526
1527#else /* define v1 for older targets */
/* v2 shared layout is unavailable on older targets */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}
1532
1533static int smd_alloc_v1(struct smd_channel *ch)
1534{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001535 struct smd_shared_v1 *shared1;
1536 shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
1537 if (!shared1) {
1538 pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301539 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001540 }
1541 ch->send = &shared1->ch0;
1542 ch->recv = &shared1->ch1;
1543 ch->send_data = shared1->data0;
1544 ch->recv_data = shared1->data1;
1545 ch->fifo_size = SMD_BUF_SIZE;
1546 return 0;
1547}
1548
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301549#endif
1550
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001551static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001552{
1553 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001554
1555 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1556 if (ch == 0) {
1557 pr_err("smd_alloc_channel() out of memory\n");
Brian Swetland34f719b2009-10-30 16:22:05 -07001558 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001559 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001560 ch->n = alloc_elm->cid;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001561
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001562 if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001563 kfree(ch);
Brian Swetland34f719b2009-10-30 16:22:05 -07001564 return -1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001565 }
1566
1567 ch->fifo_mask = ch->fifo_size - 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001568 ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001569
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001570 if (ch->type == SMD_APPS_MODEM)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001571 ch->notify_other_cpu = notify_modem_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001572 else if (ch->type == SMD_APPS_QDSP)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001573 ch->notify_other_cpu = notify_dsp_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001574 else if (ch->type == SMD_APPS_DSPS)
1575 ch->notify_other_cpu = notify_dsps_smd;
1576 else
1577 ch->notify_other_cpu = notify_wcnss_smd;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001578
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001579 if (smd_is_packet(alloc_elm)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001580 ch->read = smd_packet_read;
1581 ch->write = smd_packet_write;
1582 ch->read_avail = smd_packet_read_avail;
1583 ch->write_avail = smd_packet_write_avail;
1584 ch->update_state = update_packet_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001585 ch->read_from_cb = smd_packet_read_from_cb;
1586 ch->is_pkt_ch = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001587 } else {
1588 ch->read = smd_stream_read;
1589 ch->write = smd_stream_write;
1590 ch->read_avail = smd_stream_read_avail;
1591 ch->write_avail = smd_stream_write_avail;
1592 ch->update_state = update_stream_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001593 ch->read_from_cb = smd_stream_read;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001594 }
1595
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001596 memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
1597 ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001598
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001599 ch->pdev.name = ch->name;
1600 ch->pdev.id = ch->type;
1601
1602 SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
1603 ch->name, ch->n);
1604
1605 mutex_lock(&smd_creation_mutex);
1606 list_add(&ch->ch_list, &smd_ch_closed_list);
1607 mutex_unlock(&smd_creation_mutex);
1608
1609 platform_device_register(&ch->pdev);
1610 if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
1611 /* create a platform driver to be used by smd_tty driver
1612 * so that it can access the loopback port
1613 */
1614 loopback_tty_pdev.id = ch->type;
1615 platform_device_register(&loopback_tty_pdev);
1616 }
1617 return 0;
1618}
1619
1620static inline void notify_loopback_smd(void)
1621{
1622 unsigned long flags;
1623 struct smd_channel *ch;
1624
1625 spin_lock_irqsave(&smd_lock, flags);
1626 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1627 ch->notify(ch->priv, SMD_EVENT_DATA);
1628 }
1629 spin_unlock_irqrestore(&smd_lock, flags);
1630}
1631
1632static int smd_alloc_loopback_channel(void)
1633{
1634 static struct smd_half_channel smd_loopback_ctl;
1635 static char smd_loopback_data[SMD_BUF_SIZE];
1636 struct smd_channel *ch;
1637
1638 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1639 if (ch == 0) {
1640 pr_err("%s: out of memory\n", __func__);
1641 return -1;
1642 }
1643 ch->n = SMD_LOOPBACK_CID;
1644
1645 ch->send = &smd_loopback_ctl;
1646 ch->recv = &smd_loopback_ctl;
1647 ch->send_data = smd_loopback_data;
1648 ch->recv_data = smd_loopback_data;
1649 ch->fifo_size = SMD_BUF_SIZE;
1650
1651 ch->fifo_mask = ch->fifo_size - 1;
1652 ch->type = SMD_LOOPBACK_TYPE;
1653 ch->notify_other_cpu = notify_loopback_smd;
1654
1655 ch->read = smd_stream_read;
1656 ch->write = smd_stream_write;
1657 ch->read_avail = smd_stream_read_avail;
1658 ch->write_avail = smd_stream_write_avail;
1659 ch->update_state = update_stream_state;
1660 ch->read_from_cb = smd_stream_read;
1661
1662 memset(ch->name, 0, 20);
1663 memcpy(ch->name, "local_loopback", 14);
1664
1665 ch->pdev.name = ch->name;
1666 ch->pdev.id = ch->type;
1667
1668 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001669
1670 mutex_lock(&smd_creation_mutex);
1671 list_add(&ch->ch_list, &smd_ch_closed_list);
1672 mutex_unlock(&smd_creation_mutex);
1673
1674 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001675 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001676}
1677
/* Default no-op channel notify callback.  Installed when a client
 * passes no callback to smd_open()/smd_named_open_on_edge(), and
 * re-installed on close so stale events are silently discarded.
 */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1681
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001682static void finalize_channel_close_fn(struct work_struct *work)
1683{
1684 unsigned long flags;
1685 struct smd_channel *ch;
1686 struct smd_channel *index;
1687
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001688 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001689 spin_lock_irqsave(&smd_lock, flags);
1690 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1691 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001692 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001693 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1694 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001695 }
1696 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001697 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001698}
1699
1700struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001701{
1702 struct smd_channel *ch;
1703
1704 mutex_lock(&smd_creation_mutex);
1705 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001706 if (!strcmp(name, ch->name) &&
1707 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001708 list_del(&ch->ch_list);
1709 mutex_unlock(&smd_creation_mutex);
1710 return ch;
1711 }
1712 }
1713 mutex_unlock(&smd_creation_mutex);
1714
1715 return NULL;
1716}
1717
/* smd_named_open_on_edge - open the SMD channel @name on processor
 * edge @edge.
 * @name:   channel name (matched against the SMEM allocation table)
 * @edge:   SMD_APPS_* edge or SMD_LOOPBACK_TYPE
 * @_ch:    out parameter receiving the opened channel handle
 * @priv:   opaque cookie passed back to @notify
 * @notify: event callback (may be NULL; a no-op is substituted)
 *
 * Returns 0 on success, -ENODEV if SMD is not initialized or the
 * channel does not exist, -EAGAIN if the channel exists but is still
 * being closed (caller should retry later).
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side to drive the handshake, so force
	 * the shared state straight to OPENED with all flags raised
	 */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->send->state = SMD_SS_OPENED;
		ch->send->fDSR = 1;
		ch->send->fCTS = 1;
		ch->send->fCD = 1;
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* file the channel on the active list for its edge */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1804
1805
1806int smd_open(const char *name, smd_channel_t **_ch,
1807 void *priv, void (*notify)(void *, unsigned))
1808{
1809 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1810 notify);
1811}
1812EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001813
/* smd_close - close a channel opened with smd_open()/
 * smd_named_open_on_edge().
 *
 * The local end is marked CLOSED (loopback clears the shared flags
 * directly; real channels go through ch_set_state()).  If the remote
 * end is still OPENED the channel is parked on smd_ch_closing_list
 * until the remote side closes; otherwise it returns to the closed
 * list immediately and its notify callback is neutralized.
 *
 * Returns 0 on success, -1 if @ch is NULL.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* no remote processor to handshake with: drop the
		 * control flags and state in place
		 */
		ch->send->fDSR = 0;
		ch->send->fCTS = 0;
		ch->send->fCD = 0;
		ch->send->state = SMD_SS_CLOSED;
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->recv->state == SMD_SS_OPENED) {
		/* remote side still open: defer final cleanup until it
		 * closes (completed via smd_ch_closing_list)
		 */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
1847
/* smd_write_start - begin a segmented packet write on a packet channel.
 * @ch:  packet-mode channel
 * @len: total payload length of the packet to follow
 *
 * Writes the five-word SMD packet header (word 0 = length, rest
 * reserved/zero) and records @len in ch->pending_pkt_sz; the payload
 * is then supplied via smd_write_segment() and completed with
 * smd_write_end().
 *
 * Returns 0 on success; -ENODEV/-EACCES/-EINVAL on bad arguments,
 * -EBUSY if another packet is in progress, -EAGAIN if there is no room
 * for the header, -EPERM if the header write fails.
 */
int smd_write_start(smd_channel_t *ch, int len)
{
	int ret;
	unsigned hdr[5];

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (!ch->is_pkt_ch) {
		pr_err("%s: non-packet channel specified\n", __func__);
		return -EACCES;
	}
	if (len < 1) {
		pr_err("%s: invalid length: %d\n", __func__, len);
		return -EINVAL;
	}

	if (ch->pending_pkt_sz) {
		pr_err("%s: packet of size: %d in progress\n", __func__,
			ch->pending_pkt_sz);
		return -EBUSY;
	}
	ch->pending_pkt_sz = len;

	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
		/* roll the transaction back so a later retry can start */
		ch->pending_pkt_sz = 0;
		SMD_DBG("%s: no space to write packet header\n", __func__);
		return -EAGAIN;
	}

	/* header word 0 carries the payload length; the remaining four
	 * words are written as zero
	 */
	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		/* header did not go out whole: abandon the transaction */
		ch->pending_pkt_sz = 0;
		pr_err("%s: packet header failed to write\n", __func__);
		return -EPERM;
	}
	return 0;
}
EXPORT_SYMBOL(smd_write_start);
1892
1893int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1894{
1895 int bytes_written;
1896
1897 if (!ch) {
1898 pr_err("%s: Invalid channel specified\n", __func__);
1899 return -ENODEV;
1900 }
1901 if (len < 1) {
1902 pr_err("%s: invalid length: %d\n", __func__, len);
1903 return -EINVAL;
1904 }
1905
1906 if (!ch->pending_pkt_sz) {
1907 pr_err("%s: no transaction in progress\n", __func__);
1908 return -ENOEXEC;
1909 }
1910 if (ch->pending_pkt_sz - len < 0) {
1911 pr_err("%s: segment of size: %d will make packet go over "
1912 "length\n", __func__, len);
1913 return -EINVAL;
1914 }
1915
1916 bytes_written = smd_stream_write(ch, data, len, user_buf);
1917
1918 ch->pending_pkt_sz -= bytes_written;
1919
1920 return bytes_written;
1921}
1922EXPORT_SYMBOL(smd_write_segment);
1923
1924int smd_write_end(smd_channel_t *ch)
1925{
1926
1927 if (!ch) {
1928 pr_err("%s: Invalid channel specified\n", __func__);
1929 return -ENODEV;
1930 }
1931 if (ch->pending_pkt_sz) {
1932 pr_err("%s: current packet not completely written\n", __func__);
1933 return -E2BIG;
1934 }
1935
1936 return 0;
1937}
1938EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001939
1940int smd_read(smd_channel_t *ch, void *data, int len)
1941{
Jack Pham1b236d12012-03-19 15:27:18 -07001942 if (!ch) {
1943 pr_err("%s: Invalid channel specified\n", __func__);
1944 return -ENODEV;
1945 }
1946
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001947 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001948}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001949EXPORT_SYMBOL(smd_read);
1950
1951int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
1952{
Jack Pham1b236d12012-03-19 15:27:18 -07001953 if (!ch) {
1954 pr_err("%s: Invalid channel specified\n", __func__);
1955 return -ENODEV;
1956 }
1957
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001958 return ch->read(ch, data, len, 1);
1959}
1960EXPORT_SYMBOL(smd_read_user_buffer);
1961
1962int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
1963{
Jack Pham1b236d12012-03-19 15:27:18 -07001964 if (!ch) {
1965 pr_err("%s: Invalid channel specified\n", __func__);
1966 return -ENODEV;
1967 }
1968
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001969 return ch->read_from_cb(ch, data, len, 0);
1970}
1971EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001972
1973int smd_write(smd_channel_t *ch, const void *data, int len)
1974{
Jack Pham1b236d12012-03-19 15:27:18 -07001975 if (!ch) {
1976 pr_err("%s: Invalid channel specified\n", __func__);
1977 return -ENODEV;
1978 }
1979
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001980 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001981}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001982EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001983
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001984int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08001985{
Jack Pham1b236d12012-03-19 15:27:18 -07001986 if (!ch) {
1987 pr_err("%s: Invalid channel specified\n", __func__);
1988 return -ENODEV;
1989 }
1990
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001991 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08001992}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001993EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08001994
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001995int smd_read_avail(smd_channel_t *ch)
1996{
Jack Pham1b236d12012-03-19 15:27:18 -07001997 if (!ch) {
1998 pr_err("%s: Invalid channel specified\n", __func__);
1999 return -ENODEV;
2000 }
2001
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002002 return ch->read_avail(ch);
2003}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002004EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002005
2006int smd_write_avail(smd_channel_t *ch)
2007{
Jack Pham1b236d12012-03-19 15:27:18 -07002008 if (!ch) {
2009 pr_err("%s: Invalid channel specified\n", __func__);
2010 return -ENODEV;
2011 }
2012
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002013 return ch->write_avail(ch);
2014}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002015EXPORT_SYMBOL(smd_write_avail);
2016
2017void smd_enable_read_intr(smd_channel_t *ch)
2018{
2019 if (ch)
2020 ch->send->fBLOCKREADINTR = 0;
2021}
2022EXPORT_SYMBOL(smd_enable_read_intr);
2023
2024void smd_disable_read_intr(smd_channel_t *ch)
2025{
2026 if (ch)
2027 ch->send->fBLOCKREADINTR = 1;
2028}
2029EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002030
/* Blocking reads are not supported; always returns -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2035
/* Blocking writes are not supported; always returns -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2040
2041int smd_cur_packet_size(smd_channel_t *ch)
2042{
Jack Pham1b236d12012-03-19 15:27:18 -07002043 if (!ch) {
2044 pr_err("%s: Invalid channel specified\n", __func__);
2045 return -ENODEV;
2046 }
2047
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002048 return ch->current_packet;
2049}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002050EXPORT_SYMBOL(smd_cur_packet_size);
2051
/* smd_tiocmget - report modem-control line state for @ch as a TIOCM_*
 * bitmask (or -ENODEV for a NULL channel).
 *
 * DSR/CTS/CD/RI come from the remote (recv) half-channel.  Note that
 * TIOCM_RTS/TIOCM_DTR are read back from our own send half
 * (fCTS/fDSR), i.e. they reflect what was last set locally via
 * smd_tiocmset().
 */
int smd_tiocmget(smd_channel_t *ch)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return  (ch->recv->fDSR ? TIOCM_DSR : 0) |
		(ch->recv->fCTS ? TIOCM_CTS : 0) |
		(ch->recv->fCD ? TIOCM_CD : 0) |
		(ch->recv->fRI ? TIOCM_RI : 0) |
		(ch->send->fCTS ? TIOCM_RTS : 0) |
		(ch->send->fDSR ? TIOCM_DTR : 0);
}
EXPORT_SYMBOL(smd_tiocmget);
2067
/* this api will be called while holding smd_lock */
/* smd_tiocmset_from_cb - apply TIOCM_DTR/TIOCM_RTS set/clear masks to
 * the local (send) half-channel without taking smd_lock itself; safe
 * from channel callbacks where smd_lock is already held.
 *
 * fSTATE is raised and the remote processor interrupted so the peer
 * picks up the flag change.  Returns 0, or -ENODEV if @ch is NULL.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->send->fDSR = 1;

	if (set & TIOCM_RTS)
		ch->send->fCTS = 1;

	if (clear & TIOCM_DTR)
		ch->send->fDSR = 0;

	if (clear & TIOCM_RTS)
		ch->send->fCTS = 0;

	/* flag the state change, then interrupt the remote side; the
	 * barrier keeps the flag writes ahead of the notification
	 */
	ch->send->fSTATE = 1;
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2096
2097int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2098{
2099 unsigned long flags;
2100
Jack Pham1b236d12012-03-19 15:27:18 -07002101 if (!ch) {
2102 pr_err("%s: Invalid channel specified\n", __func__);
2103 return -ENODEV;
2104 }
2105
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002106 spin_lock_irqsave(&smd_lock, flags);
2107 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002108 spin_unlock_irqrestore(&smd_lock, flags);
2109
2110 return 0;
2111}
2112EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002113
2114
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002115/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002116
/* smem_alloc - return the pointer to an smem item if it is already
 * allocated, NULL otherwise.  Despite the name this never allocates:
 * it is a pure lookup (see smem_alloc2() for allocate-on-demand).
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002125
/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
 * it allocates it and then returns the pointer to it.
 *
 * Sizes are rounded up to an 8-byte multiple; an existing item whose
 * (aligned) size differs yields NULL.  New allocations are only
 * permitted for ids above SMEM_FIXED_ITEM_LAST and are carved from the
 * shared heap while holding the inter-processor remote spinlock.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* publish offset/size to other processors
			 * before marking the entry allocated
			 */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	/* flush heap bookkeeping before releasing the remote lock */
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002174
/* smem_get_entry - look up smem item @id.  On success returns its
 * pointer and stores its size in *@size; otherwise returns 0 and sets
 * *@size to 0.
 *
 * The remote spinlock is taken only once smsm_init() has set
 * spinlocks_initialized; earlier (boot-time) callers run unlocked.
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002202
/* Look up smem item @id and verify it has the expected size.
 * Returns the item pointer, or 0 if the item is missing or its size
 * does not match.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned actual_size;
	void *item;

	item = smem_get_entry(id, &actual_size);
	if (!item)
		return 0;

	/* compare after rounding the request up to 8 bytes, matching
	 * the alignment applied by smem_alloc2()
	 */
	size_in = ALIGN(size_in, 8);
	if (size_in != actual_size) {
		pr_err("smem_find(%d, %d): wrong size %d\n",
		       id, size_in, actual_size);
		return 0;
	}

	return item;
}
EXPORT_SYMBOL(smem_find);
2222
2223static int smsm_cb_init(void)
2224{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002225 struct smsm_state_info *state_info;
2226 int n;
2227 int ret = 0;
2228
2229 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2230 GFP_KERNEL);
2231
2232 if (!smsm_states) {
2233 pr_err("%s: SMSM init failed\n", __func__);
2234 return -ENOMEM;
2235 }
2236
Eric Holmbergc8002902011-09-16 13:55:57 -06002237 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002238 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2239 state_info = &smsm_states[n];
2240 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
2241 INIT_LIST_HEAD(&state_info->callbacks);
2242 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002243 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002244
2245 return ret;
2246}
2247
/* smsm_init - one-time Shared Memory State Machine setup.
 *
 * In order: initialize the inter-processor remote spinlock, adopt the
 * entry/host counts published in SMEM (if present), allocate the
 * snapshot fifo and wakelock, allocate and clear the shared state
 * words, open all interrupt masks for the apps processor, allocate the
 * interrupt mux, and register the callback table.  Finishes by
 * announcing SMSM_INIT through the driver-state notifier.
 *
 * Returns 0 on success or the error code of the failing step.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;

	i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
	if (i) {
		pr_err("%s: remote spinlock init failed %d\n", __func__, i);
		return i;
	}
	spinlocks_initialized = 1;

	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		/* adopt the table dimensions published in SMEM */
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			/* modem protocol >= 0xB: also clear the DEM word */
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		/* start with every entry unmasked for the apps host */
		if (smsm_info.intr_mask)
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0xffffffff,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	/* make all shared-memory writes visible before announcing init */
	wmb();
	smsm_driver_state_notify(SMSM_INIT, NULL);
	return 0;
}
2316
2317void smsm_reset_modem(unsigned mode)
2318{
2319 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2320 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2321 } else if (mode == SMSM_MODEM_WAIT) {
2322 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2323 } else { /* reset_mode is SMSM_RESET or default */
2324 mode = SMSM_RESET;
2325 }
2326
2327 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2328}
2329EXPORT_SYMBOL(smsm_reset_modem);
2330
/* smsm_reset_modem_cont - let a modem held in SMSM_MODEM_WAIT proceed
 * by clearing the wait bit from the apps state word.  No-op if the
 * shared state area has not been allocated yet.
 */
void smsm_reset_modem_cont(void)
{
	unsigned long flags;
	uint32_t state;

	if (!smsm_info.state)
		return;

	spin_lock_irqsave(&smem_lock, flags);
	state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
						& ~SMSM_MODEM_WAIT;
	__raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
	/* ensure the cleared bit reaches shared memory */
	wmb();
	spin_unlock_irqrestore(&smem_lock, flags);
}
EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002347
/* smsm_cb_snapshot - capture every SMSM state word into the snapshot
 * fifo and schedule smsm_cb_work to deliver client callbacks from
 * process context.
 *
 * Called from interrupt paths with smem_lock held.  A wakelock is
 * acquired on the first outstanding snapshot (tracked by
 * smsm_snapshot_count) so the system cannot suspend while callbacks
 * are pending; the worker releases it when the fifo drains.
 */
static void smsm_cb_snapshot(void)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < (SMSM_NUM_ENTRIES * 4)) {
		/* not enough room for a full snapshot; drop it */
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			return;
		}
	}

	spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
	if (smsm_snapshot_count == 0) {
		SMx_POWER_INFO("SMSM snapshot wake lock\n");
		wake_lock(&smsm_snapshot_wakelock);
	}
	++smsm_snapshot_count;
	spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	schedule_work(&smsm_cb_work);
}
2381
/* smsm_irq_handler - common handler for incoming SMSM interrupts.
 *
 * The ADSP (Q6) interrupt only triggers a callback snapshot (plus a
 * QSD8x50-specific read of the Q6 interrupt mux).  All other sources
 * run the boot/reset handshake: the modem state word is examined, the
 * apps state word updated accordingly, and any change written back and
 * signalled to the other processors before callbacks are snapshotted.
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* NOTE(review): on 8x50 the mux value is read and
		 * latched but otherwise unused here — appears to be a
		 * tracking/ack workaround; confirm intent.
		 */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot();
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			/* modem announced a reset: echo SMSM_RESET back
			 * (unless the handshake is disabled) and notify
			 */
			if (!disable_smsm_reset_handshake)
				apps |= SMSM_RESET;

			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			/* declare RUN once every init stage completed */
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot();
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2462
/* Modem->Apps SMSM interrupt entry: log, count, run common handler. */
static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2469
/* LPASS->Apps SMSM interrupt entry: log, count, run common handler. */
static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2476
2477static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2478{
2479 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002480 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002481 return smsm_irq_handler(irq, data);
2482}
2483
2484static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2485{
2486 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002487 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002488 return smsm_irq_handler(irq, data);
2489}
2490
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002491int smsm_change_intr_mask(uint32_t smsm_entry,
2492 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002493{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002494 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002495 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002496
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002497 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2498 pr_err("smsm_change_state: Invalid entry %d\n",
2499 smsm_entry);
2500 return -EINVAL;
2501 }
2502
2503 if (!smsm_info.intr_mask) {
2504 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002505 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002506 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002507
2508 spin_lock_irqsave(&smem_lock, flags);
2509
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002510 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2511 new_mask = (old_mask & ~clear_mask) | set_mask;
2512 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002513
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002514 wmb();
2515 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002516
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002517 return 0;
2518}
2519EXPORT_SYMBOL(smsm_change_intr_mask);
2520
2521int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2522{
2523 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2524 pr_err("smsm_change_state: Invalid entry %d\n",
2525 smsm_entry);
2526 return -EINVAL;
2527 }
2528
2529 if (!smsm_info.intr_mask) {
2530 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2531 return -EIO;
2532 }
2533
2534 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2535 return 0;
2536}
2537EXPORT_SYMBOL(smsm_get_intr_mask);
2538
2539int smsm_change_state(uint32_t smsm_entry,
2540 uint32_t clear_mask, uint32_t set_mask)
2541{
2542 unsigned long flags;
2543 uint32_t old_state, new_state;
2544
2545 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2546 pr_err("smsm_change_state: Invalid entry %d",
2547 smsm_entry);
2548 return -EINVAL;
2549 }
2550
2551 if (!smsm_info.state) {
2552 pr_err("smsm_change_state <SM NO STATE>\n");
2553 return -EIO;
2554 }
2555 spin_lock_irqsave(&smem_lock, flags);
2556
2557 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2558 new_state = (old_state & ~clear_mask) | set_mask;
2559 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2560 SMSM_DBG("smsm_change_state %x\n", new_state);
2561 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002562
2563 spin_unlock_irqrestore(&smem_lock, flags);
2564
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002565 return 0;
2566}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002567EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002568
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002569uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002570{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002571 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002572
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002573 /* needs interface change to return error code */
2574 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2575 pr_err("smsm_change_state: Invalid entry %d",
2576 smsm_entry);
2577 return 0;
2578 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002579
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002580 if (!smsm_info.state) {
2581 pr_err("smsm_get_state <SM NO STATE>\n");
2582 } else {
2583 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2584 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002585
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002586 return rv;
2587}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002588EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002589
/**
 * Performs SMSM callback client notification.
 *
 * Drains full SMSM state snapshots (one uint32_t per SMSM entry) from
 * smsm_snapshot_fifo and invokes every registered callback whose mask
 * overlaps the bits that changed since the previous snapshot.  Runs in
 * process context so callbacks may sleep; callback lists are protected
 * by smsm_lock.
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
        struct smsm_state_cb_info *cb_info;
        struct smsm_state_info *state_info;
        int n;
        uint32_t new_state;
        uint32_t state_changes;
        int ret;
        unsigned long flags;
        /* a snapshot is one word for every SMSM entry, pushed atomically */
        int snapshot_size = SMSM_NUM_ENTRIES * sizeof(uint32_t);

        if (!smd_initialized)
                return;

        /* process only complete snapshots; a partial fifo is left alone */
        while (kfifo_len(&smsm_snapshot_fifo) >= snapshot_size) {
                mutex_lock(&smsm_lock);
                for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
                        state_info = &smsm_states[n];

                        ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
                                        sizeof(new_state));
                        if (ret != sizeof(new_state)) {
                                /* should be impossible given the length
                                 * check above; bail out defensively */
                                pr_err("%s: snapshot underflow %d\n",
                                        __func__, ret);
                                mutex_unlock(&smsm_lock);
                                return;
                        }

                        /* XOR isolates exactly the bits that flipped */
                        state_changes = state_info->last_value ^ new_state;
                        if (state_changes) {
                                SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
                                               n, state_info->last_value,
                                               new_state);
                                list_for_each_entry(cb_info,
                                        &state_info->callbacks, cb_list) {

                                        /* notify only clients interested
                                         * in one of the changed bits */
                                        if (cb_info->mask & state_changes)
                                                cb_info->notify(cb_info->data,
                                                        state_info->last_value,
                                                        new_state);
                                }
                                state_info->last_value = new_state;
                        }
                }
                mutex_unlock(&smsm_lock);

                /* one wakelock reference is held per queued snapshot;
                 * release it now that this snapshot has been consumed */
                spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
                if (smsm_snapshot_count) {
                        --smsm_snapshot_count;
                        if (smsm_snapshot_count == 0) {
                                SMx_POWER_INFO("SMSM snapshot wake unlock\n");
                                wake_unlock(&smsm_snapshot_wakelock);
                        }
                } else {
                        pr_err("%s: invalid snapshot count\n", __func__);
                }
                spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
        }
}
2652
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002653
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002654/**
2655 * Registers callback for SMSM state notifications when the specified
2656 * bits change.
2657 *
2658 * @smsm_entry Processor entry to deregister
2659 * @mask Bits to deregister (if result is 0, callback is removed)
2660 * @notify Notification function to deregister
2661 * @data Opaque data passed in to callback
2662 *
2663 * @returns Status code
2664 * <0 error code
2665 * 0 inserted new entry
2666 * 1 updated mask of existing entry
2667 */
2668int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
2669 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002670{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002671 struct smsm_state_cb_info *cb_info;
2672 struct smsm_state_cb_info *cb_found = 0;
2673 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002674
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002675 if (smsm_entry >= SMSM_NUM_ENTRIES)
2676 return -EINVAL;
2677
Eric Holmbergc8002902011-09-16 13:55:57 -06002678 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002679
2680 if (!smsm_states) {
2681 /* smsm not yet initialized */
2682 ret = -ENODEV;
2683 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002684 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002685
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002686 list_for_each_entry(cb_info,
2687 &smsm_states[smsm_entry].callbacks, cb_list) {
2688 if ((cb_info->notify == notify) &&
2689 (cb_info->data == data)) {
2690 cb_info->mask |= mask;
2691 cb_found = cb_info;
2692 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002693 break;
2694 }
2695 }
2696
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002697 if (!cb_found) {
2698 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
2699 GFP_ATOMIC);
2700 if (!cb_info) {
2701 ret = -ENOMEM;
2702 goto cleanup;
2703 }
2704
2705 cb_info->mask = mask;
2706 cb_info->notify = notify;
2707 cb_info->data = data;
2708 INIT_LIST_HEAD(&cb_info->cb_list);
2709 list_add_tail(&cb_info->cb_list,
2710 &smsm_states[smsm_entry].callbacks);
2711 }
2712
2713cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06002714 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002715 return ret;
2716}
2717EXPORT_SYMBOL(smsm_state_cb_register);
2718
2719
2720/**
2721 * Deregisters for SMSM state notifications for the specified bits.
2722 *
2723 * @smsm_entry Processor entry to deregister
2724 * @mask Bits to deregister (if result is 0, callback is removed)
2725 * @notify Notification function to deregister
2726 * @data Opaque data passed in to callback
2727 *
2728 * @returns Status code
2729 * <0 error code
2730 * 0 not found
2731 * 1 updated mask
2732 * 2 removed callback
2733 */
2734int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
2735 void (*notify)(void *, uint32_t, uint32_t), void *data)
2736{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002737 struct smsm_state_cb_info *cb_info;
2738 int ret = 0;
2739
2740 if (smsm_entry >= SMSM_NUM_ENTRIES)
2741 return -EINVAL;
2742
Eric Holmbergc8002902011-09-16 13:55:57 -06002743 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002744
2745 if (!smsm_states) {
2746 /* smsm not yet initialized */
Eric Holmbergc8002902011-09-16 13:55:57 -06002747 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002748 return -ENODEV;
2749 }
2750
2751 list_for_each_entry(cb_info,
2752 &smsm_states[smsm_entry].callbacks, cb_list) {
2753 if ((cb_info->notify == notify) &&
2754 (cb_info->data == data)) {
2755 cb_info->mask &= ~mask;
2756 ret = 1;
2757 if (!cb_info->mask) {
2758 /* no mask bits set, remove callback */
2759 list_del(&cb_info->cb_list);
2760 kfree(cb_info);
2761 ret = 2;
2762 }
2763 break;
2764 }
2765 }
2766
Eric Holmbergc8002902011-09-16 13:55:57 -06002767 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002768 return ret;
2769}
2770EXPORT_SYMBOL(smsm_state_cb_deregister);
2771
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06002772int smsm_driver_state_notifier_register(struct notifier_block *nb)
2773{
2774 int ret;
2775 if (!nb)
2776 return -EINVAL;
2777 mutex_lock(&smsm_driver_state_notifier_lock);
2778 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
2779 mutex_unlock(&smsm_driver_state_notifier_lock);
2780 return ret;
2781}
2782EXPORT_SYMBOL(smsm_driver_state_notifier_register);
2783
2784int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
2785{
2786 int ret;
2787 if (!nb)
2788 return -EINVAL;
2789 mutex_lock(&smsm_driver_state_notifier_lock);
2790 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
2791 nb);
2792 mutex_unlock(&smsm_driver_state_notifier_lock);
2793 return ret;
2794}
2795EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
2796
/* Broadcasts @state (with opaque @data) to all registered driver-state
 * notifier blocks.  The mutex serializes notification against
 * concurrent register/unregister; raw_notifier chains provide no
 * locking of their own.
 */
static void smsm_driver_state_notify(uint32_t state, void *data)
{
        mutex_lock(&smsm_driver_state_notifier_lock);
        raw_notifier_call_chain(&smsm_driver_state_notifier_list,
                        state, data);
        mutex_unlock(&smsm_driver_state_notifier_lock);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002804
/* Legacy (non-platform-data) interrupt setup: requests and wake-enables
 * the SMD and SMSM interrupts for every compiled-in remote processor.
 * Each request_irq failure unwinds all previously requested IRQs and
 * returns the error; enable_irq_wake failures are only logged.
 *
 * NOTE(review): the CONFIG_DSPS/CONFIG_WCNSS/CONFIG_DSPS_SMSM error
 * paths free INT_ADSP_A11 / INT_DSPS_A11 etc. unconditionally, even
 * when the corresponding config option that requested them is not set
 * — verify these combinations are never built independently.
 */
int smd_core_init(void)
{
        int r;
        unsigned long flags = IRQF_TRIGGER_RISING;
        SMD_INFO("smd_core_init()\n");

        r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
                        flags, "smd_dev", 0);
        if (r < 0)
                return r;
        r = enable_irq_wake(INT_A9_M2A_0);
        if (r < 0)
                pr_err("smd_core_init: "
                       "enable_irq_wake failed for INT_A9_M2A_0\n");

        r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
                        flags, "smsm_dev", 0);
        if (r < 0) {
                free_irq(INT_A9_M2A_0, 0);
                return r;
        }
        r = enable_irq_wake(INT_A9_M2A_5);
        if (r < 0)
                pr_err("smd_core_init: "
                       "enable_irq_wake failed for INT_A9_M2A_5\n");

#if defined(CONFIG_QDSP6)
#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
        /* SMD and SMSM share one line; both handlers must be shared */
        flags |= IRQF_SHARED;
#endif
        r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
                        flags, "smd_dev", smd_dsp_irq_handler);
        if (r < 0) {
                free_irq(INT_A9_M2A_0, 0);
                free_irq(INT_A9_M2A_5, 0);
                return r;
        }

        r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
                        flags, "smsm_dev", smsm_dsp_irq_handler);
        if (r < 0) {
                free_irq(INT_A9_M2A_0, 0);
                free_irq(INT_A9_M2A_5, 0);
                free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
                return r;
        }

        r = enable_irq_wake(INT_ADSP_A11);
        if (r < 0)
                pr_err("smd_core_init: "
                       "enable_irq_wake failed for INT_ADSP_A11\n");

#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
        r = enable_irq_wake(INT_ADSP_A11_SMSM);
        if (r < 0)
                pr_err("smd_core_init: enable_irq_wake "
                       "failed for INT_ADSP_A11_SMSM\n");
#endif
        /* sharing only applies to the ADSP pair requested above */
        flags &= ~IRQF_SHARED;
#endif

#if defined(CONFIG_DSPS)
        r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
                        flags, "smd_dev", smd_dsps_irq_handler);
        if (r < 0) {
                free_irq(INT_A9_M2A_0, 0);
                free_irq(INT_A9_M2A_5, 0);
                free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
                free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
                return r;
        }

        r = enable_irq_wake(INT_DSPS_A11);
        if (r < 0)
                /* NOTE(review): message names INT_ADSP_A11 but the call
                 * above is for INT_DSPS_A11 */
                pr_err("smd_core_init: "
                       "enable_irq_wake failed for INT_ADSP_A11\n");
#endif

#if defined(CONFIG_WCNSS)
        r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
                        flags, "smd_dev", smd_wcnss_irq_handler);
        if (r < 0) {
                free_irq(INT_A9_M2A_0, 0);
                free_irq(INT_A9_M2A_5, 0);
                free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
                free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
                free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
                return r;
        }

        r = enable_irq_wake(INT_WCNSS_A11);
        if (r < 0)
                pr_err("smd_core_init: "
                       "enable_irq_wake failed for INT_WCNSS_A11\n");

        r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
                        flags, "smsm_dev", smsm_wcnss_irq_handler);
        if (r < 0) {
                free_irq(INT_A9_M2A_0, 0);
                free_irq(INT_A9_M2A_5, 0);
                free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
                free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
                free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
                free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
                return r;
        }

        r = enable_irq_wake(INT_WCNSS_A11_SMSM);
        if (r < 0)
                pr_err("smd_core_init: "
                       "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
#endif

#if defined(CONFIG_DSPS_SMSM)
        r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
                        flags, "smsm_dev", smsm_dsps_irq_handler);
        if (r < 0) {
                free_irq(INT_A9_M2A_0, 0);
                free_irq(INT_A9_M2A_5, 0);
                free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
                free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
                free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
                free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
                free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
                return r;
        }

        r = enable_irq_wake(INT_DSPS_A11_SMSM);
        if (r < 0)
                pr_err("smd_core_init: "
                       "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
#endif
        SMD_INFO("smd_core_init() done\n");

        return 0;
}
2941
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05302942static int intr_init(struct interrupt_config_item *private_irq,
2943 struct smd_irq_config *platform_irq,
2944 struct platform_device *pdev
2945 )
2946{
2947 int irq_id;
2948 int ret;
2949 int ret_wake;
2950
2951 private_irq->out_bit_pos = platform_irq->out_bit_pos;
2952 private_irq->out_offset = platform_irq->out_offset;
2953 private_irq->out_base = platform_irq->out_base;
2954
2955 irq_id = platform_get_irq_byname(
2956 pdev,
2957 platform_irq->irq_name
2958 );
2959 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
2960 platform_irq->irq_name, irq_id);
2961 ret = request_irq(irq_id,
2962 private_irq->irq_handler,
2963 platform_irq->flags,
2964 platform_irq->device_name,
2965 (void *)platform_irq->dev_id
2966 );
2967 if (ret < 0) {
2968 platform_irq->irq_id = ret;
2969 } else {
2970 platform_irq->irq_id = irq_id;
2971 ret_wake = enable_irq_wake(irq_id);
2972 if (ret_wake < 0) {
2973 pr_err("smd: enable_irq_wake failed on %s",
2974 platform_irq->irq_name);
2975 }
2976 }
2977
2978 return ret;
2979}
2980
2981int smd_core_platform_init(struct platform_device *pdev)
2982{
2983 int i;
2984 int ret;
2985 uint32_t num_ss;
2986 struct smd_platform *smd_platform_data;
2987 struct smd_subsystem_config *smd_ss_config_list;
2988 struct smd_subsystem_config *cfg;
2989 int err_ret = 0;
2990
2991 smd_platform_data = pdev->dev.platform_data;
2992 num_ss = smd_platform_data->num_ss_configs;
2993 smd_ss_config_list = smd_platform_data->smd_ss_configs;
2994
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06002995 if (smd_platform_data->smd_ssr_config)
2996 disable_smsm_reset_handshake = smd_platform_data->
2997 smd_ssr_config->disable_smsm_reset_handshake;
2998
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05302999 for (i = 0; i < num_ss; i++) {
3000 cfg = &smd_ss_config_list[i];
3001
3002 ret = intr_init(
3003 &private_intr_config[cfg->irq_config_id].smd,
3004 &cfg->smd_int,
3005 pdev
3006 );
3007
3008 if (ret < 0) {
3009 err_ret = ret;
3010 pr_err("smd: register irq failed on %s\n",
3011 cfg->smd_int.irq_name);
3012 break;
3013 }
3014
3015 ret = intr_init(
3016 &private_intr_config[cfg->irq_config_id].smsm,
3017 &cfg->smsm_int,
3018 pdev
3019 );
3020
3021 if (ret < 0) {
3022 err_ret = ret;
3023 pr_err("smd: register irq failed on %s\n",
3024 cfg->smsm_int.irq_name);
3025 break;
3026 }
Eric Holmberg17992c12012-02-29 12:54:44 -07003027
3028 strncpy(edge_to_pids[cfg->edge].subsys_name,
3029 cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303030 }
3031
3032 if (err_ret < 0) {
3033 pr_err("smd: deregistering IRQs\n");
3034 for (i = 0; i < num_ss; ++i) {
3035 cfg = &smd_ss_config_list[i];
3036
3037 if (cfg->smd_int.irq_id >= 0)
3038 free_irq(cfg->smd_int.irq_id,
3039 (void *)cfg->smd_int.dev_id
3040 );
3041 if (cfg->smsm_int.irq_id >= 0)
3042 free_irq(cfg->smsm_int.irq_id,
3043 (void *)cfg->smsm_int.dev_id
3044 );
3045 }
3046 return err_ret;
3047 }
3048
3049 SMD_INFO("smd_core_platform_init() done\n");
3050 return 0;
3051
3052}
3053
Gregory Bean4416e9e2010-07-28 10:22:12 -07003054static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003055{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303056 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003057
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303058 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003059 INIT_WORK(&probe_work, smd_channel_probe_worker);
3060
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003061 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3062 if (IS_ERR(channel_close_wq)) {
3063 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3064 return -ENOMEM;
3065 }
3066
3067 if (smsm_init()) {
3068 pr_err("smsm_init() failed\n");
3069 return -1;
3070 }
3071
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303072 if (pdev) {
3073 if (pdev->dev.of_node) {
3074 pr_err("SMD: Device tree not currently supported\n");
3075 return -ENODEV;
3076 } else if (pdev->dev.platform_data) {
3077 ret = smd_core_platform_init(pdev);
3078 if (ret) {
3079 pr_err(
3080 "SMD: smd_core_platform_init() failed\n");
3081 return -ENODEV;
3082 }
3083 } else {
3084 ret = smd_core_init();
3085 if (ret) {
3086 pr_err("smd_core_init() failed\n");
3087 return -ENODEV;
3088 }
3089 }
3090 } else {
3091 pr_err("SMD: PDEV not found\n");
3092 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003093 }
3094
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003095 smd_initialized = 1;
3096
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003097 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003098 smsm_irq_handler(0, 0);
3099 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003100
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003101 return 0;
3102}
3103
static int restart_notifier_cb(struct notifier_block *this,
                                  unsigned long code,
                                  void *data);

/* Subsystem-restart notifier registrations: maps each subsystem name
 * to the SMD processor whose channels must be reset when it restarts.
 * NOTE(review): "gss" is mapped to SMD_MODEM — presumably intentional
 * (GSS carries the modem edge on some targets); confirm per target.
 */
static struct restart_notifier_block restart_notifiers[] = {
        {SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
        {SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
        {SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
        {SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
        {SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3115
3116static int restart_notifier_cb(struct notifier_block *this,
3117 unsigned long code,
3118 void *data)
3119{
3120 if (code == SUBSYS_AFTER_SHUTDOWN) {
3121 struct restart_notifier_block *notifier;
3122
3123 notifier = container_of(this,
3124 struct restart_notifier_block, nb);
3125 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3126 __func__, notifier->processor,
3127 notifier->name);
3128
3129 smd_channel_reset(notifier->processor);
3130 }
3131
3132 return NOTIFY_DONE;
3133}
3134
3135static __init int modem_restart_late_init(void)
3136{
3137 int i;
3138 void *handle;
3139 struct restart_notifier_block *nb;
3140
3141 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3142 nb = &restart_notifiers[i];
3143 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3144 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3145 __func__, nb->name, handle);
3146 }
3147 return 0;
3148}
3149late_initcall(modem_restart_late_init);
3150
/* Platform driver binding: matched by name (MODULE_NAME) against the
 * SMD platform device; probe performs all interrupt and SMSM setup.
 * No remove callback — the driver is not expected to unbind.
 */
static struct platform_driver msm_smd_driver = {
        .probe = msm_smd_probe,
        .driver = {
                .name = MODULE_NAME,
                .owner = THIS_MODULE,
        },
};
3158
/* Module entry point: registers the SMD platform driver; all real
 * initialization happens in msm_smd_probe() when the device binds.
 */
static int __init msm_smd_init(void)
{
        return platform_driver_register(&msm_smd_driver);
}

module_init(msm_smd_init);
3165
3166MODULE_DESCRIPTION("MSM Shared Memory Core");
3167MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3168MODULE_LICENSE("GPL");