blob: bbfe7025551ed2059fe5d7feeaf0568f5ad31665 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070037#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070038#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070039#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070040#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053041#include <mach/socinfo.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070042
43#include "smd_private.h"
44#include "proc_comm.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070046
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060048 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060049 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070050#define CONFIG_QDSP6 1
51#endif
52
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060053#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
54 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070055#define CONFIG_DSPS 1
56#endif
57
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060058#if defined(CONFIG_ARCH_MSM8960) \
59 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070060#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060061#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070063
64#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065#define SMEM_VERSION 0x000B
66#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070067#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060068#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070069
70uint32_t SMSM_NUM_ENTRIES = 8;
71uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070072
73enum {
74 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070075 MSM_SMSM_DEBUG = 1U << 1,
76 MSM_SMD_INFO = 1U << 2,
77 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070078 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070079};
80
81struct smsm_shared_info {
82 uint32_t *state;
83 uint32_t *intr_mask;
84 uint32_t *intr_mux;
85};
86
87static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060088static struct kfifo smsm_snapshot_fifo;
89static struct wake_lock smsm_snapshot_wakelock;
90static int smsm_snapshot_count;
91static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070092
93struct smsm_size_info_type {
94 uint32_t num_hosts;
95 uint32_t num_entries;
96 uint32_t reserved0;
97 uint32_t reserved1;
98};
99
100struct smsm_state_cb_info {
101 struct list_head cb_list;
102 uint32_t mask;
103 void *data;
104 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
105};
106
107struct smsm_state_info {
108 struct list_head callbacks;
109 uint32_t last_value;
110};
111
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530112struct interrupt_config_item {
113 /* must be initialized */
114 irqreturn_t (*irq_handler)(int req, void *data);
115 /* outgoing interrupt config (set from platform data) */
116 uint32_t out_bit_pos;
117 void __iomem *out_base;
118 uint32_t out_offset;
119};
120
121struct interrupt_config {
122 struct interrupt_config_item smd;
123 struct interrupt_config_item smsm;
124};
125
126static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700127static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530128static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700129static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530130static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700131static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530132static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700133static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530134static irqreturn_t smsm_irq_handler(int irq, void *data);
135
136static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
137 [SMD_MODEM] = {
138 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700139 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530140 },
141 [SMD_Q6] = {
142 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700143 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530144 },
145 [SMD_DSPS] = {
146 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700147 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530148 },
149 [SMD_WCNSS] = {
150 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700151 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530152 },
153};
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700154struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530155
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
157#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
158 entry * SMSM_NUM_HOSTS + host)
159#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
160
161/* Internal definitions which are not exported in some targets */
162enum {
163 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700164};
165
166static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700167module_param_named(debug_mask, msm_smd_debug_mask,
168 int, S_IRUGO | S_IWUSR | S_IWGRP);
169
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700170#if defined(CONFIG_MSM_SMD_DEBUG)
171#define SMD_DBG(x...) do { \
172 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
173 printk(KERN_DEBUG x); \
174 } while (0)
175
176#define SMSM_DBG(x...) do { \
177 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
178 printk(KERN_DEBUG x); \
179 } while (0)
180
181#define SMD_INFO(x...) do { \
182 if (msm_smd_debug_mask & MSM_SMD_INFO) \
183 printk(KERN_INFO x); \
184 } while (0)
185
186#define SMSM_INFO(x...) do { \
187 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
188 printk(KERN_INFO x); \
189 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700190#define SMx_POWER_INFO(x...) do { \
191 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
192 printk(KERN_INFO x); \
193 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194#else
195#define SMD_DBG(x...) do { } while (0)
196#define SMSM_DBG(x...) do { } while (0)
197#define SMD_INFO(x...) do { } while (0)
198#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700199#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700200#endif
201
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700202static unsigned last_heap_free = 0xffffffff;
203
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700204static inline void smd_write_intr(unsigned int val,
205 const void __iomem *addr);
206
207#if defined(CONFIG_ARCH_MSM7X30)
208#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530209 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700210#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530211 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700212#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530213 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700214#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530215 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700216#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600217#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218#define MSM_TRIG_A2WCNSS_SMD_INT
219#define MSM_TRIG_A2WCNSS_SMSM_INT
220#elif defined(CONFIG_ARCH_MSM8X60)
221#define MSM_TRIG_A2M_SMD_INT \
222 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
223#define MSM_TRIG_A2Q6_SMD_INT \
224 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
225#define MSM_TRIG_A2M_SMSM_INT \
226 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
227#define MSM_TRIG_A2Q6_SMSM_INT \
228 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
229#define MSM_TRIG_A2DSPS_SMD_INT \
230 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600231#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232#define MSM_TRIG_A2WCNSS_SMD_INT
233#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600234#elif defined(CONFIG_ARCH_MSM9615)
235#define MSM_TRIG_A2M_SMD_INT \
236 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
237#define MSM_TRIG_A2Q6_SMD_INT \
238 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
239#define MSM_TRIG_A2M_SMSM_INT \
240 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
241#define MSM_TRIG_A2Q6_SMSM_INT \
242 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
243#define MSM_TRIG_A2DSPS_SMD_INT
244#define MSM_TRIG_A2DSPS_SMSM_INT
245#define MSM_TRIG_A2WCNSS_SMD_INT
246#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700247#elif defined(CONFIG_ARCH_FSM9XXX)
248#define MSM_TRIG_A2Q6_SMD_INT \
249 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
250#define MSM_TRIG_A2Q6_SMSM_INT \
251 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
252#define MSM_TRIG_A2M_SMD_INT \
253 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
254#define MSM_TRIG_A2M_SMSM_INT \
255 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
256#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600257#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700258#define MSM_TRIG_A2WCNSS_SMD_INT
259#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700260#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700261#define MSM_TRIG_A2M_SMD_INT \
262 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700263#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700264#define MSM_TRIG_A2M_SMSM_INT \
265 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700266#define MSM_TRIG_A2Q6_SMSM_INT
267#define MSM_TRIG_A2DSPS_SMD_INT
268#define MSM_TRIG_A2DSPS_SMSM_INT
269#define MSM_TRIG_A2WCNSS_SMD_INT
270#define MSM_TRIG_A2WCNSS_SMSM_INT
271#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
272#define MSM_TRIG_A2M_SMD_INT \
273 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
274#define MSM_TRIG_A2Q6_SMD_INT
275#define MSM_TRIG_A2M_SMSM_INT \
276 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
277#define MSM_TRIG_A2Q6_SMSM_INT
278#define MSM_TRIG_A2DSPS_SMD_INT
279#define MSM_TRIG_A2DSPS_SMSM_INT
280#define MSM_TRIG_A2WCNSS_SMD_INT
281#define MSM_TRIG_A2WCNSS_SMSM_INT
282#else /* use platform device / device tree configuration */
283#define MSM_TRIG_A2M_SMD_INT
284#define MSM_TRIG_A2Q6_SMD_INT
285#define MSM_TRIG_A2M_SMSM_INT
286#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700287#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600288#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700289#define MSM_TRIG_A2WCNSS_SMD_INT
290#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700291#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700292
Jeff Hugoee40b152012-02-09 17:39:47 -0700293/*
294 * stub out legacy macros if they are not being used so that the legacy
295 * code compiles even though it is not used
296 *
297 * these definitions should not be used in active code and will cause
298 * an early failure
299 */
300#ifndef INT_A9_M2A_0
301#define INT_A9_M2A_0 -1
302#endif
303#ifndef INT_A9_M2A_5
304#define INT_A9_M2A_5 -1
305#endif
306#ifndef INT_ADSP_A11
307#define INT_ADSP_A11 -1
308#endif
309#ifndef INT_ADSP_A11_SMSM
310#define INT_ADSP_A11_SMSM -1
311#endif
312#ifndef INT_DSPS_A11
313#define INT_DSPS_A11 -1
314#endif
315#ifndef INT_DSPS_A11_SMSM
316#define INT_DSPS_A11_SMSM -1
317#endif
318#ifndef INT_WCNSS_A11
319#define INT_WCNSS_A11 -1
320#endif
321#ifndef INT_WCNSS_A11_SMSM
322#define INT_WCNSS_A11_SMSM -1
323#endif
324
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700325#define SMD_LOOPBACK_CID 100
326
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600327#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
328static remote_spinlock_t remote_spinlock;
329
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700330static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700331static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600332static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700333
334static void notify_smsm_cb_clients_worker(struct work_struct *work);
335static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600336static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700337static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530338static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600339static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
340static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
341static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700342
343static inline void smd_write_intr(unsigned int val,
344 const void __iomem *addr)
345{
346 wmb();
347 __raw_writel(val, addr);
348}
349
/*
 * Wake the RIVA (WCNSS) processor before signalling it.
 *
 * Hardware workaround for RIVA v1 silicon: toggling the TLMM register
 * for GPIO 40 brings RIVA out of power collapse so it can receive the
 * interrupt. Applied only when the SoC major version is 1; compiled to
 * a no-op when CONFIG_WCNSS is not set.
 */
#ifdef CONFIG_WCNSS
static inline void wakeup_v1_riva(void)
{
	/*
	 * workaround hack for RIVA v1 hardware bug
	 * trigger GPIO 40 to wake up RIVA from power collapse
	 * not to be sent to customers
	 */
	if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
		__raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
		__raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
	}
	/* end workaround */
}
#else
static inline void wakeup_v1_riva(void) {}
#endif
367
/*
 * Raise the outgoing SMD interrupt toward the modem.
 *
 * If platform data configured an explicit register (out_base non-NULL),
 * write the configured bit there; otherwise fall back to the legacy
 * hard-coded trigger macro. Counts are kept in interrupt_stats[].
 */
static inline void notify_modem_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_MODEM].smd;
	if (intr->out_base) {
		++interrupt_stats[SMD_MODEM].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
		MSM_TRIG_A2M_SMD_INT;
	}
}
381
/*
 * Raise the outgoing SMD interrupt toward the Q6 (aDSP).
 *
 * Uses the platform-configured register when available, else the
 * legacy hard-coded trigger macro. Counts are kept in interrupt_stats[].
 */
static inline void notify_dsp_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_Q6].smd;
	if (intr->out_base) {
		++interrupt_stats[SMD_Q6].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
		MSM_TRIG_A2Q6_SMD_INT;
	}
}
395
/*
 * Raise the outgoing SMD interrupt toward the DSPS (sensors processor).
 *
 * Uses the platform-configured register when available, else the
 * legacy hard-coded trigger macro. Counts are kept in interrupt_stats[].
 */
static inline void notify_dsps_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_DSPS].smd;
	if (intr->out_base) {
		++interrupt_stats[SMD_DSPS].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
		MSM_TRIG_A2DSPS_SMD_INT;
	}
}
409
/*
 * Raise the outgoing SMD interrupt toward WCNSS (RIVA).
 *
 * Wakes RIVA v1 silicon first (hardware workaround), then signals via
 * the platform-configured register when available, else the legacy
 * hard-coded trigger macro. Counts are kept in interrupt_stats[].
 */
static inline void notify_wcnss_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_WCNSS].smd;
	wakeup_v1_riva();

	if (intr->out_base) {
		++interrupt_stats[SMD_WCNSS].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
		MSM_TRIG_A2WCNSS_SMD_INT;
	}
}
425
/*
 * Raise the outgoing SMSM (shared state) interrupt toward the modem.
 *
 * Uses the platform-configured register when available, else the
 * legacy hard-coded trigger macro. Counts are kept in interrupt_stats[].
 */
static inline void notify_modem_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_MODEM].smsm;
	if (intr->out_base) {
		++interrupt_stats[SMD_MODEM].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
		MSM_TRIG_A2M_SMSM_INT;
	}
}
439
/*
 * Raise the outgoing SMSM (shared state) interrupt toward the Q6.
 *
 * Uses the platform-configured register when available, else the
 * legacy hard-coded trigger macro. Counts are kept in interrupt_stats[].
 */
static inline void notify_dsp_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_Q6].smsm;
	if (intr->out_base) {
		++interrupt_stats[SMD_Q6].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
		MSM_TRIG_A2Q6_SMSM_INT;
	}
}
453
/*
 * Raise the outgoing SMSM (shared state) interrupt toward the DSPS.
 *
 * Uses the platform-configured register when available, else the
 * legacy hard-coded trigger macro. Counts are kept in interrupt_stats[].
 */
static inline void notify_dsps_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_DSPS].smsm;
	if (intr->out_base) {
		++interrupt_stats[SMD_DSPS].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
		MSM_TRIG_A2DSPS_SMSM_INT;
	}
}
467
/*
 * Raise the outgoing SMSM (shared state) interrupt toward WCNSS.
 *
 * Wakes RIVA v1 silicon first (hardware workaround), then signals via
 * the platform-configured register when available, else the legacy
 * hard-coded trigger macro. Counts are kept in interrupt_stats[].
 */
static inline void notify_wcnss_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_WCNSS].smsm;
	wakeup_v1_riva();

	if (intr->out_base) {
		++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
		MSM_TRIG_A2WCNSS_SMSM_INT;
	}
}
483
/*
 * Interrupt every remote processor that has subscribed (via its SMSM
 * interrupt mask) to the changed bits of the given SMSM entry, then
 * snapshot the new state for local callback clients.
 *
 * @smsm_entry: index of the SMSM state entry that changed
 * @notify_mask: bit mask of the state bits that changed
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		/* 8x50 targets additionally bump a shared-memory mux
		 * counter so the Q6 can tell which apps event fired */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	smsm_cb_snapshot(0);
}
528
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700529void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700530{
531 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700532 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700533
534 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
535 if (x != 0) {
536 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700537 SMD_INFO("smem: DIAG '%s'\n", x);
538 }
539
540 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
541 if (x != 0) {
542 x[size - 1] = 0;
543 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700544 }
545}
546
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700547
/*
 * Terminal handler for a modem crash: log diagnostics and spin forever,
 * relying on the modem or a watchdog to reset the device.
 * Never returns.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
562
/*
 * Check whether the modem has signalled a fatal reset via SMSM.
 *
 * Returns 0 when no crash is indicated (or SMSM is not initialized
 * yet). On crash it calls handle_modem_crash(), which spins forever,
 * so the -1 return is effectively unreachable.
 */
int smsm_check_for_modem_crash(void)
{
	/* if the modem's not ready yet, we have to hope for the best */
	if (!smsm_info.state)
		return 0;

	if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
		handle_modem_crash();
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700576
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700577/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700578 * irq handler and code that mutates the channel
579 * list or fiddles with channel state
580 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700581static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700582DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700583
584/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700585 * operations to avoid races while creating or
586 * destroying smd_channel structures
587 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700588static DEFINE_MUTEX(smd_creation_mutex);
589
590static int smd_initialized;
591
/* Shared-memory layout of a protocol-v1 channel: both half-channels
 * with their fixed-size data FIFOs allocated inline. */
struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

/* Shared-memory layout of a protocol-v2 channel: control structures
 * only — no inline data buffers, unlike v1. */
struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};
603
/*
 * Per-channel state for one open SMD logical channel. The read/write
 * function pointers select the stream- or packet-mode implementation;
 * notify_other_cpu selects the edge-specific interrupt trigger.
 */
struct smd_channel {
	volatile struct smd_half_channel *send;	/* our half-channel */
	volatile struct smd_half_channel *recv;	/* remote half-channel */
	unsigned char *send_data;		/* outgoing FIFO buffer */
	unsigned char *recv_data;		/* incoming FIFO buffer */
	unsigned fifo_size;
	unsigned fifo_mask;			/* fifo_size - 1, for index wrap */
	struct list_head ch_list;		/* membership in per-edge list */

	unsigned current_packet;	/* bytes left in packet being read */
	unsigned n;			/* channel number in alloc table */
	void *priv;			/* client cookie passed to notify() */
	void (*notify)(void *priv, unsigned flags);

	/* mode-specific I/O operations (stream vs packet) */
	int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
	int (*write)(smd_channel_t *ch, const void *data, int len,
			int user_buf);
	int (*read_avail)(smd_channel_t *ch);
	int (*write_avail)(smd_channel_t *ch);
	int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
			int user_buf);

	void (*update_state)(smd_channel_t *ch);
	unsigned last_state;		/* last remote state we processed */
	void (*notify_other_cpu)(void);	/* edge-specific interrupt raise */

	char name[20];
	struct platform_device pdev;	/* device registered for this channel */
	unsigned type;			/* edge type (SMD_APPS_MODEM, ...) */

	int pending_pkt_sz;

	char is_pkt_ch;			/* nonzero for packet-mode channels */
};
638
/*
 * Maps an SMD edge type to the processor IDs on either end, plus the
 * subsystem name of the remote processor (left empty for edges that
 * do not involve the apps processor / have no associated subsystem).
 */
struct edge_to_pid {
	uint32_t local_pid;
	uint32_t remote_pid;
	char subsys_name[SMD_MAX_CH_NAME_LEN];
};

/**
 * Maps edge type to local and remote processor ID's.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
};
665
666struct restart_notifier_block {
667 unsigned processor;
668 char *name;
669 struct notifier_block nb;
670};
671
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -0600672static int disable_smsm_reset_handshake;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700673static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};
674
675static LIST_HEAD(smd_ch_closed_list);
676static LIST_HEAD(smd_ch_closing_list);
677static LIST_HEAD(smd_ch_to_close_list);
678static LIST_HEAD(smd_ch_list_modem);
679static LIST_HEAD(smd_ch_list_dsp);
680static LIST_HEAD(smd_ch_list_dsps);
681static LIST_HEAD(smd_ch_list_wcnss);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700682
683static unsigned char smd_ch_allocated[64];
684static struct work_struct probe_work;
685
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700686static void finalize_channel_close_fn(struct work_struct *work);
687static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
688static struct workqueue_struct *channel_close_wq;
689
690static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);
691
692/* on smp systems, the probe might get called from multiple cores,
693 hence use a lock */
694static DEFINE_MUTEX(smd_probe_lock);
695
/*
 * Deferred-work handler that scans the 64-entry shared-memory channel
 * allocation table and registers any newly allocated channels whose
 * edge involves the APPS processor. Serialized by smd_probe_lock
 * because the probe may be scheduled from multiple cores.
 */
static void smd_channel_probe_worker(struct work_struct *work)
{
	struct smd_alloc_elm *shared;
	unsigned n;
	uint32_t type;

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);

	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	mutex_lock(&smd_probe_lock);
	for (n = 0; n < 64; n++) {
		if (smd_ch_allocated[n])
			continue;

		/* channel should be allocated only if APPS
		   processor is involved */
		type = SMD_CHANNEL_TYPE(shared[n].type);
		if ((type != SMD_APPS_MODEM) && (type != SMD_APPS_QDSP) &&
		    (type != SMD_APPS_DSPS) && (type != SMD_APPS_WCNSS))
			continue;
		/* skip entries the remote side has not committed yet */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		if (!smd_alloc_channel(&shared[n]))
			smd_ch_allocated[n] = 1;
		else
			SMD_INFO("Probe skipping ch %d, not allocated\n", n);
	}
	mutex_unlock(&smd_probe_lock);
}
732
733/**
734 * Lookup processor ID and determine if it belongs to the proved edge
735 * type.
736 *
737 * @shared2: Pointer to v2 shared channel structure
738 * @type: Edge type
739 * @pid: Processor ID of processor on edge
740 * @local_ch: Channel that belongs to processor @pid
741 * @remote_ch: Other side of edge contained @pid
742 *
743 * Returns 0 for not on edge, 1 for found on edge
744 */
745static int pid_is_on_edge(struct smd_shared_v2 *shared2,
746 uint32_t type, uint32_t pid,
747 struct smd_half_channel **local_ch,
748 struct smd_half_channel **remote_ch
749 )
750{
751 int ret = 0;
752 struct edge_to_pid *edge;
753
754 *local_ch = 0;
755 *remote_ch = 0;
756
757 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
758 return 0;
759
760 edge = &edge_to_pids[type];
761 if (edge->local_pid != edge->remote_pid) {
762 if (pid == edge->local_pid) {
763 *local_ch = &shared2->ch0;
764 *remote_ch = &shared2->ch1;
765 ret = 1;
766 } else if (pid == edge->remote_pid) {
767 *local_ch = &shared2->ch1;
768 *remote_ch = &shared2->ch0;
769 ret = 1;
770 }
771 }
772
773 return ret;
774}
775
Eric Holmberg17992c12012-02-29 12:54:44 -0700776/*
777 * Returns a pointer to the subsystem name or NULL if no
778 * subsystem name is available.
779 *
780 * @type - Edge definition
781 */
782const char *smd_edge_to_subsystem(uint32_t type)
783{
784 const char *subsys = NULL;
785
786 if (type < ARRAY_SIZE(edge_to_pids)) {
787 subsys = edge_to_pids[type].subsys_name;
788 if (subsys[0] == 0x0)
789 subsys = NULL;
790 }
791 return subsys;
792}
793EXPORT_SYMBOL(smd_edge_to_subsystem);
794
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700795/*
796 * Returns a pointer to the subsystem name given the
797 * remote processor ID.
798 *
799 * @pid Remote processor ID
800 * @returns Pointer to subsystem name or NULL if not found
801 */
802const char *smd_pid_to_subsystem(uint32_t pid)
803{
804 const char *subsys = NULL;
805 int i;
806
807 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
808 if (pid == edge_to_pids[i].remote_pid &&
809 edge_to_pids[i].subsys_name[0] != 0x0
810 ) {
811 subsys = edge_to_pids[i].subsys_name;
812 break;
813 }
814 }
815
816 return subsys;
817}
818EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700819
Eric Holmberg2a563c32011-10-05 14:51:43 -0600820static void smd_reset_edge(struct smd_half_channel *ch, unsigned new_state)
821{
822 if (ch->state != SMD_SS_CLOSED) {
823 ch->state = new_state;
824 ch->fDSR = 0;
825 ch->fCTS = 0;
826 ch->fCD = 0;
827 ch->fSTATE = 1;
828 }
829}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700830
/*
 * Walk the channel allocation table and force every half-channel owned
 * by processor @pid into @new_state, so local clients observe the
 * restarting processor's channels going away.
 * NOTE(review): the only visible caller (smd_channel_reset) holds both
 * smd_probe_lock and smd_lock around this -- confirm that requirement
 * before adding new callers.
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	struct smd_shared_v2 *shared2;
	uint32_t type;
	struct smd_half_channel *local_ch;
	struct smd_half_channel *remote_ch;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip table slots that were never populated */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
		    pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
				&local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);
	}
}
864
865
/*
 * Reset all SMD/SMSM state shared with a restarting processor.
 * Releases any spinlocks the dead processor held, clears its SMSM
 * state, then walks every channel twice -- first forcing remote state
 * to CLOSING, then to CLOSED -- notifying local clients and remote
 * processors after each pass.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors; mb() orders the shared-memory state
	 * writes above before the outgoing interrupts */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
933
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700934/* how many bytes are available for reading */
935static int smd_stream_read_avail(struct smd_channel *ch)
936{
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700937 return (ch->recv->head - ch->recv->tail) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700938}
939
940/* how many bytes we are free to write */
941static int smd_stream_write_avail(struct smd_channel *ch)
942{
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700943 return ch->fifo_mask -
944 ((ch->send->head - ch->send->tail) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700945}
946
947static int smd_packet_read_avail(struct smd_channel *ch)
948{
949 if (ch->current_packet) {
950 int n = smd_stream_read_avail(ch);
951 if (n > ch->current_packet)
952 n = ch->current_packet;
953 return n;
954 } else {
955 return 0;
956 }
957}
958
959static int smd_packet_write_avail(struct smd_channel *ch)
960{
961 int n = smd_stream_write_avail(ch);
962 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
963}
964
965static int ch_is_open(struct smd_channel *ch)
966{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700967 return (ch->recv->state == SMD_SS_OPENED ||
968 ch->recv->state == SMD_SS_FLUSHING)
969 && (ch->send->state == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700970}
971
972/* provide a pointer and length to readable data in the fifo */
973static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
974{
975 unsigned head = ch->recv->head;
976 unsigned tail = ch->recv->tail;
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700977 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700978
979 if (tail <= head)
980 return head - tail;
981 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700982 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700983}
984
/* nonzero when the remote side asked us not to interrupt it on reads */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->recv->fBLOCKREADINTR;
}
989
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700990/* advance the fifo read pointer after data from ch_read_buffer is consumed */
991static void ch_read_done(struct smd_channel *ch, unsigned count)
992{
993 BUG_ON(count > smd_stream_read_avail(ch));
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700994 ch->recv->tail = (ch->recv->tail + count) & ch->fifo_mask;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700995 wmb();
Haley Teng7632fba2009-10-12 10:38:10 -0700996 ch->send->fTAIL = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700997}
998
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * @user_buf: nonzero when @_data is a userspace pointer (copy_to_user)
 * Returns the number of bytes consumed from the fifo.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* get a contiguous readable span; empty fifo ends the loop */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				/* a partial copy is only logged; the fifo
				   bytes are still consumed below */
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1039
/* stream channels keep no extra per-packet state, so this is a no-op
 * counterpart to update_packet_state() */
static void update_stream_state(struct smd_channel *ch)
{
}
1044
/* consume packet headers from the underlying stream until a non-empty
 * packet begins (or a complete header is not yet available), updating
 * ch->current_packet with the pending payload length */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		/* first header word carries the payload length */
		ch->current_packet = hdr[0];
	}
}
1064
1065/* provide a pointer and length to next free space in the fifo */
1066static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1067{
1068 unsigned head = ch->send->head;
1069 unsigned tail = ch->send->tail;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001070 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001071
1072 if (head < tail) {
1073 return tail - head - 1;
1074 } else {
1075 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001076 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001077 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001078 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001079 }
1080}
1081
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->send->head = (ch->send->head + count) & ch->fifo_mask;
	/* ensure data and head update are visible before raising fHEAD */
	wmb();
	ch->send->fHEAD = 1;
}
1092
/* set our half-channel state and the matching modem-control signals,
 * then raise fSTATE and interrupt the remote processor so it re-reads
 * our state */
static void ch_set_state(struct smd_channel *ch, unsigned n)
{
	if (n == SMD_SS_OPENED) {
		/* opened: assert DSR/CTS/CD so the remote sees us ready */
		ch->send->fDSR = 1;
		ch->send->fCTS = 1;
		ch->send->fCD = 1;
	} else {
		ch->send->fDSR = 0;
		ch->send->fCTS = 0;
		ch->send->fCD = 0;
	}
	ch->send->state = n;
	ch->send->fSTATE = 1;
	ch->notify_other_cpu();
}
1108
1109static void do_smd_probe(void)
1110{
1111 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1112 if (shared->heap_info.free_offset != last_heap_free) {
1113 last_heap_free = shared->heap_info.free_offset;
1114 schedule_work(&probe_work);
1115 }
1116}
1117
/* react to a remote half-channel state transition, driving our local
 * state machine and client notifications; called with smd_lock held by
 * the irq handlers */
static void smd_state_change(struct smd_channel *ch,
		unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote is (re)opening: reset our fifo indices and
		   follow it into OPENING if we were closed */
		if (ch->send->state == SMD_SS_CLOSING ||
		    ch->send->state == SMD_SS_CLOSED) {
			ch->recv->tail = 0;
			ch->send->head = 0;
			ch->send->fBLOCKREADINTR = 0;
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* both sides up: complete the open handshake */
		if (ch->send->state == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed first: begin closing and drop any
		   partially received packet */
		if (ch->send->state == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* our close was acked: hand off to the close workqueue */
		if (ch->send->state == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
				&smd_ch_to_close_list);
			queue_work(channel_close_wq,
				&finalize_channel_close_work);
		}
		break;
	}
}
1163
/* process state-change interrupts for channels that are mid-close;
 * these are off the per-edge lists, so handle_smd_irq() won't see them */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	/* _safe: smd_state_change() may move entries off this list */
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->recv->fSTATE)
			ch->recv->fSTATE = 0;
		tmp = ch->recv->state;
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1181
/*
 * Service an SMD interrupt for one edge: acknowledge the remote side's
 * head/tail/state flags on every open channel in @list, propagate state
 * changes, and deliver data/status events to channel clients.
 *
 * @list: per-edge channel list to scan
 * @notify: function that interrupts the remote processor on this edge
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		/* ch_flags: bit0 = fHEAD, bit1 = fTAIL, bit2 = fSTATE */
		ch_flags = 0;
		if (ch_is_open(ch)) {
			if (ch->recv->fHEAD) {
				ch->recv->fHEAD = 0;
				ch_flags |= 1;
			}
			if (ch->recv->fTAIL) {
				ch->recv->fTAIL = 0;
				ch_flags |= 2;
			}
			if (ch->recv->fSTATE) {
				ch->recv->fSTATE = 0;
				ch_flags |= 4;
			}
		}
		tmp = ch->recv->state;
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		/* head or tail moved: data was produced or consumed */
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* fSTATE without a state transition: signal-only update */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1232
/* interrupt from the modem processor: service the modem edge */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1241
/* interrupt from the LPASS/QDSP processor: service the dsp edge */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001250
/* interrupt from the sensors (DSPS) processor: service the dsps edge */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001259
/* interrupt from the WCNSS (wireless) processor: service its edge */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1268
/* run every edge handler from tasklet context; scheduled by
 * smd_sleep_exit() to pick up events that arrived while sleeping */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1279
Brian Swetland37521a32009-07-01 18:30:47 -07001280static inline int smd_need_int(struct smd_channel *ch)
1281{
1282 if (ch_is_open(ch)) {
1283 if (ch->recv->fHEAD || ch->recv->fTAIL || ch->recv->fSTATE)
1284 return 1;
1285 if (ch->recv->state != ch->last_state)
1286 return 1;
1287 }
1288 return 0;
1289}
1290
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001291void smd_sleep_exit(void)
1292{
1293 unsigned long flags;
1294 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001295 int need_int = 0;
1296
1297 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001298 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1299 if (smd_need_int(ch)) {
1300 need_int = 1;
1301 break;
1302 }
1303 }
1304 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1305 if (smd_need_int(ch)) {
1306 need_int = 1;
1307 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001308 }
1309 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001310 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1311 if (smd_need_int(ch)) {
1312 need_int = 1;
1313 break;
1314 }
1315 }
1316 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1317 if (smd_need_int(ch)) {
1318 need_int = 1;
1319 break;
1320 }
1321 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001322 spin_unlock_irqrestore(&smd_lock, flags);
1323 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001324
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001325 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001326 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001327 tasklet_schedule(&smd_fake_irq_tasklet);
1328 }
1329}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001330EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001331
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001332static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001333{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001334 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1335 return 0;
1336 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001337 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001338
1339 /* for cases where xfer type is 0 */
1340 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001341 return 0;
1342
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001343 /* for cases where xfer type is 0 */
1344 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1345 return 0;
1346
1347 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001348 return 1;
1349 else
1350 return 0;
1351}
1352
/*
 * Write up to @len bytes into the channel's send fifo, interrupting the
 * remote processor if anything was written.
 *
 * @user_buf: nonzero when @_data is a userspace pointer (copy_from_user)
 * Returns bytes written (may be short if the fifo fills), 0 if the
 * channel drops out of the open state, or -EINVAL for negative @len.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		/* channel closed mid-write: report 0 bytes written */
		if (!ch_is_open(ch)) {
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			/* a partial copy is only logged; the full chunk is
			   still committed to the fifo below */
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* only interrupt the remote side if we actually wrote something */
	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1398
/*
 * Write one complete packet: a 5-word header (word 0 = payload length)
 * followed by @len payload bytes.  Space for header plus payload is
 * checked up front so the two stream writes below cannot be split.
 *
 * @user_buf: nonzero when @_data is a userspace pointer
 * Returns @len on success, -EINVAL/-ENOMEM on bad length or no space,
 * or a negative/short result if the underlying stream writes fail.
 */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	int ret;
	unsigned hdr[5];

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}


	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}
1435
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001436static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001437{
1438 int r;
1439
1440 if (len < 0)
1441 return -EINVAL;
1442
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001443 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001444 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001445 if (!read_intr_blocked(ch))
1446 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001447
1448 return r;
1449}
1450
/*
 * Read up to @len bytes of the current packet's payload.  The packet
 * bookkeeping (current_packet, next header) is updated under smd_lock
 * to synchronize with the irq path.
 */
static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
{
	unsigned long flags;
	int r;

	if (len < 0)
		return -EINVAL;

	/* never read past the end of the current packet */
	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	spin_lock_irqsave(&smd_lock, flags);
	ch->current_packet -= r;
	update_packet_state(ch);
	spin_unlock_irqrestore(&smd_lock, flags);

	return r;
}
1474
/*
 * Same as smd_packet_read() but without taking smd_lock around the
 * packet-state update.  NOTE(review): presumably intended for use from
 * the channel notify callback where smd_lock is already held -- confirm
 * against callers before using elsewhere.
 */
static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
		int user_buf)
{
	int r;

	if (len < 0)
		return -EINVAL;

	/* never read past the end of the current packet */
	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	ch->current_packet -= r;
	update_packet_state(ch);

	return r;
}
1496
/*
 * Shared-memory layout helpers: v2 targets keep control structures and
 * a separate power-of-two fifo buffer in SMEM; v1 targets use a single
 * fixed-layout structure.  Exactly one variant is real per config; the
 * other returns -EINVAL so smd_alloc_channel() can try both.
 */
#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
static int smd_alloc_v2(struct smd_channel *ch)
{
	struct smd_shared_v2 *shared2;
	void *buffer;
	unsigned buffer_sz;

	shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n, sizeof(*shared2));
	if (!shared2) {
		SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
		return -EINVAL;
	}
	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -EINVAL;
	}

	/* buffer must be a power-of-two size */
	if (buffer_sz & (buffer_sz - 1)) {
		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
		return -EINVAL;
	}
	/* the single SMEM buffer holds both directions: first half is our
	   send fifo, second half is our receive fifo */
	buffer_sz /= 2;
	ch->send = &shared2->ch0;
	ch->recv = &shared2->ch1;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;
	return 0;
}

static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}

#else /* define v1 for older targets */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}

static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	return 0;
}

#endif
1557
/*
 * Create the local smd_channel object for one allocation-table entry:
 * map its shared-memory fifos (v2 then v1 layout), select packet or
 * stream operations, add it to the closed list, and register a platform
 * device so clients can bind by channel name.
 * Returns 0 on success, -1 on allocation or shared-memory failure.
 */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;

	/* try the v2 shared-memory layout first, fall back to v1 */
	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	/* fifo_size is a power of two (enforced in smd_alloc_v2) */
	ch->fifo_mask = ch->fifo_size - 1;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	/* pick the interrupt routine for the remote processor on this edge */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else
		ch->notify_other_cpu = notify_wcnss_smd;

	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}
1626
1627static inline void notify_loopback_smd(void)
1628{
1629 unsigned long flags;
1630 struct smd_channel *ch;
1631
1632 spin_lock_irqsave(&smd_lock, flags);
1633 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1634 ch->notify(ch->priv, SMD_EVENT_DATA);
1635 }
1636 spin_unlock_irqrestore(&smd_lock, flags);
1637}
1638
1639static int smd_alloc_loopback_channel(void)
1640{
1641 static struct smd_half_channel smd_loopback_ctl;
1642 static char smd_loopback_data[SMD_BUF_SIZE];
1643 struct smd_channel *ch;
1644
1645 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1646 if (ch == 0) {
1647 pr_err("%s: out of memory\n", __func__);
1648 return -1;
1649 }
1650 ch->n = SMD_LOOPBACK_CID;
1651
1652 ch->send = &smd_loopback_ctl;
1653 ch->recv = &smd_loopback_ctl;
1654 ch->send_data = smd_loopback_data;
1655 ch->recv_data = smd_loopback_data;
1656 ch->fifo_size = SMD_BUF_SIZE;
1657
1658 ch->fifo_mask = ch->fifo_size - 1;
1659 ch->type = SMD_LOOPBACK_TYPE;
1660 ch->notify_other_cpu = notify_loopback_smd;
1661
1662 ch->read = smd_stream_read;
1663 ch->write = smd_stream_write;
1664 ch->read_avail = smd_stream_read_avail;
1665 ch->write_avail = smd_stream_write_avail;
1666 ch->update_state = update_stream_state;
1667 ch->read_from_cb = smd_stream_read;
1668
1669 memset(ch->name, 0, 20);
1670 memcpy(ch->name, "local_loopback", 14);
1671
1672 ch->pdev.name = ch->name;
1673 ch->pdev.id = ch->type;
1674
1675 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001676
1677 mutex_lock(&smd_creation_mutex);
1678 list_add(&ch->ch_list, &smd_ch_closed_list);
1679 mutex_unlock(&smd_creation_mutex);
1680
1681 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001682 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001683}
1684
/* Default event callback: discards all events.  Installed when the
 * opener passes a NULL notify function and while a channel is closed. */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1688
/*
 * Workqueue handler that completes channel close: moves each channel on
 * smd_ch_to_close_list back to smd_ch_closed_list and tells its client
 * the port may be reopened (SMD_EVENT_REOPEN_READY).
 *
 * Lock order: smd_creation_mutex (guards the closed list) is taken
 * before the smd_lock spinlock (guards the per-edge/close lists).
 */
static void finalize_channel_close_fn(struct work_struct *work)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;

	mutex_lock(&smd_creation_mutex);
	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
		list_del(&ch->ch_list);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		/* notify the client before detaching its callback */
		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
		ch->notify = do_nothing_notify;
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_creation_mutex);
}
1706
1707struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001708{
1709 struct smd_channel *ch;
1710
1711 mutex_lock(&smd_creation_mutex);
1712 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001713 if (!strcmp(name, ch->name) &&
1714 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001715 list_del(&ch->ch_list);
1716 mutex_unlock(&smd_creation_mutex);
1717 return ch;
1718 }
1719 }
1720 mutex_unlock(&smd_creation_mutex);
1721
1722 return NULL;
1723}
1724
/*
 * smd_named_open_on_edge() - open an SMD channel by name on a specific
 * processor edge.
 * @name:   channel name from the shared-memory allocation table
 * @edge:   SMD_APPS_* edge type (or SMD_LOOPBACK_TYPE)
 * @_ch:    out parameter; receives the channel handle on success
 * @priv:   opaque pointer passed back to @notify
 * @notify: event callback; may be NULL for no notifications
 *
 * Returns 0 on success, -ENODEV if SMD is not initialized or the
 * channel does not exist, -EAGAIN if the channel is still closing.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			/* NOTE(review): compares with strncmp(..., 20) while
			 * smd_get_channel() uses strcmp - presumably both are
			 * bounded by the 20-byte name buffer; confirm against
			 * SMD_MAX_CH_NAME_LEN */
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side: raise the handshake flags locally */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->send->state = SMD_SS_OPENED;
		ch->send->fDSR = 1;
		ch->send->fCTS = 1;
		ch->send->fCD = 1;
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	/* file the channel on the list serviced by its edge's interrupt */
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001810EXPORT_SYMBOL(smd_named_open_on_edge);
1811
1812
1813int smd_open(const char *name, smd_channel_t **_ch,
1814 void *priv, void (*notify)(void *, unsigned))
1815{
1816 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1817 notify);
1818}
1819EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001820
/*
 * smd_close() - close a channel opened with smd_open() or
 * smd_named_open_on_edge().
 *
 * Drops our end to SMD_SS_CLOSED.  If the remote end is still open the
 * channel is parked on smd_ch_closing_list (final cleanup happens once
 * the remote side closes); otherwise it goes straight back to
 * smd_ch_closed_list and can be reopened.
 *
 * Returns 0 on success, -1 if @ch is NULL.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* loopback has no remote side; drop handshake flags here */
		ch->send->fDSR = 0;
		ch->send->fCTS = 0;
		ch->send->fCD = 0;
		ch->send->state = SMD_SS_CLOSED;
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->recv->state == SMD_SS_OPENED) {
		/* remote end still open: defer the final close */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001853EXPORT_SYMBOL(smd_close);
1854
1855int smd_write_start(smd_channel_t *ch, int len)
1856{
1857 int ret;
1858 unsigned hdr[5];
1859
1860 if (!ch) {
1861 pr_err("%s: Invalid channel specified\n", __func__);
1862 return -ENODEV;
1863 }
1864 if (!ch->is_pkt_ch) {
1865 pr_err("%s: non-packet channel specified\n", __func__);
1866 return -EACCES;
1867 }
1868 if (len < 1) {
1869 pr_err("%s: invalid length: %d\n", __func__, len);
1870 return -EINVAL;
1871 }
1872
1873 if (ch->pending_pkt_sz) {
1874 pr_err("%s: packet of size: %d in progress\n", __func__,
1875 ch->pending_pkt_sz);
1876 return -EBUSY;
1877 }
1878 ch->pending_pkt_sz = len;
1879
1880 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
1881 ch->pending_pkt_sz = 0;
1882 SMD_DBG("%s: no space to write packet header\n", __func__);
1883 return -EAGAIN;
1884 }
1885
1886 hdr[0] = len;
1887 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1888
1889
1890 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1891 if (ret < 0 || ret != sizeof(hdr)) {
1892 ch->pending_pkt_sz = 0;
1893 pr_err("%s: packet header failed to write\n", __func__);
1894 return -EPERM;
1895 }
1896 return 0;
1897}
1898EXPORT_SYMBOL(smd_write_start);
1899
1900int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1901{
1902 int bytes_written;
1903
1904 if (!ch) {
1905 pr_err("%s: Invalid channel specified\n", __func__);
1906 return -ENODEV;
1907 }
1908 if (len < 1) {
1909 pr_err("%s: invalid length: %d\n", __func__, len);
1910 return -EINVAL;
1911 }
1912
1913 if (!ch->pending_pkt_sz) {
1914 pr_err("%s: no transaction in progress\n", __func__);
1915 return -ENOEXEC;
1916 }
1917 if (ch->pending_pkt_sz - len < 0) {
1918 pr_err("%s: segment of size: %d will make packet go over "
1919 "length\n", __func__, len);
1920 return -EINVAL;
1921 }
1922
1923 bytes_written = smd_stream_write(ch, data, len, user_buf);
1924
1925 ch->pending_pkt_sz -= bytes_written;
1926
1927 return bytes_written;
1928}
1929EXPORT_SYMBOL(smd_write_segment);
1930
1931int smd_write_end(smd_channel_t *ch)
1932{
1933
1934 if (!ch) {
1935 pr_err("%s: Invalid channel specified\n", __func__);
1936 return -ENODEV;
1937 }
1938 if (ch->pending_pkt_sz) {
1939 pr_err("%s: current packet not completely written\n", __func__);
1940 return -E2BIG;
1941 }
1942
1943 return 0;
1944}
1945EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001946
1947int smd_read(smd_channel_t *ch, void *data, int len)
1948{
Jack Pham1b236d12012-03-19 15:27:18 -07001949 if (!ch) {
1950 pr_err("%s: Invalid channel specified\n", __func__);
1951 return -ENODEV;
1952 }
1953
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001954 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001955}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001956EXPORT_SYMBOL(smd_read);
1957
1958int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
1959{
Jack Pham1b236d12012-03-19 15:27:18 -07001960 if (!ch) {
1961 pr_err("%s: Invalid channel specified\n", __func__);
1962 return -ENODEV;
1963 }
1964
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001965 return ch->read(ch, data, len, 1);
1966}
1967EXPORT_SYMBOL(smd_read_user_buffer);
1968
1969int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
1970{
Jack Pham1b236d12012-03-19 15:27:18 -07001971 if (!ch) {
1972 pr_err("%s: Invalid channel specified\n", __func__);
1973 return -ENODEV;
1974 }
1975
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001976 return ch->read_from_cb(ch, data, len, 0);
1977}
1978EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001979
1980int smd_write(smd_channel_t *ch, const void *data, int len)
1981{
Jack Pham1b236d12012-03-19 15:27:18 -07001982 if (!ch) {
1983 pr_err("%s: Invalid channel specified\n", __func__);
1984 return -ENODEV;
1985 }
1986
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001987 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001988}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001989EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001990
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001991int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08001992{
Jack Pham1b236d12012-03-19 15:27:18 -07001993 if (!ch) {
1994 pr_err("%s: Invalid channel specified\n", __func__);
1995 return -ENODEV;
1996 }
1997
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001998 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08001999}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002000EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002001
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002002int smd_read_avail(smd_channel_t *ch)
2003{
Jack Pham1b236d12012-03-19 15:27:18 -07002004 if (!ch) {
2005 pr_err("%s: Invalid channel specified\n", __func__);
2006 return -ENODEV;
2007 }
2008
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002009 return ch->read_avail(ch);
2010}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002011EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002012
2013int smd_write_avail(smd_channel_t *ch)
2014{
Jack Pham1b236d12012-03-19 15:27:18 -07002015 if (!ch) {
2016 pr_err("%s: Invalid channel specified\n", __func__);
2017 return -ENODEV;
2018 }
2019
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002020 return ch->write_avail(ch);
2021}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002022EXPORT_SYMBOL(smd_write_avail);
2023
2024void smd_enable_read_intr(smd_channel_t *ch)
2025{
2026 if (ch)
2027 ch->send->fBLOCKREADINTR = 0;
2028}
2029EXPORT_SYMBOL(smd_enable_read_intr);
2030
2031void smd_disable_read_intr(smd_channel_t *ch)
2032{
2033 if (ch)
2034 ch->send->fBLOCKREADINTR = 1;
2035}
2036EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002037
/* Blocking wait is not implemented; always returns -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2042
/* Blocking wait is not implemented; always returns -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2047
2048int smd_cur_packet_size(smd_channel_t *ch)
2049{
Jack Pham1b236d12012-03-19 15:27:18 -07002050 if (!ch) {
2051 pr_err("%s: Invalid channel specified\n", __func__);
2052 return -ENODEV;
2053 }
2054
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002055 return ch->current_packet;
2056}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002057EXPORT_SYMBOL(smd_cur_packet_size);
2058
2059int smd_tiocmget(smd_channel_t *ch)
2060{
Jack Pham1b236d12012-03-19 15:27:18 -07002061 if (!ch) {
2062 pr_err("%s: Invalid channel specified\n", __func__);
2063 return -ENODEV;
2064 }
2065
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002066 return (ch->recv->fDSR ? TIOCM_DSR : 0) |
2067 (ch->recv->fCTS ? TIOCM_CTS : 0) |
2068 (ch->recv->fCD ? TIOCM_CD : 0) |
2069 (ch->recv->fRI ? TIOCM_RI : 0) |
2070 (ch->send->fCTS ? TIOCM_RTS : 0) |
2071 (ch->send->fDSR ? TIOCM_DTR : 0);
2072}
2073EXPORT_SYMBOL(smd_tiocmget);
2074
/* this api will be called while holding smd_lock */
/*
 * Apply TIOCM changes to the outgoing half-channel and kick the remote
 * processor.  DTR maps to fDSR and RTS to fCTS on the send side.
 * Callers must hold smd_lock; smd_tiocmset() is the locked wrapper.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->send->fDSR = 1;

	if (set & TIOCM_RTS)
		ch->send->fCTS = 1;

	if (clear & TIOCM_DTR)
		ch->send->fDSR = 0;

	if (clear & TIOCM_RTS)
		ch->send->fCTS = 0;

	ch->send->fSTATE = 1;
	barrier();	/* order the flag updates before the remote kick */
	ch->notify_other_cpu();

	return 0;
}
2102EXPORT_SYMBOL(smd_tiocmset_from_cb);
2103
2104int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2105{
2106 unsigned long flags;
2107
Jack Pham1b236d12012-03-19 15:27:18 -07002108 if (!ch) {
2109 pr_err("%s: Invalid channel specified\n", __func__);
2110 return -ENODEV;
2111 }
2112
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002113 spin_lock_irqsave(&smd_lock, flags);
2114 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002115 spin_unlock_irqrestore(&smd_lock, flags);
2116
2117 return 0;
2118}
2119EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002120
2121
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002122/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002123
/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.  Unlike smem_alloc2() it never allocates a
 * new item; it is a pure lookup of an existing item of the given size.
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002131EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002132
/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
 * it allocates it and then returns the pointer to it.
 *
 * The heap table of contents and bookkeeping live in shared memory and
 * are guarded by the inter-processor remote spinlock; wmb() pushes our
 * writes out to shared memory before the lock is released.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	/* all items are 8-byte aligned */
	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		/* only non-fixed items may be allocated on demand */
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* publish offset/size before marking allocated */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302180EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002181
/*
 * smem_get_entry() - look up an smem item by id.
 * @id:   smem item identifier
 * @size: out parameter; set to the item's allocated size, or 0
 *
 * Returns a pointer into shared memory, or NULL (with *size = 0) when
 * the item is not allocated or @id is out of range.  The remote
 * spinlock is taken only after spinlocks_initialized is set (it is set
 * up in smsm_init(); earlier callers run unlocked).
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();	/* read size before computing the pointer */
		ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002208EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002209
/*
 * Look up an already-allocated smem item and validate its size.
 * Returns the shared-memory pointer, or 0 when the item is missing or
 * its allocated size does not match the (8-byte aligned) request.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned actual_size;
	void *entry;

	entry = smem_get_entry(id, &actual_size);
	if (!entry)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != actual_size) {
		pr_err("smem_find(%d, %d): wrong size %d\n",
		       id, size_in, actual_size);
		return 0;
	}

	return entry;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002228EXPORT_SYMBOL(smem_find);
2229
2230static int smsm_cb_init(void)
2231{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002232 struct smsm_state_info *state_info;
2233 int n;
2234 int ret = 0;
2235
2236 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2237 GFP_KERNEL);
2238
2239 if (!smsm_states) {
2240 pr_err("%s: SMSM init failed\n", __func__);
2241 return -ENOMEM;
2242 }
2243
Eric Holmbergc8002902011-09-16 13:55:57 -06002244 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002245 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2246 state_info = &smsm_states[n];
2247 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
2248 INIT_LIST_HEAD(&state_info->callbacks);
2249 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002250 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002251
2252 return ret;
2253}
2254
/*
 * smsm_init() - one-time SMSM setup.
 *
 * Initializes the shared-memory remote spinlock, adopts the entry/host
 * counts published in SMEM_SMSM_SIZE_INFO (if present), allocates the
 * snapshot fifo and wakelock used by the interrupt path, and allocates
 * or attaches the shared state, interrupt-mask and interrupt-mux items.
 *
 * Returns 0 on success or the error code of the failing sub-step.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;

	i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
	if (i) {
		pr_err("%s: remote spinlock init failed %d\n", __func__, i);
		return i;
	}
	spinlocks_initialized = 1;

	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		/* the remote side publishes its table sizes; adopt them */
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		/* default: all interrupts unmasked for the apps host */
		if (smsm_info.intr_mask)
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0xffffffff,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	wmb();	/* commit shared-memory writes before announcing init */
	smsm_driver_state_notify(SMSM_INIT, NULL);
	return 0;
}
2323
2324void smsm_reset_modem(unsigned mode)
2325{
2326 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2327 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2328 } else if (mode == SMSM_MODEM_WAIT) {
2329 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2330 } else { /* reset_mode is SMSM_RESET or default */
2331 mode = SMSM_RESET;
2332 }
2333
2334 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2335}
2336EXPORT_SYMBOL(smsm_reset_modem);
2337
/*
 * Clear SMSM_MODEM_WAIT from the apps SMSM state - releases a modem
 * held waiting after smsm_reset_modem(SMSM_MODEM_WAIT).
 */
void smsm_reset_modem_cont(void)
{
	unsigned long flags;
	uint32_t state;

	if (!smsm_info.state)
		return;

	spin_lock_irqsave(&smem_lock, flags);
	state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
						& ~SMSM_MODEM_WAIT;
	__raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
	wmb();	/* push the cleared bit out to shared memory */
	spin_unlock_irqrestore(&smem_lock, flags);
}
2353EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002354
/*
 * smsm_cb_snapshot() - capture the current SMSM state words into the
 * snapshot fifo and schedule the callback worker.
 * @use_wakelock: nonzero to hold smsm_snapshot_wakelock while this
 *                snapshot is outstanding (first snapshot takes the lock)
 *
 * A snapshot record is SMSM_NUM_ENTRIES state words followed by the
 * use_wakelock flag; free space is checked up front so a record is not
 * written partially (assumes SMSM_SNAPSHOT_SIZE covers the full record
 * - confirm against its definition).
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			return;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		return;
	}

	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		/* only the first outstanding snapshot grabs the wakelock */
		if (smsm_snapshot_count == 0) {
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
	schedule_work(&smsm_cb_work);
}
2399
/*
 * Common SMSM interrupt handler: reads the remote (modem) and local
 * (apps) state words, advances the reset/init handshake in the apps
 * word, and snapshots all state words for the deferred callback worker.
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	/* LPASS interrupts only need the state snapshot, not the
	 * modem handshake processing below */
	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			/* modem announced a reset; ack it unless the
			 * handshake is disabled */
			if (!disable_smsm_reset_handshake)
				apps |= SMSM_RESET;

			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			/* declare RUN once all init stages are reached */
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			/* publish the new apps state and tell the remotes */
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2480
Eric Holmberg98c6c642012-02-24 11:29:35 -07002481static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
2482{
2483 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002484 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002485 return smsm_irq_handler(irq, data);
2486}
2487
2488static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2489{
2490 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002491 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002492 return smsm_irq_handler(irq, data);
2493}
2494
2495static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2496{
2497 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002498 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002499 return smsm_irq_handler(irq, data);
2500}
2501
2502static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2503{
2504 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002505 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002506 return smsm_irq_handler(irq, data);
2507}
2508
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002509int smsm_change_intr_mask(uint32_t smsm_entry,
2510 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002511{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002512 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002513 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002514
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002515 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2516 pr_err("smsm_change_state: Invalid entry %d\n",
2517 smsm_entry);
2518 return -EINVAL;
2519 }
2520
2521 if (!smsm_info.intr_mask) {
2522 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002523 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002524 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002525
2526 spin_lock_irqsave(&smem_lock, flags);
2527
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002528 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2529 new_mask = (old_mask & ~clear_mask) | set_mask;
2530 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002531
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002532 wmb();
2533 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002534
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002535 return 0;
2536}
2537EXPORT_SYMBOL(smsm_change_intr_mask);
2538
2539int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2540{
2541 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2542 pr_err("smsm_change_state: Invalid entry %d\n",
2543 smsm_entry);
2544 return -EINVAL;
2545 }
2546
2547 if (!smsm_info.intr_mask) {
2548 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2549 return -EIO;
2550 }
2551
2552 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2553 return 0;
2554}
2555EXPORT_SYMBOL(smsm_get_intr_mask);
2556
2557int smsm_change_state(uint32_t smsm_entry,
2558 uint32_t clear_mask, uint32_t set_mask)
2559{
2560 unsigned long flags;
2561 uint32_t old_state, new_state;
2562
2563 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2564 pr_err("smsm_change_state: Invalid entry %d",
2565 smsm_entry);
2566 return -EINVAL;
2567 }
2568
2569 if (!smsm_info.state) {
2570 pr_err("smsm_change_state <SM NO STATE>\n");
2571 return -EIO;
2572 }
2573 spin_lock_irqsave(&smem_lock, flags);
2574
2575 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2576 new_state = (old_state & ~clear_mask) | set_mask;
2577 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2578 SMSM_DBG("smsm_change_state %x\n", new_state);
2579 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002580
2581 spin_unlock_irqrestore(&smem_lock, flags);
2582
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002583 return 0;
2584}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002585EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002586
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002587uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002588{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002589 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002590
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002591 /* needs interface change to return error code */
2592 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2593 pr_err("smsm_change_state: Invalid entry %d",
2594 smsm_entry);
2595 return 0;
2596 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002597
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002598 if (!smsm_info.state) {
2599 pr_err("smsm_get_state <SM NO STATE>\n");
2600 } else {
2601 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2602 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002603
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002604 return rv;
2605}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002606EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002607
/**
 * Performs SMSM callback client notification.
 *
 * Drains complete state snapshots from smsm_snapshot_fifo and invokes every
 * registered callback whose mask overlaps the bits that changed.  The
 * work_struct parameter and the mutex use indicate this runs in process
 * context (presumably queued from the snapshot producer -- see
 * smsm_cb_snapshot; confirm against caller).
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	/* nothing can have been queued before the driver is initialized */
	if (!smd_initialized)
		return;

	/* A snapshot is one word per SMSM entry followed by a wakelock
	 * flag word; only consume the fifo while a full snapshot is
	 * present so a partial write never desynchronizes the stream. */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				/* fifo ran dry mid-snapshot; abort rather
				 * than deliver garbage state values */
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			/* XOR isolates exactly the bits that changed */
			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
						n, state_info->last_value,
						new_state);
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}
		mutex_unlock(&smsm_lock);

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			return;
		}

		/* Drop the snapshot's reference on the wakelock; the lock
		 * itself is released only when the outstanding-snapshot
		 * count reaches zero. */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						   " wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				/* count/flag mismatch: producer queued a
				 * wakelock flag we never accounted for */
				pr_err("%s: invalid snapshot count\n",
						__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
2684
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002685
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002686/**
2687 * Registers callback for SMSM state notifications when the specified
2688 * bits change.
2689 *
2690 * @smsm_entry Processor entry to deregister
2691 * @mask Bits to deregister (if result is 0, callback is removed)
2692 * @notify Notification function to deregister
2693 * @data Opaque data passed in to callback
2694 *
2695 * @returns Status code
2696 * <0 error code
2697 * 0 inserted new entry
2698 * 1 updated mask of existing entry
2699 */
2700int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
2701 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002702{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002703 struct smsm_state_cb_info *cb_info;
2704 struct smsm_state_cb_info *cb_found = 0;
2705 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002706
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002707 if (smsm_entry >= SMSM_NUM_ENTRIES)
2708 return -EINVAL;
2709
Eric Holmbergc8002902011-09-16 13:55:57 -06002710 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002711
2712 if (!smsm_states) {
2713 /* smsm not yet initialized */
2714 ret = -ENODEV;
2715 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002716 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002717
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002718 list_for_each_entry(cb_info,
2719 &smsm_states[smsm_entry].callbacks, cb_list) {
2720 if ((cb_info->notify == notify) &&
2721 (cb_info->data == data)) {
2722 cb_info->mask |= mask;
2723 cb_found = cb_info;
2724 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002725 break;
2726 }
2727 }
2728
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002729 if (!cb_found) {
2730 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
2731 GFP_ATOMIC);
2732 if (!cb_info) {
2733 ret = -ENOMEM;
2734 goto cleanup;
2735 }
2736
2737 cb_info->mask = mask;
2738 cb_info->notify = notify;
2739 cb_info->data = data;
2740 INIT_LIST_HEAD(&cb_info->cb_list);
2741 list_add_tail(&cb_info->cb_list,
2742 &smsm_states[smsm_entry].callbacks);
2743 }
2744
2745cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06002746 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002747 return ret;
2748}
2749EXPORT_SYMBOL(smsm_state_cb_register);
2750
2751
2752/**
2753 * Deregisters for SMSM state notifications for the specified bits.
2754 *
2755 * @smsm_entry Processor entry to deregister
2756 * @mask Bits to deregister (if result is 0, callback is removed)
2757 * @notify Notification function to deregister
2758 * @data Opaque data passed in to callback
2759 *
2760 * @returns Status code
2761 * <0 error code
2762 * 0 not found
2763 * 1 updated mask
2764 * 2 removed callback
2765 */
2766int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
2767 void (*notify)(void *, uint32_t, uint32_t), void *data)
2768{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002769 struct smsm_state_cb_info *cb_info;
2770 int ret = 0;
2771
2772 if (smsm_entry >= SMSM_NUM_ENTRIES)
2773 return -EINVAL;
2774
Eric Holmbergc8002902011-09-16 13:55:57 -06002775 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002776
2777 if (!smsm_states) {
2778 /* smsm not yet initialized */
Eric Holmbergc8002902011-09-16 13:55:57 -06002779 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002780 return -ENODEV;
2781 }
2782
2783 list_for_each_entry(cb_info,
2784 &smsm_states[smsm_entry].callbacks, cb_list) {
2785 if ((cb_info->notify == notify) &&
2786 (cb_info->data == data)) {
2787 cb_info->mask &= ~mask;
2788 ret = 1;
2789 if (!cb_info->mask) {
2790 /* no mask bits set, remove callback */
2791 list_del(&cb_info->cb_list);
2792 kfree(cb_info);
2793 ret = 2;
2794 }
2795 break;
2796 }
2797 }
2798
Eric Holmbergc8002902011-09-16 13:55:57 -06002799 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002800 return ret;
2801}
2802EXPORT_SYMBOL(smsm_state_cb_deregister);
2803
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06002804int smsm_driver_state_notifier_register(struct notifier_block *nb)
2805{
2806 int ret;
2807 if (!nb)
2808 return -EINVAL;
2809 mutex_lock(&smsm_driver_state_notifier_lock);
2810 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
2811 mutex_unlock(&smsm_driver_state_notifier_lock);
2812 return ret;
2813}
2814EXPORT_SYMBOL(smsm_driver_state_notifier_register);
2815
2816int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
2817{
2818 int ret;
2819 if (!nb)
2820 return -EINVAL;
2821 mutex_lock(&smsm_driver_state_notifier_lock);
2822 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
2823 nb);
2824 mutex_unlock(&smsm_driver_state_notifier_lock);
2825 return ret;
2826}
2827EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
2828
2829static void smsm_driver_state_notify(uint32_t state, void *data)
2830{
2831 mutex_lock(&smsm_driver_state_notifier_lock);
2832 raw_notifier_call_chain(&smsm_driver_state_notifier_list,
2833 state, data);
2834 mutex_unlock(&smsm_driver_state_notifier_lock);
2835}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002836
2837int smd_core_init(void)
2838{
2839 int r;
2840 unsigned long flags = IRQF_TRIGGER_RISING;
2841 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002842
Brian Swetland37521a32009-07-01 18:30:47 -07002843 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002844 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002845 if (r < 0)
2846 return r;
2847 r = enable_irq_wake(INT_A9_M2A_0);
2848 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002849 pr_err("smd_core_init: "
2850 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002851
Eric Holmberg98c6c642012-02-24 11:29:35 -07002852 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002853 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002854 if (r < 0) {
2855 free_irq(INT_A9_M2A_0, 0);
2856 return r;
2857 }
2858 r = enable_irq_wake(INT_A9_M2A_5);
2859 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002860 pr_err("smd_core_init: "
2861 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002862
Brian Swetland37521a32009-07-01 18:30:47 -07002863#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002864#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
2865 flags |= IRQF_SHARED;
2866#endif
Brian Swetland37521a32009-07-01 18:30:47 -07002867 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002868 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07002869 if (r < 0) {
2870 free_irq(INT_A9_M2A_0, 0);
2871 free_irq(INT_A9_M2A_5, 0);
2872 return r;
2873 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002874
Eric Holmberg98c6c642012-02-24 11:29:35 -07002875 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
2876 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002877 if (r < 0) {
2878 free_irq(INT_A9_M2A_0, 0);
2879 free_irq(INT_A9_M2A_5, 0);
2880 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
2881 return r;
2882 }
2883
2884 r = enable_irq_wake(INT_ADSP_A11);
2885 if (r < 0)
2886 pr_err("smd_core_init: "
2887 "enable_irq_wake failed for INT_ADSP_A11\n");
2888
2889#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
2890 r = enable_irq_wake(INT_ADSP_A11_SMSM);
2891 if (r < 0)
2892 pr_err("smd_core_init: enable_irq_wake "
2893 "failed for INT_ADSP_A11_SMSM\n");
2894#endif
2895 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07002896#endif
2897
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002898#if defined(CONFIG_DSPS)
2899 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
2900 flags, "smd_dev", smd_dsps_irq_handler);
2901 if (r < 0) {
2902 free_irq(INT_A9_M2A_0, 0);
2903 free_irq(INT_A9_M2A_5, 0);
2904 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07002905 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002906 return r;
2907 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002908
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002909 r = enable_irq_wake(INT_DSPS_A11);
2910 if (r < 0)
2911 pr_err("smd_core_init: "
2912 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002913#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002914
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002915#if defined(CONFIG_WCNSS)
2916 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
2917 flags, "smd_dev", smd_wcnss_irq_handler);
2918 if (r < 0) {
2919 free_irq(INT_A9_M2A_0, 0);
2920 free_irq(INT_A9_M2A_5, 0);
2921 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07002922 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002923 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
2924 return r;
2925 }
2926
2927 r = enable_irq_wake(INT_WCNSS_A11);
2928 if (r < 0)
2929 pr_err("smd_core_init: "
2930 "enable_irq_wake failed for INT_WCNSS_A11\n");
2931
Eric Holmberg98c6c642012-02-24 11:29:35 -07002932 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
2933 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002934 if (r < 0) {
2935 free_irq(INT_A9_M2A_0, 0);
2936 free_irq(INT_A9_M2A_5, 0);
2937 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07002938 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002939 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
2940 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
2941 return r;
2942 }
2943
2944 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
2945 if (r < 0)
2946 pr_err("smd_core_init: "
2947 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
2948#endif
2949
Jeff Hugo6a8057c2011-08-16 13:47:12 -06002950#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07002951 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
2952 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06002953 if (r < 0) {
2954 free_irq(INT_A9_M2A_0, 0);
2955 free_irq(INT_A9_M2A_5, 0);
2956 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07002957 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06002958 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
2959 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07002960 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06002961 return r;
2962 }
2963
2964 r = enable_irq_wake(INT_DSPS_A11_SMSM);
2965 if (r < 0)
2966 pr_err("smd_core_init: "
2967 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
2968#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002969 SMD_INFO("smd_core_init() done\n");
2970
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002971 return 0;
2972}
2973
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05302974static int intr_init(struct interrupt_config_item *private_irq,
2975 struct smd_irq_config *platform_irq,
2976 struct platform_device *pdev
2977 )
2978{
2979 int irq_id;
2980 int ret;
2981 int ret_wake;
2982
2983 private_irq->out_bit_pos = platform_irq->out_bit_pos;
2984 private_irq->out_offset = platform_irq->out_offset;
2985 private_irq->out_base = platform_irq->out_base;
2986
2987 irq_id = platform_get_irq_byname(
2988 pdev,
2989 platform_irq->irq_name
2990 );
2991 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
2992 platform_irq->irq_name, irq_id);
2993 ret = request_irq(irq_id,
2994 private_irq->irq_handler,
2995 platform_irq->flags,
2996 platform_irq->device_name,
2997 (void *)platform_irq->dev_id
2998 );
2999 if (ret < 0) {
3000 platform_irq->irq_id = ret;
3001 } else {
3002 platform_irq->irq_id = irq_id;
3003 ret_wake = enable_irq_wake(irq_id);
3004 if (ret_wake < 0) {
3005 pr_err("smd: enable_irq_wake failed on %s",
3006 platform_irq->irq_name);
3007 }
3008 }
3009
3010 return ret;
3011}
3012
3013int smd_core_platform_init(struct platform_device *pdev)
3014{
3015 int i;
3016 int ret;
3017 uint32_t num_ss;
3018 struct smd_platform *smd_platform_data;
3019 struct smd_subsystem_config *smd_ss_config_list;
3020 struct smd_subsystem_config *cfg;
3021 int err_ret = 0;
3022
3023 smd_platform_data = pdev->dev.platform_data;
3024 num_ss = smd_platform_data->num_ss_configs;
3025 smd_ss_config_list = smd_platform_data->smd_ss_configs;
3026
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06003027 if (smd_platform_data->smd_ssr_config)
3028 disable_smsm_reset_handshake = smd_platform_data->
3029 smd_ssr_config->disable_smsm_reset_handshake;
3030
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303031 for (i = 0; i < num_ss; i++) {
3032 cfg = &smd_ss_config_list[i];
3033
3034 ret = intr_init(
3035 &private_intr_config[cfg->irq_config_id].smd,
3036 &cfg->smd_int,
3037 pdev
3038 );
3039
3040 if (ret < 0) {
3041 err_ret = ret;
3042 pr_err("smd: register irq failed on %s\n",
3043 cfg->smd_int.irq_name);
3044 break;
3045 }
3046
3047 ret = intr_init(
3048 &private_intr_config[cfg->irq_config_id].smsm,
3049 &cfg->smsm_int,
3050 pdev
3051 );
3052
3053 if (ret < 0) {
3054 err_ret = ret;
3055 pr_err("smd: register irq failed on %s\n",
3056 cfg->smsm_int.irq_name);
3057 break;
3058 }
Eric Holmberg17992c12012-02-29 12:54:44 -07003059
3060 strncpy(edge_to_pids[cfg->edge].subsys_name,
3061 cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303062 }
3063
3064 if (err_ret < 0) {
3065 pr_err("smd: deregistering IRQs\n");
3066 for (i = 0; i < num_ss; ++i) {
3067 cfg = &smd_ss_config_list[i];
3068
3069 if (cfg->smd_int.irq_id >= 0)
3070 free_irq(cfg->smd_int.irq_id,
3071 (void *)cfg->smd_int.dev_id
3072 );
3073 if (cfg->smsm_int.irq_id >= 0)
3074 free_irq(cfg->smsm_int.irq_id,
3075 (void *)cfg->smsm_int.dev_id
3076 );
3077 }
3078 return err_ret;
3079 }
3080
3081 SMD_INFO("smd_core_platform_init() done\n");
3082 return 0;
3083
3084}
3085
Gregory Bean4416e9e2010-07-28 10:22:12 -07003086static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003087{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303088 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003089
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303090 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003091 INIT_WORK(&probe_work, smd_channel_probe_worker);
3092
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003093 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3094 if (IS_ERR(channel_close_wq)) {
3095 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3096 return -ENOMEM;
3097 }
3098
3099 if (smsm_init()) {
3100 pr_err("smsm_init() failed\n");
3101 return -1;
3102 }
3103
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303104 if (pdev) {
3105 if (pdev->dev.of_node) {
3106 pr_err("SMD: Device tree not currently supported\n");
3107 return -ENODEV;
3108 } else if (pdev->dev.platform_data) {
3109 ret = smd_core_platform_init(pdev);
3110 if (ret) {
3111 pr_err(
3112 "SMD: smd_core_platform_init() failed\n");
3113 return -ENODEV;
3114 }
3115 } else {
3116 ret = smd_core_init();
3117 if (ret) {
3118 pr_err("smd_core_init() failed\n");
3119 return -ENODEV;
3120 }
3121 }
3122 } else {
3123 pr_err("SMD: PDEV not found\n");
3124 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003125 }
3126
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003127 smd_initialized = 1;
3128
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003129 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003130 smsm_irq_handler(0, 0);
3131 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003132
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003133 return 0;
3134}
3135
static int restart_notifier_cb(struct notifier_block *this,
				  unsigned long code,
				  void *data);

/* Subsystem-restart hook table: one entry per remote-processor subsystem
 * name, as registered with subsys_notif_register_notifier() at late_init.
 * NOTE(review): "gss" maps to SMD_MODEM -- presumably GSS channels share
 * the modem processor ID on this target; confirm before changing. */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3147
3148static int restart_notifier_cb(struct notifier_block *this,
3149 unsigned long code,
3150 void *data)
3151{
3152 if (code == SUBSYS_AFTER_SHUTDOWN) {
3153 struct restart_notifier_block *notifier;
3154
3155 notifier = container_of(this,
3156 struct restart_notifier_block, nb);
3157 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3158 __func__, notifier->processor,
3159 notifier->name);
3160
3161 smd_channel_reset(notifier->processor);
3162 }
3163
3164 return NOTIFY_DONE;
3165}
3166
3167static __init int modem_restart_late_init(void)
3168{
3169 int i;
3170 void *handle;
3171 struct restart_notifier_block *nb;
3172
3173 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3174 nb = &restart_notifiers[i];
3175 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3176 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3177 __func__, nb->name, handle);
3178 }
3179 return 0;
3180}
3181late_initcall(modem_restart_late_init);
3182
/* Platform-driver glue: binds to the platform device named MODULE_NAME
 * registered by board code; all bring-up happens in msm_smd_probe(). */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3190
/* Module entry point: register the platform driver; real initialization is
 * deferred to msm_smd_probe() when the matching device appears. */
static int __init msm_smd_init(void)
{
	return platform_driver_register(&msm_smd_driver);
}

module_init(msm_smd_init);
3197
3198MODULE_DESCRIPTION("MSM Shared Memory Core");
3199MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3200MODULE_LICENSE("GPL");