blob: ddc3a8d948a71d2fc73be5f48b5aae0f82759012 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070038#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070040#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053042#include <mach/socinfo.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053043#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070044
45#include "smd_private.h"
46#include "proc_comm.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060050 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060051 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070052#define CONFIG_QDSP6 1
53#endif
54
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060055#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
56 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057#define CONFIG_DSPS 1
58#endif
59
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060#if defined(CONFIG_ARCH_MSM8960) \
61 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060063#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070065
66#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067#define SMEM_VERSION 0x000B
68#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070069#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060070#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72uint32_t SMSM_NUM_ENTRIES = 8;
73uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
Eric Holmberge8a39322012-04-03 15:14:02 -060075/* Legacy SMSM interrupt notifications */
76#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
77 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
78
Brian Swetland2eb44eb2008-09-29 16:00:48 -070079enum {
80 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 MSM_SMSM_DEBUG = 1U << 1,
82 MSM_SMD_INFO = 1U << 2,
83 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070084 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085};
86
/* Cached pointers into the shared-memory SMSM region. */
struct smsm_shared_info {
	uint32_t *state;	/* per-entry SMSM state words */
	uint32_t *intr_mask;	/* per-entry, per-host interrupt masks */
	uint32_t *intr_mux;	/* interrupt mux counters (legacy targets) */
};
92
93static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060094static struct kfifo smsm_snapshot_fifo;
95static struct wake_lock smsm_snapshot_wakelock;
96static int smsm_snapshot_count;
97static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098
/* Layout of the SMSM size-info entry published in shared memory. */
struct smsm_size_info_type {
	uint32_t num_hosts;	/* number of hosts the remote side supports */
	uint32_t num_entries;	/* number of SMSM state entries */
	uint32_t reserved0;
	uint32_t reserved1;
};
105
/* One registered SMSM state-change callback. */
struct smsm_state_cb_info {
	struct list_head cb_list;	/* link in smsm_state_info.callbacks */
	uint32_t mask;			/* state bits this client cares about */
	void *data;			/* opaque client cookie */
	/* invoked with (data, old_state, new_state) on a masked change */
	void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
};
112
/* Per-SMSM-entry bookkeeping for local callback clients. */
struct smsm_state_info {
	struct list_head callbacks;	/* list of smsm_state_cb_info */
	uint32_t last_value;		/* last state value delivered to clients */
	uint32_t intr_mask_set;		/* bits to force-set in the intr mask */
	uint32_t intr_mask_clear;	/* bits to force-clear in the intr mask */
};
119
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530120struct interrupt_config_item {
121 /* must be initialized */
122 irqreturn_t (*irq_handler)(int req, void *data);
123 /* outgoing interrupt config (set from platform data) */
124 uint32_t out_bit_pos;
125 void __iomem *out_base;
126 uint32_t out_offset;
127};
128
/* Pair of interrupt configs (SMD data path + SMSM state path) per subsystem. */
struct interrupt_config {
	struct interrupt_config_item smd;
	struct interrupt_config_item smsm;
};
133
134static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700135static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530136static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700137static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530138static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700139static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530140static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700141static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600142static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530143static irqreturn_t smsm_irq_handler(int irq, void *data);
144
/*
 * Static per-subsystem interrupt table; irq handlers are fixed here,
 * outgoing register info is filled in later from platform data.
 */
static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
	[SMD_MODEM] = {
		.smd.irq_handler = smd_modem_irq_handler,
		.smsm.irq_handler = smsm_modem_irq_handler,
	},
	[SMD_Q6] = {
		.smd.irq_handler = smd_dsp_irq_handler,
		.smsm.irq_handler = smsm_dsp_irq_handler,
	},
	[SMD_DSPS] = {
		.smd.irq_handler = smd_dsps_irq_handler,
		.smsm.irq_handler = smsm_dsps_irq_handler,
	},
	[SMD_WCNSS] = {
		.smd.irq_handler = smd_wcnss_irq_handler,
		.smsm.irq_handler = smsm_wcnss_irq_handler,
	},
	[SMD_RPM] = {
		.smd.irq_handler = smd_rpm_irq_handler,
		.smsm.irq_handler = NULL, /* does not support smsm */
	},
};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600167
/* One auxiliary shared-memory region (physical range + its ioremap). */
struct smem_area {
	void *phys_addr;	/* physical base of the region */
	unsigned size;		/* region size in bytes */
	void __iomem *virt_addr;	/* mapped virtual base */
};
173static uint32_t num_smem_areas;
174static struct smem_area *smem_areas;
175static void *smem_range_check(void *base, unsigned offset);
176
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700177struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530178
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700179#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
180#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
181 entry * SMSM_NUM_HOSTS + host)
182#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
183
184/* Internal definitions which are not exported in some targets */
185enum {
186 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700187};
188
189static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700190module_param_named(debug_mask, msm_smd_debug_mask,
191 int, S_IRUGO | S_IWUSR | S_IWGRP);
192
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700193#if defined(CONFIG_MSM_SMD_DEBUG)
194#define SMD_DBG(x...) do { \
195 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
196 printk(KERN_DEBUG x); \
197 } while (0)
198
199#define SMSM_DBG(x...) do { \
200 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
201 printk(KERN_DEBUG x); \
202 } while (0)
203
204#define SMD_INFO(x...) do { \
205 if (msm_smd_debug_mask & MSM_SMD_INFO) \
206 printk(KERN_INFO x); \
207 } while (0)
208
209#define SMSM_INFO(x...) do { \
210 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
211 printk(KERN_INFO x); \
212 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700213#define SMx_POWER_INFO(x...) do { \
214 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
215 printk(KERN_INFO x); \
216 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217#else
218#define SMD_DBG(x...) do { } while (0)
219#define SMSM_DBG(x...) do { } while (0)
220#define SMD_INFO(x...) do { } while (0)
221#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700222#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700223#endif
224
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700225static unsigned last_heap_free = 0xffffffff;
226
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227static inline void smd_write_intr(unsigned int val,
228 const void __iomem *addr);
229
230#if defined(CONFIG_ARCH_MSM7X30)
231#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530232 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700233#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530234 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700235#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530236 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700237#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530238 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700239#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600240#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700241#define MSM_TRIG_A2WCNSS_SMD_INT
242#define MSM_TRIG_A2WCNSS_SMSM_INT
243#elif defined(CONFIG_ARCH_MSM8X60)
244#define MSM_TRIG_A2M_SMD_INT \
245 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
246#define MSM_TRIG_A2Q6_SMD_INT \
247 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
248#define MSM_TRIG_A2M_SMSM_INT \
249 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
250#define MSM_TRIG_A2Q6_SMSM_INT \
251 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
252#define MSM_TRIG_A2DSPS_SMD_INT \
253 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600254#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700255#define MSM_TRIG_A2WCNSS_SMD_INT
256#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600257#elif defined(CONFIG_ARCH_MSM9615)
258#define MSM_TRIG_A2M_SMD_INT \
259 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
260#define MSM_TRIG_A2Q6_SMD_INT \
261 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
262#define MSM_TRIG_A2M_SMSM_INT \
263 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
264#define MSM_TRIG_A2Q6_SMSM_INT \
265 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
266#define MSM_TRIG_A2DSPS_SMD_INT
267#define MSM_TRIG_A2DSPS_SMSM_INT
268#define MSM_TRIG_A2WCNSS_SMD_INT
269#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700270#elif defined(CONFIG_ARCH_FSM9XXX)
271#define MSM_TRIG_A2Q6_SMD_INT \
272 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
273#define MSM_TRIG_A2Q6_SMSM_INT \
274 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
275#define MSM_TRIG_A2M_SMD_INT \
276 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
277#define MSM_TRIG_A2M_SMSM_INT \
278 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
279#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600280#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700281#define MSM_TRIG_A2WCNSS_SMD_INT
282#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700283#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700284#define MSM_TRIG_A2M_SMD_INT \
285 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700286#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700287#define MSM_TRIG_A2M_SMSM_INT \
288 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700289#define MSM_TRIG_A2Q6_SMSM_INT
290#define MSM_TRIG_A2DSPS_SMD_INT
291#define MSM_TRIG_A2DSPS_SMSM_INT
292#define MSM_TRIG_A2WCNSS_SMD_INT
293#define MSM_TRIG_A2WCNSS_SMSM_INT
294#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
295#define MSM_TRIG_A2M_SMD_INT \
296 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
297#define MSM_TRIG_A2Q6_SMD_INT
298#define MSM_TRIG_A2M_SMSM_INT \
299 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
300#define MSM_TRIG_A2Q6_SMSM_INT
301#define MSM_TRIG_A2DSPS_SMD_INT
302#define MSM_TRIG_A2DSPS_SMSM_INT
303#define MSM_TRIG_A2WCNSS_SMD_INT
304#define MSM_TRIG_A2WCNSS_SMSM_INT
305#else /* use platform device / device tree configuration */
306#define MSM_TRIG_A2M_SMD_INT
307#define MSM_TRIG_A2Q6_SMD_INT
308#define MSM_TRIG_A2M_SMSM_INT
309#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700310#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600311#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700312#define MSM_TRIG_A2WCNSS_SMD_INT
313#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700314#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700315
Jeff Hugoee40b152012-02-09 17:39:47 -0700316/*
317 * stub out legacy macros if they are not being used so that the legacy
318 * code compiles even though it is not used
319 *
320 * these definitions should not be used in active code and will cause
321 * an early failure
322 */
323#ifndef INT_A9_M2A_0
324#define INT_A9_M2A_0 -1
325#endif
326#ifndef INT_A9_M2A_5
327#define INT_A9_M2A_5 -1
328#endif
329#ifndef INT_ADSP_A11
330#define INT_ADSP_A11 -1
331#endif
332#ifndef INT_ADSP_A11_SMSM
333#define INT_ADSP_A11_SMSM -1
334#endif
335#ifndef INT_DSPS_A11
336#define INT_DSPS_A11 -1
337#endif
338#ifndef INT_DSPS_A11_SMSM
339#define INT_DSPS_A11_SMSM -1
340#endif
341#ifndef INT_WCNSS_A11
342#define INT_WCNSS_A11 -1
343#endif
344#ifndef INT_WCNSS_A11_SMSM
345#define INT_WCNSS_A11_SMSM -1
346#endif
347
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700348#define SMD_LOOPBACK_CID 100
349
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600350#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
351static remote_spinlock_t remote_spinlock;
352
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700353static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700354static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600355static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700356
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600357static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700358static void notify_smsm_cb_clients_worker(struct work_struct *work);
359static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600360static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700361static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530362static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600363static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
364static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
365static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700366
/*
 * Write @val to the MMIO register @addr, ordering all prior memory
 * writes first (wmb) so shared-memory updates are visible to the
 * remote processor before the interrupt fires.
 */
static inline void smd_write_intr(unsigned int val,
			const void __iomem *addr)
{
	wmb();
	__raw_writel(val, addr);
}
373
#ifdef CONFIG_WCNSS
/* Wake the RIVA (WCNSS) core before interrupting it; no-op on v2+ silicon. */
static inline void wakeup_v1_riva(void)
{
	/*
	 * workaround hack for RIVA v1 hardware bug:
	 * trigger GPIO 40 to wake up RIVA from power collapse;
	 * not to be sent to customers
	 */
	if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
		/* pulse the GPIO low then high via the TLMM register */
		__raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
		__raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
	}
	/* end workaround */
}
#else
static inline void wakeup_v1_riva(void) {}
#endif
391
/*
 * Raise the apps->modem SMD interrupt, preferring the platform-data
 * configured register and falling back to the legacy hardcoded macro.
 * Also bumps the matching statistics counter.
 */
static inline void notify_modem_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_MODEM].smd;
	if (intr->out_base) {
		++interrupt_stats[SMD_MODEM].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
		MSM_TRIG_A2M_SMD_INT;
	}
}
405
406static inline void notify_dsp_smd(void)
407{
408 static const struct interrupt_config_item *intr
409 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700410 if (intr->out_base) {
411 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530412 smd_write_intr(intr->out_bit_pos,
413 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700414 } else {
415 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530416 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700417 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530418}
419
420static inline void notify_dsps_smd(void)
421{
422 static const struct interrupt_config_item *intr
423 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700424 if (intr->out_base) {
425 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530426 smd_write_intr(intr->out_bit_pos,
427 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700428 } else {
429 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530430 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700431 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530432}
433
/*
 * Raise the apps->WCNSS SMD interrupt. The RIVA core is woken first
 * (v1 hardware workaround) so the interrupt is not lost while it is
 * in power collapse.
 */
static inline void notify_wcnss_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_WCNSS].smd;
	wakeup_v1_riva();

	if (intr->out_base) {
		++interrupt_stats[SMD_WCNSS].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
		MSM_TRIG_A2WCNSS_SMD_INT;
	}
}
449
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600450static inline void notify_rpm_smd(void)
451{
452 static const struct interrupt_config_item *intr
453 = &private_intr_config[SMD_RPM].smd;
454
455 if (intr->out_base) {
456 ++interrupt_stats[SMD_RPM].smd_out_config_count;
457 smd_write_intr(intr->out_bit_pos,
458 intr->out_base + intr->out_offset);
459 }
460}
461
/*
 * Raise the apps->modem SMSM (state) interrupt, preferring the
 * platform-configured register over the legacy hardcoded macro.
 */
static inline void notify_modem_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_MODEM].smsm;
	if (intr->out_base) {
		++interrupt_stats[SMD_MODEM].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
		MSM_TRIG_A2M_SMSM_INT;
	}
}
475
476static inline void notify_dsp_smsm(void)
477{
478 static const struct interrupt_config_item *intr
479 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700480 if (intr->out_base) {
481 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530482 smd_write_intr(intr->out_bit_pos,
483 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700484 } else {
485 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530486 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700487 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530488}
489
/*
 * Raise the apps->DSPS SMSM (state) interrupt, preferring the
 * platform-configured register over the legacy hardcoded macro.
 */
static inline void notify_dsps_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_DSPS].smsm;
	if (intr->out_base) {
		++interrupt_stats[SMD_DSPS].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
		MSM_TRIG_A2DSPS_SMSM_INT;
	}
}
503
/*
 * Raise the apps->WCNSS SMSM (state) interrupt. The RIVA core is woken
 * first (v1 hardware workaround) so the interrupt is not lost while it
 * is in power collapse.
 */
static inline void notify_wcnss_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_WCNSS].smsm;
	wakeup_v1_riva();

	if (intr->out_base) {
		++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
		MSM_TRIG_A2WCNSS_SMSM_INT;
	}
}
519
/*
 * Fan out an SMSM state change for @smsm_entry to every remote host
 * whose interrupt mask intersects @notify_mask, then snapshot the new
 * state for local callback clients.
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		/* 8x50 signals the Q6 via a shared mux counter, not an
		 * edge interrupt; bump it before raising the interrupt */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	smsm_cb_snapshot(0);
}
564
/*
 * Dump the remote processor's diagnostic message and crash log from
 * shared memory to the kernel log (called on modem crash).
 */
void smd_diag(void)
{
	char *x;
	int size;

	x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
	if (x != 0) {
		/* force NUL termination before printing shared-memory text */
		x[SZ_DIAG_ERR_MSG - 1] = 0;
		SMD_INFO("smem: DIAG '%s'\n", x);
	}

	x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
	if (x != 0) {
		x[size - 1] = 0;
		pr_err("smem: CRASH LOG\n'%s'\n", x);
	}
}
582
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700583
/*
 * Terminal handler for a modem crash: log the shared-memory crash
 * info, then spin forever waiting for the modem or watchdog to reset
 * the system. Never returns.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
598
/*
 * Check the modem's SMSM state for the RESET bit.
 * Returns 0 if the modem is healthy (or SMSM is not yet initialized);
 * does not return (-1 nominally) if a crash is detected, since
 * handle_modem_crash() spins forever.
 */
int smsm_check_for_modem_crash(void)
{
	/* if the modem's not ready yet, we have to hope for the best */
	if (!smsm_info.state)
		return 0;

	if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
		handle_modem_crash();
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700612
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700613/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700614 * irq handler and code that mutates the channel
615 * list or fiddles with channel state
616 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700617static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700618DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700619
620/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700621 * operations to avoid races while creating or
622 * destroying smd_channel structures
623 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700624static DEFINE_MUTEX(smd_creation_mutex);
625
626static int smd_initialized;
627
/* Protocol v1 shared layout: control headers with in-line FIFO data. */
struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};
634
/* Protocol v2 shared layout: control headers only; FIFOs live elsewhere. */
struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};
639
/* v2 layout variant for remotes that require word-sized accesses (RPM). */
struct smd_shared_v2_word_access {
	struct smd_half_channel_word_access ch0;
	struct smd_half_channel_word_access ch1;
};
644
/* Local bookkeeping for one open SMD channel. */
struct smd_channel {
	volatile void *send; /* some variant of smd_half_channel */
	volatile void *recv; /* some variant of smd_half_channel */
	unsigned char *send_data;	/* outgoing FIFO buffer */
	unsigned char *recv_data;	/* incoming FIFO buffer */
	unsigned fifo_size;
	unsigned fifo_mask;		/* fifo_size - 1; used for wraparound */
	struct list_head ch_list;	/* link in the per-edge channel list */

	unsigned current_packet;	/* bytes left in current packet (pkt mode) */
	unsigned n;			/* channel index in the alloc table */
	void *priv;			/* client cookie passed to notify() */
	void (*notify)(void *priv, unsigned flags);

	/* stream- vs packet-mode I/O ops; assigned at open time */
	int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
	int (*write)(smd_channel_t *ch, const void *data, int len,
			int user_buf);
	int (*read_avail)(smd_channel_t *ch);
	int (*write_avail)(smd_channel_t *ch);
	int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
			int user_buf);

	void (*update_state)(smd_channel_t *ch);
	unsigned last_state;		/* last observed remote state */
	void (*notify_other_cpu)(void);	/* outgoing interrupt for this edge */

	char name[20];
	struct platform_device pdev;
	unsigned type;			/* edge type (SMD_APPS_MODEM, ...) */

	int pending_pkt_sz;

	char is_pkt_ch;			/* nonzero for packet-mode channels */

	/*
	 * private internal functions to access *send and *recv.
	 * never to be exported outside of smd
	 */
	struct smd_half_channel_access *half_ch;
};
685
/* Maps one SMD edge to its endpoint processor IDs and subsystem name. */
struct edge_to_pid {
	uint32_t local_pid;
	uint32_t remote_pid;
	/* empty when the edge does not involve the apps processor */
	char subsys_name[SMD_MAX_CH_NAME_LEN];
};
691
/**
 * Maps edge type to local and remote processor ID's.
 * subsys_name is only filled in for edges terminating on the apps
 * processor (used for subsystem-restart matching).
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
716
/*
 * Registration record for a subsystem-restart notifier.
 */
struct restart_notifier_block {
	unsigned processor;		/* processor ID this notifier watches */
	char *name;			/* subsystem name for the notifier */
	struct notifier_block nb;	/* embedded notifier chain entry */
};
722
/*
 * When nonzero, the SMSM reset handshake is skipped during channel
 * reset.  NOTE(review): assigned elsewhere in this file — confirm the
 * configuration path before relying on the default.
 */
static int disable_smsm_reset_handshake;
/* Dummy platform device advertising the SMD loopback TTY port. */
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* Channels that are closed, closing, or queued for final teardown. */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
/* Open channels, grouped by the remote processor of their edge. */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* One flag per allocation-table entry: set once claimed by a probe. */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
/* Workqueue on which channel close finalization runs. */
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
747
/*
 * Work-item body: scan the shared-memory channel allocation table and
 * create a local smd_channel for every new entry whose edge terminates
 * on the APPS processor.  Serialized with smd_probe_lock because the
 * probe can run from multiple cores.
 */
static void smd_channel_probe_worker(struct work_struct *work)
{
	struct smd_alloc_elm *shared;
	unsigned n;
	uint32_t type;

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);

	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	mutex_lock(&smd_probe_lock);
	for (n = 0; n < 64; n++) {
		/* entry already claimed by an earlier probe pass */
		if (smd_ch_allocated[n])
			continue;

		/* channel should be allocated only if APPS
		   processor is involved */
		type = SMD_CHANNEL_TYPE(shared[n].type);
		if (type >= ARRAY_SIZE(edge_to_pids) ||
				edge_to_pids[type].local_pid != SMD_APPS)
			continue;
		/* skip table slots that are unused or unnamed */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		if (!smd_alloc_channel(&shared[n]))
			smd_ch_allocated[n] = 1;
		else
			SMD_INFO("Probe skipping ch %d, not allocated\n", n);
	}
	mutex_unlock(&smd_probe_lock);
}
784
785/**
786 * Lookup processor ID and determine if it belongs to the proved edge
787 * type.
788 *
789 * @shared2: Pointer to v2 shared channel structure
790 * @type: Edge type
791 * @pid: Processor ID of processor on edge
792 * @local_ch: Channel that belongs to processor @pid
793 * @remote_ch: Other side of edge contained @pid
794 *
795 * Returns 0 for not on edge, 1 for found on edge
796 */
797static int pid_is_on_edge(struct smd_shared_v2 *shared2,
798 uint32_t type, uint32_t pid,
799 struct smd_half_channel **local_ch,
800 struct smd_half_channel **remote_ch
801 )
802{
803 int ret = 0;
804 struct edge_to_pid *edge;
805
806 *local_ch = 0;
807 *remote_ch = 0;
808
809 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
810 return 0;
811
812 edge = &edge_to_pids[type];
813 if (edge->local_pid != edge->remote_pid) {
814 if (pid == edge->local_pid) {
815 *local_ch = &shared2->ch0;
816 *remote_ch = &shared2->ch1;
817 ret = 1;
818 } else if (pid == edge->remote_pid) {
819 *local_ch = &shared2->ch1;
820 *remote_ch = &shared2->ch0;
821 ret = 1;
822 }
823 }
824
825 return ret;
826}
827
Eric Holmberg17992c12012-02-29 12:54:44 -0700828/*
829 * Returns a pointer to the subsystem name or NULL if no
830 * subsystem name is available.
831 *
832 * @type - Edge definition
833 */
834const char *smd_edge_to_subsystem(uint32_t type)
835{
836 const char *subsys = NULL;
837
838 if (type < ARRAY_SIZE(edge_to_pids)) {
839 subsys = edge_to_pids[type].subsys_name;
840 if (subsys[0] == 0x0)
841 subsys = NULL;
842 }
843 return subsys;
844}
845EXPORT_SYMBOL(smd_edge_to_subsystem);
846
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700847/*
848 * Returns a pointer to the subsystem name given the
849 * remote processor ID.
850 *
851 * @pid Remote processor ID
852 * @returns Pointer to subsystem name or NULL if not found
853 */
854const char *smd_pid_to_subsystem(uint32_t pid)
855{
856 const char *subsys = NULL;
857 int i;
858
859 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
860 if (pid == edge_to_pids[i].remote_pid &&
861 edge_to_pids[i].subsys_name[0] != 0x0
862 ) {
863 subsys = edge_to_pids[i].subsys_name;
864 break;
865 }
866 }
867
868 return subsys;
869}
870EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700871
/*
 * Force one shared-memory half-channel into @new_state during an edge
 * reset: clear the modem-style status signals (DSR/CTS/CD) and raise
 * fSTATE so the change is flagged.  Half-channels already fully CLOSED
 * are left untouched.
 */
static void smd_reset_edge(struct smd_half_channel *ch, unsigned new_state)
{
	if (ch->state != SMD_SS_CLOSED) {
		ch->state = new_state;
		ch->fDSR = 0;
		ch->fCTS = 0;
		ch->fCD = 0;
		ch->fSTATE = 1;
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700882
/*
 * Walk the channel allocation table and force the half-channel state on
 * every edge terminating at the restarting processor @pid.
 *
 * @shared: channel allocation table in shared memory
 * @new_state: SMD_SS_* state to force affected half-channels into
 * @pid: processor ID of the restarting subsystem
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	struct smd_shared_v2 *shared2;
	uint32_t type;
	struct smd_half_channel *local_ch;
	struct smd_half_channel *remote_ch;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip unused or unnamed allocation entries */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);
	}
}
916
917
/*
 * Reset all SMD/SMSM state belonging to a restarting peer processor:
 * release remote spinlocks it may have held, clear and re-handshake its
 * SMSM state entry, then drive each of its channels through CLOSING and
 * finally CLOSED so local clients observe a clean close.
 *
 * @restart_pid: processor ID of the subsystem being restarted
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors; mb() publishes the state writes first */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
985
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700986/* how many bytes are available for reading */
987static int smd_stream_read_avail(struct smd_channel *ch)
988{
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600989 return (ch->half_ch->get_head(ch->recv) -
990 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700991}
992
993/* how many bytes we are free to write */
994static int smd_stream_write_avail(struct smd_channel *ch)
995{
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600996 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
997 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700998}
999
1000static int smd_packet_read_avail(struct smd_channel *ch)
1001{
1002 if (ch->current_packet) {
1003 int n = smd_stream_read_avail(ch);
1004 if (n > ch->current_packet)
1005 n = ch->current_packet;
1006 return n;
1007 } else {
1008 return 0;
1009 }
1010}
1011
1012static int smd_packet_write_avail(struct smd_channel *ch)
1013{
1014 int n = smd_stream_write_avail(ch);
1015 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1016}
1017
1018static int ch_is_open(struct smd_channel *ch)
1019{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001020 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1021 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1022 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001023}
1024
1025/* provide a pointer and length to readable data in the fifo */
1026static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1027{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001028 unsigned head = ch->half_ch->get_head(ch->recv);
1029 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001030 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001031
1032 if (tail <= head)
1033 return head - tail;
1034 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001035 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001036}
1037
/*
 * Nonzero when the remote endpoint has set its fBLOCKREADINTR flag,
 * i.e. it does not want an interrupt raised when we consume data.
 */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1042
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001043/* advance the fifo read pointer after data from ch_read_buffer is consumed */
1044static void ch_read_done(struct smd_channel *ch, unsigned count)
1045{
1046 BUG_ON(count > smd_stream_read_avail(ch));
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001047 ch->half_ch->set_tail(ch->recv,
1048 (ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001049 wmb();
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001050 ch->half_ch->set_fTAIL(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001051}
1052
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * @user_buf: nonzero when @_data is a userspace pointer
 * Returns the number of bytes consumed from the fifo (may be short of
 * @len if the fifo drains).
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* grab the next contiguous readable region; 0 == empty */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				/*
				 * NOTE(review): a partial copy_to_user is
				 * only logged; the bytes are still consumed
				 * from the fifo and counted in the return
				 * value.
				 */
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		/* releases fifo space and raises fTAIL for the remote */
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1093
/* update_state callback for streaming channels: intentionally a no-op */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1098
/*
 * Advance packet-channel bookkeeping: while no packet is in progress
 * (current_packet == 0), consume headers from the stream until a
 * non-empty packet starts or no full header is available.
 * ch->current_packet holds the payload bytes still expected.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		/* header word 0 carries the payload length */
		ch->current_packet = hdr[0];
	}
}
1118
1119/* provide a pointer and length to next free space in the fifo */
1120static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1121{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001122 unsigned head = ch->half_ch->get_head(ch->send);
1123 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001124 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001125
1126 if (head < tail) {
1127 return tail - head - 1;
1128 } else {
1129 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001130 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001131 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001132 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001133 }
1134}
1135
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* publish the head update before raising the fHEAD event flag */
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1147
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001148static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001149{
1150 if (n == SMD_SS_OPENED) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001151 ch->half_ch->set_fDSR(ch->send, 1);
1152 ch->half_ch->set_fCTS(ch->send, 1);
1153 ch->half_ch->set_fCD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001154 } else {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001155 ch->half_ch->set_fDSR(ch->send, 0);
1156 ch->half_ch->set_fCTS(ch->send, 0);
1157 ch->half_ch->set_fCD(ch->send, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001158 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001159 ch->half_ch->set_state(ch->send, n);
1160 ch->half_ch->set_fSTATE(ch->send, 1);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001161 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001162}
1163
/*
 * Schedule the channel probe worker whenever the shared-memory heap has
 * grown since the last check (i.e. something may have been allocated).
 */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}
1172
/*
 * React to a remote half-channel state transition (@last -> @next),
 * advancing our half-channel through the open/close handshake and
 * delivering SMD_EVENT_* callbacks to the channel client.
 * Runs under smd_lock from the irq service paths.
 */
static void smd_state_change(struct smd_channel *ch,
			     unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote started opening: reset fifo indices, follow suit */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* both ends now open: report SMD_EVENT_OPEN to the client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed first: drop packet state, notify client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* our close acknowledged: finish teardown on workqueue */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
				&smd_ch_to_close_list);
			queue_work(channel_close_wq,
				&finalize_channel_close_work);
		}
		break;
	}
}
1218
/*
 * Service state-change events for channels waiting to finish closing;
 * only fSTATE/state matter here, not the data flags.  Uses the _safe
 * iterator because smd_state_change() may list_move() the channel.
 */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		/* acknowledge the remote's state-changed flag */
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1236
/*
 * Common interrupt service for one edge's open-channel list: collect
 * and clear each channel's event flags, deliver state/data callbacks,
 * then re-run the allocation-table probe for new channels.
 *
 * @list: channel list of the interrupting edge
 * @notify: raises the outgoing interrupt toward that edge
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* ch_flags bits: 1 = fHEAD (data arrived),
			 * 2 = fTAIL (space freed), 4 = fSTATE (state change) */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* suppress a duplicate callback when the state change above
		 * already notified the client */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1287
/* Interrupt from the modem: service the APPS<->modem channel list. */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1296
/* Interrupt from the LPASS/Q6: service the APPS<->QDSP channel list. */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001305
/* Interrupt from the DSPS: service the APPS<->DSPS channel list. */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001314
/* Interrupt from the WCNSS: service the APPS<->WCNSS channel list. */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1323
/* Interrupt from the RPM: service the APPS<->RPM channel list. */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int RPM->Apps\n");
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1332
/*
 * Tasklet body: service every edge as though its interrupt had fired.
 * Used to catch up on events that may have been missed, e.g. after
 * sleep (see smd_sleep_exit()) or during channel reset.
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1344
Brian Swetland37521a32009-07-01 18:30:47 -07001345static inline int smd_need_int(struct smd_channel *ch)
1346{
1347 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001348 if (ch->half_ch->get_fHEAD(ch->recv) ||
1349 ch->half_ch->get_fTAIL(ch->recv) ||
1350 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001351 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001352 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001353 return 1;
1354 }
1355 return 0;
1356}
1357
/*
 * Called on wakeup: if any open channel has pending, unserviced events
 * (possibly missed while asleep), schedule the fake-irq tasklet to
 * process them.
 * NOTE(review): smd_ch_list_rpm is not scanned here — confirm whether
 * the RPM edge is intentionally excluded.
 */
void smd_sleep_exit(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	int need_int = 0;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();

	if (need_int) {
		SMD_DBG("smd_sleep_exit need interrupt\n");
		tasklet_schedule(&smd_fake_irq_tasklet);
	}
}
EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001398
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001399static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001400{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001401 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1402 return 0;
1403 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001404 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001405
1406 /* for cases where xfer type is 0 */
1407 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001408 return 0;
1409
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001410 /* for cases where xfer type is 0 */
1411 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1412 return 0;
1413
1414 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001415 return 1;
1416 else
1417 return 0;
1418}
1419
/*
 * Write up to @len bytes from @_data to a streaming channel, filling as
 * much of the tx fifo as currently has space.
 *
 * @user_buf: nonzero when @_data is a userspace pointer
 * Returns bytes queued (may be short of @len), 0 if the channel closed
 * mid-write, or -EINVAL for a negative length.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		/* channel closed mid-write: report nothing written */
		if (!ch_is_open(ch)) {
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			/*
			 * NOTE(review): a partial copy_from_user is only
			 * logged; the full chunk is still committed below.
			 */
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* only interrupt the remote side when something was written */
	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1465
/*
 * Write one complete packet (header + payload) to a packet channel.
 * Space for the whole packet is verified up front, so the two stream
 * writes below are expected to complete fully.
 *
 * @user_buf: nonzero when @_data is a userspace pointer
 * Returns @len on success; -EINVAL, -ENOMEM, -1 (header short write)
 * or the payload short-write count on failure.
 */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	int ret;
	unsigned hdr[5];	/* SMD packet header; word 0 = payload length */

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	/* header is always a kernel buffer, hence user_buf == 0 */
	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}


	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}
1502
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001503static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001504{
1505 int r;
1506
1507 if (len < 0)
1508 return -EINVAL;
1509
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001510 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001511 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001512 if (!read_intr_blocked(ch))
1513 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001514
1515 return r;
1516}
1517
/*
 * Read up to @len bytes of the current packet's payload.  The packet
 * bookkeeping (current_packet and the next header parse) is updated
 * under smd_lock because the irq path also updates packet state.
 *
 * @user_buf: nonzero when @data is a userspace pointer
 * Returns bytes read, or -EINVAL for a negative length.
 */
static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
{
	unsigned long flags;
	int r;

	if (len < 0)
		return -EINVAL;

	/* never read past the end of the current packet */
	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	spin_lock_irqsave(&smd_lock, flags);
	ch->current_packet -= r;
	update_packet_state(ch);
	spin_unlock_irqrestore(&smd_lock, flags);

	return r;
}
1541
/*
 * Packet-mode read variant used from a channel notify callback.
 *
 * Identical to smd_packet_read() except that current_packet is updated
 * without taking smd_lock.  NOTE(review): presumably the callback is
 * invoked with smd_lock already held, making the extra locking both
 * unnecessary and deadlock-prone here -- confirm against the callers.
 */
static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
					int user_buf)
{
	int r;

	if (len < 0)
		return -EINVAL;

	/* never read past the end of the current packet's payload */
	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	ch->current_packet -= r;
	update_packet_state(ch);

	return r;
}
1563
#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
/*
 * Bind channel @ch to its package-v2 shared-memory resources: the pair
 * of half-channel control structures and the split TX/RX FIFO buffer.
 *
 * Word-access edges use a distinct control-structure layout, hence the
 * two smem_alloc() branches.  The FIFO SMEM item holds both directions
 * back to back, so its size must be a power of two and is halved to get
 * the per-direction FIFO size.  Returns 0 on success, -EINVAL if any
 * SMEM item is missing or malformed.
 */
static int smd_alloc_v2(struct smd_channel *ch)
{
	void *buffer;
	unsigned buffer_sz;

	if (is_word_access_ch(ch->type)) {
		struct smd_shared_v2_word_access *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
						sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	} else {
		struct smd_shared_v2 *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	}
	/* accessor table matching the layout chosen above */
	ch->half_ch = get_half_ch_funcs(ch->type);

	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -EINVAL;
	}

	/* buffer must be a power-of-two size */
	if (buffer_sz & (buffer_sz - 1)) {
		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
		return -EINVAL;
	}
	/* first half is the apps->remote FIFO, second half remote->apps */
	buffer_sz /= 2;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;

	return 0;
}
1611
/* v1 allocation is unsupported when package v3/v4 is configured. */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}

#else /* define v1 for older targets */
/* v2 allocation is unsupported on older (v1-only) targets. */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}

/*
 * Bind channel @ch to its package-v1 shared-memory resources: a single
 * smd_shared_v1 SMEM item that embeds both half-channel control blocks
 * and both fixed-size data FIFOs.  Returns 0 on success, -EINVAL if the
 * SMEM item does not exist.
 */
static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	ch->half_ch = get_half_ch_funcs(ch->type);
	return 0;
}

#endif
1641
/*
 * Create the local smd_channel object for one entry of the SMEM channel
 * allocation table, wire up its shared memory (v2 first, then v1 as a
 * fallback), select the edge-specific notify and packet/stream vtable,
 * park it on the closed list, and register a platform device so clients
 * can probe against the channel name.
 *
 * Returns 0 on success, -1 on allocation failure (callers only test for
 * non-zero).
 */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	/* try the newer layout first; fall back to the legacy one */
	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	/* fifo_size is a power of two, so this mask wraps FIFO indices */
	ch->fifo_mask = ch->fifo_size - 1;

	/* probe_worker guarentees ch->type will be a valid type */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else if (ch->type == SMD_APPS_WCNSS)
		ch->notify_other_cpu = notify_wcnss_smd;
	else if (ch->type == SMD_APPS_RPM)
		ch->notify_other_cpu = notify_rpm_smd;

	/* install the packet-mode or stream-mode operation table */
	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	/* the SMEM name field is not guaranteed NUL-terminated; force it */
	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		 ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}
1713
1714static inline void notify_loopback_smd(void)
1715{
1716 unsigned long flags;
1717 struct smd_channel *ch;
1718
1719 spin_lock_irqsave(&smd_lock, flags);
1720 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1721 ch->notify(ch->priv, SMD_EVENT_DATA);
1722 }
1723 spin_unlock_irqrestore(&smd_lock, flags);
1724}
1725
/*
 * Create the purely-local loopback channel used for testing.
 *
 * Both directions intentionally share ONE control structure and ONE
 * data buffer (smd_loopback_ctl / smd_loopback_data), so whatever is
 * written is immediately readable back.  The statics mean this may
 * only ever be called once.  Returns 0 on success, -1 on OOM.
 */
static int smd_alloc_loopback_channel(void)
{
	static struct smd_half_channel smd_loopback_ctl;
	static char smd_loopback_data[SMD_BUF_SIZE];
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("%s: out of memory\n", __func__);
		return -1;
	}
	ch->n = SMD_LOOPBACK_CID;

	/* send and recv deliberately alias the same control/data blocks */
	ch->send = &smd_loopback_ctl;
	ch->recv = &smd_loopback_ctl;
	ch->send_data = smd_loopback_data;
	ch->recv_data = smd_loopback_data;
	ch->fifo_size = SMD_BUF_SIZE;

	ch->fifo_mask = ch->fifo_size - 1;
	ch->type = SMD_LOOPBACK_TYPE;
	ch->notify_other_cpu = notify_loopback_smd;

	/* loopback is always stream mode */
	ch->read = smd_stream_read;
	ch->write = smd_stream_write;
	ch->read_avail = smd_stream_read_avail;
	ch->write_avail = smd_stream_write_avail;
	ch->update_state = update_stream_state;
	ch->read_from_cb = smd_stream_read;

	memset(ch->name, 0, 20);
	memcpy(ch->name, "local_loopback", 14);

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	return 0;
}
1771
/* No-op notify callback installed when a client passes a NULL notify
 * function, and while a channel is being torn down, so ch->notify is
 * always safe to call without a NULL check.
 */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1775
/*
 * Workqueue handler that finishes closing channels: moves every entry
 * from the to-close list onto the closed (reusable) list, tells the
 * old client the port may be reopened, then detaches the callback.
 *
 * Lock order is smd_creation_mutex -> smd_lock: the mutex guards the
 * closed list, the spinlock guards the to-close list.
 */
static void finalize_channel_close_fn(struct work_struct *work)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;

	mutex_lock(&smd_creation_mutex);
	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
		list_del(&ch->ch_list);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		/* notify before detaching so the client sees REOPEN_READY */
		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
		ch->notify = do_nothing_notify;
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_creation_mutex);
}
1793
1794struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001795{
1796 struct smd_channel *ch;
1797
1798 mutex_lock(&smd_creation_mutex);
1799 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001800 if (!strcmp(name, ch->name) &&
1801 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001802 list_del(&ch->ch_list);
1803 mutex_unlock(&smd_creation_mutex);
1804 return ch;
1805 }
1806 }
1807 mutex_unlock(&smd_creation_mutex);
1808
1809 return NULL;
1810}
1811
/*
 * Open the channel named @name on processor edge @edge.
 *
 * Claims the channel from the closed list; if it is mid-close (on the
 * closing list or queued for the close workqueue) the caller gets
 * -EAGAIN and should retry.  A second smd_get_channel() call closes the
 * closing->closed race window.  On success the channel is placed on its
 * edge's active list and (for real edges) the OPENING handshake is
 * started.  Loopback channels are forced straight to OPENED locally.
 *
 * @notify may be NULL, in which case events are silently dropped.
 * Returns 0 on success, -ENODEV if SMD is not up or the channel does
 * not exist, -EAGAIN if it is still closing.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side: open it locally right away */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	/* file the channel on the list matching its remote processor */
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1900
1901
1902int smd_open(const char *name, smd_channel_t **_ch,
1903 void *priv, void (*notify)(void *, unsigned))
1904{
1905 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1906 notify);
1907}
1908EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001909
/*
 * Close channel @ch.
 *
 * The channel is removed from its edge list and its local state driven
 * to CLOSED (loopback flips its flags directly since there is no remote
 * handshake).  If the remote side still has the port OPENED, the channel
 * parks on the closing list until the remote acknowledges; otherwise it
 * returns to the closed (reusable) list immediately.
 *
 * Returns 0 on success, -1 if @ch is NULL.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* remote is still open: defer reuse until it closes too */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
1943
/*
 * Begin a segmented packet write of total length @len on packet channel
 * @ch: writes the 5-word packet header now, and records the pending
 * payload size so smd_write_segment() calls can stream the body.
 *
 * Returns 0 on success; -ENODEV for NULL channel, -EACCES for a stream
 * channel, -EINVAL for len < 1, -EBUSY if a packet is already in
 * progress, -EAGAIN if the FIFO cannot hold the header yet, -EPERM if
 * the header write fails.
 */
int smd_write_start(smd_channel_t *ch, int len)
{
	int ret;
	unsigned hdr[5];

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (!ch->is_pkt_ch) {
		pr_err("%s: non-packet channel specified\n", __func__);
		return -EACCES;
	}
	if (len < 1) {
		pr_err("%s: invalid length: %d\n", __func__, len);
		return -EINVAL;
	}

	if (ch->pending_pkt_sz) {
		pr_err("%s: packet of size: %d in progress\n", __func__,
			ch->pending_pkt_sz);
		return -EBUSY;
	}
	ch->pending_pkt_sz = len;

	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
		ch->pending_pkt_sz = 0;
		SMD_DBG("%s: no space to write packet header\n", __func__);
		return -EAGAIN;
	}

	/* packet header: word 0 is the payload length, rest reserved */
	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		ch->pending_pkt_sz = 0;
		pr_err("%s: packet header failed to write\n", __func__);
		return -EPERM;
	}
	return 0;
}
EXPORT_SYMBOL(smd_write_start);
1988
1989int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1990{
1991 int bytes_written;
1992
1993 if (!ch) {
1994 pr_err("%s: Invalid channel specified\n", __func__);
1995 return -ENODEV;
1996 }
1997 if (len < 1) {
1998 pr_err("%s: invalid length: %d\n", __func__, len);
1999 return -EINVAL;
2000 }
2001
2002 if (!ch->pending_pkt_sz) {
2003 pr_err("%s: no transaction in progress\n", __func__);
2004 return -ENOEXEC;
2005 }
2006 if (ch->pending_pkt_sz - len < 0) {
2007 pr_err("%s: segment of size: %d will make packet go over "
2008 "length\n", __func__, len);
2009 return -EINVAL;
2010 }
2011
2012 bytes_written = smd_stream_write(ch, data, len, user_buf);
2013
2014 ch->pending_pkt_sz -= bytes_written;
2015
2016 return bytes_written;
2017}
2018EXPORT_SYMBOL(smd_write_segment);
2019
2020int smd_write_end(smd_channel_t *ch)
2021{
2022
2023 if (!ch) {
2024 pr_err("%s: Invalid channel specified\n", __func__);
2025 return -ENODEV;
2026 }
2027 if (ch->pending_pkt_sz) {
2028 pr_err("%s: current packet not completely written\n", __func__);
2029 return -E2BIG;
2030 }
2031
2032 return 0;
2033}
2034EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002035
2036int smd_read(smd_channel_t *ch, void *data, int len)
2037{
Jack Pham1b236d12012-03-19 15:27:18 -07002038 if (!ch) {
2039 pr_err("%s: Invalid channel specified\n", __func__);
2040 return -ENODEV;
2041 }
2042
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002043 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002044}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002045EXPORT_SYMBOL(smd_read);
2046
2047int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2048{
Jack Pham1b236d12012-03-19 15:27:18 -07002049 if (!ch) {
2050 pr_err("%s: Invalid channel specified\n", __func__);
2051 return -ENODEV;
2052 }
2053
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002054 return ch->read(ch, data, len, 1);
2055}
2056EXPORT_SYMBOL(smd_read_user_buffer);
2057
2058int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2059{
Jack Pham1b236d12012-03-19 15:27:18 -07002060 if (!ch) {
2061 pr_err("%s: Invalid channel specified\n", __func__);
2062 return -ENODEV;
2063 }
2064
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002065 return ch->read_from_cb(ch, data, len, 0);
2066}
2067EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002068
2069int smd_write(smd_channel_t *ch, const void *data, int len)
2070{
Jack Pham1b236d12012-03-19 15:27:18 -07002071 if (!ch) {
2072 pr_err("%s: Invalid channel specified\n", __func__);
2073 return -ENODEV;
2074 }
2075
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002076 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002077}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002078EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002079
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002080int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002081{
Jack Pham1b236d12012-03-19 15:27:18 -07002082 if (!ch) {
2083 pr_err("%s: Invalid channel specified\n", __func__);
2084 return -ENODEV;
2085 }
2086
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002087 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002088}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002089EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002090
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002091int smd_read_avail(smd_channel_t *ch)
2092{
Jack Pham1b236d12012-03-19 15:27:18 -07002093 if (!ch) {
2094 pr_err("%s: Invalid channel specified\n", __func__);
2095 return -ENODEV;
2096 }
2097
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002098 return ch->read_avail(ch);
2099}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002100EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002101
2102int smd_write_avail(smd_channel_t *ch)
2103{
Jack Pham1b236d12012-03-19 15:27:18 -07002104 if (!ch) {
2105 pr_err("%s: Invalid channel specified\n", __func__);
2106 return -ENODEV;
2107 }
2108
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002109 return ch->write_avail(ch);
2110}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002111EXPORT_SYMBOL(smd_write_avail);
2112
2113void smd_enable_read_intr(smd_channel_t *ch)
2114{
2115 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002116 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002117}
2118EXPORT_SYMBOL(smd_enable_read_intr);
2119
2120void smd_disable_read_intr(smd_channel_t *ch)
2121{
2122 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002123 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002124}
2125EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002126
/* Blocking waits are not implemented; always fails with -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2131
/* Blocking waits are not implemented; always fails with -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2136
2137int smd_cur_packet_size(smd_channel_t *ch)
2138{
Jack Pham1b236d12012-03-19 15:27:18 -07002139 if (!ch) {
2140 pr_err("%s: Invalid channel specified\n", __func__);
2141 return -ENODEV;
2142 }
2143
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002144 return ch->current_packet;
2145}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002146EXPORT_SYMBOL(smd_cur_packet_size);
2147
/*
 * Report TTY modem-control line state for @ch as a TIOCM_* bitmask.
 *
 * Remote-driven lines (DSR/CTS/CD/RI) come from the recv half-channel;
 * our own DTR/RTS are mirrored back from the flags we set on the send
 * half-channel.  -ENODEV for a NULL channel.
 */
int smd_tiocmget(smd_channel_t *ch)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return  (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
		(ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
		(ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
		(ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
		(ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
		(ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
}
EXPORT_SYMBOL(smd_tiocmget);
2163
/* this api will be called while holding smd_lock */
/*
 * Set/clear DTR and RTS on @ch's send half-channel (DTR maps to fDSR,
 * RTS to fCTS as seen by the remote), raise fSTATE, and interrupt the
 * remote processor so it re-reads the flags.  The barrier() orders the
 * flag writes before the remote notification.  -ENODEV for NULL @ch.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	ch->half_ch->set_fSTATE(ch->send, 1);
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2192
2193int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2194{
2195 unsigned long flags;
2196
Jack Pham1b236d12012-03-19 15:27:18 -07002197 if (!ch) {
2198 pr_err("%s: Invalid channel specified\n", __func__);
2199 return -ENODEV;
2200 }
2201
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002202 spin_lock_irqsave(&smd_lock, flags);
2203 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002204 spin_unlock_irqrestore(&smd_lock, flags);
2205
2206 return 0;
2207}
2208EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002209
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002210int smd_is_pkt_avail(smd_channel_t *ch)
2211{
2212 if (!ch || !ch->is_pkt_ch)
2213 return -EINVAL;
2214
2215 if (ch->current_packet)
2216 return 1;
2217
2218 update_packet_state(ch);
2219
2220 return ch->current_packet ? 1 : 0;
2221}
2222EXPORT_SYMBOL(smd_is_pkt_avail);
2223
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002224
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002225/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002226
/*
 * Shared Memory Range Check
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the aux smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL. Expects the array
 * of smem regions to be in ascending physical address order.
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 */
static void *smem_range_check(void *base, unsigned offset)
{
	int i;
	void *phys_addr;
	unsigned size;

	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;
		/* areas are sorted ascending: once base falls below the
		 * current area's start it cannot be in any later one
		 */
		if (base < phys_addr)
			return NULL;
		if (base > phys_addr + size)
			continue;
		/* translate to the area's virtual mapping */
		if (base >= phys_addr && base + offset < phys_addr + size)
			return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}
2257
/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.
 */
/* Despite the name this never allocates: it is a lookup-only wrapper
 * around smem_find() kept for API compatibility (contrast smem_alloc2).
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002266
/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
 * it allocates it and then returns the pointer to it.
 *
 * The heap TOC is shared with the other processors, so all reads and
 * updates happen under the cross-processor remote spinlock; wmb() makes
 * the entry fields visible before/after the 'allocated' flag flips.
 * Returns NULL if the heap is uninitialized, @id is out of range or a
 * fixed item, the recorded size mismatches, or the heap is exhausted.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	/* all smem items are 8-byte aligned */
	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* entry fields must be visible before 'allocated' */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002315
/*
 * Look up SMEM item @id, returning its virtual address and storing its
 * size in *@size (0 if unallocated).  Returns NULL if @id is out of
 * range or not allocated.
 *
 * May run before the remote spinlock is initialized (early boot), in
 * which case the lookup proceeds unlocked.  Items whose TOC entry
 * carries a base address in its reserved field live in an aux smem
 * region and are translated via smem_range_check().
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002348
/**
 * smem_find - look up SMEM item @id and verify its size
 * @id:      SMEM item index
 * @size_in: expected size in bytes; rounded up to an 8-byte multiple
 *           before comparison to match the heap allocation granularity
 *
 * Returns a pointer to the item, or 0 (NULL) if the item does not exist
 * or its allocated size differs from the (aligned) expected size.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned size;
	void *ptr;

	ptr = smem_get_entry(id, &size);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		/* fix: all three arguments are unsigned, so print with %u
		 * instead of %d to avoid misreporting large values */
		pr_err("smem_find(%u, %u): wrong size %u\n",
		       id, size_in, size);
		return 0;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);
2368
2369static int smsm_cb_init(void)
2370{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002371 struct smsm_state_info *state_info;
2372 int n;
2373 int ret = 0;
2374
2375 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2376 GFP_KERNEL);
2377
2378 if (!smsm_states) {
2379 pr_err("%s: SMSM init failed\n", __func__);
2380 return -ENOMEM;
2381 }
2382
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002383 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2384 if (!smsm_cb_wq) {
2385 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2386 kfree(smsm_states);
2387 return -EFAULT;
2388 }
2389
Eric Holmbergc8002902011-09-16 13:55:57 -06002390 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002391 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2392 state_info = &smsm_states[n];
2393 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002394 state_info->intr_mask_set = 0x0;
2395 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002396 INIT_LIST_HEAD(&state_info->callbacks);
2397 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002398 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002399
2400 return ret;
2401}
2402
2403static int smsm_init(void)
2404{
2405 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2406 int i;
2407 struct smsm_size_info_type *smsm_size_info;
2408
2409 i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
2410 if (i) {
2411 pr_err("%s: remote spinlock init failed %d\n", __func__, i);
2412 return i;
2413 }
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302414 spinlocks_initialized = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002415
2416 smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
2417 sizeof(struct smsm_size_info_type));
2418 if (smsm_size_info) {
2419 SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
2420 SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
2421 }
2422
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002423 i = kfifo_alloc(&smsm_snapshot_fifo,
2424 sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
2425 GFP_KERNEL);
2426 if (i) {
2427 pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
2428 return i;
2429 }
Eric Holmberg59a9f942012-03-19 10:04:22 -06002430 wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
2431 "smsm_snapshot");
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002432
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002433 if (!smsm_info.state) {
2434 smsm_info.state = smem_alloc2(ID_SHARED_STATE,
2435 SMSM_NUM_ENTRIES *
2436 sizeof(uint32_t));
2437
2438 if (smsm_info.state) {
2439 __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2440 if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
2441 __raw_writel(0, \
2442 SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
2443 }
2444 }
2445
2446 if (!smsm_info.intr_mask) {
2447 smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
2448 SMSM_NUM_ENTRIES *
2449 SMSM_NUM_HOSTS *
2450 sizeof(uint32_t));
2451
Eric Holmberge8a39322012-04-03 15:14:02 -06002452 if (smsm_info.intr_mask) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002453 for (i = 0; i < SMSM_NUM_ENTRIES; i++)
Eric Holmberge8a39322012-04-03 15:14:02 -06002454 __raw_writel(0x0,
2455 SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
2456
2457 /* Configure legacy modem bits */
2458 __raw_writel(LEGACY_MODEM_SMSM_MASK,
2459 SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
2460 SMSM_APPS));
2461 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002462 }
2463
2464 if (!smsm_info.intr_mux)
2465 smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
2466 SMSM_NUM_INTR_MUX *
2467 sizeof(uint32_t));
2468
2469 i = smsm_cb_init();
2470 if (i)
2471 return i;
2472
2473 wmb();
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06002474 smsm_driver_state_notify(SMSM_INIT, NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002475 return 0;
2476}
2477
2478void smsm_reset_modem(unsigned mode)
2479{
2480 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2481 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2482 } else if (mode == SMSM_MODEM_WAIT) {
2483 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2484 } else { /* reset_mode is SMSM_RESET or default */
2485 mode = SMSM_RESET;
2486 }
2487
2488 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2489}
2490EXPORT_SYMBOL(smsm_reset_modem);
2491
2492void smsm_reset_modem_cont(void)
2493{
2494 unsigned long flags;
2495 uint32_t state;
2496
2497 if (!smsm_info.state)
2498 return;
2499
2500 spin_lock_irqsave(&smem_lock, flags);
2501 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
2502 & ~SMSM_MODEM_WAIT;
2503 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2504 wmb();
2505 spin_unlock_irqrestore(&smem_lock, flags);
2506}
2507EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002508
Eric Holmbergda31d042012-03-28 14:01:02 -06002509static void smsm_cb_snapshot(uint32_t use_wakelock)
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002510{
2511 int n;
2512 uint32_t new_state;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002513 unsigned long flags;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002514 int ret;
2515
2516 ret = kfifo_avail(&smsm_snapshot_fifo);
Eric Holmbergda31d042012-03-28 14:01:02 -06002517 if (ret < SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002518 pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
2519 return;
2520 }
2521
Eric Holmberg96b55f62012-04-03 19:10:46 -06002522 /*
2523 * To avoid a race condition with notify_smsm_cb_clients_worker, the
2524 * following sequence must be followed:
2525 * 1) increment snapshot count
2526 * 2) insert data into FIFO
2527 *
2528 * Potentially in parallel, the worker:
2529 * a) verifies >= 1 snapshots are in FIFO
2530 * b) processes snapshot
2531 * c) decrements reference count
2532 *
2533 * This order ensures that 1 will always occur before abc.
2534 */
Eric Holmbergda31d042012-03-28 14:01:02 -06002535 if (use_wakelock) {
2536 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2537 if (smsm_snapshot_count == 0) {
2538 SMx_POWER_INFO("SMSM snapshot wake lock\n");
2539 wake_lock(&smsm_snapshot_wakelock);
2540 }
2541 ++smsm_snapshot_count;
2542 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2543 }
Eric Holmberg96b55f62012-04-03 19:10:46 -06002544
2545 /* queue state entries */
2546 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2547 new_state = __raw_readl(SMSM_STATE_ADDR(n));
2548
2549 ret = kfifo_in(&smsm_snapshot_fifo,
2550 &new_state, sizeof(new_state));
2551 if (ret != sizeof(new_state)) {
2552 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2553 goto restore_snapshot_count;
2554 }
2555 }
2556
2557 /* queue wakelock usage flag */
2558 ret = kfifo_in(&smsm_snapshot_fifo,
2559 &use_wakelock, sizeof(use_wakelock));
2560 if (ret != sizeof(use_wakelock)) {
2561 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2562 goto restore_snapshot_count;
2563 }
2564
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002565 queue_work(smsm_cb_wq, &smsm_cb_work);
Eric Holmberg96b55f62012-04-03 19:10:46 -06002566 return;
2567
2568restore_snapshot_count:
2569 if (use_wakelock) {
2570 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2571 if (smsm_snapshot_count) {
2572 --smsm_snapshot_count;
2573 if (smsm_snapshot_count == 0) {
2574 SMx_POWER_INFO("SMSM snapshot wake unlock\n");
2575 wake_unlock(&smsm_snapshot_wakelock);
2576 }
2577 } else {
2578 pr_err("%s: invalid snapshot count\n", __func__);
2579 }
2580 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2581 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002582}
2583
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002584static irqreturn_t smsm_irq_handler(int irq, void *data)
2585{
2586 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002587
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002588 if (irq == INT_ADSP_A11_SMSM) {
Eric Holmberg6282c5d2011-10-27 17:30:57 -06002589 uint32_t mux_val;
2590 static uint32_t prev_smem_q6_apps_smsm;
2591
2592 if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
2593 mux_val = __raw_readl(
2594 SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
2595 if (mux_val != prev_smem_q6_apps_smsm)
2596 prev_smem_q6_apps_smsm = mux_val;
2597 }
2598
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002599 spin_lock_irqsave(&smem_lock, flags);
Eric Holmbergda31d042012-03-28 14:01:02 -06002600 smsm_cb_snapshot(1);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002601 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002602 return IRQ_HANDLED;
2603 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002604
2605 spin_lock_irqsave(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002606 if (!smsm_info.state) {
2607 SMSM_INFO("<SM NO STATE>\n");
2608 } else {
2609 unsigned old_apps, apps;
2610 unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002611
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002612 old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002613
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002614 SMSM_DBG("<SM %08x %08x>\n", apps, modm);
2615 if (apps & SMSM_RESET) {
2616 /* If we get an interrupt and the apps SMSM_RESET
2617 bit is already set, the modem is acking the
2618 app's reset ack. */
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06002619 if (!disable_smsm_reset_handshake)
Angshuman Sarkaread67bd2011-09-21 20:13:12 +05302620 apps &= ~SMSM_RESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002621 /* Issue a fake irq to handle any
2622 * smd state changes during reset
2623 */
2624 smd_fake_irq_handler(0);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002625
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002626 /* queue modem restart notify chain */
2627 modem_queue_start_reset_notify();
2628
2629 } else if (modm & SMSM_RESET) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002630 pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
Ram Somani8b9589f2012-04-03 12:07:18 +05302631 if (!disable_smsm_reset_handshake) {
2632 apps |= SMSM_RESET;
2633 flush_cache_all();
2634 outer_flush_all();
2635 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002636 modem_queue_start_reset_notify();
2637
2638 } else if (modm & SMSM_INIT) {
2639 if (!(apps & SMSM_INIT)) {
2640 apps |= SMSM_INIT;
2641 modem_queue_smsm_init_notify();
2642 }
2643
2644 if (modm & SMSM_SMDINIT)
2645 apps |= SMSM_SMDINIT;
2646 if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
2647 (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
2648 apps |= SMSM_RUN;
2649 } else if (modm & SMSM_SYSTEM_DOWNLOAD) {
2650 pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
2651 modem_queue_start_reset_notify();
2652 }
2653
2654 if (old_apps != apps) {
2655 SMSM_DBG("<SM %08x NOTIFY>\n", apps);
2656 __raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2657 do_smd_probe();
2658 notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
2659 }
2660
Eric Holmbergda31d042012-03-28 14:01:02 -06002661 smsm_cb_snapshot(1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002662 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002663 spin_unlock_irqrestore(&smem_lock, flags);
2664 return IRQ_HANDLED;
2665}
2666
Eric Holmberg98c6c642012-02-24 11:29:35 -07002667static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
2668{
2669 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002670 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002671 return smsm_irq_handler(irq, data);
2672}
2673
2674static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2675{
2676 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002677 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002678 return smsm_irq_handler(irq, data);
2679}
2680
2681static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2682{
2683 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002684 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002685 return smsm_irq_handler(irq, data);
2686}
2687
2688static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2689{
2690 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002691 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002692 return smsm_irq_handler(irq, data);
2693}
2694
Eric Holmberge8a39322012-04-03 15:14:02 -06002695/*
2696 * Changes the global interrupt mask. The set and clear masks are re-applied
2697 * every time the global interrupt mask is updated for callback registration
2698 * and de-registration.
2699 *
2700 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2701 * mask and the set mask, the result will be that the interrupt is set.
2702 *
2703 * @smsm_entry SMSM entry to change
2704 * @clear_mask 1 = clear bit, 0 = no-op
2705 * @set_mask 1 = set bit, 0 = no-op
2706 *
2707 * @returns 0 for success, < 0 for error
2708 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002709int smsm_change_intr_mask(uint32_t smsm_entry,
2710 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002711{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002712 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002713 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002714
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002715 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2716 pr_err("smsm_change_state: Invalid entry %d\n",
2717 smsm_entry);
2718 return -EINVAL;
2719 }
2720
2721 if (!smsm_info.intr_mask) {
2722 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002723 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002724 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002725
2726 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002727 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2728 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002729
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002730 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2731 new_mask = (old_mask & ~clear_mask) | set_mask;
2732 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002733
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002734 wmb();
2735 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002736
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002737 return 0;
2738}
2739EXPORT_SYMBOL(smsm_change_intr_mask);
2740
2741int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2742{
2743 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2744 pr_err("smsm_change_state: Invalid entry %d\n",
2745 smsm_entry);
2746 return -EINVAL;
2747 }
2748
2749 if (!smsm_info.intr_mask) {
2750 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2751 return -EIO;
2752 }
2753
2754 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2755 return 0;
2756}
2757EXPORT_SYMBOL(smsm_get_intr_mask);
2758
2759int smsm_change_state(uint32_t smsm_entry,
2760 uint32_t clear_mask, uint32_t set_mask)
2761{
2762 unsigned long flags;
2763 uint32_t old_state, new_state;
2764
2765 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2766 pr_err("smsm_change_state: Invalid entry %d",
2767 smsm_entry);
2768 return -EINVAL;
2769 }
2770
2771 if (!smsm_info.state) {
2772 pr_err("smsm_change_state <SM NO STATE>\n");
2773 return -EIO;
2774 }
2775 spin_lock_irqsave(&smem_lock, flags);
2776
2777 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2778 new_state = (old_state & ~clear_mask) | set_mask;
2779 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2780 SMSM_DBG("smsm_change_state %x\n", new_state);
2781 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002782
2783 spin_unlock_irqrestore(&smem_lock, flags);
2784
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002785 return 0;
2786}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002787EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002788
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002789uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002790{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002791 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002792
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002793 /* needs interface change to return error code */
2794 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2795 pr_err("smsm_change_state: Invalid entry %d",
2796 smsm_entry);
2797 return 0;
2798 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002799
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002800 if (!smsm_info.state) {
2801 pr_err("smsm_get_state <SM NO STATE>\n");
2802 } else {
2803 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2804 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002805
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002806 return rv;
2807}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002808EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002809
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002810/**
2811 * Performs SMSM callback client notifiction.
2812 */
2813void notify_smsm_cb_clients_worker(struct work_struct *work)
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002814{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002815 struct smsm_state_cb_info *cb_info;
2816 struct smsm_state_info *state_info;
2817 int n;
2818 uint32_t new_state;
2819 uint32_t state_changes;
Eric Holmbergda31d042012-03-28 14:01:02 -06002820 uint32_t use_wakelock;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002821 int ret;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002822 unsigned long flags;
Brian Swetland03e00cd2009-07-01 17:58:37 -07002823
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002824 if (!smd_initialized)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002825 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002826
Eric Holmbergda31d042012-03-28 14:01:02 -06002827 while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002828 mutex_lock(&smsm_lock);
2829 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2830 state_info = &smsm_states[n];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002831
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002832 ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
2833 sizeof(new_state));
2834 if (ret != sizeof(new_state)) {
2835 pr_err("%s: snapshot underflow %d\n",
2836 __func__, ret);
2837 mutex_unlock(&smsm_lock);
2838 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002839 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002840
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002841 state_changes = state_info->last_value ^ new_state;
2842 if (state_changes) {
Eric Holmberg98c6c642012-02-24 11:29:35 -07002843 SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
2844 n, state_info->last_value,
2845 new_state);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002846 list_for_each_entry(cb_info,
2847 &state_info->callbacks, cb_list) {
2848
2849 if (cb_info->mask & state_changes)
2850 cb_info->notify(cb_info->data,
2851 state_info->last_value,
2852 new_state);
2853 }
2854 state_info->last_value = new_state;
2855 }
2856 }
Eric Holmberg59a9f942012-03-19 10:04:22 -06002857
Eric Holmbergda31d042012-03-28 14:01:02 -06002858 /* read wakelock flag */
2859 ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
2860 sizeof(use_wakelock));
2861 if (ret != sizeof(use_wakelock)) {
2862 pr_err("%s: snapshot underflow %d\n",
2863 __func__, ret);
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002864 mutex_unlock(&smsm_lock);
Eric Holmbergda31d042012-03-28 14:01:02 -06002865 return;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002866 }
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002867 mutex_unlock(&smsm_lock);
Eric Holmbergda31d042012-03-28 14:01:02 -06002868
2869 if (use_wakelock) {
2870 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2871 if (smsm_snapshot_count) {
2872 --smsm_snapshot_count;
2873 if (smsm_snapshot_count == 0) {
2874 SMx_POWER_INFO("SMSM snapshot"
2875 " wake unlock\n");
2876 wake_unlock(&smsm_snapshot_wakelock);
2877 }
2878 } else {
2879 pr_err("%s: invalid snapshot count\n",
2880 __func__);
2881 }
2882 spin_unlock_irqrestore(&smsm_snapshot_count_lock,
2883 flags);
2884 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002885 }
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002886}
2887
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002888
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002889/**
2890 * Registers callback for SMSM state notifications when the specified
2891 * bits change.
2892 *
2893 * @smsm_entry Processor entry to deregister
2894 * @mask Bits to deregister (if result is 0, callback is removed)
2895 * @notify Notification function to deregister
2896 * @data Opaque data passed in to callback
2897 *
2898 * @returns Status code
2899 * <0 error code
2900 * 0 inserted new entry
2901 * 1 updated mask of existing entry
2902 */
2903int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
2904 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002905{
Eric Holmberge8a39322012-04-03 15:14:02 -06002906 struct smsm_state_info *state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002907 struct smsm_state_cb_info *cb_info;
2908 struct smsm_state_cb_info *cb_found = 0;
Eric Holmberge8a39322012-04-03 15:14:02 -06002909 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002910 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002911
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002912 if (smsm_entry >= SMSM_NUM_ENTRIES)
2913 return -EINVAL;
2914
Eric Holmbergc8002902011-09-16 13:55:57 -06002915 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002916
2917 if (!smsm_states) {
2918 /* smsm not yet initialized */
2919 ret = -ENODEV;
2920 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002921 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002922
Eric Holmberge8a39322012-04-03 15:14:02 -06002923 state = &smsm_states[smsm_entry];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002924 list_for_each_entry(cb_info,
Eric Holmberge8a39322012-04-03 15:14:02 -06002925 &state->callbacks, cb_list) {
2926 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002927 (cb_info->data == data)) {
2928 cb_info->mask |= mask;
2929 cb_found = cb_info;
2930 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002931 }
Eric Holmberge8a39322012-04-03 15:14:02 -06002932 new_mask |= cb_info->mask;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002933 }
2934
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002935 if (!cb_found) {
2936 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
2937 GFP_ATOMIC);
2938 if (!cb_info) {
2939 ret = -ENOMEM;
2940 goto cleanup;
2941 }
2942
2943 cb_info->mask = mask;
2944 cb_info->notify = notify;
2945 cb_info->data = data;
2946 INIT_LIST_HEAD(&cb_info->cb_list);
2947 list_add_tail(&cb_info->cb_list,
Eric Holmberge8a39322012-04-03 15:14:02 -06002948 &state->callbacks);
2949 new_mask |= mask;
2950 }
2951
2952 /* update interrupt notification mask */
2953 if (smsm_entry == SMSM_MODEM_STATE)
2954 new_mask |= LEGACY_MODEM_SMSM_MASK;
2955
2956 if (smsm_info.intr_mask) {
2957 unsigned long flags;
2958
2959 spin_lock_irqsave(&smem_lock, flags);
2960 new_mask = (new_mask & ~state->intr_mask_clear)
2961 | state->intr_mask_set;
2962 __raw_writel(new_mask,
2963 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2964 wmb();
2965 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002966 }
2967
2968cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06002969 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002970 return ret;
2971}
2972EXPORT_SYMBOL(smsm_state_cb_register);
2973
2974
/**
 * Deregisters for SMSM state notifications for the specified bits.
 *
 * @smsm_entry  Processor entry to deregister
 * @mask        Bits to deregister (if result is 0, callback is removed)
 * @notify      Notification function to deregister
 * @data        Opaque data passed in to callback
 *
 * @returns Status code
 *  <0 error code
 *  0  not found
 *  1  updated mask
 *  2  removed callback
 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	/* Safe iteration: entries whose mask drops to zero are unlinked and
	 * freed in place.  new_mask accumulates the union of the masks that
	 * remain, for the hardware interrupt mask below. */
	state = &smsm_states[smsm_entry];
	list_for_each_entry_safe(cb_info, cb_tmp,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				continue;
			}
		}
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		/* Re-apply the sticky masks from smsm_change_intr_mask on
		 * top of the union of the remaining callback masks. */
		spin_lock_irqsave(&smem_lock, flags);
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3047
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003048int smsm_driver_state_notifier_register(struct notifier_block *nb)
3049{
3050 int ret;
3051 if (!nb)
3052 return -EINVAL;
3053 mutex_lock(&smsm_driver_state_notifier_lock);
3054 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
3055 mutex_unlock(&smsm_driver_state_notifier_lock);
3056 return ret;
3057}
3058EXPORT_SYMBOL(smsm_driver_state_notifier_register);
3059
3060int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
3061{
3062 int ret;
3063 if (!nb)
3064 return -EINVAL;
3065 mutex_lock(&smsm_driver_state_notifier_lock);
3066 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
3067 nb);
3068 mutex_unlock(&smsm_driver_state_notifier_lock);
3069 return ret;
3070}
3071EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
3072
3073static void smsm_driver_state_notify(uint32_t state, void *data)
3074{
3075 mutex_lock(&smsm_driver_state_notifier_lock);
3076 raw_notifier_call_chain(&smsm_driver_state_notifier_list,
3077 state, data);
3078 mutex_unlock(&smsm_driver_state_notifier_lock);
3079}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003080
3081int smd_core_init(void)
3082{
3083 int r;
3084 unsigned long flags = IRQF_TRIGGER_RISING;
3085 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003086
Brian Swetland37521a32009-07-01 18:30:47 -07003087 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003088 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003089 if (r < 0)
3090 return r;
3091 r = enable_irq_wake(INT_A9_M2A_0);
3092 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003093 pr_err("smd_core_init: "
3094 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003095
Eric Holmberg98c6c642012-02-24 11:29:35 -07003096 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003097 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003098 if (r < 0) {
3099 free_irq(INT_A9_M2A_0, 0);
3100 return r;
3101 }
3102 r = enable_irq_wake(INT_A9_M2A_5);
3103 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003104 pr_err("smd_core_init: "
3105 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003106
Brian Swetland37521a32009-07-01 18:30:47 -07003107#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003108#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3109 flags |= IRQF_SHARED;
3110#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003111 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003112 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003113 if (r < 0) {
3114 free_irq(INT_A9_M2A_0, 0);
3115 free_irq(INT_A9_M2A_5, 0);
3116 return r;
3117 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003118
Eric Holmberg98c6c642012-02-24 11:29:35 -07003119 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3120 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003121 if (r < 0) {
3122 free_irq(INT_A9_M2A_0, 0);
3123 free_irq(INT_A9_M2A_5, 0);
3124 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3125 return r;
3126 }
3127
3128 r = enable_irq_wake(INT_ADSP_A11);
3129 if (r < 0)
3130 pr_err("smd_core_init: "
3131 "enable_irq_wake failed for INT_ADSP_A11\n");
3132
3133#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3134 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3135 if (r < 0)
3136 pr_err("smd_core_init: enable_irq_wake "
3137 "failed for INT_ADSP_A11_SMSM\n");
3138#endif
3139 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003140#endif
3141
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003142#if defined(CONFIG_DSPS)
3143 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3144 flags, "smd_dev", smd_dsps_irq_handler);
3145 if (r < 0) {
3146 free_irq(INT_A9_M2A_0, 0);
3147 free_irq(INT_A9_M2A_5, 0);
3148 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003149 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003150 return r;
3151 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003152
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003153 r = enable_irq_wake(INT_DSPS_A11);
3154 if (r < 0)
3155 pr_err("smd_core_init: "
3156 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003157#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003158
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003159#if defined(CONFIG_WCNSS)
3160 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3161 flags, "smd_dev", smd_wcnss_irq_handler);
3162 if (r < 0) {
3163 free_irq(INT_A9_M2A_0, 0);
3164 free_irq(INT_A9_M2A_5, 0);
3165 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003166 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003167 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3168 return r;
3169 }
3170
3171 r = enable_irq_wake(INT_WCNSS_A11);
3172 if (r < 0)
3173 pr_err("smd_core_init: "
3174 "enable_irq_wake failed for INT_WCNSS_A11\n");
3175
Eric Holmberg98c6c642012-02-24 11:29:35 -07003176 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3177 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003178 if (r < 0) {
3179 free_irq(INT_A9_M2A_0, 0);
3180 free_irq(INT_A9_M2A_5, 0);
3181 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003182 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003183 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3184 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3185 return r;
3186 }
3187
3188 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3189 if (r < 0)
3190 pr_err("smd_core_init: "
3191 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3192#endif
3193
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003194#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003195 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3196 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003197 if (r < 0) {
3198 free_irq(INT_A9_M2A_0, 0);
3199 free_irq(INT_A9_M2A_5, 0);
3200 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003201 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003202 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3203 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003204 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003205 return r;
3206 }
3207
3208 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3209 if (r < 0)
3210 pr_err("smd_core_init: "
3211 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3212#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003213 SMD_INFO("smd_core_init() done\n");
3214
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003215 return 0;
3216}
3217
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303218static int intr_init(struct interrupt_config_item *private_irq,
3219 struct smd_irq_config *platform_irq,
3220 struct platform_device *pdev
3221 )
3222{
3223 int irq_id;
3224 int ret;
3225 int ret_wake;
3226
3227 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3228 private_irq->out_offset = platform_irq->out_offset;
3229 private_irq->out_base = platform_irq->out_base;
3230
3231 irq_id = platform_get_irq_byname(
3232 pdev,
3233 platform_irq->irq_name
3234 );
3235 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3236 platform_irq->irq_name, irq_id);
3237 ret = request_irq(irq_id,
3238 private_irq->irq_handler,
3239 platform_irq->flags,
3240 platform_irq->device_name,
3241 (void *)platform_irq->dev_id
3242 );
3243 if (ret < 0) {
3244 platform_irq->irq_id = ret;
3245 } else {
3246 platform_irq->irq_id = irq_id;
3247 ret_wake = enable_irq_wake(irq_id);
3248 if (ret_wake < 0) {
3249 pr_err("smd: enable_irq_wake failed on %s",
3250 platform_irq->irq_name);
3251 }
3252 }
3253
3254 return ret;
3255}
3256
Jeff Hugobdc734d2012-03-26 16:05:39 -06003257int sort_cmp_func(const void *a, const void *b)
3258{
3259 struct smem_area *left = (struct smem_area *)(a);
3260 struct smem_area *right = (struct smem_area *)(b);
3261
3262 return left->phys_addr - right->phys_addr;
3263}
3264
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303265int smd_core_platform_init(struct platform_device *pdev)
3266{
3267 int i;
3268 int ret;
3269 uint32_t num_ss;
3270 struct smd_platform *smd_platform_data;
3271 struct smd_subsystem_config *smd_ss_config_list;
3272 struct smd_subsystem_config *cfg;
3273 int err_ret = 0;
Jeff Hugobdc734d2012-03-26 16:05:39 -06003274 struct smd_smem_regions *smd_smem_areas;
3275 int smem_idx = 0;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303276
3277 smd_platform_data = pdev->dev.platform_data;
3278 num_ss = smd_platform_data->num_ss_configs;
3279 smd_ss_config_list = smd_platform_data->smd_ss_configs;
3280
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06003281 if (smd_platform_data->smd_ssr_config)
3282 disable_smsm_reset_handshake = smd_platform_data->
3283 smd_ssr_config->disable_smsm_reset_handshake;
3284
Jeff Hugobdc734d2012-03-26 16:05:39 -06003285 smd_smem_areas = smd_platform_data->smd_smem_areas;
3286 if (smd_smem_areas) {
3287 num_smem_areas = smd_platform_data->num_smem_areas;
3288 smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
3289 GFP_KERNEL);
3290 if (!smem_areas) {
3291 pr_err("%s: smem_areas kmalloc failed\n", __func__);
3292 err_ret = -ENOMEM;
3293 goto smem_areas_alloc_fail;
3294 }
3295
3296 for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
3297 smem_areas[smem_idx].phys_addr =
3298 smd_smem_areas[smem_idx].phys_addr;
3299 smem_areas[smem_idx].size =
3300 smd_smem_areas[smem_idx].size;
3301 smem_areas[smem_idx].virt_addr = ioremap_nocache(
3302 (unsigned long)(smem_areas[smem_idx].phys_addr),
3303 smem_areas[smem_idx].size);
3304 if (!smem_areas[smem_idx].virt_addr) {
3305 pr_err("%s: ioremap_nocache() of addr:%p"
3306 " size: %x\n", __func__,
3307 smem_areas[smem_idx].phys_addr,
3308 smem_areas[smem_idx].size);
3309 err_ret = -ENOMEM;
3310 ++smem_idx;
3311 goto smem_failed;
3312 }
3313 }
3314 sort(smem_areas, num_smem_areas,
3315 sizeof(struct smem_area),
3316 sort_cmp_func, NULL);
3317 }
3318
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303319 for (i = 0; i < num_ss; i++) {
3320 cfg = &smd_ss_config_list[i];
3321
3322 ret = intr_init(
3323 &private_intr_config[cfg->irq_config_id].smd,
3324 &cfg->smd_int,
3325 pdev
3326 );
3327
3328 if (ret < 0) {
3329 err_ret = ret;
3330 pr_err("smd: register irq failed on %s\n",
3331 cfg->smd_int.irq_name);
Jeff Hugobdc734d2012-03-26 16:05:39 -06003332 goto intr_failed;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303333 }
3334
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003335 /* only init smsm structs if this edge supports smsm */
3336 if (cfg->smsm_int.irq_id)
3337 ret = intr_init(
3338 &private_intr_config[cfg->irq_config_id].smsm,
3339 &cfg->smsm_int,
3340 pdev
3341 );
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303342
3343 if (ret < 0) {
3344 err_ret = ret;
3345 pr_err("smd: register irq failed on %s\n",
3346 cfg->smsm_int.irq_name);
Jeff Hugobdc734d2012-03-26 16:05:39 -06003347 goto intr_failed;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303348 }
Eric Holmberg17992c12012-02-29 12:54:44 -07003349
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003350 if (cfg->subsys_name)
3351 strlcpy(edge_to_pids[cfg->edge].subsys_name,
Eric Holmberg17992c12012-02-29 12:54:44 -07003352 cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303353 }
3354
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303355
3356 SMD_INFO("smd_core_platform_init() done\n");
3357 return 0;
3358
Jeff Hugobdc734d2012-03-26 16:05:39 -06003359intr_failed:
3360 pr_err("smd: deregistering IRQs\n");
3361 for (i = 0; i < num_ss; ++i) {
3362 cfg = &smd_ss_config_list[i];
3363
3364 if (cfg->smd_int.irq_id >= 0)
3365 free_irq(cfg->smd_int.irq_id,
3366 (void *)cfg->smd_int.dev_id
3367 );
3368 if (cfg->smsm_int.irq_id >= 0)
3369 free_irq(cfg->smsm_int.irq_id,
3370 (void *)cfg->smsm_int.dev_id
3371 );
3372 }
3373smem_failed:
3374 for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
3375 iounmap(smem_areas[smem_idx].virt_addr);
3376 kfree(smem_areas);
3377smem_areas_alloc_fail:
3378 return err_ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303379}
3380
Gregory Bean4416e9e2010-07-28 10:22:12 -07003381static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003382{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303383 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003384
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303385 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003386 INIT_WORK(&probe_work, smd_channel_probe_worker);
3387
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003388 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3389 if (IS_ERR(channel_close_wq)) {
3390 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3391 return -ENOMEM;
3392 }
3393
3394 if (smsm_init()) {
3395 pr_err("smsm_init() failed\n");
3396 return -1;
3397 }
3398
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303399 if (pdev) {
3400 if (pdev->dev.of_node) {
3401 pr_err("SMD: Device tree not currently supported\n");
3402 return -ENODEV;
3403 } else if (pdev->dev.platform_data) {
3404 ret = smd_core_platform_init(pdev);
3405 if (ret) {
3406 pr_err(
3407 "SMD: smd_core_platform_init() failed\n");
3408 return -ENODEV;
3409 }
3410 } else {
3411 ret = smd_core_init();
3412 if (ret) {
3413 pr_err("smd_core_init() failed\n");
3414 return -ENODEV;
3415 }
3416 }
3417 } else {
3418 pr_err("SMD: PDEV not found\n");
3419 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003420 }
3421
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003422 smd_initialized = 1;
3423
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003424 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003425 smsm_irq_handler(0, 0);
3426 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003427
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003428 return 0;
3429}
3430
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003431static int restart_notifier_cb(struct notifier_block *this,
3432 unsigned long code,
3433 void *data);
3434
/*
 * Table of subsystems whose restart events trigger an SMD channel reset.
 * Each entry maps a subsystem-restart name to the SMD processor whose
 * channels must be reset; restart_notifier_cb() handles the event.
 * Note: "gss" intentionally maps to SMD_MODEM (shares the modem edge).
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3442
3443static int restart_notifier_cb(struct notifier_block *this,
3444 unsigned long code,
3445 void *data)
3446{
3447 if (code == SUBSYS_AFTER_SHUTDOWN) {
3448 struct restart_notifier_block *notifier;
3449
3450 notifier = container_of(this,
3451 struct restart_notifier_block, nb);
3452 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3453 __func__, notifier->processor,
3454 notifier->name);
3455
3456 smd_channel_reset(notifier->processor);
3457 }
3458
3459 return NOTIFY_DONE;
3460}
3461
3462static __init int modem_restart_late_init(void)
3463{
3464 int i;
3465 void *handle;
3466 struct restart_notifier_block *nb;
3467
3468 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3469 nb = &restart_notifiers[i];
3470 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3471 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3472 __func__, nb->name, handle);
3473 }
3474 return 0;
3475}
3476late_initcall(modem_restart_late_init);
3477
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003478static struct platform_driver msm_smd_driver = {
3479 .probe = msm_smd_probe,
3480 .driver = {
3481 .name = MODULE_NAME,
3482 .owner = THIS_MODULE,
3483 },
3484};
3485
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003486int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003487{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003488 static bool registered;
3489
3490 if (registered)
3491 return 0;
3492
3493 registered = true;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003494 return platform_driver_register(&msm_smd_driver);
3495}
3496
3497module_init(msm_smd_init);
3498
3499MODULE_DESCRIPTION("MSM Shared Memory Core");
3500MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3501MODULE_LICENSE("GPL");