blob: c1e41188ac8275d42f930fd98966081318b068b4 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070038#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070040#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053042#include <mach/socinfo.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053043#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070044
45#include "smd_private.h"
46#include "proc_comm.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060050 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060051 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070052#define CONFIG_QDSP6 1
53#endif
54
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060055#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
56 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057#define CONFIG_DSPS 1
58#endif
59
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060#if defined(CONFIG_ARCH_MSM8960) \
61 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060063#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070065
66#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067#define SMEM_VERSION 0x000B
68#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070069#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060070#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72uint32_t SMSM_NUM_ENTRIES = 8;
73uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
Eric Holmberge8a39322012-04-03 15:14:02 -060075/* Legacy SMSM interrupt notifications */
76#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
77 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
78
Brian Swetland2eb44eb2008-09-29 16:00:48 -070079enum {
80 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 MSM_SMSM_DEBUG = 1U << 1,
82 MSM_SMD_INFO = 1U << 2,
83 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070084 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085};
86
87struct smsm_shared_info {
88 uint32_t *state;
89 uint32_t *intr_mask;
90 uint32_t *intr_mux;
91};
92
93static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060094static struct kfifo smsm_snapshot_fifo;
95static struct wake_lock smsm_snapshot_wakelock;
96static int smsm_snapshot_count;
97static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098
99struct smsm_size_info_type {
100 uint32_t num_hosts;
101 uint32_t num_entries;
102 uint32_t reserved0;
103 uint32_t reserved1;
104};
105
106struct smsm_state_cb_info {
107 struct list_head cb_list;
108 uint32_t mask;
109 void *data;
110 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
111};
112
113struct smsm_state_info {
114 struct list_head callbacks;
115 uint32_t last_value;
Eric Holmberge8a39322012-04-03 15:14:02 -0600116 uint32_t intr_mask_set;
117 uint32_t intr_mask_clear;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700118};
119
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530120struct interrupt_config_item {
121 /* must be initialized */
122 irqreturn_t (*irq_handler)(int req, void *data);
123 /* outgoing interrupt config (set from platform data) */
124 uint32_t out_bit_pos;
125 void __iomem *out_base;
126 uint32_t out_offset;
127};
128
129struct interrupt_config {
130 struct interrupt_config_item smd;
131 struct interrupt_config_item smsm;
132};
133
134static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700135static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530136static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700137static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530138static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700139static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530140static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700141static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600142static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530143static irqreturn_t smsm_irq_handler(int irq, void *data);
144
145static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
146 [SMD_MODEM] = {
147 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700148 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530149 },
150 [SMD_Q6] = {
151 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700152 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530153 },
154 [SMD_DSPS] = {
155 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700156 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530157 },
158 [SMD_WCNSS] = {
159 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700160 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530161 },
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600162 [SMD_RPM] = {
163 .smd.irq_handler = smd_rpm_irq_handler,
164 .smsm.irq_handler = NULL, /* does not support smsm */
165 },
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530166};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600167
168struct smem_area {
169 void *phys_addr;
170 unsigned size;
171 void __iomem *virt_addr;
172};
173static uint32_t num_smem_areas;
174static struct smem_area *smem_areas;
175static void *smem_range_check(void *base, unsigned offset);
176
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700177struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530178
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700179#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
180#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
181 entry * SMSM_NUM_HOSTS + host)
182#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
183
184/* Internal definitions which are not exported in some targets */
185enum {
186 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700187};
188
189static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700190module_param_named(debug_mask, msm_smd_debug_mask,
191 int, S_IRUGO | S_IWUSR | S_IWGRP);
192
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700193#if defined(CONFIG_MSM_SMD_DEBUG)
194#define SMD_DBG(x...) do { \
195 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
196 printk(KERN_DEBUG x); \
197 } while (0)
198
199#define SMSM_DBG(x...) do { \
200 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
201 printk(KERN_DEBUG x); \
202 } while (0)
203
204#define SMD_INFO(x...) do { \
205 if (msm_smd_debug_mask & MSM_SMD_INFO) \
206 printk(KERN_INFO x); \
207 } while (0)
208
209#define SMSM_INFO(x...) do { \
210 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
211 printk(KERN_INFO x); \
212 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700213#define SMx_POWER_INFO(x...) do { \
214 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
215 printk(KERN_INFO x); \
216 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217#else
218#define SMD_DBG(x...) do { } while (0)
219#define SMSM_DBG(x...) do { } while (0)
220#define SMD_INFO(x...) do { } while (0)
221#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700222#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700223#endif
224
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700225static unsigned last_heap_free = 0xffffffff;
226
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227static inline void smd_write_intr(unsigned int val,
228 const void __iomem *addr);
229
230#if defined(CONFIG_ARCH_MSM7X30)
231#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530232 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700233#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530234 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700235#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530236 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700237#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530238 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700239#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600240#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700241#define MSM_TRIG_A2WCNSS_SMD_INT
242#define MSM_TRIG_A2WCNSS_SMSM_INT
243#elif defined(CONFIG_ARCH_MSM8X60)
244#define MSM_TRIG_A2M_SMD_INT \
245 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
246#define MSM_TRIG_A2Q6_SMD_INT \
247 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
248#define MSM_TRIG_A2M_SMSM_INT \
249 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
250#define MSM_TRIG_A2Q6_SMSM_INT \
251 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
252#define MSM_TRIG_A2DSPS_SMD_INT \
253 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600254#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700255#define MSM_TRIG_A2WCNSS_SMD_INT
256#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600257#elif defined(CONFIG_ARCH_MSM9615)
258#define MSM_TRIG_A2M_SMD_INT \
259 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
260#define MSM_TRIG_A2Q6_SMD_INT \
261 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
262#define MSM_TRIG_A2M_SMSM_INT \
263 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
264#define MSM_TRIG_A2Q6_SMSM_INT \
265 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
266#define MSM_TRIG_A2DSPS_SMD_INT
267#define MSM_TRIG_A2DSPS_SMSM_INT
268#define MSM_TRIG_A2WCNSS_SMD_INT
269#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700270#elif defined(CONFIG_ARCH_FSM9XXX)
271#define MSM_TRIG_A2Q6_SMD_INT \
272 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
273#define MSM_TRIG_A2Q6_SMSM_INT \
274 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
275#define MSM_TRIG_A2M_SMD_INT \
276 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
277#define MSM_TRIG_A2M_SMSM_INT \
278 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
279#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600280#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700281#define MSM_TRIG_A2WCNSS_SMD_INT
282#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700283#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700284#define MSM_TRIG_A2M_SMD_INT \
285 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700286#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700287#define MSM_TRIG_A2M_SMSM_INT \
288 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700289#define MSM_TRIG_A2Q6_SMSM_INT
290#define MSM_TRIG_A2DSPS_SMD_INT
291#define MSM_TRIG_A2DSPS_SMSM_INT
292#define MSM_TRIG_A2WCNSS_SMD_INT
293#define MSM_TRIG_A2WCNSS_SMSM_INT
294#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
295#define MSM_TRIG_A2M_SMD_INT \
296 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
297#define MSM_TRIG_A2Q6_SMD_INT
298#define MSM_TRIG_A2M_SMSM_INT \
299 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
300#define MSM_TRIG_A2Q6_SMSM_INT
301#define MSM_TRIG_A2DSPS_SMD_INT
302#define MSM_TRIG_A2DSPS_SMSM_INT
303#define MSM_TRIG_A2WCNSS_SMD_INT
304#define MSM_TRIG_A2WCNSS_SMSM_INT
305#else /* use platform device / device tree configuration */
306#define MSM_TRIG_A2M_SMD_INT
307#define MSM_TRIG_A2Q6_SMD_INT
308#define MSM_TRIG_A2M_SMSM_INT
309#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700310#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600311#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700312#define MSM_TRIG_A2WCNSS_SMD_INT
313#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700314#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700315
Jeff Hugoee40b152012-02-09 17:39:47 -0700316/*
317 * stub out legacy macros if they are not being used so that the legacy
318 * code compiles even though it is not used
319 *
320 * these definitions should not be used in active code and will cause
321 * an early failure
322 */
323#ifndef INT_A9_M2A_0
324#define INT_A9_M2A_0 -1
325#endif
326#ifndef INT_A9_M2A_5
327#define INT_A9_M2A_5 -1
328#endif
329#ifndef INT_ADSP_A11
330#define INT_ADSP_A11 -1
331#endif
332#ifndef INT_ADSP_A11_SMSM
333#define INT_ADSP_A11_SMSM -1
334#endif
335#ifndef INT_DSPS_A11
336#define INT_DSPS_A11 -1
337#endif
338#ifndef INT_DSPS_A11_SMSM
339#define INT_DSPS_A11_SMSM -1
340#endif
341#ifndef INT_WCNSS_A11
342#define INT_WCNSS_A11 -1
343#endif
344#ifndef INT_WCNSS_A11_SMSM
345#define INT_WCNSS_A11_SMSM -1
346#endif
347
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700348#define SMD_LOOPBACK_CID 100
349
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600350#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
351static remote_spinlock_t remote_spinlock;
352
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700353static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700354static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600355static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700356
357static void notify_smsm_cb_clients_worker(struct work_struct *work);
358static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600359static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700360static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530361static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600362static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
363static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
364static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700365
/*
 * smd_write_intr() - write @val to the io-mapped register @addr
 *
 * Used to trigger outgoing interrupts toward remote processors.  The
 * wmb() orders all prior shared-memory updates before the register
 * write so the remote side observes consistent SMEM state when the
 * interrupt fires.
 */
static inline void smd_write_intr(unsigned int val,
			const void __iomem *addr)
{
	/* barrier first: SMEM data must be visible before the doorbell */
	wmb();
	__raw_writel(val, addr);
}
372
#ifdef CONFIG_WCNSS
/*
 * wakeup_v1_riva() - wake the RIVA (WCNSS) core before signalling it
 *
 * Workaround hack for a RIVA v1 hardware bug: toggle GPIO 40 (via the
 * TLMM register at offset 0x1284) to wake RIVA from power collapse.
 * Only applied on SoC major version 1; not to be sent to customers.
 */
static inline void wakeup_v1_riva(void)
{
	if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
		/* drive the pin low then high to generate the wakeup edge */
		__raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
		__raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
	}
	/* end workaround */
}
#else
/* no WCNSS support configured; nothing to wake */
static inline void wakeup_v1_riva(void) {}
#endif
390
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530391static inline void notify_modem_smd(void)
392{
393 static const struct interrupt_config_item *intr
394 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700395 if (intr->out_base) {
396 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530397 smd_write_intr(intr->out_bit_pos,
398 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700399 } else {
400 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530401 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700402 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530403}
404
405static inline void notify_dsp_smd(void)
406{
407 static const struct interrupt_config_item *intr
408 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700409 if (intr->out_base) {
410 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530411 smd_write_intr(intr->out_bit_pos,
412 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700413 } else {
414 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530415 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700416 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530417}
418
419static inline void notify_dsps_smd(void)
420{
421 static const struct interrupt_config_item *intr
422 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700423 if (intr->out_base) {
424 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530425 smd_write_intr(intr->out_bit_pos,
426 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700427 } else {
428 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530429 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700430 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530431}
432
433static inline void notify_wcnss_smd(void)
434{
435 static const struct interrupt_config_item *intr
436 = &private_intr_config[SMD_WCNSS].smd;
437 wakeup_v1_riva();
438
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700439 if (intr->out_base) {
440 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530441 smd_write_intr(intr->out_bit_pos,
442 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700443 } else {
444 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530445 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700446 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530447}
448
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600449static inline void notify_rpm_smd(void)
450{
451 static const struct interrupt_config_item *intr
452 = &private_intr_config[SMD_RPM].smd;
453
454 if (intr->out_base) {
455 ++interrupt_stats[SMD_RPM].smd_out_config_count;
456 smd_write_intr(intr->out_bit_pos,
457 intr->out_base + intr->out_offset);
458 }
459}
460
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530461static inline void notify_modem_smsm(void)
462{
463 static const struct interrupt_config_item *intr
464 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700465 if (intr->out_base) {
466 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530467 smd_write_intr(intr->out_bit_pos,
468 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700469 } else {
470 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530471 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700472 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530473}
474
475static inline void notify_dsp_smsm(void)
476{
477 static const struct interrupt_config_item *intr
478 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700479 if (intr->out_base) {
480 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530481 smd_write_intr(intr->out_bit_pos,
482 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700483 } else {
484 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530485 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700486 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530487}
488
489static inline void notify_dsps_smsm(void)
490{
491 static const struct interrupt_config_item *intr
492 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700493 if (intr->out_base) {
494 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530495 smd_write_intr(intr->out_bit_pos,
496 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700497 } else {
498 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530499 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700500 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530501}
502
503static inline void notify_wcnss_smsm(void)
504{
505 static const struct interrupt_config_item *intr
506 = &private_intr_config[SMD_WCNSS].smsm;
507 wakeup_v1_riva();
508
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700509 if (intr->out_base) {
510 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530511 smd_write_intr(intr->out_bit_pos,
512 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700513 } else {
514 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530515 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700516 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530517}
518
/*
 * notify_other_smsm() - interrupt every remote host interested in a change
 * @smsm_entry:  SMSM state entry that was just modified
 * @notify_mask: bits of that entry which changed
 *
 * For each remote host, reads its interrupt mask for @smsm_entry from
 * shared memory and raises that host's SMSM interrupt if any changed
 * bit is unmasked.  Finally snapshots local state for apps-side SMSM
 * callback clients.
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
				& notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
				& notify_mask)) {
		uint32_t mux_val;

		/* 8x50 signals Q6 via a shared-memory mux counter
		 * rather than a plain edge interrupt */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
				& notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
				& notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	smsm_cb_snapshot(0);
}
563
/*
 * smd_diag() - dump remote-processor diagnostic strings from SMEM
 *
 * Looks up the DIAG error message and the crash log entries in shared
 * memory and logs them if present.  Each buffer is forcibly
 * NUL-terminated at its last byte before printing, since the remote
 * side is not trusted to terminate the string.
 */
void smd_diag(void)
{
	char *x;
	int size;

	x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
	if (x != 0) {
		/* ensure termination before treating as a C string */
		x[SZ_DIAG_ERR_MSG - 1] = 0;
		SMD_INFO("smem: DIAG '%s'\n", x);
	}

	x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
	if (x != 0) {
		x[size - 1] = 0;
		pr_err("smem: CRASH LOG\n'%s'\n", x);
	}
}
581
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700582
/*
 * handle_modem_crash() - terminal handler for a detected modem crash
 *
 * Logs the crash plus any SMEM diagnostic strings, then spins forever:
 * the modem or the watchdog is expected to reset the whole system.
 * This function never returns.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
597
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700598int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700599{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700600 /* if the modem's not ready yet, we have to hope for the best */
601 if (!smsm_info.state)
602 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700603
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700604 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700605 handle_modem_crash();
606 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700607 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700608 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700609}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700610EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700611
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700612/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700613 * irq handler and code that mutates the channel
614 * list or fiddles with channel state
615 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700616static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700617DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700618
619/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700620 * operations to avoid races while creating or
621 * destroying smd_channel structures
622 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700623static DEFINE_MUTEX(smd_creation_mutex);
624
625static int smd_initialized;
626
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700627struct smd_shared_v1 {
628 struct smd_half_channel ch0;
629 unsigned char data0[SMD_BUF_SIZE];
630 struct smd_half_channel ch1;
631 unsigned char data1[SMD_BUF_SIZE];
632};
633
634struct smd_shared_v2 {
635 struct smd_half_channel ch0;
636 struct smd_half_channel ch1;
637};
638
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600639struct smd_shared_v2_word_access {
640 struct smd_half_channel_word_access ch0;
641 struct smd_half_channel_word_access ch1;
642};
643
/*
 * struct smd_channel - per-channel bookkeeping for one SMD logical channel
 *
 * The send/recv halves live in shared memory; their concrete layout
 * varies by target, so they are accessed only through half_ch.
 */
struct smd_channel {
	volatile void *send; /* some variant of smd_half_channel */
	volatile void *recv; /* some variant of smd_half_channel */
	unsigned char *send_data;	/* outgoing FIFO data area */
	unsigned char *recv_data;	/* incoming FIFO data area */
	unsigned fifo_size;
	unsigned fifo_mask;		/* fifo_size - 1, for index wrapping */
	struct list_head ch_list;	/* membership in per-edge channel list */

	unsigned current_packet;	/* bytes left in packet being read */
	unsigned n;			/* channel id in allocation table */
	void *priv;			/* client cookie passed to notify() */
	void (*notify)(void *priv, unsigned flags);

	/* protocol ops; selected at open time (stream vs packet mode) */
	int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
	int (*write)(smd_channel_t *ch, const void *data, int len,
			int user_buf);
	int (*read_avail)(smd_channel_t *ch);
	int (*write_avail)(smd_channel_t *ch);
	int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
			int user_buf);

	void (*update_state)(smd_channel_t *ch);
	unsigned last_state;		/* last observed remote state */
	void (*notify_other_cpu)(void);	/* ring the remote's doorbell */

	char name[20];
	struct platform_device pdev;	/* registered for driver binding */
	unsigned type;			/* edge (SMD_APPS_MODEM, ...) */

	int pending_pkt_sz;

	char is_pkt_ch;			/* nonzero for packet-mode channels */

	/*
	 * private internal functions to access *send and *recv.
	 * never to be exported outside of smd
	 */
	struct smd_half_channel_access *half_ch;
};
684
685struct edge_to_pid {
686 uint32_t local_pid;
687 uint32_t remote_pid;
Eric Holmberg17992c12012-02-29 12:54:44 -0700688 char subsys_name[SMD_MAX_CH_NAME_LEN];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700689};
690
691/**
692 * Maps edge type to local and remote processor ID's.
693 */
694static struct edge_to_pid edge_to_pids[] = {
Eric Holmberg5a55b4a2012-03-01 14:41:54 -0700695 [SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
696 [SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
697 [SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
698 [SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
699 [SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
700 [SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
701 [SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
702 [SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
703 [SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
704 [SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
705 [SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
706 [SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
707 [SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
708 [SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
709 [SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600710 [SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
711 [SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
712 [SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
713 [SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700714};
715
/**
 * Subsystem-restart notifier registration record.
 * @processor: processor ID of the subsystem being watched
 * @name:      human-readable subsystem name
 * @nb:        notifier block registered for restart notifications
 *	       (see <mach/subsystem_notif.h>)
 */
struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};
721
/* nonzero suppresses the SMSM reset handshake (set elsewhere in file) */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channel lifecycle lists, protected by smd_lock where accessed in irq */
static LIST_HEAD(smd_ch_closed_list);
/* channels waiting for the remote side to complete a close handshake */
static LIST_HEAD(smd_ch_closing_list);
/* channels queued for finalize_channel_close_work processing */
static LIST_HEAD(smd_ch_to_close_list);
/* open channels, one list per remote-processor edge */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one flag per slot of the 64-entry shared channel allocation table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
746
747static void smd_channel_probe_worker(struct work_struct *work)
748{
749 struct smd_alloc_elm *shared;
750 unsigned n;
751 uint32_t type;
752
753 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
754
755 if (!shared) {
756 pr_err("%s: allocation table not initialized\n", __func__);
757 return;
758 }
759
760 mutex_lock(&smd_probe_lock);
761 for (n = 0; n < 64; n++) {
762 if (smd_ch_allocated[n])
763 continue;
764
765 /* channel should be allocated only if APPS
766 processor is involved */
767 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600768 if (type >= ARRAY_SIZE(edge_to_pids) ||
769 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700770 continue;
771 if (!shared[n].ref_count)
772 continue;
773 if (!shared[n].name[0])
774 continue;
775
776 if (!smd_alloc_channel(&shared[n]))
777 smd_ch_allocated[n] = 1;
778 else
779 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
780 }
781 mutex_unlock(&smd_probe_lock);
782}
783
784/**
785 * Lookup processor ID and determine if it belongs to the proved edge
786 * type.
787 *
788 * @shared2: Pointer to v2 shared channel structure
789 * @type: Edge type
790 * @pid: Processor ID of processor on edge
791 * @local_ch: Channel that belongs to processor @pid
792 * @remote_ch: Other side of edge contained @pid
793 *
794 * Returns 0 for not on edge, 1 for found on edge
795 */
796static int pid_is_on_edge(struct smd_shared_v2 *shared2,
797 uint32_t type, uint32_t pid,
798 struct smd_half_channel **local_ch,
799 struct smd_half_channel **remote_ch
800 )
801{
802 int ret = 0;
803 struct edge_to_pid *edge;
804
805 *local_ch = 0;
806 *remote_ch = 0;
807
808 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
809 return 0;
810
811 edge = &edge_to_pids[type];
812 if (edge->local_pid != edge->remote_pid) {
813 if (pid == edge->local_pid) {
814 *local_ch = &shared2->ch0;
815 *remote_ch = &shared2->ch1;
816 ret = 1;
817 } else if (pid == edge->remote_pid) {
818 *local_ch = &shared2->ch1;
819 *remote_ch = &shared2->ch0;
820 ret = 1;
821 }
822 }
823
824 return ret;
825}
826
Eric Holmberg17992c12012-02-29 12:54:44 -0700827/*
828 * Returns a pointer to the subsystem name or NULL if no
829 * subsystem name is available.
830 *
831 * @type - Edge definition
832 */
833const char *smd_edge_to_subsystem(uint32_t type)
834{
835 const char *subsys = NULL;
836
837 if (type < ARRAY_SIZE(edge_to_pids)) {
838 subsys = edge_to_pids[type].subsys_name;
839 if (subsys[0] == 0x0)
840 subsys = NULL;
841 }
842 return subsys;
843}
844EXPORT_SYMBOL(smd_edge_to_subsystem);
845
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700846/*
847 * Returns a pointer to the subsystem name given the
848 * remote processor ID.
849 *
850 * @pid Remote processor ID
851 * @returns Pointer to subsystem name or NULL if not found
852 */
853const char *smd_pid_to_subsystem(uint32_t pid)
854{
855 const char *subsys = NULL;
856 int i;
857
858 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
859 if (pid == edge_to_pids[i].remote_pid &&
860 edge_to_pids[i].subsys_name[0] != 0x0
861 ) {
862 subsys = edge_to_pids[i].subsys_name;
863 break;
864 }
865 }
866
867 return subsys;
868}
869EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700870
/*
 * Force one shared half-channel into @new_state as part of an edge
 * reset: clear the modem-control signals (DSR/CTS/CD) and raise fSTATE
 * so the change is observed as a state event.  A half-channel already
 * in SMD_SS_CLOSED is left untouched.
 */
static void smd_reset_edge(struct smd_half_channel *ch, unsigned new_state)
{
	if (ch->state != SMD_SS_CLOSED) {
		ch->state = new_state;
		ch->fDSR = 0;
		ch->fCTS = 0;
		ch->fCD = 0;
		ch->fSTATE = 1;
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700881
/*
 * Walk the channel allocation table and force @new_state onto the
 * half-channel owned by processor @pid for every edge it sits on.
 *
 * @shared: channel allocation table (SMD_CHANNELS entries)
 * @new_state: state to force onto affected half-channels
 * @pid: processor being restarted
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	struct smd_shared_v2 *shared2;
	uint32_t type;
	struct smd_half_channel *local_ch;
	struct smd_half_channel *remote_ch;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip table slots never activated or unnamed */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);
	}
}
915
916
/*
 * Reset all SMD/SMSM state owned by a restarting processor.
 *
 * Sequence: release any remote spinlocks the dead processor may still
 * hold, clear (and for the modem, re-handshake) its SMSM state entry,
 * then walk its edges twice - first forcing remote state to CLOSING,
 * then to CLOSED - raising interrupts after each pass so local clients
 * observe a normal close sequence.
 *
 * @restart_pid: processor ID being restarted
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
984
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700985/* how many bytes are available for reading */
986static int smd_stream_read_avail(struct smd_channel *ch)
987{
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600988 return (ch->half_ch->get_head(ch->recv) -
989 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700990}
991
992/* how many bytes we are free to write */
993static int smd_stream_write_avail(struct smd_channel *ch)
994{
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600995 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
996 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700997}
998
999static int smd_packet_read_avail(struct smd_channel *ch)
1000{
1001 if (ch->current_packet) {
1002 int n = smd_stream_read_avail(ch);
1003 if (n > ch->current_packet)
1004 n = ch->current_packet;
1005 return n;
1006 } else {
1007 return 0;
1008 }
1009}
1010
1011static int smd_packet_write_avail(struct smd_channel *ch)
1012{
1013 int n = smd_stream_write_avail(ch);
1014 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1015}
1016
1017static int ch_is_open(struct smd_channel *ch)
1018{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001019 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1020 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1021 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001022}
1023
1024/* provide a pointer and length to readable data in the fifo */
1025static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1026{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001027 unsigned head = ch->half_ch->get_head(ch->recv);
1028 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001029 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001030
1031 if (tail <= head)
1032 return head - tail;
1033 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001034 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001035}
1036
/* nonzero when the remote side has asked us to suppress read interrupts */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1041
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001042/* advance the fifo read pointer after data from ch_read_buffer is consumed */
1043static void ch_read_done(struct smd_channel *ch, unsigned count)
1044{
1045 BUG_ON(count > smd_stream_read_avail(ch));
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001046 ch->half_ch->set_tail(ch->recv,
1047 (ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001048 wmb();
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001049 ch->half_ch->set_fTAIL(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001050}
1051
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * @user_buf: nonzero when @_data is a userspace pointer (copy_to_user)
 *
 * Returns the number of bytes consumed from the fifo.
 * NOTE(review): on a copy_to_user failure the error is only logged;
 * the fifo is still advanced and those bytes count as read - confirm
 * callers tolerate this.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1092
/* streams have no special state requiring updating */
static void update_stream_state(struct smd_channel *ch)
{
	/* intentionally empty: stream channels carry no packet framing */
}
1097
/*
 * Consume packet headers from the stream until a packet with a nonzero
 * length is latched into ch->current_packet, discarding zero-length
 * packets along the way.  Returns with current_packet == 0 when a full
 * header is not yet available.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		/* hdr[0] holds the payload length (see smd_packet_write) */
		ch->current_packet = hdr[0];
	}
}
1117
1118/* provide a pointer and length to next free space in the fifo */
1119static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1120{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001121 unsigned head = ch->half_ch->get_head(ch->send);
1122 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001123 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001124
1125 if (head < tail) {
1126 return tail - head - 1;
1127 } else {
1128 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001129 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001130 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001131 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001132 }
1133}
1134
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* ensure payload and head update are visible before raising fHEAD */
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1146
/*
 * Move the local (send) half-channel to state @n, raise fSTATE so the
 * remote side re-reads it, and interrupt the remote processor.  The
 * DSR/CTS/CD signals are asserted only for SMD_SS_OPENED and cleared
 * for all other states.
 */
static void ch_set_state(struct smd_channel *ch, unsigned n)
{
	if (n == SMD_SS_OPENED) {
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	} else {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
	}
	ch->half_ch->set_state(ch->send, n);
	ch->half_ch->set_fSTATE(ch->send, 1);
	ch->notify_other_cpu();
}
1162
/*
 * Schedule the channel probe worker when the shared heap free offset
 * has moved, which signals that new shared-memory allocations (and
 * possibly new channel table entries) may exist.
 */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}
1171
/*
 * React to an observed remote (recv) half-channel state transition by
 * driving the local (send) half-channel through the matching handshake
 * step and notifying the channel client.
 *
 * @last: previously observed remote state
 * @next: newly observed remote state
 *
 * Runs under smd_lock (called from the irq service paths).
 */
static void smd_state_change(struct smd_channel *ch,
			 unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			/* fresh open: rewind fifo indices first */
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			/* drop any partially parsed packet state */
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}
1217
/*
 * Service channels waiting on a remote close acknowledgment
 * (smd_ch_closing_list): ack the fSTATE flag and propagate any remote
 * state transition.  Data flags are not serviced for closing channels.
 */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1235
/*
 * Core interrupt service for one edge: acknowledge the per-channel
 * event flags, propagate remote state changes, and deliver DATA/STATUS
 * events to the client of each channel on @list.
 *
 * @list: channel list for the interrupting edge
 * @notify: function that raises the outgoing interrupt to that edge
 *	    (passed through to smd_state_change callees via ch ops)
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* collect-and-clear event flags:
			 * 1 = fHEAD (new data), 2 = fTAIL (space freed),
			 * 4 = fSTATE (state change) */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* suppress STATUS if a full state change was delivered */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1286
/* interrupt from the modem processor: service its edge and closers */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1295
/* interrupt from the LPASS/Q6 processor: service its edge and closers */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001304
/* interrupt from the DSPS processor: service its edge and closers */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001313
/* interrupt from the WCNSS processor: service its edge and closers */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1322
/* interrupt from the RPM processor: service its edge and closers */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int RPM->Apps\n");
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1331
/*
 * Tasklet body that services every edge as if its interrupt had fired;
 * scheduled from smd_sleep_exit() and called directly from
 * smd_channel_reset() to pick up events not delivered by a real irq.
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}
1341
1342static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1343
Brian Swetland37521a32009-07-01 18:30:47 -07001344static inline int smd_need_int(struct smd_channel *ch)
1345{
1346 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001347 if (ch->half_ch->get_fHEAD(ch->recv) ||
1348 ch->half_ch->get_fTAIL(ch->recv) ||
1349 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001350 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001351 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001352 return 1;
1353 }
1354 return 0;
1355}
1356
/*
 * Called on wakeup from sleep: scan the open channels on each edge and
 * schedule the fake-irq tasklet if any has a pending event, so events
 * that arrived while interrupts were unavailable are serviced.
 *
 * NOTE(review): the rpm channel list is not scanned here even though
 * smd_fake_irq_handler() services it - confirm whether RPM channels
 * need the same wakeup check.
 */
void smd_sleep_exit(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	int need_int = 0;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();

	if (need_int) {
		SMD_DBG("smd_sleep_exit need interrupt\n");
		tasklet_schedule(&smd_fake_irq_tasklet);
	}
}
EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001397
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001398static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001399{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001400 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1401 return 0;
1402 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001403 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001404
1405 /* for cases where xfer type is 0 */
1406 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001407 return 0;
1408
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001409 /* for cases where xfer type is 0 */
1410 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1411 return 0;
1412
1413 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001414 return 1;
1415 else
1416 return 0;
1417}
1418
/*
 * Copy up to @len bytes from @_data into the transmit fifo.
 *
 * @user_buf: nonzero when @_data is a userspace pointer (copy_from_user)
 *
 * Returns the number of bytes queued (possibly 0 when the fifo is
 * full), or -EINVAL for a negative length.  If the channel closes
 * mid-write, len is restored so 0 is reported.  The remote processor
 * is interrupted only when at least one byte was written.
 *
 * NOTE(review): a copy_from_user failure is only logged; the region is
 * still committed via ch_write_done - confirm callers tolerate this.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
					int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1464
/*
 * Write one complete packet: a 5-word header (word 0 = payload length,
 * words 1-4 zero) followed by @len payload bytes.
 *
 * Returns @len on success, -EINVAL for a negative length, 0 for an
 * empty packet, -ENOMEM when the fifo cannot hold header plus payload.
 *
 * NOTE(review): the header-write failure path returns the bare value
 * -1 (which errno-aware callers read as -EPERM) - confirm intended.
 */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
				int user_buf)
{
	int ret;
	unsigned hdr[5];

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}


	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}
1501
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001502static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001503{
1504 int r;
1505
1506 if (len < 0)
1507 return -EINVAL;
1508
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001509 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001510 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001511 if (!read_intr_blocked(ch))
1512 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001513
1514 return r;
1515}
1516
/*
 * Read up to @len bytes of the current packet (clamped to the bytes
 * remaining in it), then advance the packet bookkeeping under smd_lock
 * so the next header is parsed once this packet is fully consumed.
 *
 * Returns bytes read, or -EINVAL for a negative length.
 */
static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
{
	unsigned long flags;
	int r;

	if (len < 0)
		return -EINVAL;

	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	spin_lock_irqsave(&smd_lock, flags);
	ch->current_packet -= r;
	update_packet_state(ch);
	spin_unlock_irqrestore(&smd_lock, flags);

	return r;
}
1540
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001541static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1542 int user_buf)
1543{
1544 int r;
1545
1546 if (len < 0)
1547 return -EINVAL;
1548
1549 if (len > ch->current_packet)
1550 len = ch->current_packet;
1551
1552 r = ch_read(ch, data, len, user_buf);
1553 if (r > 0)
1554 if (!read_intr_blocked(ch))
1555 ch->notify_other_cpu();
1556
1557 ch->current_packet -= r;
1558 update_packet_state(ch);
1559
1560 return r;
1561}
1562
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301563#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001564static int smd_alloc_v2(struct smd_channel *ch)
1565{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001566 void *buffer;
1567 unsigned buffer_sz;
1568
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001569 if (is_word_access_ch(ch->type)) {
1570 struct smd_shared_v2_word_access *shared2;
1571 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1572 sizeof(*shared2));
1573 if (!shared2) {
1574 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1575 return -EINVAL;
1576 }
1577 ch->send = &shared2->ch0;
1578 ch->recv = &shared2->ch1;
1579 } else {
1580 struct smd_shared_v2 *shared2;
1581 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1582 sizeof(*shared2));
1583 if (!shared2) {
1584 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1585 return -EINVAL;
1586 }
1587 ch->send = &shared2->ch0;
1588 ch->recv = &shared2->ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001589 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001590 ch->half_ch = get_half_ch_funcs(ch->type);
1591
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001592 buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
1593 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301594 SMD_INFO("smem_get_entry failed\n");
1595 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001596 }
1597
1598 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301599 if (buffer_sz & (buffer_sz - 1)) {
1600 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1601 return -EINVAL;
1602 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001603 buffer_sz /= 2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001604 ch->send_data = buffer;
1605 ch->recv_data = buffer + buffer_sz;
1606 ch->fifo_size = buffer_sz;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001607
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001608 return 0;
1609}
1610
/* v1 channels do not exist on PKG3/PKG4 targets; always fail. */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}
1615
1616#else /* define v1 for older targets */
/* v2 channels do not exist on older targets; always fail. */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}
1621
/*
 * Attach the legacy v1 shared-memory descriptor for @ch.
 *
 * A single SMEM item holds both half-channel control blocks and both
 * fixed-size data FIFOs. Returns 0 on success, -EINVAL if the SMEM
 * item for this channel id was never allocated.
 */
static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	ch->half_ch = get_half_ch_funcs(ch->type);
	return 0;
}
1638
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301639#endif
1640
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001641static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001642{
1643 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001644
1645 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1646 if (ch == 0) {
1647 pr_err("smd_alloc_channel() out of memory\n");
Brian Swetland34f719b2009-10-30 16:22:05 -07001648 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001649 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001650 ch->n = alloc_elm->cid;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001651 ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001652
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001653 if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001654 kfree(ch);
Brian Swetland34f719b2009-10-30 16:22:05 -07001655 return -1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001656 }
1657
1658 ch->fifo_mask = ch->fifo_size - 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001659
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001660 /* probe_worker guarentees ch->type will be a valid type */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001661 if (ch->type == SMD_APPS_MODEM)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001662 ch->notify_other_cpu = notify_modem_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001663 else if (ch->type == SMD_APPS_QDSP)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001664 ch->notify_other_cpu = notify_dsp_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001665 else if (ch->type == SMD_APPS_DSPS)
1666 ch->notify_other_cpu = notify_dsps_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001667 else if (ch->type == SMD_APPS_WCNSS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001668 ch->notify_other_cpu = notify_wcnss_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001669 else if (ch->type == SMD_APPS_RPM)
1670 ch->notify_other_cpu = notify_rpm_smd;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001671
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001672 if (smd_is_packet(alloc_elm)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001673 ch->read = smd_packet_read;
1674 ch->write = smd_packet_write;
1675 ch->read_avail = smd_packet_read_avail;
1676 ch->write_avail = smd_packet_write_avail;
1677 ch->update_state = update_packet_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 ch->read_from_cb = smd_packet_read_from_cb;
1679 ch->is_pkt_ch = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001680 } else {
1681 ch->read = smd_stream_read;
1682 ch->write = smd_stream_write;
1683 ch->read_avail = smd_stream_read_avail;
1684 ch->write_avail = smd_stream_write_avail;
1685 ch->update_state = update_stream_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001686 ch->read_from_cb = smd_stream_read;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001687 }
1688
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001689 memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
1690 ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001691
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001692 ch->pdev.name = ch->name;
1693 ch->pdev.id = ch->type;
1694
1695 SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
1696 ch->name, ch->n);
1697
1698 mutex_lock(&smd_creation_mutex);
1699 list_add(&ch->ch_list, &smd_ch_closed_list);
1700 mutex_unlock(&smd_creation_mutex);
1701
1702 platform_device_register(&ch->pdev);
1703 if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
1704 /* create a platform driver to be used by smd_tty driver
1705 * so that it can access the loopback port
1706 */
1707 loopback_tty_pdev.id = ch->type;
1708 platform_device_register(&loopback_tty_pdev);
1709 }
1710 return 0;
1711}
1712
1713static inline void notify_loopback_smd(void)
1714{
1715 unsigned long flags;
1716 struct smd_channel *ch;
1717
1718 spin_lock_irqsave(&smd_lock, flags);
1719 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1720 ch->notify(ch->priv, SMD_EVENT_DATA);
1721 }
1722 spin_unlock_irqrestore(&smd_lock, flags);
1723}
1724
/*
 * Create the purely-local loopback channel.
 *
 * Both halves of the channel point at the same static control block and
 * the same static data buffer, so anything written is immediately
 * readable back. Registers the channel on the closed list and as a
 * platform device, like a real channel.
 *
 * Returns 0 on success, -1 on allocation failure.
 */
static int smd_alloc_loopback_channel(void)
{
	/* shared by send and recv: writes loop straight back to the reader */
	static struct smd_half_channel smd_loopback_ctl;
	static char smd_loopback_data[SMD_BUF_SIZE];
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("%s: out of memory\n", __func__);
		return -1;
	}
	ch->n = SMD_LOOPBACK_CID;

	ch->send = &smd_loopback_ctl;
	ch->recv = &smd_loopback_ctl;
	ch->send_data = smd_loopback_data;
	ch->recv_data = smd_loopback_data;
	ch->fifo_size = SMD_BUF_SIZE;

	ch->fifo_mask = ch->fifo_size - 1;
	ch->type = SMD_LOOPBACK_TYPE;
	ch->notify_other_cpu = notify_loopback_smd;

	/* loopback is always a stream channel */
	ch->read = smd_stream_read;
	ch->write = smd_stream_write;
	ch->read_avail = smd_stream_read_avail;
	ch->write_avail = smd_stream_write_avail;
	ch->update_state = update_stream_state;
	ch->read_from_cb = smd_stream_read;

	memset(ch->name, 0, 20);
	memcpy(ch->name, "local_loopback", 14);

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	return 0;
}
1770
/*
 * Placeholder notify callback installed when a client supplies none (or
 * after close), so ch->notify can always be invoked unconditionally.
 */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1774
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001775static void finalize_channel_close_fn(struct work_struct *work)
1776{
1777 unsigned long flags;
1778 struct smd_channel *ch;
1779 struct smd_channel *index;
1780
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001781 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001782 spin_lock_irqsave(&smd_lock, flags);
1783 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1784 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001785 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001786 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1787 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001788 }
1789 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001790 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001791}
1792
1793struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001794{
1795 struct smd_channel *ch;
1796
1797 mutex_lock(&smd_creation_mutex);
1798 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001799 if (!strcmp(name, ch->name) &&
1800 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001801 list_del(&ch->ch_list);
1802 mutex_unlock(&smd_creation_mutex);
1803 return ch;
1804 }
1805 }
1806 mutex_unlock(&smd_creation_mutex);
1807
1808 return NULL;
1809}
1810
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001811int smd_named_open_on_edge(const char *name, uint32_t edge,
1812 smd_channel_t **_ch,
1813 void *priv, void (*notify)(void *, unsigned))
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001814{
1815 struct smd_channel *ch;
1816 unsigned long flags;
1817
1818 if (smd_initialized == 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001819 SMD_INFO("smd_open() before smd_init()\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001820 return -ENODEV;
1821 }
1822
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001823 SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);
1824
1825 ch = smd_get_channel(name, edge);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001826 if (!ch) {
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001827 /* check closing list for port */
1828 spin_lock_irqsave(&smd_lock, flags);
1829 list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
1830 if (!strncmp(name, ch->name, 20) &&
1831 (edge == ch->type)) {
1832 /* channel exists, but is being closed */
1833 spin_unlock_irqrestore(&smd_lock, flags);
1834 return -EAGAIN;
1835 }
1836 }
1837
1838 /* check closing workqueue list for port */
1839 list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
1840 if (!strncmp(name, ch->name, 20) &&
1841 (edge == ch->type)) {
1842 /* channel exists, but is being closed */
1843 spin_unlock_irqrestore(&smd_lock, flags);
1844 return -EAGAIN;
1845 }
1846 }
1847 spin_unlock_irqrestore(&smd_lock, flags);
1848
1849 /* one final check to handle closing->closed race condition */
1850 ch = smd_get_channel(name, edge);
1851 if (!ch)
1852 return -ENODEV;
1853 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001854
1855 if (notify == 0)
1856 notify = do_nothing_notify;
1857
1858 ch->notify = notify;
1859 ch->current_packet = 0;
1860 ch->last_state = SMD_SS_CLOSED;
1861 ch->priv = priv;
1862
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001863 if (edge == SMD_LOOPBACK_TYPE) {
1864 ch->last_state = SMD_SS_OPENED;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001865 ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
1866 ch->half_ch->set_fDSR(ch->send, 1);
1867 ch->half_ch->set_fCTS(ch->send, 1);
1868 ch->half_ch->set_fCD(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001869 }
1870
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001871 *_ch = ch;
1872
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001873 SMD_DBG("smd_open: opening '%s'\n", ch->name);
1874
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001875 spin_lock_irqsave(&smd_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001876 if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
Brian Swetland37521a32009-07-01 18:30:47 -07001877 list_add(&ch->ch_list, &smd_ch_list_modem);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001878 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
Brian Swetland37521a32009-07-01 18:30:47 -07001879 list_add(&ch->ch_list, &smd_ch_list_dsp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001880 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
1881 list_add(&ch->ch_list, &smd_ch_list_dsps);
1882 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
1883 list_add(&ch->ch_list, &smd_ch_list_wcnss);
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001884 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
1885 list_add(&ch->ch_list, &smd_ch_list_rpm);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001886 else
1887 list_add(&ch->ch_list, &smd_ch_list_loopback);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001888
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001889 SMD_DBG("%s: opening ch %d\n", __func__, ch->n);
1890
1891 if (edge != SMD_LOOPBACK_TYPE)
1892 smd_state_change(ch, ch->last_state, SMD_SS_OPENING);
1893
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001894 spin_unlock_irqrestore(&smd_lock, flags);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001895
1896 return 0;
1897}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001898EXPORT_SYMBOL(smd_named_open_on_edge);
1899
1900
1901int smd_open(const char *name, smd_channel_t **_ch,
1902 void *priv, void (*notify)(void *, unsigned))
1903{
1904 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1905 notify);
1906}
1907EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001908
/*
 * Close channel @ch.
 *
 * Drops our side to CLOSED. If the remote side is still OPENED the
 * channel parks on the closing list until the remote acknowledges;
 * otherwise it goes straight back onto the closed (reusable) list.
 *
 * Returns 0 on success, -1 for a NULL channel.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	/* loopback shares one control block; clear its flags directly */
	if (ch->n == SMD_LOOPBACK_CID) {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* remote still open: wait on closing list for its ack */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		/* closed list is protected by the mutex, not smd_lock */
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
1942
1943int smd_write_start(smd_channel_t *ch, int len)
1944{
1945 int ret;
1946 unsigned hdr[5];
1947
1948 if (!ch) {
1949 pr_err("%s: Invalid channel specified\n", __func__);
1950 return -ENODEV;
1951 }
1952 if (!ch->is_pkt_ch) {
1953 pr_err("%s: non-packet channel specified\n", __func__);
1954 return -EACCES;
1955 }
1956 if (len < 1) {
1957 pr_err("%s: invalid length: %d\n", __func__, len);
1958 return -EINVAL;
1959 }
1960
1961 if (ch->pending_pkt_sz) {
1962 pr_err("%s: packet of size: %d in progress\n", __func__,
1963 ch->pending_pkt_sz);
1964 return -EBUSY;
1965 }
1966 ch->pending_pkt_sz = len;
1967
1968 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
1969 ch->pending_pkt_sz = 0;
1970 SMD_DBG("%s: no space to write packet header\n", __func__);
1971 return -EAGAIN;
1972 }
1973
1974 hdr[0] = len;
1975 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1976
1977
1978 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1979 if (ret < 0 || ret != sizeof(hdr)) {
1980 ch->pending_pkt_sz = 0;
1981 pr_err("%s: packet header failed to write\n", __func__);
1982 return -EPERM;
1983 }
1984 return 0;
1985}
1986EXPORT_SYMBOL(smd_write_start);
1987
1988int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1989{
1990 int bytes_written;
1991
1992 if (!ch) {
1993 pr_err("%s: Invalid channel specified\n", __func__);
1994 return -ENODEV;
1995 }
1996 if (len < 1) {
1997 pr_err("%s: invalid length: %d\n", __func__, len);
1998 return -EINVAL;
1999 }
2000
2001 if (!ch->pending_pkt_sz) {
2002 pr_err("%s: no transaction in progress\n", __func__);
2003 return -ENOEXEC;
2004 }
2005 if (ch->pending_pkt_sz - len < 0) {
2006 pr_err("%s: segment of size: %d will make packet go over "
2007 "length\n", __func__, len);
2008 return -EINVAL;
2009 }
2010
2011 bytes_written = smd_stream_write(ch, data, len, user_buf);
2012
2013 ch->pending_pkt_sz -= bytes_written;
2014
2015 return bytes_written;
2016}
2017EXPORT_SYMBOL(smd_write_segment);
2018
2019int smd_write_end(smd_channel_t *ch)
2020{
2021
2022 if (!ch) {
2023 pr_err("%s: Invalid channel specified\n", __func__);
2024 return -ENODEV;
2025 }
2026 if (ch->pending_pkt_sz) {
2027 pr_err("%s: current packet not completely written\n", __func__);
2028 return -E2BIG;
2029 }
2030
2031 return 0;
2032}
2033EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002034
2035int smd_read(smd_channel_t *ch, void *data, int len)
2036{
Jack Pham1b236d12012-03-19 15:27:18 -07002037 if (!ch) {
2038 pr_err("%s: Invalid channel specified\n", __func__);
2039 return -ENODEV;
2040 }
2041
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002042 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002043}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002044EXPORT_SYMBOL(smd_read);
2045
2046int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2047{
Jack Pham1b236d12012-03-19 15:27:18 -07002048 if (!ch) {
2049 pr_err("%s: Invalid channel specified\n", __func__);
2050 return -ENODEV;
2051 }
2052
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002053 return ch->read(ch, data, len, 1);
2054}
2055EXPORT_SYMBOL(smd_read_user_buffer);
2056
2057int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2058{
Jack Pham1b236d12012-03-19 15:27:18 -07002059 if (!ch) {
2060 pr_err("%s: Invalid channel specified\n", __func__);
2061 return -ENODEV;
2062 }
2063
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002064 return ch->read_from_cb(ch, data, len, 0);
2065}
2066EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002067
2068int smd_write(smd_channel_t *ch, const void *data, int len)
2069{
Jack Pham1b236d12012-03-19 15:27:18 -07002070 if (!ch) {
2071 pr_err("%s: Invalid channel specified\n", __func__);
2072 return -ENODEV;
2073 }
2074
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002075 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002076}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002077EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002078
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002079int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002080{
Jack Pham1b236d12012-03-19 15:27:18 -07002081 if (!ch) {
2082 pr_err("%s: Invalid channel specified\n", __func__);
2083 return -ENODEV;
2084 }
2085
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002086 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002087}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002088EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002089
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002090int smd_read_avail(smd_channel_t *ch)
2091{
Jack Pham1b236d12012-03-19 15:27:18 -07002092 if (!ch) {
2093 pr_err("%s: Invalid channel specified\n", __func__);
2094 return -ENODEV;
2095 }
2096
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002097 return ch->read_avail(ch);
2098}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002099EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002100
2101int smd_write_avail(smd_channel_t *ch)
2102{
Jack Pham1b236d12012-03-19 15:27:18 -07002103 if (!ch) {
2104 pr_err("%s: Invalid channel specified\n", __func__);
2105 return -ENODEV;
2106 }
2107
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002108 return ch->write_avail(ch);
2109}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002110EXPORT_SYMBOL(smd_write_avail);
2111
2112void smd_enable_read_intr(smd_channel_t *ch)
2113{
2114 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002115 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002116}
2117EXPORT_SYMBOL(smd_enable_read_intr);
2118
2119void smd_disable_read_intr(smd_channel_t *ch)
2120{
2121 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002122 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002123}
2124EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002125
/* Not implemented: always fails. Poll read_avail/notify instead. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2130
/* Not implemented: always fails. Poll write_avail/notify instead. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2135
2136int smd_cur_packet_size(smd_channel_t *ch)
2137{
Jack Pham1b236d12012-03-19 15:27:18 -07002138 if (!ch) {
2139 pr_err("%s: Invalid channel specified\n", __func__);
2140 return -ENODEV;
2141 }
2142
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002143 return ch->current_packet;
2144}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002145EXPORT_SYMBOL(smd_cur_packet_size);
2146
2147int smd_tiocmget(smd_channel_t *ch)
2148{
Jack Pham1b236d12012-03-19 15:27:18 -07002149 if (!ch) {
2150 pr_err("%s: Invalid channel specified\n", __func__);
2151 return -ENODEV;
2152 }
2153
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002154 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2155 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2156 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2157 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2158 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2159 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002160}
2161EXPORT_SYMBOL(smd_tiocmget);
2162
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002163/* this api will be called while holding smd_lock */
2164int
2165smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002166{
Jack Pham1b236d12012-03-19 15:27:18 -07002167 if (!ch) {
2168 pr_err("%s: Invalid channel specified\n", __func__);
2169 return -ENODEV;
2170 }
2171
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002172 if (set & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002173 ch->half_ch->set_fDSR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002174
2175 if (set & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002176 ch->half_ch->set_fCTS(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002177
2178 if (clear & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002179 ch->half_ch->set_fDSR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002180
2181 if (clear & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002182 ch->half_ch->set_fCTS(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002183
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002184 ch->half_ch->set_fSTATE(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002185 barrier();
2186 ch->notify_other_cpu();
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002187
2188 return 0;
2189}
2190EXPORT_SYMBOL(smd_tiocmset_from_cb);
2191
2192int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2193{
2194 unsigned long flags;
2195
Jack Pham1b236d12012-03-19 15:27:18 -07002196 if (!ch) {
2197 pr_err("%s: Invalid channel specified\n", __func__);
2198 return -ENODEV;
2199 }
2200
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002201 spin_lock_irqsave(&smd_lock, flags);
2202 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002203 spin_unlock_irqrestore(&smd_lock, flags);
2204
2205 return 0;
2206}
2207EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002208
2209
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002210/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002211
Jeff Hugobdc734d2012-03-26 16:05:39 -06002212/*
2213 * Shared Memory Range Check
2214 *
2215 * Takes a physical address and an offset and checks if the resulting physical
2216 * address would fit into one of the aux smem regions. If so, returns the
2217 * corresponding virtual address. Otherwise returns NULL. Expects the array
2218 * of smem regions to be in ascending physical address order.
2219 *
2220 * @base: physical base address to check
2221 * @offset: offset from the base to get the final address
2222 */
2223static void *smem_range_check(void *base, unsigned offset)
2224{
2225 int i;
2226 void *phys_addr;
2227 unsigned size;
2228
2229 for (i = 0; i < num_smem_areas; ++i) {
2230 phys_addr = smem_areas[i].phys_addr;
2231 size = smem_areas[i].size;
2232 if (base < phys_addr)
2233 return NULL;
2234 if (base > phys_addr + size)
2235 continue;
2236 if (base >= phys_addr && base + offset < phys_addr + size)
2237 return smem_areas[i].virt_addr + offset;
2238 }
2239
2240 return NULL;
2241}
2242
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002243/* smem_alloc returns the pointer to smem item if it is already allocated.
2244 * Otherwise, it returns NULL.
2245 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002246void *smem_alloc(unsigned id, unsigned size)
2247{
2248 return smem_find(id, size);
2249}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002250EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002251
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002252/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
2253 * it allocates it and then returns the pointer to it.
2254 */
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302255void *smem_alloc2(unsigned id, unsigned size_in)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002256{
2257 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2258 struct smem_heap_entry *toc = shared->heap_toc;
2259 unsigned long flags;
2260 void *ret = NULL;
2261
2262 if (!shared->heap_info.initialized) {
2263 pr_err("%s: smem heap info not initialized\n", __func__);
2264 return NULL;
2265 }
2266
2267 if (id >= SMEM_NUM_ITEMS)
2268 return NULL;
2269
2270 size_in = ALIGN(size_in, 8);
2271 remote_spin_lock_irqsave(&remote_spinlock, flags);
2272 if (toc[id].allocated) {
2273 SMD_DBG("%s: %u already allocated\n", __func__, id);
2274 if (size_in != toc[id].size)
2275 pr_err("%s: wrong size %u (expected %u)\n",
2276 __func__, toc[id].size, size_in);
2277 else
2278 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2279 } else if (id > SMEM_FIXED_ITEM_LAST) {
2280 SMD_DBG("%s: allocating %u\n", __func__, id);
2281 if (shared->heap_info.heap_remaining >= size_in) {
2282 toc[id].offset = shared->heap_info.free_offset;
2283 toc[id].size = size_in;
2284 wmb();
2285 toc[id].allocated = 1;
2286
2287 shared->heap_info.free_offset += size_in;
2288 shared->heap_info.heap_remaining -= size_in;
2289 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2290 } else
2291 pr_err("%s: not enough memory %u (required %u)\n",
2292 __func__, shared->heap_info.heap_remaining,
2293 size_in);
2294 }
2295 wmb();
2296 remote_spin_unlock_irqrestore(&remote_spinlock, flags);
2297 return ret;
2298}
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302299EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002300
/*
 * Look up SMEM item @id, returning its virtual address and storing its
 * size in *@size (0 when unallocated).
 *
 * Items whose TOC entry carries an aux-area base address are translated
 * through smem_range_check(); others live in the main shared RAM region.
 * The remote spinlock is only taken once it has been initialized, so
 * this is callable during early boot.
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		/* read size before deriving the address from the entry */
		barrier();
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002333
/*
 * Return the virtual address of SMEM item @id, verifying that its
 * allocated size matches @size_in after 8-byte alignment (mirroring the
 * padding smem_alloc2() applies).
 *
 * Returns NULL when the item is unallocated or the size does not match.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned size;
	void *ptr;

	ptr = smem_get_entry(id, &size);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		/* %u, not %d: id, size_in and size are all unsigned */
		pr_err("smem_find(%u, %u): wrong size %u\n",
			id, size_in, size);
		return 0;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);
2353
/*
 * Allocate and initialize the per-entry SMSM callback state table.
 *
 * Snapshots each entry's current shared-state value so the first
 * notification sees a valid last_value, clears the interrupt masks and
 * prepares the callback lists. The table is populated under smsm_lock
 * so callback registration cannot observe a half-built entry.
 *
 * Returns 0 on success, -ENOMEM if the state table allocation fails.
 */
static int smsm_cb_init(void)
{
	struct smsm_state_info *state_info;
	int n;
	int ret = 0;

	smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
		   GFP_KERNEL);

	if (!smsm_states) {
		pr_err("%s: SMSM init failed\n", __func__);
		return -ENOMEM;
	}

	mutex_lock(&smsm_lock);
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		state_info = &smsm_states[n];
		/* raw read: SMSM state words live in shared device memory */
		state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
		state_info->intr_mask_set = 0x0;
		state_info->intr_mask_clear = 0x0;
		INIT_LIST_HEAD(&state_info->callbacks);
	}
	mutex_unlock(&smsm_lock);

	return ret;
}
2380
2381static int smsm_init(void)
2382{
2383 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2384 int i;
2385 struct smsm_size_info_type *smsm_size_info;
2386
2387 i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
2388 if (i) {
2389 pr_err("%s: remote spinlock init failed %d\n", __func__, i);
2390 return i;
2391 }
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +05302392 spinlocks_initialized = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002393
2394 smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
2395 sizeof(struct smsm_size_info_type));
2396 if (smsm_size_info) {
2397 SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
2398 SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
2399 }
2400
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002401 i = kfifo_alloc(&smsm_snapshot_fifo,
2402 sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
2403 GFP_KERNEL);
2404 if (i) {
2405 pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
2406 return i;
2407 }
Eric Holmberg59a9f942012-03-19 10:04:22 -06002408 wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
2409 "smsm_snapshot");
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002410
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002411 if (!smsm_info.state) {
2412 smsm_info.state = smem_alloc2(ID_SHARED_STATE,
2413 SMSM_NUM_ENTRIES *
2414 sizeof(uint32_t));
2415
2416 if (smsm_info.state) {
2417 __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2418 if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
2419 __raw_writel(0, \
2420 SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
2421 }
2422 }
2423
2424 if (!smsm_info.intr_mask) {
2425 smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
2426 SMSM_NUM_ENTRIES *
2427 SMSM_NUM_HOSTS *
2428 sizeof(uint32_t));
2429
Eric Holmberge8a39322012-04-03 15:14:02 -06002430 if (smsm_info.intr_mask) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002431 for (i = 0; i < SMSM_NUM_ENTRIES; i++)
Eric Holmberge8a39322012-04-03 15:14:02 -06002432 __raw_writel(0x0,
2433 SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
2434
2435 /* Configure legacy modem bits */
2436 __raw_writel(LEGACY_MODEM_SMSM_MASK,
2437 SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
2438 SMSM_APPS));
2439 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002440 }
2441
2442 if (!smsm_info.intr_mux)
2443 smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
2444 SMSM_NUM_INTR_MUX *
2445 sizeof(uint32_t));
2446
2447 i = smsm_cb_init();
2448 if (i)
2449 return i;
2450
2451 wmb();
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06002452 smsm_driver_state_notify(SMSM_INIT, NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002453 return 0;
2454}
2455
2456void smsm_reset_modem(unsigned mode)
2457{
2458 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2459 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2460 } else if (mode == SMSM_MODEM_WAIT) {
2461 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2462 } else { /* reset_mode is SMSM_RESET or default */
2463 mode = SMSM_RESET;
2464 }
2465
2466 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2467}
2468EXPORT_SYMBOL(smsm_reset_modem);
2469
2470void smsm_reset_modem_cont(void)
2471{
2472 unsigned long flags;
2473 uint32_t state;
2474
2475 if (!smsm_info.state)
2476 return;
2477
2478 spin_lock_irqsave(&smem_lock, flags);
2479 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
2480 & ~SMSM_MODEM_WAIT;
2481 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2482 wmb();
2483 spin_unlock_irqrestore(&smem_lock, flags);
2484}
2485EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002486
Eric Holmbergda31d042012-03-28 14:01:02 -06002487static void smsm_cb_snapshot(uint32_t use_wakelock)
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002488{
2489 int n;
2490 uint32_t new_state;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002491 unsigned long flags;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002492 int ret;
2493
2494 ret = kfifo_avail(&smsm_snapshot_fifo);
Eric Holmbergda31d042012-03-28 14:01:02 -06002495 if (ret < SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002496 pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
2497 return;
2498 }
2499
Eric Holmberg96b55f62012-04-03 19:10:46 -06002500 /*
2501 * To avoid a race condition with notify_smsm_cb_clients_worker, the
2502 * following sequence must be followed:
2503 * 1) increment snapshot count
2504 * 2) insert data into FIFO
2505 *
2506 * Potentially in parallel, the worker:
2507 * a) verifies >= 1 snapshots are in FIFO
2508 * b) processes snapshot
2509 * c) decrements reference count
2510 *
2511 * This order ensures that 1 will always occur before abc.
2512 */
Eric Holmbergda31d042012-03-28 14:01:02 -06002513 if (use_wakelock) {
2514 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2515 if (smsm_snapshot_count == 0) {
2516 SMx_POWER_INFO("SMSM snapshot wake lock\n");
2517 wake_lock(&smsm_snapshot_wakelock);
2518 }
2519 ++smsm_snapshot_count;
2520 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2521 }
Eric Holmberg96b55f62012-04-03 19:10:46 -06002522
2523 /* queue state entries */
2524 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2525 new_state = __raw_readl(SMSM_STATE_ADDR(n));
2526
2527 ret = kfifo_in(&smsm_snapshot_fifo,
2528 &new_state, sizeof(new_state));
2529 if (ret != sizeof(new_state)) {
2530 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2531 goto restore_snapshot_count;
2532 }
2533 }
2534
2535 /* queue wakelock usage flag */
2536 ret = kfifo_in(&smsm_snapshot_fifo,
2537 &use_wakelock, sizeof(use_wakelock));
2538 if (ret != sizeof(use_wakelock)) {
2539 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2540 goto restore_snapshot_count;
2541 }
2542
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002543 schedule_work(&smsm_cb_work);
Eric Holmberg96b55f62012-04-03 19:10:46 -06002544 return;
2545
2546restore_snapshot_count:
2547 if (use_wakelock) {
2548 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2549 if (smsm_snapshot_count) {
2550 --smsm_snapshot_count;
2551 if (smsm_snapshot_count == 0) {
2552 SMx_POWER_INFO("SMSM snapshot wake unlock\n");
2553 wake_unlock(&smsm_snapshot_wakelock);
2554 }
2555 } else {
2556 pr_err("%s: invalid snapshot count\n", __func__);
2557 }
2558 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2559 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002560}
2561
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002562static irqreturn_t smsm_irq_handler(int irq, void *data)
2563{
2564 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002565
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002566 if (irq == INT_ADSP_A11_SMSM) {
Eric Holmberg6282c5d2011-10-27 17:30:57 -06002567 uint32_t mux_val;
2568 static uint32_t prev_smem_q6_apps_smsm;
2569
2570 if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
2571 mux_val = __raw_readl(
2572 SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
2573 if (mux_val != prev_smem_q6_apps_smsm)
2574 prev_smem_q6_apps_smsm = mux_val;
2575 }
2576
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002577 spin_lock_irqsave(&smem_lock, flags);
Eric Holmbergda31d042012-03-28 14:01:02 -06002578 smsm_cb_snapshot(1);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002579 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002580 return IRQ_HANDLED;
2581 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002582
2583 spin_lock_irqsave(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002584 if (!smsm_info.state) {
2585 SMSM_INFO("<SM NO STATE>\n");
2586 } else {
2587 unsigned old_apps, apps;
2588 unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002589
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002590 old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002591
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002592 SMSM_DBG("<SM %08x %08x>\n", apps, modm);
2593 if (apps & SMSM_RESET) {
2594 /* If we get an interrupt and the apps SMSM_RESET
2595 bit is already set, the modem is acking the
2596 app's reset ack. */
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06002597 if (!disable_smsm_reset_handshake)
Angshuman Sarkaread67bd2011-09-21 20:13:12 +05302598 apps &= ~SMSM_RESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002599 /* Issue a fake irq to handle any
2600 * smd state changes during reset
2601 */
2602 smd_fake_irq_handler(0);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002603
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002604 /* queue modem restart notify chain */
2605 modem_queue_start_reset_notify();
2606
2607 } else if (modm & SMSM_RESET) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002608 pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
Ram Somani8b9589f2012-04-03 12:07:18 +05302609 if (!disable_smsm_reset_handshake) {
2610 apps |= SMSM_RESET;
2611 flush_cache_all();
2612 outer_flush_all();
2613 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002614 modem_queue_start_reset_notify();
2615
2616 } else if (modm & SMSM_INIT) {
2617 if (!(apps & SMSM_INIT)) {
2618 apps |= SMSM_INIT;
2619 modem_queue_smsm_init_notify();
2620 }
2621
2622 if (modm & SMSM_SMDINIT)
2623 apps |= SMSM_SMDINIT;
2624 if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
2625 (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
2626 apps |= SMSM_RUN;
2627 } else if (modm & SMSM_SYSTEM_DOWNLOAD) {
2628 pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
2629 modem_queue_start_reset_notify();
2630 }
2631
2632 if (old_apps != apps) {
2633 SMSM_DBG("<SM %08x NOTIFY>\n", apps);
2634 __raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2635 do_smd_probe();
2636 notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
2637 }
2638
Eric Holmbergda31d042012-03-28 14:01:02 -06002639 smsm_cb_snapshot(1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002640 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002641 spin_unlock_irqrestore(&smem_lock, flags);
2642 return IRQ_HANDLED;
2643}
2644
Eric Holmberg98c6c642012-02-24 11:29:35 -07002645static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
2646{
2647 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002648 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002649 return smsm_irq_handler(irq, data);
2650}
2651
2652static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2653{
2654 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002655 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002656 return smsm_irq_handler(irq, data);
2657}
2658
2659static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2660{
2661 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002662 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002663 return smsm_irq_handler(irq, data);
2664}
2665
2666static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2667{
2668 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002669 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002670 return smsm_irq_handler(irq, data);
2671}
2672
Eric Holmberge8a39322012-04-03 15:14:02 -06002673/*
2674 * Changes the global interrupt mask. The set and clear masks are re-applied
2675 * every time the global interrupt mask is updated for callback registration
2676 * and de-registration.
2677 *
2678 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2679 * mask and the set mask, the result will be that the interrupt is set.
2680 *
2681 * @smsm_entry SMSM entry to change
2682 * @clear_mask 1 = clear bit, 0 = no-op
2683 * @set_mask 1 = set bit, 0 = no-op
2684 *
2685 * @returns 0 for success, < 0 for error
2686 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002687int smsm_change_intr_mask(uint32_t smsm_entry,
2688 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002689{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002690 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002691 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002692
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002693 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2694 pr_err("smsm_change_state: Invalid entry %d\n",
2695 smsm_entry);
2696 return -EINVAL;
2697 }
2698
2699 if (!smsm_info.intr_mask) {
2700 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002701 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002702 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002703
2704 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002705 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2706 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002707
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002708 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2709 new_mask = (old_mask & ~clear_mask) | set_mask;
2710 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002711
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002712 wmb();
2713 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002714
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002715 return 0;
2716}
2717EXPORT_SYMBOL(smsm_change_intr_mask);
2718
2719int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2720{
2721 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2722 pr_err("smsm_change_state: Invalid entry %d\n",
2723 smsm_entry);
2724 return -EINVAL;
2725 }
2726
2727 if (!smsm_info.intr_mask) {
2728 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2729 return -EIO;
2730 }
2731
2732 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2733 return 0;
2734}
2735EXPORT_SYMBOL(smsm_get_intr_mask);
2736
2737int smsm_change_state(uint32_t smsm_entry,
2738 uint32_t clear_mask, uint32_t set_mask)
2739{
2740 unsigned long flags;
2741 uint32_t old_state, new_state;
2742
2743 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2744 pr_err("smsm_change_state: Invalid entry %d",
2745 smsm_entry);
2746 return -EINVAL;
2747 }
2748
2749 if (!smsm_info.state) {
2750 pr_err("smsm_change_state <SM NO STATE>\n");
2751 return -EIO;
2752 }
2753 spin_lock_irqsave(&smem_lock, flags);
2754
2755 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2756 new_state = (old_state & ~clear_mask) | set_mask;
2757 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2758 SMSM_DBG("smsm_change_state %x\n", new_state);
2759 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002760
2761 spin_unlock_irqrestore(&smem_lock, flags);
2762
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002763 return 0;
2764}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002765EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002766
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002767uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002768{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002769 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002770
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002771 /* needs interface change to return error code */
2772 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2773 pr_err("smsm_change_state: Invalid entry %d",
2774 smsm_entry);
2775 return 0;
2776 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002777
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002778 if (!smsm_info.state) {
2779 pr_err("smsm_get_state <SM NO STATE>\n");
2780 } else {
2781 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2782 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002783
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002784 return rv;
2785}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002786EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002787
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002788/**
2789 * Performs SMSM callback client notifiction.
2790 */
2791void notify_smsm_cb_clients_worker(struct work_struct *work)
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002792{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002793 struct smsm_state_cb_info *cb_info;
2794 struct smsm_state_info *state_info;
2795 int n;
2796 uint32_t new_state;
2797 uint32_t state_changes;
Eric Holmbergda31d042012-03-28 14:01:02 -06002798 uint32_t use_wakelock;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002799 int ret;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002800 unsigned long flags;
Brian Swetland03e00cd2009-07-01 17:58:37 -07002801
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002802 if (!smd_initialized)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002803 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002804
Eric Holmbergda31d042012-03-28 14:01:02 -06002805 while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002806 mutex_lock(&smsm_lock);
2807 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2808 state_info = &smsm_states[n];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002809
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002810 ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
2811 sizeof(new_state));
2812 if (ret != sizeof(new_state)) {
2813 pr_err("%s: snapshot underflow %d\n",
2814 __func__, ret);
2815 mutex_unlock(&smsm_lock);
2816 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002817 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002818
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002819 state_changes = state_info->last_value ^ new_state;
2820 if (state_changes) {
Eric Holmberg98c6c642012-02-24 11:29:35 -07002821 SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
2822 n, state_info->last_value,
2823 new_state);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002824 list_for_each_entry(cb_info,
2825 &state_info->callbacks, cb_list) {
2826
2827 if (cb_info->mask & state_changes)
2828 cb_info->notify(cb_info->data,
2829 state_info->last_value,
2830 new_state);
2831 }
2832 state_info->last_value = new_state;
2833 }
2834 }
2835 mutex_unlock(&smsm_lock);
Eric Holmberg59a9f942012-03-19 10:04:22 -06002836
Eric Holmbergda31d042012-03-28 14:01:02 -06002837 /* read wakelock flag */
2838 ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
2839 sizeof(use_wakelock));
2840 if (ret != sizeof(use_wakelock)) {
2841 pr_err("%s: snapshot underflow %d\n",
2842 __func__, ret);
2843 return;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002844 }
Eric Holmbergda31d042012-03-28 14:01:02 -06002845
2846 if (use_wakelock) {
2847 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2848 if (smsm_snapshot_count) {
2849 --smsm_snapshot_count;
2850 if (smsm_snapshot_count == 0) {
2851 SMx_POWER_INFO("SMSM snapshot"
2852 " wake unlock\n");
2853 wake_unlock(&smsm_snapshot_wakelock);
2854 }
2855 } else {
2856 pr_err("%s: invalid snapshot count\n",
2857 __func__);
2858 }
2859 spin_unlock_irqrestore(&smsm_snapshot_count_lock,
2860 flags);
2861 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002862 }
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002863}
2864
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002865
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002866/**
2867 * Registers callback for SMSM state notifications when the specified
2868 * bits change.
2869 *
2870 * @smsm_entry Processor entry to deregister
2871 * @mask Bits to deregister (if result is 0, callback is removed)
2872 * @notify Notification function to deregister
2873 * @data Opaque data passed in to callback
2874 *
2875 * @returns Status code
2876 * <0 error code
2877 * 0 inserted new entry
2878 * 1 updated mask of existing entry
2879 */
2880int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
2881 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002882{
Eric Holmberge8a39322012-04-03 15:14:02 -06002883 struct smsm_state_info *state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002884 struct smsm_state_cb_info *cb_info;
2885 struct smsm_state_cb_info *cb_found = 0;
Eric Holmberge8a39322012-04-03 15:14:02 -06002886 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002887 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002888
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002889 if (smsm_entry >= SMSM_NUM_ENTRIES)
2890 return -EINVAL;
2891
Eric Holmbergc8002902011-09-16 13:55:57 -06002892 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002893
2894 if (!smsm_states) {
2895 /* smsm not yet initialized */
2896 ret = -ENODEV;
2897 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002898 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002899
Eric Holmberge8a39322012-04-03 15:14:02 -06002900 state = &smsm_states[smsm_entry];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002901 list_for_each_entry(cb_info,
Eric Holmberge8a39322012-04-03 15:14:02 -06002902 &state->callbacks, cb_list) {
2903 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002904 (cb_info->data == data)) {
2905 cb_info->mask |= mask;
2906 cb_found = cb_info;
2907 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002908 }
Eric Holmberge8a39322012-04-03 15:14:02 -06002909 new_mask |= cb_info->mask;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002910 }
2911
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002912 if (!cb_found) {
2913 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
2914 GFP_ATOMIC);
2915 if (!cb_info) {
2916 ret = -ENOMEM;
2917 goto cleanup;
2918 }
2919
2920 cb_info->mask = mask;
2921 cb_info->notify = notify;
2922 cb_info->data = data;
2923 INIT_LIST_HEAD(&cb_info->cb_list);
2924 list_add_tail(&cb_info->cb_list,
Eric Holmberge8a39322012-04-03 15:14:02 -06002925 &state->callbacks);
2926 new_mask |= mask;
2927 }
2928
2929 /* update interrupt notification mask */
2930 if (smsm_entry == SMSM_MODEM_STATE)
2931 new_mask |= LEGACY_MODEM_SMSM_MASK;
2932
2933 if (smsm_info.intr_mask) {
2934 unsigned long flags;
2935
2936 spin_lock_irqsave(&smem_lock, flags);
2937 new_mask = (new_mask & ~state->intr_mask_clear)
2938 | state->intr_mask_set;
2939 __raw_writel(new_mask,
2940 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2941 wmb();
2942 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002943 }
2944
2945cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06002946 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002947 return ret;
2948}
2949EXPORT_SYMBOL(smsm_state_cb_register);
2950
2951
2952/**
2953 * Deregisters for SMSM state notifications for the specified bits.
2954 *
2955 * @smsm_entry Processor entry to deregister
2956 * @mask Bits to deregister (if result is 0, callback is removed)
2957 * @notify Notification function to deregister
2958 * @data Opaque data passed in to callback
2959 *
2960 * @returns Status code
2961 * <0 error code
2962 * 0 not found
2963 * 1 updated mask
2964 * 2 removed callback
2965 */
2966int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
2967 void (*notify)(void *, uint32_t, uint32_t), void *data)
2968{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002969 struct smsm_state_cb_info *cb_info;
Eric Holmberge8a39322012-04-03 15:14:02 -06002970 struct smsm_state_cb_info *cb_tmp;
2971 struct smsm_state_info *state;
2972 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002973 int ret = 0;
2974
2975 if (smsm_entry >= SMSM_NUM_ENTRIES)
2976 return -EINVAL;
2977
Eric Holmbergc8002902011-09-16 13:55:57 -06002978 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002979
2980 if (!smsm_states) {
2981 /* smsm not yet initialized */
Eric Holmbergc8002902011-09-16 13:55:57 -06002982 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002983 return -ENODEV;
2984 }
2985
Eric Holmberge8a39322012-04-03 15:14:02 -06002986 state = &smsm_states[smsm_entry];
2987 list_for_each_entry_safe(cb_info, cb_tmp,
2988 &state->callbacks, cb_list) {
2989 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002990 (cb_info->data == data)) {
2991 cb_info->mask &= ~mask;
2992 ret = 1;
2993 if (!cb_info->mask) {
2994 /* no mask bits set, remove callback */
2995 list_del(&cb_info->cb_list);
2996 kfree(cb_info);
2997 ret = 2;
Eric Holmberge8a39322012-04-03 15:14:02 -06002998 continue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002999 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003000 }
Eric Holmberge8a39322012-04-03 15:14:02 -06003001 new_mask |= cb_info->mask;
3002 }
3003
3004 /* update interrupt notification mask */
3005 if (smsm_entry == SMSM_MODEM_STATE)
3006 new_mask |= LEGACY_MODEM_SMSM_MASK;
3007
3008 if (smsm_info.intr_mask) {
3009 unsigned long flags;
3010
3011 spin_lock_irqsave(&smem_lock, flags);
3012 new_mask = (new_mask & ~state->intr_mask_clear)
3013 | state->intr_mask_set;
3014 __raw_writel(new_mask,
3015 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
3016 wmb();
3017 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003018 }
3019
Eric Holmbergc8002902011-09-16 13:55:57 -06003020 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003021 return ret;
3022}
3023EXPORT_SYMBOL(smsm_state_cb_deregister);
3024
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003025int smsm_driver_state_notifier_register(struct notifier_block *nb)
3026{
3027 int ret;
3028 if (!nb)
3029 return -EINVAL;
3030 mutex_lock(&smsm_driver_state_notifier_lock);
3031 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
3032 mutex_unlock(&smsm_driver_state_notifier_lock);
3033 return ret;
3034}
3035EXPORT_SYMBOL(smsm_driver_state_notifier_register);
3036
3037int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
3038{
3039 int ret;
3040 if (!nb)
3041 return -EINVAL;
3042 mutex_lock(&smsm_driver_state_notifier_lock);
3043 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
3044 nb);
3045 mutex_unlock(&smsm_driver_state_notifier_lock);
3046 return ret;
3047}
3048EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
3049
3050static void smsm_driver_state_notify(uint32_t state, void *data)
3051{
3052 mutex_lock(&smsm_driver_state_notifier_lock);
3053 raw_notifier_call_chain(&smsm_driver_state_notifier_list,
3054 state, data);
3055 mutex_unlock(&smsm_driver_state_notifier_lock);
3056}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003057
/*
 * Registers and wake-enables all SMD/SMSM interrupts for the legacy
 * (non-devicetree) targets.  Each request_irq failure unwinds every IRQ
 * registered so far and returns the error; enable_irq_wake failures are
 * only logged.
 *
 * NOTE(review): the error paths in the CONFIG_DSPS/CONFIG_WCNSS/
 * CONFIG_DSPS_SMSM sections unconditionally free INT_ADSP_A11 /
 * INT_ADSP_A11_SMSM, which are only requested under CONFIG_QDSP6 --
 * confirm all shipping configs that enable the former also enable the
 * latter.
 */
int smd_core_init(void)
{
	int r;
	unsigned long flags = IRQF_TRIGGER_RISING;
	SMD_INFO("smd_core_init()\n");

	r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
			flags, "smd_dev", 0);
	if (r < 0)
		return r;
	r = enable_irq_wake(INT_A9_M2A_0);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_A9_M2A_0\n");

	r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
			flags, "smsm_dev", 0);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		return r;
	}
	r = enable_irq_wake(INT_A9_M2A_5);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_A9_M2A_5\n");

#if defined(CONFIG_QDSP6)
#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
	/* SMD and SMSM share one line on these targets */
	flags |= IRQF_SHARED;
#endif
	r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
			flags, "smd_dev", smd_dsp_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		return r;
	}

	r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
			flags, "smsm_dev", smsm_dsp_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_ADSP_A11);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_ADSP_A11\n");

#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
	r = enable_irq_wake(INT_ADSP_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: enable_irq_wake "
		       "failed for INT_ADSP_A11_SMSM\n");
#endif
	/* later requests must not inherit the shared flag */
	flags &= ~IRQF_SHARED;
#endif

#if defined(CONFIG_DSPS)
	r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
			flags, "smd_dev", smd_dsps_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_DSPS_A11);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_ADSP_A11\n");
#endif

#if defined(CONFIG_WCNSS)
	r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
			flags, "smd_dev", smd_wcnss_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_WCNSS_A11);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_WCNSS_A11\n");

	r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
			flags, "smsm_dev", smsm_wcnss_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_WCNSS_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
#endif

#if defined(CONFIG_DSPS_SMSM)
	r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
			flags, "smsm_dev", smsm_dsps_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
		free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_DSPS_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
#endif
	SMD_INFO("smd_core_init() done\n");

	return 0;
}
3194
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303195static int intr_init(struct interrupt_config_item *private_irq,
3196 struct smd_irq_config *platform_irq,
3197 struct platform_device *pdev
3198 )
3199{
3200 int irq_id;
3201 int ret;
3202 int ret_wake;
3203
3204 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3205 private_irq->out_offset = platform_irq->out_offset;
3206 private_irq->out_base = platform_irq->out_base;
3207
3208 irq_id = platform_get_irq_byname(
3209 pdev,
3210 platform_irq->irq_name
3211 );
3212 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3213 platform_irq->irq_name, irq_id);
3214 ret = request_irq(irq_id,
3215 private_irq->irq_handler,
3216 platform_irq->flags,
3217 platform_irq->device_name,
3218 (void *)platform_irq->dev_id
3219 );
3220 if (ret < 0) {
3221 platform_irq->irq_id = ret;
3222 } else {
3223 platform_irq->irq_id = irq_id;
3224 ret_wake = enable_irq_wake(irq_id);
3225 if (ret_wake < 0) {
3226 pr_err("smd: enable_irq_wake failed on %s",
3227 platform_irq->irq_name);
3228 }
3229 }
3230
3231 return ret;
3232}
3233
Jeff Hugobdc734d2012-03-26 16:05:39 -06003234int sort_cmp_func(const void *a, const void *b)
3235{
3236 struct smem_area *left = (struct smem_area *)(a);
3237 struct smem_area *right = (struct smem_area *)(b);
3238
3239 return left->phys_addr - right->phys_addr;
3240}
3241
/*
 * smd_core_platform_init() - initialize SMD from board platform data
 * @pdev: platform device carrying a struct smd_platform in platform_data
 *
 * Performs three setup stages, each with goto-based unwinding:
 *   1. Optionally latches the SSR handshake-disable flag.
 *   2. If auxiliary SMEM regions are described, allocates the smem_areas
 *      table, ioremaps each region, and sorts the table by physical
 *      address (so later lookups can binary-search / scan in order).
 *   3. Registers the SMD (and, where supported, SMSM) interrupts for
 *      every configured subsystem edge and records subsystem names.
 *
 * Returns 0 on success or a negative errno; on failure all IRQs and
 * mappings acquired so far are released.
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;
	struct smd_smem_regions *smd_smem_areas;
	int smem_idx = 0;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	/* optional SSR tuning: some targets skip the SMSM reset handshake */
	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			   smd_ssr_config->disable_smsm_reset_handshake;

	/* stage 2: map any auxiliary shared-memory regions */
	smd_smem_areas = smd_platform_data->smd_smem_areas;
	if (smd_smem_areas) {
		num_smem_areas = smd_platform_data->num_smem_areas;
		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
				GFP_KERNEL);
		if (!smem_areas) {
			pr_err("%s: smem_areas kmalloc failed\n", __func__);
			err_ret = -ENOMEM;
			goto smem_areas_alloc_fail;
		}

		for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
			smem_areas[smem_idx].phys_addr =
					smd_smem_areas[smem_idx].phys_addr;
			smem_areas[smem_idx].size =
					smd_smem_areas[smem_idx].size;
			smem_areas[smem_idx].virt_addr = ioremap_nocache(
				(unsigned long)(smem_areas[smem_idx].phys_addr),
				smem_areas[smem_idx].size);
			if (!smem_areas[smem_idx].virt_addr) {
				pr_err("%s: ioremap_nocache() of addr:%p"
					" size: %x\n", __func__,
					smem_areas[smem_idx].phys_addr,
					smem_areas[smem_idx].size);
				err_ret = -ENOMEM;
				/* NOTE(review): the increment makes the
				 * unwind loop below start at THIS (failed)
				 * index, i.e. it iounmap()s the NULL
				 * virt_addr that just failed — looks like
				 * it should unwind from smem_idx - 1;
				 * confirm iounmap(NULL) is benign here */
				++smem_idx;
				goto smem_failed;
			}
		}
		/* keep regions ordered by physical address */
		sort(smem_areas, num_smem_areas,
				sizeof(struct smem_area),
				sort_cmp_func, NULL);
	}

	/* stage 3: hook up interrupts for each subsystem edge */
	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			goto intr_failed;
		}

		/* only init smsm structs if this edge supports smsm */
		if (cfg->smsm_int.irq_id)
			ret = intr_init(
				&private_intr_config[cfg->irq_config_id].smsm,
				&cfg->smsm_int,
				pdev
				);

		/* NOTE(review): when the edge has no smsm irq, this tests
		 * the stale 'ret' from the smd intr_init above — benign
		 * today because that value is known to be >= 0 here, but
		 * fragile if the flow above changes */
		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			goto intr_failed;
		}

		if (cfg->subsys_name)
			strlcpy(edge_to_pids[cfg->edge].subsys_name,
				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}


	SMD_INFO("smd_core_platform_init() done\n");
	return 0;

intr_failed:
	pr_err("smd: deregistering IRQs\n");
	/* walk ALL configs: intr_init() recorded a negative irq_id for
	 * anything that failed, so the >= 0 guards skip those entries */
	for (i = 0; i < num_ss; ++i) {
		cfg = &smd_ss_config_list[i];

		if (cfg->smd_int.irq_id >= 0)
			free_irq(cfg->smd_int.irq_id,
				(void *)cfg->smd_int.dev_id
				);
		if (cfg->smsm_int.irq_id >= 0)
			free_irq(cfg->smsm_int.irq_id,
				(void *)cfg->smsm_int.dev_id
				);
	}
smem_failed:
	/* unmap everything below smem_idx, then drop the table */
	for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
		iounmap(smem_areas[smem_idx].virt_addr);
	kfree(smem_areas);
smem_areas_alloc_fail:
	return err_ret;
}
3357
Gregory Bean4416e9e2010-07-28 10:22:12 -07003358static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003359{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303360 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003361
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303362 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003363 INIT_WORK(&probe_work, smd_channel_probe_worker);
3364
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003365 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3366 if (IS_ERR(channel_close_wq)) {
3367 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3368 return -ENOMEM;
3369 }
3370
3371 if (smsm_init()) {
3372 pr_err("smsm_init() failed\n");
3373 return -1;
3374 }
3375
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303376 if (pdev) {
3377 if (pdev->dev.of_node) {
3378 pr_err("SMD: Device tree not currently supported\n");
3379 return -ENODEV;
3380 } else if (pdev->dev.platform_data) {
3381 ret = smd_core_platform_init(pdev);
3382 if (ret) {
3383 pr_err(
3384 "SMD: smd_core_platform_init() failed\n");
3385 return -ENODEV;
3386 }
3387 } else {
3388 ret = smd_core_init();
3389 if (ret) {
3390 pr_err("smd_core_init() failed\n");
3391 return -ENODEV;
3392 }
3393 }
3394 } else {
3395 pr_err("SMD: PDEV not found\n");
3396 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003397 }
3398
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003399 smd_initialized = 1;
3400
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003401 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003402 smsm_irq_handler(0, 0);
3403 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003404
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003405 return 0;
3406}
3407
/* forward declaration: entries in restart_notifiers[] below point here */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

/*
 * Map of subsystem-restart (SSR) names to the SMD processor whose
 * channels must be reset when that subsystem goes down.  Registered
 * with the SSR framework in modem_restart_late_init().
 * NOTE(review): "gss" deliberately reuses SMD_MODEM — presumably the
 * GSS shares the modem edge on those targets; confirm against the
 * board files before changing.
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3419
3420static int restart_notifier_cb(struct notifier_block *this,
3421 unsigned long code,
3422 void *data)
3423{
3424 if (code == SUBSYS_AFTER_SHUTDOWN) {
3425 struct restart_notifier_block *notifier;
3426
3427 notifier = container_of(this,
3428 struct restart_notifier_block, nb);
3429 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3430 __func__, notifier->processor,
3431 notifier->name);
3432
3433 smd_channel_reset(notifier->processor);
3434 }
3435
3436 return NOTIFY_DONE;
3437}
3438
3439static __init int modem_restart_late_init(void)
3440{
3441 int i;
3442 void *handle;
3443 struct restart_notifier_block *nb;
3444
3445 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3446 nb = &restart_notifiers[i];
3447 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3448 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3449 __func__, nb->name, handle);
3450 }
3451 return 0;
3452}
3453late_initcall(modem_restart_late_init);
3454
/* platform driver binding msm_smd_probe() to the "msm_smd" device */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3462
/* module entry point: register the SMD platform driver */
static int __init msm_smd_init(void)
{
	return platform_driver_register(&msm_smd_driver);
}

module_init(msm_smd_init);

MODULE_DESCRIPTION("MSM Shared Memory Core");
MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
MODULE_LICENSE("GPL");