blob: ef0f62a7c173ec39a58ce51dd98ea40e7f98d6e6 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070038#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070040#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053042#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070043#include <mach/proc_comm.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053044#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070045
46#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060050 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060051 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070052#define CONFIG_QDSP6 1
53#endif
54
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060055#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
56 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057#define CONFIG_DSPS 1
58#endif
59
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060#if defined(CONFIG_ARCH_MSM8960) \
61 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060063#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070065
66#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067#define SMEM_VERSION 0x000B
68#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070069#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060070#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72uint32_t SMSM_NUM_ENTRIES = 8;
73uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
Eric Holmberge8a39322012-04-03 15:14:02 -060075/* Legacy SMSM interrupt notifications */
76#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
77 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070078
79enum {
80 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 MSM_SMSM_DEBUG = 1U << 1,
82 MSM_SMD_INFO = 1U << 2,
83 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070084 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085};
86
87struct smsm_shared_info {
88 uint32_t *state;
89 uint32_t *intr_mask;
90 uint32_t *intr_mux;
91};
92
93static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060094static struct kfifo smsm_snapshot_fifo;
95static struct wake_lock smsm_snapshot_wakelock;
96static int smsm_snapshot_count;
97static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098
/* Layout of the shared-memory item describing SMSM table dimensions
 * (written by the remote side; reserved fields presumably for future use
 * — TODO confirm against SMEM documentation). */
struct smsm_size_info_type {
	uint32_t num_hosts;
	uint32_t num_entries;
	uint32_t reserved0;
	uint32_t reserved1;
};

/* One registered SMSM state-change callback; linked into
 * smsm_state_info.callbacks. */
struct smsm_state_cb_info {
	struct list_head cb_list;
	uint32_t mask;	/* state bits the client cares about */
	void *data;	/* opaque client cookie passed back to notify() */
	void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
};

/* Per-SMSM-entry bookkeeping for local callback clients. */
struct smsm_state_info {
	struct list_head callbacks;
	uint32_t last_value;	/* last state value delivered to clients */
	/* aggregate interrupt mask bits requested set/cleared by clients */
	uint32_t intr_mask_set;
	uint32_t intr_mask_clear;
};
119
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530120struct interrupt_config_item {
121 /* must be initialized */
122 irqreturn_t (*irq_handler)(int req, void *data);
123 /* outgoing interrupt config (set from platform data) */
124 uint32_t out_bit_pos;
125 void __iomem *out_base;
126 uint32_t out_offset;
Eric Holmbergdeace152012-07-25 12:17:11 -0600127 int irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530128};
129
130struct interrupt_config {
131 struct interrupt_config_item smd;
132 struct interrupt_config_item smsm;
133};
134
135static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700136static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530137static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700138static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530139static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700140static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530141static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700142static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600143static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530144static irqreturn_t smsm_irq_handler(int irq, void *data);
145
/* Static table of interrupt handlers per remote subsystem; the outgoing
 * fields (out_base etc.) are filled in later from platform data. */
static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
	[SMD_MODEM] = {
		.smd.irq_handler = smd_modem_irq_handler,
		.smsm.irq_handler = smsm_modem_irq_handler,
	},
	[SMD_Q6] = {
		.smd.irq_handler = smd_dsp_irq_handler,
		.smsm.irq_handler = smsm_dsp_irq_handler,
	},
	[SMD_DSPS] = {
		.smd.irq_handler = smd_dsps_irq_handler,
		.smsm.irq_handler = smsm_dsps_irq_handler,
	},
	[SMD_WCNSS] = {
		.smd.irq_handler = smd_wcnss_irq_handler,
		.smsm.irq_handler = smsm_wcnss_irq_handler,
	},
	[SMD_RPM] = {
		.smd.irq_handler = smd_rpm_irq_handler,
		.smsm.irq_handler = NULL, /* does not support smsm */
	},
};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600168
/* One physically-contiguous shared-memory region and its ioremapped
 * virtual address; used by smem_range_check() for address translation. */
struct smem_area {
	void *phys_addr;
	unsigned size;
	void __iomem *virt_addr;
};
174static uint32_t num_smem_areas;
175static struct smem_area *smem_areas;
176static void *smem_range_check(void *base, unsigned offset);
177
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700178struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530179
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700180#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
181#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
182 entry * SMSM_NUM_HOSTS + host)
183#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
184
185/* Internal definitions which are not exported in some targets */
186enum {
187 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700188};
189
190static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700191module_param_named(debug_mask, msm_smd_debug_mask,
192 int, S_IRUGO | S_IWUSR | S_IWGRP);
193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194#if defined(CONFIG_MSM_SMD_DEBUG)
195#define SMD_DBG(x...) do { \
196 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
197 printk(KERN_DEBUG x); \
198 } while (0)
199
200#define SMSM_DBG(x...) do { \
201 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
202 printk(KERN_DEBUG x); \
203 } while (0)
204
205#define SMD_INFO(x...) do { \
206 if (msm_smd_debug_mask & MSM_SMD_INFO) \
207 printk(KERN_INFO x); \
208 } while (0)
209
210#define SMSM_INFO(x...) do { \
211 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
212 printk(KERN_INFO x); \
213 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700214#define SMx_POWER_INFO(x...) do { \
215 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
216 printk(KERN_INFO x); \
217 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218#else
219#define SMD_DBG(x...) do { } while (0)
220#define SMSM_DBG(x...) do { } while (0)
221#define SMD_INFO(x...) do { } while (0)
222#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700223#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700224#endif
225
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700226static unsigned last_heap_free = 0xffffffff;
227
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228static inline void smd_write_intr(unsigned int val,
229 const void __iomem *addr);
230
231#if defined(CONFIG_ARCH_MSM7X30)
232#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530233 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700234#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530235 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530237 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700238#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530239 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700240#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600241#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242#define MSM_TRIG_A2WCNSS_SMD_INT
243#define MSM_TRIG_A2WCNSS_SMSM_INT
244#elif defined(CONFIG_ARCH_MSM8X60)
245#define MSM_TRIG_A2M_SMD_INT \
246 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
247#define MSM_TRIG_A2Q6_SMD_INT \
248 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
249#define MSM_TRIG_A2M_SMSM_INT \
250 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
251#define MSM_TRIG_A2Q6_SMSM_INT \
252 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
253#define MSM_TRIG_A2DSPS_SMD_INT \
254 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600255#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256#define MSM_TRIG_A2WCNSS_SMD_INT
257#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600258#elif defined(CONFIG_ARCH_MSM9615)
259#define MSM_TRIG_A2M_SMD_INT \
260 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
261#define MSM_TRIG_A2Q6_SMD_INT \
262 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
263#define MSM_TRIG_A2M_SMSM_INT \
264 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
265#define MSM_TRIG_A2Q6_SMSM_INT \
266 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
267#define MSM_TRIG_A2DSPS_SMD_INT
268#define MSM_TRIG_A2DSPS_SMSM_INT
269#define MSM_TRIG_A2WCNSS_SMD_INT
270#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700271#elif defined(CONFIG_ARCH_FSM9XXX)
272#define MSM_TRIG_A2Q6_SMD_INT \
273 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
274#define MSM_TRIG_A2Q6_SMSM_INT \
275 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
276#define MSM_TRIG_A2M_SMD_INT \
277 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
278#define MSM_TRIG_A2M_SMSM_INT \
279 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
280#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600281#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700282#define MSM_TRIG_A2WCNSS_SMD_INT
283#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700284#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285#define MSM_TRIG_A2M_SMD_INT \
286 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700287#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288#define MSM_TRIG_A2M_SMSM_INT \
289 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700290#define MSM_TRIG_A2Q6_SMSM_INT
291#define MSM_TRIG_A2DSPS_SMD_INT
292#define MSM_TRIG_A2DSPS_SMSM_INT
293#define MSM_TRIG_A2WCNSS_SMD_INT
294#define MSM_TRIG_A2WCNSS_SMSM_INT
295#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
296#define MSM_TRIG_A2M_SMD_INT \
297 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
298#define MSM_TRIG_A2Q6_SMD_INT
299#define MSM_TRIG_A2M_SMSM_INT \
300 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
301#define MSM_TRIG_A2Q6_SMSM_INT
302#define MSM_TRIG_A2DSPS_SMD_INT
303#define MSM_TRIG_A2DSPS_SMSM_INT
304#define MSM_TRIG_A2WCNSS_SMD_INT
305#define MSM_TRIG_A2WCNSS_SMSM_INT
306#else /* use platform device / device tree configuration */
307#define MSM_TRIG_A2M_SMD_INT
308#define MSM_TRIG_A2Q6_SMD_INT
309#define MSM_TRIG_A2M_SMSM_INT
310#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700311#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600312#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700313#define MSM_TRIG_A2WCNSS_SMD_INT
314#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700315#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700316
Jeff Hugoee40b152012-02-09 17:39:47 -0700317/*
318 * stub out legacy macros if they are not being used so that the legacy
319 * code compiles even though it is not used
320 *
321 * these definitions should not be used in active code and will cause
322 * an early failure
323 */
324#ifndef INT_A9_M2A_0
325#define INT_A9_M2A_0 -1
326#endif
327#ifndef INT_A9_M2A_5
328#define INT_A9_M2A_5 -1
329#endif
330#ifndef INT_ADSP_A11
331#define INT_ADSP_A11 -1
332#endif
333#ifndef INT_ADSP_A11_SMSM
334#define INT_ADSP_A11_SMSM -1
335#endif
336#ifndef INT_DSPS_A11
337#define INT_DSPS_A11 -1
338#endif
339#ifndef INT_DSPS_A11_SMSM
340#define INT_DSPS_A11_SMSM -1
341#endif
342#ifndef INT_WCNSS_A11
343#define INT_WCNSS_A11 -1
344#endif
345#ifndef INT_WCNSS_A11_SMSM
346#define INT_WCNSS_A11_SMSM -1
347#endif
348
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349#define SMD_LOOPBACK_CID 100
350
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600351#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
352static remote_spinlock_t remote_spinlock;
353
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700354static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700355static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600356static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700357
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600358static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700359static void notify_smsm_cb_clients_worker(struct work_struct *work);
360static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600361static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700362static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530363static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600364static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
365static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
366static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700367
/*
 * Write to an outgoing-interrupt register.
 *
 * The wmb() orders all prior shared-memory writes ahead of the MMIO
 * write, so the remote processor observes consistent data before it
 * takes the interrupt.
 */
static inline void smd_write_intr(unsigned int val,
			const void __iomem *addr)
{
	wmb();
	__raw_writel(val, addr);
}
374
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700375#ifdef CONFIG_WCNSS
376static inline void wakeup_v1_riva(void)
377{
378 /*
379 * workaround hack for RIVA v1 hardware bug
380 * trigger GPIO 40 to wake up RIVA from power collaspe
381 * not to be sent to customers
382 */
Jeff Hugoafb8c4a2011-10-27 15:57:27 -0600383 if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) {
384 __raw_writel(0x0, MSM_TLMM_BASE + 0x1284);
385 __raw_writel(0x2, MSM_TLMM_BASE + 0x1284);
386 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700387 /* end workaround */
388}
389#else
390static inline void wakeup_v1_riva(void) {}
391#endif
392
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700393static inline void notify_modem_smd(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700394{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530395 static const struct interrupt_config_item *intr
396 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700397 if (intr->out_base) {
398 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530399 smd_write_intr(intr->out_bit_pos,
400 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700401 } else {
402 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530403 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700404 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700405}
406
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700407static inline void notify_dsp_smd(void)
408{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530409 static const struct interrupt_config_item *intr
410 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700411 if (intr->out_base) {
412 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530413 smd_write_intr(intr->out_bit_pos,
414 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700415 } else {
416 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530417 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700418 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700419}
420
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530421static inline void notify_dsps_smd(void)
422{
423 static const struct interrupt_config_item *intr
424 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700425 if (intr->out_base) {
426 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530427 smd_write_intr(intr->out_bit_pos,
428 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700429 } else {
430 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530431 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700432 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530433}
434
435static inline void notify_wcnss_smd(void)
436{
437 static const struct interrupt_config_item *intr
438 = &private_intr_config[SMD_WCNSS].smd;
439 wakeup_v1_riva();
440
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700441 if (intr->out_base) {
442 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530443 smd_write_intr(intr->out_bit_pos,
444 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700445 } else {
446 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530447 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700448 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530449}
450
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600451static inline void notify_rpm_smd(void)
452{
453 static const struct interrupt_config_item *intr
454 = &private_intr_config[SMD_RPM].smd;
455
456 if (intr->out_base) {
457 ++interrupt_stats[SMD_RPM].smd_out_config_count;
458 smd_write_intr(intr->out_bit_pos,
459 intr->out_base + intr->out_offset);
460 }
461}
462
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530463static inline void notify_modem_smsm(void)
464{
465 static const struct interrupt_config_item *intr
466 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700467 if (intr->out_base) {
468 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530469 smd_write_intr(intr->out_bit_pos,
470 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700471 } else {
472 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530473 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700474 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530475}
476
477static inline void notify_dsp_smsm(void)
478{
479 static const struct interrupt_config_item *intr
480 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700481 if (intr->out_base) {
482 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530483 smd_write_intr(intr->out_bit_pos,
484 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700485 } else {
486 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530487 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700488 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530489}
490
491static inline void notify_dsps_smsm(void)
492{
493 static const struct interrupt_config_item *intr
494 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700495 if (intr->out_base) {
496 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530497 smd_write_intr(intr->out_bit_pos,
498 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700499 } else {
500 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530501 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700502 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530503}
504
505static inline void notify_wcnss_smsm(void)
506{
507 static const struct interrupt_config_item *intr
508 = &private_intr_config[SMD_WCNSS].smsm;
509 wakeup_v1_riva();
510
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700511 if (intr->out_base) {
512 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530513 smd_write_intr(intr->out_bit_pos,
514 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700515 } else {
516 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530517 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700518 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530519}
520
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700521static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
522{
523 /* older protocol don't use smsm_intr_mask,
524 but still communicates with modem */
525 if (!smsm_info.intr_mask ||
526 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
527 & notify_mask))
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530528 notify_modem_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700529
530 if (smsm_info.intr_mask &&
531 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
532 & notify_mask)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700533 uint32_t mux_val;
534
Eric Holmberg6282c5d2011-10-27 17:30:57 -0600535 if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700536 mux_val = __raw_readl(
537 SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
538 mux_val++;
539 __raw_writel(mux_val,
540 SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
541 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530542 notify_dsp_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700543 }
544
545 if (smsm_info.intr_mask &&
546 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
547 & notify_mask)) {
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530548 notify_wcnss_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700549 }
550
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600551 if (smsm_info.intr_mask &&
552 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
553 & notify_mask)) {
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530554 notify_dsps_smsm();
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600555 }
556
Eric Holmbergda31d042012-03-28 14:01:02 -0600557 /*
558 * Notify local SMSM callback clients without wakelock since this
559 * code is used by power management during power-down/-up sequencing
560 * on DEM-based targets. Grabbing a wakelock in this case will
561 * abort the power-down sequencing.
562 */
Eric Holmberg51676a12012-07-10 18:45:23 -0600563 if (smsm_info.intr_mask &&
564 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
565 & notify_mask)) {
566 smsm_cb_snapshot(0);
567 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700568}
569
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700570void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700571{
572 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700573 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700574
575 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
576 if (x != 0) {
577 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700578 SMD_INFO("smem: DIAG '%s'\n", x);
579 }
580
581 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
582 if (x != 0) {
583 x[size - 1] = 0;
584 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700585 }
586}
587
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700588
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700589static void handle_modem_crash(void)
590{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700591 pr_err("MODEM/AMSS has CRASHED\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700592 smd_diag();
593
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700594 /* hard reboot if possible FIXME
595 if (msm_reset_hook)
596 msm_reset_hook();
597 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700598
599 /* in this case the modem or watchdog should reboot us */
600 for (;;)
601 ;
602}
603
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700604int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700605{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700606 /* if the modem's not ready yet, we have to hope for the best */
607 if (!smsm_info.state)
608 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700609
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700610 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700611 handle_modem_crash();
612 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700613 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700614 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700615}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700616EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700617
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700618/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700619 * irq handler and code that mutates the channel
620 * list or fiddles with channel state
621 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700622static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700623DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700624
625/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700626 * operations to avoid races while creating or
627 * destroying smd_channel structures
628 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700629static DEFINE_MUTEX(smd_creation_mutex);
630
631static int smd_initialized;
632
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700633struct smd_shared_v1 {
634 struct smd_half_channel ch0;
635 unsigned char data0[SMD_BUF_SIZE];
636 struct smd_half_channel ch1;
637 unsigned char data1[SMD_BUF_SIZE];
638};
639
640struct smd_shared_v2 {
641 struct smd_half_channel ch0;
642 struct smd_half_channel ch1;
643};
644
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600645struct smd_shared_v2_word_access {
646 struct smd_half_channel_word_access ch0;
647 struct smd_half_channel_word_access ch1;
648};
649
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700650struct smd_channel {
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600651 volatile void *send; /* some variant of smd_half_channel */
652 volatile void *recv; /* some variant of smd_half_channel */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700653 unsigned char *send_data;
654 unsigned char *recv_data;
655 unsigned fifo_size;
656 unsigned fifo_mask;
657 struct list_head ch_list;
658
659 unsigned current_packet;
660 unsigned n;
661 void *priv;
662 void (*notify)(void *priv, unsigned flags);
663
664 int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
665 int (*write)(smd_channel_t *ch, const void *data, int len,
666 int user_buf);
667 int (*read_avail)(smd_channel_t *ch);
668 int (*write_avail)(smd_channel_t *ch);
669 int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
670 int user_buf);
671
672 void (*update_state)(smd_channel_t *ch);
673 unsigned last_state;
674 void (*notify_other_cpu)(void);
675
676 char name[20];
677 struct platform_device pdev;
678 unsigned type;
679
680 int pending_pkt_sz;
681
682 char is_pkt_ch;
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600683
684 /*
685 * private internal functions to access *send and *recv.
686 * never to be exported outside of smd
687 */
688 struct smd_half_channel_access *half_ch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700689};
690
/* Local/remote processor IDs for one SMD edge; subsys_name is only
 * populated for apps-local edges (used for subsystem restart lookup). */
struct edge_to_pid {
	uint32_t local_pid;
	uint32_t remote_pid;
	char subsys_name[SMD_MAX_CH_NAME_LEN];
};

/**
 * Maps edge type to local and remote processor ID's.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
721
/*
 * Binds a subsystem-restart notifier_block to the processor it watches,
 * so the notifier callback can recover the processor ID via container_of.
 */
struct restart_notifier_block {
	unsigned processor;		/* SMSM/SMD processor ID being watched */
	char *name;			/* subsystem name, for logging */
	struct notifier_block nb;	/* registered with the notif chain */
};
727
/* when set, skip the SMSM_RESET handshake during a modem reset */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channel lifecycle lists: closed/closing/to-close bookkeeping */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
/* open channels, partitioned by the remote processor on their edge */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one flag per slot of the 64-entry SMEM channel allocation table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
752
753static void smd_channel_probe_worker(struct work_struct *work)
754{
755 struct smd_alloc_elm *shared;
756 unsigned n;
757 uint32_t type;
758
759 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
760
761 if (!shared) {
762 pr_err("%s: allocation table not initialized\n", __func__);
763 return;
764 }
765
766 mutex_lock(&smd_probe_lock);
767 for (n = 0; n < 64; n++) {
768 if (smd_ch_allocated[n])
769 continue;
770
771 /* channel should be allocated only if APPS
772 processor is involved */
773 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600774 if (type >= ARRAY_SIZE(edge_to_pids) ||
775 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700776 continue;
777 if (!shared[n].ref_count)
778 continue;
779 if (!shared[n].name[0])
780 continue;
781
782 if (!smd_alloc_channel(&shared[n]))
783 smd_ch_allocated[n] = 1;
784 else
785 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
786 }
787 mutex_unlock(&smd_probe_lock);
788}
789
790/**
791 * Lookup processor ID and determine if it belongs to the proved edge
792 * type.
793 *
794 * @shared2: Pointer to v2 shared channel structure
795 * @type: Edge type
796 * @pid: Processor ID of processor on edge
797 * @local_ch: Channel that belongs to processor @pid
798 * @remote_ch: Other side of edge contained @pid
Jeff Hugo70a7e562012-09-07 11:24:32 -0600799 * @is_word_access_ch: Bool, is this a word aligned access channel
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700800 *
801 * Returns 0 for not on edge, 1 for found on edge
802 */
Jeff Hugo70a7e562012-09-07 11:24:32 -0600803static int pid_is_on_edge(void *shared2,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700804 uint32_t type, uint32_t pid,
Jeff Hugo70a7e562012-09-07 11:24:32 -0600805 void **local_ch,
806 void **remote_ch,
807 int is_word_access_ch
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700808 )
809{
810 int ret = 0;
811 struct edge_to_pid *edge;
Jeff Hugo70a7e562012-09-07 11:24:32 -0600812 void *ch0;
813 void *ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700814
815 *local_ch = 0;
816 *remote_ch = 0;
817
818 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
819 return 0;
820
Jeff Hugo70a7e562012-09-07 11:24:32 -0600821 if (is_word_access_ch) {
822 ch0 = &((struct smd_shared_v2_word_access *)(shared2))->ch0;
823 ch1 = &((struct smd_shared_v2_word_access *)(shared2))->ch1;
824 } else {
825 ch0 = &((struct smd_shared_v2 *)(shared2))->ch0;
826 ch1 = &((struct smd_shared_v2 *)(shared2))->ch1;
827 }
828
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700829 edge = &edge_to_pids[type];
830 if (edge->local_pid != edge->remote_pid) {
831 if (pid == edge->local_pid) {
Jeff Hugo70a7e562012-09-07 11:24:32 -0600832 *local_ch = ch0;
833 *remote_ch = ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700834 ret = 1;
835 } else if (pid == edge->remote_pid) {
Jeff Hugo70a7e562012-09-07 11:24:32 -0600836 *local_ch = ch1;
837 *remote_ch = ch0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700838 ret = 1;
839 }
840 }
841
842 return ret;
843}
844
Eric Holmberg17992c12012-02-29 12:54:44 -0700845/*
846 * Returns a pointer to the subsystem name or NULL if no
847 * subsystem name is available.
848 *
849 * @type - Edge definition
850 */
851const char *smd_edge_to_subsystem(uint32_t type)
852{
853 const char *subsys = NULL;
854
855 if (type < ARRAY_SIZE(edge_to_pids)) {
856 subsys = edge_to_pids[type].subsys_name;
857 if (subsys[0] == 0x0)
858 subsys = NULL;
859 }
860 return subsys;
861}
862EXPORT_SYMBOL(smd_edge_to_subsystem);
863
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700864/*
865 * Returns a pointer to the subsystem name given the
866 * remote processor ID.
867 *
868 * @pid Remote processor ID
869 * @returns Pointer to subsystem name or NULL if not found
870 */
871const char *smd_pid_to_subsystem(uint32_t pid)
872{
873 const char *subsys = NULL;
874 int i;
875
876 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
877 if (pid == edge_to_pids[i].remote_pid &&
878 edge_to_pids[i].subsys_name[0] != 0x0
879 ) {
880 subsys = edge_to_pids[i].subsys_name;
881 break;
882 }
883 }
884
885 return subsys;
886}
887EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700888
Jeff Hugo70a7e562012-09-07 11:24:32 -0600889static void smd_reset_edge(void *void_ch, unsigned new_state,
890 int is_word_access_ch)
Eric Holmberg2a563c32011-10-05 14:51:43 -0600891{
Jeff Hugo70a7e562012-09-07 11:24:32 -0600892 if (is_word_access_ch) {
893 struct smd_half_channel_word_access *ch =
894 (struct smd_half_channel_word_access *)(void_ch);
895 if (ch->state != SMD_SS_CLOSED) {
896 ch->state = new_state;
897 ch->fDSR = 0;
898 ch->fCTS = 0;
899 ch->fCD = 0;
900 ch->fSTATE = 1;
901 }
902 } else {
903 struct smd_half_channel *ch =
904 (struct smd_half_channel *)(void_ch);
905 if (ch->state != SMD_SS_CLOSED) {
906 ch->state = new_state;
907 ch->fDSR = 0;
908 ch->fCTS = 0;
909 ch->fCD = 0;
910 ch->fSTATE = 1;
911 }
Eric Holmberg2a563c32011-10-05 14:51:43 -0600912 }
913}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700914
915static void smd_channel_reset_state(struct smd_alloc_elm *shared,
916 unsigned new_state, unsigned pid)
917{
918 unsigned n;
Jeff Hugo70a7e562012-09-07 11:24:32 -0600919 void *shared2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700920 uint32_t type;
Jeff Hugo70a7e562012-09-07 11:24:32 -0600921 void *local_ch;
922 void *remote_ch;
923 int is_word_access;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700924
925 for (n = 0; n < SMD_CHANNELS; n++) {
926 if (!shared[n].ref_count)
927 continue;
928 if (!shared[n].name[0])
929 continue;
930
931 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo70a7e562012-09-07 11:24:32 -0600932 is_word_access = is_word_access_ch(type);
933 if (is_word_access)
934 shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
935 sizeof(struct smd_shared_v2_word_access));
936 else
937 shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
938 sizeof(struct smd_shared_v2));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700939 if (!shared2)
940 continue;
941
Jeff Hugo70a7e562012-09-07 11:24:32 -0600942 if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch,
943 is_word_access))
944 smd_reset_edge(local_ch, new_state, is_word_access);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700945
Eric Holmberg2a563c32011-10-05 14:51:43 -0600946 /*
947 * ModemFW is in the same subsystem as ModemSW, but has
948 * separate SMD edges that need to be reset.
949 */
950 if (pid == SMSM_MODEM &&
951 pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
Jeff Hugo70a7e562012-09-07 11:24:32 -0600952 &local_ch, &remote_ch, is_word_access))
953 smd_reset_edge(local_ch, new_state, is_word_access);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700954 }
955}
956
957
/*
 * Recover all SMD/SMSM state touching the restarting processor
 * @restart_pid after a subsystem restart.
 *
 * The sequence is order-sensitive: spinlocks held by the dead processor
 * are released first, its SMSM entry is cleared and peers are notified,
 * then every remote half-channel is driven CLOSING and finally CLOSED,
 * with an explicit notify round after each transition so local clients
 * observe both edges of the close.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors; mb() orders the shared-memory writes
	 * above before the outgoing interrupts.
	 * NOTE(review): notify_rpm_smd() is not called here although the
	 * RPM edge exists -- confirm whether RPM is intentionally exempt
	 * from channel reset.
	 */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
1025
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001026/* how many bytes are available for reading */
1027static int smd_stream_read_avail(struct smd_channel *ch)
1028{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001029 return (ch->half_ch->get_head(ch->recv) -
1030 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001031}
1032
1033/* how many bytes we are free to write */
1034static int smd_stream_write_avail(struct smd_channel *ch)
1035{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001036 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1037 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001038}
1039
1040static int smd_packet_read_avail(struct smd_channel *ch)
1041{
1042 if (ch->current_packet) {
1043 int n = smd_stream_read_avail(ch);
1044 if (n > ch->current_packet)
1045 n = ch->current_packet;
1046 return n;
1047 } else {
1048 return 0;
1049 }
1050}
1051
1052static int smd_packet_write_avail(struct smd_channel *ch)
1053{
1054 int n = smd_stream_write_avail(ch);
1055 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1056}
1057
1058static int ch_is_open(struct smd_channel *ch)
1059{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001060 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1061 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1062 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001063}
1064
1065/* provide a pointer and length to readable data in the fifo */
1066static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1067{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001068 unsigned head = ch->half_ch->get_head(ch->recv);
1069 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001070 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001071
1072 if (tail <= head)
1073 return head - tail;
1074 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001075 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001076}
1077
/* nonzero when the remote side has asked us to suppress read interrupts */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1082
/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
	/* tail update must be visible before the fTAIL event flag */
	wmb();
	ch->half_ch->set_fTAIL(ch->send, 1);
}
1092
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * @user_buf selects copy_to_user() over memcpy() for the destination.
 * Returns the number of bytes actually consumed from the fifo.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	unsigned char *dest = _data;
	int remaining = len;
	void *src;
	unsigned chunk;
	int uncopied = 0;

	while (remaining > 0) {
		chunk = ch_read_buffer(ch, &src);
		if (chunk == 0)
			break;

		if (chunk > remaining)
			chunk = remaining;
		if (_data) {
			if (user_buf) {
				uncopied = copy_to_user(dest, src, chunk);
				if (uncopied > 0) {
					pr_err("%s: copy_to_user could not copy %i bytes.\n",
						__func__,
						uncopied);
				}
			} else {
				memcpy(dest, src, chunk);
			}
		}

		dest += chunk;
		remaining -= chunk;
		ch_read_done(ch, chunk);
	}

	return len - remaining;
}
1133
/*
 * update_state hook for stream channels: intentionally empty, since
 * streams carry no per-packet bookkeeping (contrast update_packet_state).
 */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1138
/*
 * update_state hook for packet channels: when the previous packet has
 * been fully consumed, pull the next SMD header off the stream and latch
 * its payload length into ch->current_packet.  Zero-length packets are
 * consumed and skipped by looping.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		/* hdr[0] is the payload length; hdr[1..4] are reserved */
		ch->current_packet = hdr[0];
	}
}
1158
1159/* provide a pointer and length to next free space in the fifo */
1160static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1161{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001162 unsigned head = ch->half_ch->get_head(ch->send);
1163 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001164 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001165
1166 if (head < tail) {
1167 return tail - head - 1;
1168 } else {
1169 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001170 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001171 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001172 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001173 }
1174}
1175
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* head update must be visible before the fHEAD event flag */
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1187
/*
 * Move our half of the channel to state @n and ring the remote side.
 * The modem-control flags are written before the state word, and fSTATE
 * is raised last so the peer sees a consistent snapshot when it wakes.
 */
static void ch_set_state(struct smd_channel *ch, unsigned n)
{
	if (n == SMD_SS_OPENED) {
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	} else {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
	}
	ch->half_ch->set_state(ch->send, n);
	ch->half_ch->set_fSTATE(ch->send, 1);
	ch->notify_other_cpu();
}
1203
1204static void do_smd_probe(void)
1205{
1206 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1207 if (shared->heap_info.free_offset != last_heap_free) {
1208 last_heap_free = shared->heap_info.free_offset;
1209 schedule_work(&probe_work);
1210 }
1211}
1212
/*
 * React to the remote half-channel moving from state @last to @next:
 * record the new state, mirror the appropriate transition on our half,
 * and deliver OPEN/CLOSE events to the client.  Called with smd_lock
 * held from the interrupt handlers.
 */
static void smd_state_change(struct smd_channel *ch,
			     unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote is (re)opening: reset fifo indices and follow */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* handshake complete: tell the client the channel is up */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed first: discard packet state, notify client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides closing: hand off to the close workqueue */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}
1258
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001259static void handle_smd_irq_closing_list(void)
1260{
1261 unsigned long flags;
1262 struct smd_channel *ch;
1263 struct smd_channel *index;
1264 unsigned tmp;
1265
1266 spin_lock_irqsave(&smd_lock, flags);
1267 list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001268 if (ch->half_ch->get_fSTATE(ch->recv))
1269 ch->half_ch->set_fSTATE(ch->recv, 0);
1270 tmp = ch->half_ch->get_state(ch->recv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001271 if (tmp != ch->last_state)
1272 smd_state_change(ch, ch->last_state, tmp);
1273 }
1274 spin_unlock_irqrestore(&smd_lock, flags);
1275}
1276
/*
 * Core SMD interrupt service: for every open channel on @list, acknowledge
 * the remote's event flags (fHEAD=data written, fTAIL=data consumed,
 * fSTATE=state change), then dispatch state changes, data events, and
 * status events to the client.  @notify rings the remote processor and is
 * passed through to smd_state_change() indirectly via ch ops.
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* clear each event flag before acting on it so a
			 * new remote event re-raises the interrupt */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		/* 0x3 == data written or consumed: a data event either way */
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* status-only event; suppressed if a state change already
		 * notified the client above */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1327
/* top-half for the Modem->Apps SMD interrupt */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1336
/* top-half for the LPASS (Q6)->Apps SMD interrupt */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001345
/* top-half for the DSPS->Apps SMD interrupt */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001354
/* top-half for the WCNSS->Apps SMD interrupt */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1363
/* top-half for the RPM->Apps SMD interrupt */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int RPM->Apps\n");
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001372
/*
 * Tasklet body that services every SMD edge as though its interrupt had
 * fired; scheduled from smd_sleep_exit() and called directly from
 * smd_channel_reset() to flush pending events without a hardware IRQ.
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1384
Brian Swetland37521a32009-07-01 18:30:47 -07001385static inline int smd_need_int(struct smd_channel *ch)
1386{
1387 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001388 if (ch->half_ch->get_fHEAD(ch->recv) ||
1389 ch->half_ch->get_fTAIL(ch->recv) ||
1390 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001391 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001392 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001393 return 1;
1394 }
1395 return 0;
1396}
1397
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001398void smd_sleep_exit(void)
1399{
1400 unsigned long flags;
1401 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001402 int need_int = 0;
1403
1404 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001405 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1406 if (smd_need_int(ch)) {
1407 need_int = 1;
1408 break;
1409 }
1410 }
1411 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1412 if (smd_need_int(ch)) {
1413 need_int = 1;
1414 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001415 }
1416 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001417 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1418 if (smd_need_int(ch)) {
1419 need_int = 1;
1420 break;
1421 }
1422 }
1423 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1424 if (smd_need_int(ch)) {
1425 need_int = 1;
1426 break;
1427 }
1428 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001429 spin_unlock_irqrestore(&smd_lock, flags);
1430 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001431
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001432 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001433 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001434 tasklet_schedule(&smd_fake_irq_tasklet);
1435 }
1436}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001437EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001438
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001439static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001440{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001441 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1442 return 0;
1443 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001444 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001445
1446 /* for cases where xfer type is 0 */
1447 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001448 return 0;
1449
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001450 /* for cases where xfer type is 0 */
1451 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1452 return 0;
1453
1454 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001455 return 1;
1456 else
1457 return 0;
1458}
1459
/**
 * Write up to @len bytes of stream data into the channel's TX FIFO.
 * @ch: open SMD channel
 * @_data: source buffer (kernel or user space, selected by @user_buf)
 * @len: number of bytes requested
 * @user_buf: non-zero if @_data is a userspace pointer
 *
 * Returns the number of bytes actually queued (may be short if the FIFO
 * fills), 0 for a zero-length request, or -EINVAL for a negative length.
 * The remote processor is only signalled if at least one byte was written.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
				int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	/* Loop over contiguous FIFO segments until the request is
	 * satisfied or the FIFO has no free space left.
	 */
	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			/* channel dropped mid-write: report 0 bytes queued */
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				/* NOTE(review): on a partial copy the write
				 * pointer below still advances by the full
				 * xfer, so up to r uninitialized bytes are
				 * exposed to the remote side — confirm
				 * whether callers guarantee faultless
				 * buffers here.
				 */
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);	/* publish xfer bytes to remote */
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* Only interrupt the remote processor if something was written. */
	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1505
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001506static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
1507 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001508{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001509 int ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001510 unsigned hdr[5];
1511
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001512 SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001513 if (len < 0)
1514 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001515 else if (len == 0)
1516 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001517
1518 if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
1519 return -ENOMEM;
1520
1521 hdr[0] = len;
1522 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1523
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001524
1525 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1526 if (ret < 0 || ret != sizeof(hdr)) {
1527 SMD_DBG("%s failed to write pkt header: "
1528 "%d returned\n", __func__, ret);
1529 return -1;
1530 }
1531
1532
1533 ret = smd_stream_write(ch, _data, len, user_buf);
1534 if (ret < 0 || ret != len) {
1535 SMD_DBG("%s failed to write pkt data: "
1536 "%d returned\n", __func__, ret);
1537 return ret;
1538 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001539
1540 return len;
1541}
1542
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001543static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001544{
1545 int r;
1546
1547 if (len < 0)
1548 return -EINVAL;
1549
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001550 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001551 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001552 if (!read_intr_blocked(ch))
1553 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001554
1555 return r;
1556}
1557
/**
 * Read payload bytes of the current packet from a packet-mode channel.
 * @len is clamped to the remaining bytes of the in-progress packet, so a
 * read never crosses a packet boundary.  Packet bookkeeping is updated
 * under smd_lock because the IRQ handler touches the same state.
 * Returns bytes read, or -EINVAL for a negative length.
 */
static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
{
	unsigned long flags;
	int r;

	if (len < 0)
		return -EINVAL;

	/* never read past the end of the current packet */
	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	/* NOTE(review): r is subtracted unconditionally below — assumes
	 * ch_read() never returns a negative value; confirm.
	 */
	spin_lock_irqsave(&smd_lock, flags);
	ch->current_packet -= r;
	update_packet_state(ch);	/* may latch the next packet header */
	spin_unlock_irqrestore(&smd_lock, flags);

	return r;
}
1581
/**
 * Packet read variant for use from the notify callback path.
 * Identical to smd_packet_read() except it does not take smd_lock —
 * presumably because the callback already runs with that lock held or
 * in IRQ context (NOTE(review): confirm against the notify call sites).
 */
static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
					int user_buf)
{
	int r;

	if (len < 0)
		return -EINVAL;

	/* clamp to the current packet boundary */
	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	ch->current_packet -= r;
	update_packet_state(ch);

	return r;
}
1603
#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
/**
 * Bind a channel to its v2 shared-memory control structures and FIFO.
 * The SMEM item holds both half-channels back to back; the FIFO item is
 * split evenly between TX and RX.  Returns 0 on success, -EINVAL if the
 * SMEM items are missing or the FIFO size is not a power of two.
 */
static int smd_alloc_v2(struct smd_channel *ch)
{
	void *buffer;
	unsigned buffer_sz;

	/* word-access edges use a wider control layout */
	if (is_word_access_ch(ch->type)) {
		struct smd_shared_v2_word_access *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	} else {
		struct smd_shared_v2 *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	}
	ch->half_ch = get_half_ch_funcs(ch->type);

	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -EINVAL;
	}

	/* buffer must be a power-of-two size */
	if (buffer_sz & (buffer_sz - 1)) {
		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
		return -EINVAL;
	}
	/* first half is TX, second half is RX */
	buffer_sz /= 2;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;

	return 0;
}

/* v1 layout does not exist on PKG3/PKG4 targets */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}

#else /* define v1 for older targets */
/* v2 layout does not exist on pre-PKG3 targets */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}

/**
 * Bind a channel to the legacy v1 shared structure, which embeds both
 * half-channels and fixed-size data FIFOs in a single SMEM item.
 */
static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	ch->half_ch = get_half_ch_funcs(ch->type);
	return 0;
}

#endif
1681
/**
 * Create a local smd_channel object for an entry in the shared channel
 * allocation table, wire up its shared memory, interrupt target and
 * read/write ops, then register a platform device so clients can probe.
 * Returns 0 on success, -1 on allocation failure (caller logs and skips).
 */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	/* try the v2 shared layout first, fall back to v1 */
	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	/* fifo_size is a power of two, so this is a valid wrap mask */
	ch->fifo_mask = ch->fifo_size - 1;

	/* probe_worker guarentees ch->type will be a valid type */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else if (ch->type == SMD_APPS_WCNSS)
		ch->notify_other_cpu = notify_wcnss_smd;
	else if (ch->type == SMD_APPS_RPM)
		ch->notify_other_cpu = notify_rpm_smd;

	/* select packet-mode or raw stream ops from the table entry */
	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	/* table names are not guaranteed NUL-terminated; force it */
	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}
1753
1754static inline void notify_loopback_smd(void)
1755{
1756 unsigned long flags;
1757 struct smd_channel *ch;
1758
1759 spin_lock_irqsave(&smd_lock, flags);
1760 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1761 ch->notify(ch->priv, SMD_EVENT_DATA);
1762 }
1763 spin_unlock_irqrestore(&smd_lock, flags);
1764}
1765
1766static int smd_alloc_loopback_channel(void)
1767{
1768 static struct smd_half_channel smd_loopback_ctl;
1769 static char smd_loopback_data[SMD_BUF_SIZE];
1770 struct smd_channel *ch;
1771
1772 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1773 if (ch == 0) {
1774 pr_err("%s: out of memory\n", __func__);
1775 return -1;
1776 }
1777 ch->n = SMD_LOOPBACK_CID;
1778
1779 ch->send = &smd_loopback_ctl;
1780 ch->recv = &smd_loopback_ctl;
1781 ch->send_data = smd_loopback_data;
1782 ch->recv_data = smd_loopback_data;
1783 ch->fifo_size = SMD_BUF_SIZE;
1784
1785 ch->fifo_mask = ch->fifo_size - 1;
1786 ch->type = SMD_LOOPBACK_TYPE;
1787 ch->notify_other_cpu = notify_loopback_smd;
1788
1789 ch->read = smd_stream_read;
1790 ch->write = smd_stream_write;
1791 ch->read_avail = smd_stream_read_avail;
1792 ch->write_avail = smd_stream_write_avail;
1793 ch->update_state = update_stream_state;
1794 ch->read_from_cb = smd_stream_read;
1795
1796 memset(ch->name, 0, 20);
1797 memcpy(ch->name, "local_loopback", 14);
1798
1799 ch->pdev.name = ch->name;
1800 ch->pdev.id = ch->type;
1801
1802 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001803
1804 mutex_lock(&smd_creation_mutex);
1805 list_add(&ch->ch_list, &smd_ch_closed_list);
1806 mutex_unlock(&smd_creation_mutex);
1807
1808 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001809 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001810}
1811
/* No-op notify callback installed when a client opens a channel without
 * supplying one, and when a channel is being torn down.
 */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1815
/**
 * Workqueue handler that finishes closing channels whose remote side has
 * now acknowledged the close: moves them from the to-close list back to
 * the closed (reusable) list and tells the client it may reopen.
 * Lock order (mutex before spinlock) must match smd_close()/open paths.
 */
static void finalize_channel_close_fn(struct work_struct *work)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;

	mutex_lock(&smd_creation_mutex);
	spin_lock_irqsave(&smd_lock, flags);
	/* _safe iteration: entries are moved to another list as we go */
	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
		list_del(&ch->ch_list);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		/* notify before clearing the callback, so the client
		 * actually receives REOPEN_READY
		 */
		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
		ch->notify = do_nothing_notify;
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_creation_mutex);
}
1833
1834struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001835{
1836 struct smd_channel *ch;
1837
1838 mutex_lock(&smd_creation_mutex);
1839 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001840 if (!strcmp(name, ch->name) &&
1841 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001842 list_del(&ch->ch_list);
1843 mutex_unlock(&smd_creation_mutex);
1844 return ch;
1845 }
1846 }
1847 mutex_unlock(&smd_creation_mutex);
1848
1849 return NULL;
1850}
1851
/**
 * Open the channel @name on processor edge @edge and register @notify
 * as the event callback (NULL means no notifications).
 * Returns 0 and stores the handle in *_ch on success; -ENODEV if SMD is
 * not initialized or the channel does not exist; -EAGAIN if the channel
 * exists but is still being closed.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			smd_channel_t **_ch,
			void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		/* NOTE(review): the 20-byte strncmp bound below looks like a
		 * hard-coded SMD_MAX_CH_NAME_LEN — confirm and replace with
		 * the named constant.
		 */
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side, so open it synchronously here */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* file the channel on the per-edge active list so the matching
	 * interrupt handler will service it
	 */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1940
1941
1942int smd_open(const char *name, smd_channel_t **_ch,
1943 void *priv, void (*notify)(void *, unsigned))
1944{
1945 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1946 notify);
1947}
1948EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001949
/**
 * Close an open channel.  If the remote side still has the channel
 * opened, the channel parks on the closing list until the remote
 * acknowledges (finished by finalize_channel_close_fn); otherwise it is
 * returned to the closed (reusable) list immediately.
 * Returns 0, or -1 for a NULL handle.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* loopback has no remote to run the state machine; drop
		 * the signals and state directly
		 */
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* remote still open: defer final close to the workqueue */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
1983
/**
 * Begin a segmented packet write of @len total bytes on a packet-mode
 * channel: writes the packet header now; the payload follows through
 * smd_write_segment() and the transaction ends with smd_write_end().
 * Returns 0 on success; -ENODEV/-EACCES/-EINVAL for bad arguments,
 * -EBUSY if a transaction is already in flight, -EAGAIN if the FIFO
 * cannot even hold the header, -EPERM if the header write fails.
 */
int smd_write_start(smd_channel_t *ch, int len)
{
	int ret;
	unsigned hdr[5];

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (!ch->is_pkt_ch) {
		pr_err("%s: non-packet channel specified\n", __func__);
		return -EACCES;
	}
	if (len < 1) {
		pr_err("%s: invalid length: %d\n", __func__, len);
		return -EINVAL;
	}

	if (ch->pending_pkt_sz) {
		pr_err("%s: packet of size: %d in progress\n", __func__,
			ch->pending_pkt_sz);
		return -EBUSY;
	}
	/* claim the transaction before touching the FIFO */
	ch->pending_pkt_sz = len;

	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
		ch->pending_pkt_sz = 0;
		SMD_DBG("%s: no space to write packet header\n", __func__);
		return -EAGAIN;
	}

	/* packet header: word 0 is total payload length, rest reserved */
	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		/* roll back the claim so callers may retry */
		ch->pending_pkt_sz = 0;
		pr_err("%s: packet header failed to write\n", __func__);
		return -EPERM;
	}
	return 0;
}
EXPORT_SYMBOL(smd_write_start);
2028
2029int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
2030{
2031 int bytes_written;
2032
2033 if (!ch) {
2034 pr_err("%s: Invalid channel specified\n", __func__);
2035 return -ENODEV;
2036 }
2037 if (len < 1) {
2038 pr_err("%s: invalid length: %d\n", __func__, len);
2039 return -EINVAL;
2040 }
2041
2042 if (!ch->pending_pkt_sz) {
2043 pr_err("%s: no transaction in progress\n", __func__);
2044 return -ENOEXEC;
2045 }
2046 if (ch->pending_pkt_sz - len < 0) {
2047 pr_err("%s: segment of size: %d will make packet go over "
2048 "length\n", __func__, len);
2049 return -EINVAL;
2050 }
2051
2052 bytes_written = smd_stream_write(ch, data, len, user_buf);
2053
2054 ch->pending_pkt_sz -= bytes_written;
2055
2056 return bytes_written;
2057}
2058EXPORT_SYMBOL(smd_write_segment);
2059
2060int smd_write_end(smd_channel_t *ch)
2061{
2062
2063 if (!ch) {
2064 pr_err("%s: Invalid channel specified\n", __func__);
2065 return -ENODEV;
2066 }
2067 if (ch->pending_pkt_sz) {
2068 pr_err("%s: current packet not completely written\n", __func__);
2069 return -E2BIG;
2070 }
2071
2072 return 0;
2073}
2074EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002075
2076int smd_read(smd_channel_t *ch, void *data, int len)
2077{
Jack Pham1b236d12012-03-19 15:27:18 -07002078 if (!ch) {
2079 pr_err("%s: Invalid channel specified\n", __func__);
2080 return -ENODEV;
2081 }
2082
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002083 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002084}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002085EXPORT_SYMBOL(smd_read);
2086
2087int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2088{
Jack Pham1b236d12012-03-19 15:27:18 -07002089 if (!ch) {
2090 pr_err("%s: Invalid channel specified\n", __func__);
2091 return -ENODEV;
2092 }
2093
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002094 return ch->read(ch, data, len, 1);
2095}
2096EXPORT_SYMBOL(smd_read_user_buffer);
2097
2098int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2099{
Jack Pham1b236d12012-03-19 15:27:18 -07002100 if (!ch) {
2101 pr_err("%s: Invalid channel specified\n", __func__);
2102 return -ENODEV;
2103 }
2104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002105 return ch->read_from_cb(ch, data, len, 0);
2106}
2107EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002108
2109int smd_write(smd_channel_t *ch, const void *data, int len)
2110{
Jack Pham1b236d12012-03-19 15:27:18 -07002111 if (!ch) {
2112 pr_err("%s: Invalid channel specified\n", __func__);
2113 return -ENODEV;
2114 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002115
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002116 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002117}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002118EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002119
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002120int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002121{
Jack Pham1b236d12012-03-19 15:27:18 -07002122 if (!ch) {
2123 pr_err("%s: Invalid channel specified\n", __func__);
2124 return -ENODEV;
2125 }
2126
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002127 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002128}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002129EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002130
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002131int smd_read_avail(smd_channel_t *ch)
2132{
Jack Pham1b236d12012-03-19 15:27:18 -07002133 if (!ch) {
2134 pr_err("%s: Invalid channel specified\n", __func__);
2135 return -ENODEV;
2136 }
2137
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002138 return ch->read_avail(ch);
2139}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002140EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002141
2142int smd_write_avail(smd_channel_t *ch)
2143{
Jack Pham1b236d12012-03-19 15:27:18 -07002144 if (!ch) {
2145 pr_err("%s: Invalid channel specified\n", __func__);
2146 return -ENODEV;
2147 }
2148
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002149 return ch->write_avail(ch);
2150}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002151EXPORT_SYMBOL(smd_write_avail);
2152
2153void smd_enable_read_intr(smd_channel_t *ch)
2154{
2155 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002156 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002157}
2158EXPORT_SYMBOL(smd_enable_read_intr);
2159
2160void smd_disable_read_intr(smd_channel_t *ch)
2161{
2162 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002163 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002164}
2165EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002166
/**
 * Enable/disable receive interrupts for the remote processor used by a
 * particular channel.
 * @ch: open channel handle to use for the edge
 * @mask: 1 = mask interrupts; 0 = unmask interrupts
 * @returns: 0 for success; < 0 for failure
 *
 * Note that this enables/disables all interrupts from the remote subsystem for
 * all channels. As such, it should be used with care and only for specific
 * use cases such as power-collapse sequencing.
 */
int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
{
	struct irq_chip *irq_chip;
	struct irq_data *irq_data;
	struct interrupt_config_item *int_cfg;

	if (!ch)
		return -EINVAL;

	/* edge must map to a known remote processor */
	if (ch->type >= ARRAY_SIZE(edge_to_pids))
		return -ENODEV;

	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;

	/* irq_id < 0 means no interrupt is registered for this edge */
	if (int_cfg->irq_id < 0)
		return -ENODEV;

	irq_chip = irq_get_chip(int_cfg->irq_id);
	if (!irq_chip)
		return -ENODEV;

	irq_data = irq_get_irq_data(int_cfg->irq_id);
	if (!irq_data)
		return -ENODEV;

	/* mask/unmask at the interrupt controller level */
	if (mask) {
		SMx_POWER_INFO("SMD Masking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_mask(irq_data);
	} else {
		SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_unmask(irq_data);
	}

	return 0;
}
EXPORT_SYMBOL(smd_mask_receive_interrupt);
2216
/* Unimplemented stub: blocking waits were never provided by SMD; callers
 * must poll or use the notify callback.  Always returns -1.
 */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2221
/* Unimplemented stub, see smd_wait_until_readable().  Always returns -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2226
2227int smd_cur_packet_size(smd_channel_t *ch)
2228{
Jack Pham1b236d12012-03-19 15:27:18 -07002229 if (!ch) {
2230 pr_err("%s: Invalid channel specified\n", __func__);
2231 return -ENODEV;
2232 }
2233
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002234 return ch->current_packet;
2235}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002236EXPORT_SYMBOL(smd_cur_packet_size);
2237
2238int smd_tiocmget(smd_channel_t *ch)
2239{
Jack Pham1b236d12012-03-19 15:27:18 -07002240 if (!ch) {
2241 pr_err("%s: Invalid channel specified\n", __func__);
2242 return -ENODEV;
2243 }
2244
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002245 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2246 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2247 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2248 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2249 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2250 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002251}
2252EXPORT_SYMBOL(smd_tiocmget);
2253
/* this api will be called while holding smd_lock */
/**
 * Apply TIOCM_DTR/TIOCM_RTS set/clear bits to our half-channel signal
 * flags and signal the remote processor.  DTR maps to fDSR and RTS to
 * fCTS as seen by the remote side.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	/* fSTATE tells the remote that signal flags changed */
	ch->half_ch->set_fSTATE(ch->send, 1);
	/* ensure shared-memory flag updates are visible before the
	 * remote processor is interrupted
	 */
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2282
2283int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2284{
2285 unsigned long flags;
2286
Jack Pham1b236d12012-03-19 15:27:18 -07002287 if (!ch) {
2288 pr_err("%s: Invalid channel specified\n", __func__);
2289 return -ENODEV;
2290 }
2291
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002292 spin_lock_irqsave(&smd_lock, flags);
2293 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002294 spin_unlock_irqrestore(&smd_lock, flags);
2295
2296 return 0;
2297}
2298EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002299
/**
 * Test whether a complete packet is ready to read on a packet channel.
 * Returns 1 if a packet is pending, 0 if not, -EINVAL for a NULL or
 * non-packet channel.  Re-scans the FIFO under smd_lock in case the
 * header arrived since the last interrupt.
 */
int smd_is_pkt_avail(smd_channel_t *ch)
{
	unsigned long flags;

	if (!ch || !ch->is_pkt_ch)
		return -EINVAL;

	/* fast path: a packet header was already latched */
	if (ch->current_packet)
		return 1;

	/* update_packet_state() races with the IRQ handler, so lock */
	spin_lock_irqsave(&smd_lock, flags);
	update_packet_state(ch);
	spin_unlock_irqrestore(&smd_lock, flags);

	return ch->current_packet ? 1 : 0;
}
EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002317
2318
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002319/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002320
/*
 * Shared Memory Range Check
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the aux smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL. Expects the array
 * of smem regions to be in ascending physical address order.
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 */
static void *smem_range_check(void *base, unsigned offset)
{
	int i;
	void *phys_addr;
	unsigned size;

	/* areas are sorted ascending, so once base precedes a region's
	 * start it cannot belong to any later region either
	 */
	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;
		if (base < phys_addr)
			return NULL;
		if (base > phys_addr + size)
			continue;
		/* base inside region AND base+offset still inside it;
		 * note: void* arithmetic is a GCC extension (byte-sized)
		 */
		if (base >= phys_addr && base + offset < phys_addr + size)
			return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}
2351
/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.
 *
 * @id:   SMEM item identifier
 * @size: expected item size in bytes (smem_find aligns it to 8 bytes)
 *
 * Thin wrapper around smem_find(); unlike smem_alloc2() it never
 * allocates a new item in the shared heap.
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002360
/* smem_alloc2 returns the pointer to smem item.  If it is not allocated,
 * it allocates it and then returns the pointer to it.
 *
 * @id:      SMEM item identifier
 * @size_in: requested size in bytes; rounded up to a multiple of 8
 *
 * Returns the item's virtual address, or NULL if the shared heap is not
 * initialized, the id is out of range, an existing item has a different
 * size, the id is a fixed item that is not yet allocated, or the heap
 * has insufficient free space.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	size_in = ALIGN(size_in, 8);
	/* the heap TOC is shared with remote processors; all reads and
	 * updates must be serialized by the remote spinlock */
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		/* only dynamic items may be allocated on demand; fixed
		 * items must already exist */
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* make offset/size visible to remote processors
			 * before publishing the allocated flag */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	/* flush all heap-info updates before dropping the remote lock */
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002409
/* Looks up an already-allocated smem item.
 *
 * @id:   SMEM item identifier
 * @size: out-parameter; set to the item's size, or 0 if not allocated
 *
 * Returns the item's virtual address or 0 on failure.  TOC entries whose
 * reserved field carries an aux-area base address are translated through
 * smem_range_check().  May run before smsm_init(), in which case the
 * remote spinlock is not yet usable and is skipped.
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002442
/* Returns the pointer to an already-allocated smem item, verifying that
 * its allocated size matches the caller's expectation.
 *
 * @id:      SMEM item identifier
 * @size_in: expected size in bytes; rounded up to a multiple of 8 before
 *           comparison, matching smem_alloc2()'s alignment
 *
 * Returns the item's virtual address, or 0 if the item is not allocated
 * or the size does not match.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned size;
	void *ptr;

	ptr = smem_get_entry(id, &size);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		/* %u, not %d: id, size_in and size are all unsigned */
		pr_err("%s(%u, %u): wrong size %u\n", __func__,
				id, size_in, size);
		return 0;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);
2462
2463static int smsm_cb_init(void)
2464{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002465 struct smsm_state_info *state_info;
2466 int n;
2467 int ret = 0;
2468
2469 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2470 GFP_KERNEL);
2471
2472 if (!smsm_states) {
2473 pr_err("%s: SMSM init failed\n", __func__);
2474 return -ENOMEM;
2475 }
2476
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002477 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2478 if (!smsm_cb_wq) {
2479 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2480 kfree(smsm_states);
2481 return -EFAULT;
2482 }
2483
Eric Holmbergc8002902011-09-16 13:55:57 -06002484 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002485 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2486 state_info = &smsm_states[n];
2487 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002488 state_info->intr_mask_set = 0x0;
2489 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002490 INIT_LIST_HEAD(&state_info->callbacks);
2491 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002492 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002493
2494 return ret;
2495}
2496
/* One-time SMSM bring-up: initializes the remote spinlock, sizes the
 * SMSM tables from shared memory, allocates the snapshot FIFO and
 * wakelock, claims the shared state/interrupt-mask/mux regions, and
 * finally notifies driver-state listeners with SMSM_INIT.
 *
 * Returns 0 on success or a negative errno from a failed sub-step.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;

	i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
	if (i) {
		pr_err("%s: remote spinlock init failed %d\n", __func__, i);
		return i;
	}
	spinlocks_initialized = 1;

	/* if the remote side published table sizes, use them instead of
	 * the compile-time defaults */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	/* FIFO holds up to SMSM_SNAPSHOT_CNT full state snapshots */
	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			/* modem protocol version 0xB+ also expects the
			 * APPS DEM entry to be zeroed */
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			/* start with all interrupts masked ... */
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	/* ensure all shared-memory writes land before announcing init */
	wmb();
	smsm_driver_state_notify(SMSM_INIT, NULL);
	return 0;
}
2571
2572void smsm_reset_modem(unsigned mode)
2573{
2574 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2575 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2576 } else if (mode == SMSM_MODEM_WAIT) {
2577 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2578 } else { /* reset_mode is SMSM_RESET or default */
2579 mode = SMSM_RESET;
2580 }
2581
2582 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2583}
2584EXPORT_SYMBOL(smsm_reset_modem);
2585
/* Releases a modem that was halted with smsm_reset_modem(SMSM_MODEM_WAIT)
 * by clearing SMSM_MODEM_WAIT from the APPS state word.  No-op if the
 * shared state region has not been mapped yet.
 */
void smsm_reset_modem_cont(void)
{
	unsigned long flags;
	uint32_t state;

	if (!smsm_info.state)
		return;

	spin_lock_irqsave(&smem_lock, flags);
	state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
						& ~SMSM_MODEM_WAIT;
	__raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
	/* ensure the cleared bit is visible to the modem before return */
	wmb();
	spin_unlock_irqrestore(&smem_lock, flags);
}
EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002602
/* Captures the current value of every SMSM state entry into the snapshot
 * FIFO (followed by the wakelock-usage flag) and queues the callback
 * worker to deliver notifications.  Called from IRQ context with
 * smem_lock held.
 *
 * @use_wakelock: nonzero to hold smsm_snapshot_wakelock until the worker
 *                has consumed this snapshot
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	/* a snapshot is only queued if the whole record fits */
	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 * 1) increment snapshot count
	 * 2) insert data into FIFO
	 *
	 * Potentially in parallel, the worker:
	 * a) verifies >= 1 snapshots are in FIFO
	 * b) processes snapshot
	 * c) decrements reference count
	 *
	 * This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	queue_work(smsm_cb_wq, &smsm_cb_work);
	return;

restore_snapshot_count:
	/* undo step 1 above after a partial FIFO insert, releasing the
	 * wakelock if this was the only outstanding snapshot */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002677
/* Core SMSM interrupt handler, shared by all per-processor wrappers.
 *
 * For the ADSP interrupt it only snapshots state for the callback worker.
 * For all other sources it runs the modem handshake state machine:
 * reacting to modem RESET / INIT / SYSTEM_DOWNLOAD transitions, updating
 * the local APPS state word, and notifying the remote side of any change
 * before snapshotting for clients.
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* track the Q6->APPS mux word on 8x50 targets; only the
		 * change itself is recorded */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				/* ack the reset; flush caches so RAM is
				 * consistent for post-mortem debugging */
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			/* declare RUN once INIT, SMDINIT and RPCINIT have
			 * all been reached */
			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2760
Eric Holmberg98c6c642012-02-24 11:29:35 -07002761static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002762{
Eric Holmberg98c6c642012-02-24 11:29:35 -07002763 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002764 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002765 return smsm_irq_handler(irq, data);
2766}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002767
Eric Holmberg98c6c642012-02-24 11:29:35 -07002768static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2769{
2770 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002771 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002772 return smsm_irq_handler(irq, data);
2773}
2774
2775static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2776{
2777 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002778 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002779 return smsm_irq_handler(irq, data);
2780}
2781
2782static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2783{
2784 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002785 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002786 return smsm_irq_handler(irq, data);
2787}
2788
Eric Holmberge8a39322012-04-03 15:14:02 -06002789/*
2790 * Changes the global interrupt mask. The set and clear masks are re-applied
2791 * every time the global interrupt mask is updated for callback registration
2792 * and de-registration.
2793 *
2794 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2795 * mask and the set mask, the result will be that the interrupt is set.
2796 *
2797 * @smsm_entry SMSM entry to change
2798 * @clear_mask 1 = clear bit, 0 = no-op
2799 * @set_mask 1 = set bit, 0 = no-op
2800 *
2801 * @returns 0 for success, < 0 for error
2802 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002803int smsm_change_intr_mask(uint32_t smsm_entry,
2804 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002805{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002806 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002807 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002808
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002809 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2810 pr_err("smsm_change_state: Invalid entry %d\n",
2811 smsm_entry);
2812 return -EINVAL;
2813 }
2814
2815 if (!smsm_info.intr_mask) {
2816 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002817 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002818 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002819
2820 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002821 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2822 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002823
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002824 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2825 new_mask = (old_mask & ~clear_mask) | set_mask;
2826 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002827
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002828 wmb();
2829 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002830
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002831 return 0;
2832}
2833EXPORT_SYMBOL(smsm_change_intr_mask);
2834
2835int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2836{
2837 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2838 pr_err("smsm_change_state: Invalid entry %d\n",
2839 smsm_entry);
2840 return -EINVAL;
2841 }
2842
2843 if (!smsm_info.intr_mask) {
2844 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2845 return -EIO;
2846 }
2847
2848 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2849 return 0;
2850}
2851EXPORT_SYMBOL(smsm_get_intr_mask);
2852
2853int smsm_change_state(uint32_t smsm_entry,
2854 uint32_t clear_mask, uint32_t set_mask)
2855{
2856 unsigned long flags;
2857 uint32_t old_state, new_state;
2858
2859 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2860 pr_err("smsm_change_state: Invalid entry %d",
2861 smsm_entry);
2862 return -EINVAL;
2863 }
2864
2865 if (!smsm_info.state) {
2866 pr_err("smsm_change_state <SM NO STATE>\n");
2867 return -EIO;
2868 }
2869 spin_lock_irqsave(&smem_lock, flags);
2870
2871 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2872 new_state = (old_state & ~clear_mask) | set_mask;
2873 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2874 SMSM_DBG("smsm_change_state %x\n", new_state);
2875 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002876
2877 spin_unlock_irqrestore(&smem_lock, flags);
2878
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002879 return 0;
2880}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002881EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002882
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002883uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002884{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002885 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002886
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002887 /* needs interface change to return error code */
2888 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2889 pr_err("smsm_change_state: Invalid entry %d",
2890 smsm_entry);
2891 return 0;
2892 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002893
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002894 if (!smsm_info.state) {
2895 pr_err("smsm_get_state <SM NO STATE>\n");
2896 } else {
2897 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2898 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002899
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002900 return rv;
2901}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002902EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002903
/**
 * Performs SMSM callback client notification.
 *
 * Drains complete snapshots from smsm_snapshot_fifo: for each entry whose
 * value changed since the last delivered value, invokes every registered
 * callback whose mask overlaps the changed bits.  After each snapshot it
 * consumes the trailing wakelock flag and, if set, drops one reference on
 * the snapshot wakelock (see smsm_cb_snapshot for the pairing protocol).
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	if (!smd_initialized)
		return;

	/* only complete snapshots are processed; a partial record in the
	 * FIFO means a producer bug and aborts the worker */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
					       n, state_info->last_value,
					       new_state);
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			mutex_unlock(&smsm_lock);
			return;
		}
		mutex_unlock(&smsm_lock);

		/* release the reference taken by smsm_cb_snapshot(1) */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						       " wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
					__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
2981
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002982
/**
 * Registers callback for SMSM state notifications when the specified
 * bits change.
 *
 * @smsm_entry  Processor entry to register against
 * @mask        Bits whose changes should trigger the callback
 * @notify      Notification function to register
 * @data        Opaque data passed in to callback
 *
 * @returns Status code
 *          <0 error code
 *          0  inserted new entry
 *          1  updated mask of existing entry
 *
 * An existing (notify, data) pair has @mask OR-ed into it; otherwise a
 * new entry is allocated.  The hardware interrupt mask is rewritten as
 * the union of all registered masks, adjusted by any persistent
 * smsm_change_intr_mask() overrides.
 */
int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_info *state;
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_found = 0;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		ret = -ENODEV;
		goto cleanup;
	}

	state = &smsm_states[smsm_entry];
	list_for_each_entry(cb_info,
			&state->callbacks, cb_list) {
		/* !ret guards against updating more than one match */
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask |= mask;
			cb_found = cb_info;
			ret = 1;
		}
		new_mask |= cb_info->mask;
	}

	if (!cb_found) {
		/* GFP_ATOMIC: allocation happens under the smsm mutex and
		 * must not sleep long in this path */
		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
			GFP_ATOMIC);
		if (!cb_info) {
			ret = -ENOMEM;
			goto cleanup;
		}

		cb_info->mask = mask;
		cb_info->notify = notify;
		cb_info->data = data;
		INIT_LIST_HEAD(&cb_info->cb_list);
		list_add_tail(&cb_info->cb_list,
			&state->callbacks);
		new_mask |= mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

cleanup:
	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_register);
3067
3068
3069/**
3070 * Deregisters for SMSM state notifications for the specified bits.
3071 *
3072 * @smsm_entry Processor entry to deregister
3073 * @mask Bits to deregister (if result is 0, callback is removed)
3074 * @notify Notification function to deregister
3075 * @data Opaque data passed in to callback
3076 *
3077 * @returns Status code
3078 * <0 error code
3079 * 0 not found
3080 * 1 updated mask
3081 * 2 removed callback
3082 */
3083int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
3084 void (*notify)(void *, uint32_t, uint32_t), void *data)
3085{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003086 struct smsm_state_cb_info *cb_info;
Eric Holmberge8a39322012-04-03 15:14:02 -06003087 struct smsm_state_cb_info *cb_tmp;
3088 struct smsm_state_info *state;
3089 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003090 int ret = 0;
3091
3092 if (smsm_entry >= SMSM_NUM_ENTRIES)
3093 return -EINVAL;
3094
Eric Holmbergc8002902011-09-16 13:55:57 -06003095 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003096
3097 if (!smsm_states) {
3098 /* smsm not yet initialized */
Eric Holmbergc8002902011-09-16 13:55:57 -06003099 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003100 return -ENODEV;
3101 }
3102
Eric Holmberge8a39322012-04-03 15:14:02 -06003103 state = &smsm_states[smsm_entry];
3104 list_for_each_entry_safe(cb_info, cb_tmp,
3105 &state->callbacks, cb_list) {
3106 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003107 (cb_info->data == data)) {
3108 cb_info->mask &= ~mask;
3109 ret = 1;
3110 if (!cb_info->mask) {
3111 /* no mask bits set, remove callback */
3112 list_del(&cb_info->cb_list);
3113 kfree(cb_info);
3114 ret = 2;
Eric Holmberge8a39322012-04-03 15:14:02 -06003115 continue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003116 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003117 }
Eric Holmberge8a39322012-04-03 15:14:02 -06003118 new_mask |= cb_info->mask;
3119 }
3120
3121 /* update interrupt notification mask */
3122 if (smsm_entry == SMSM_MODEM_STATE)
3123 new_mask |= LEGACY_MODEM_SMSM_MASK;
3124
3125 if (smsm_info.intr_mask) {
3126 unsigned long flags;
3127
3128 spin_lock_irqsave(&smem_lock, flags);
3129 new_mask = (new_mask & ~state->intr_mask_clear)
3130 | state->intr_mask_set;
3131 __raw_writel(new_mask,
3132 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
3133 wmb();
3134 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003135 }
3136
Eric Holmbergc8002902011-09-16 13:55:57 -06003137 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003138 return ret;
3139}
3140EXPORT_SYMBOL(smsm_state_cb_deregister);
3141
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003142int smsm_driver_state_notifier_register(struct notifier_block *nb)
3143{
3144 int ret;
3145 if (!nb)
3146 return -EINVAL;
3147 mutex_lock(&smsm_driver_state_notifier_lock);
3148 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
3149 mutex_unlock(&smsm_driver_state_notifier_lock);
3150 return ret;
3151}
3152EXPORT_SYMBOL(smsm_driver_state_notifier_register);
3153
3154int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
3155{
3156 int ret;
3157 if (!nb)
3158 return -EINVAL;
3159 mutex_lock(&smsm_driver_state_notifier_lock);
3160 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
3161 nb);
3162 mutex_unlock(&smsm_driver_state_notifier_lock);
3163 return ret;
3164}
3165EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
3166
/*
 * Invoke every notifier registered on the SMSM driver-state chain with
 * the given state and data.  The chain mutex is held across the call,
 * serializing notification against register/unregister above.
 */
static void smsm_driver_state_notify(uint32_t state, void *data)
{
	mutex_lock(&smsm_driver_state_notifier_lock);
	raw_notifier_call_chain(&smsm_driver_state_notifier_list,
			state, data);
	mutex_unlock(&smsm_driver_state_notifier_lock);
}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003174
3175int smd_core_init(void)
3176{
3177 int r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003178 unsigned long flags = IRQF_TRIGGER_RISING;
3179 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003180
Brian Swetland37521a32009-07-01 18:30:47 -07003181 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003182 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003183 if (r < 0)
3184 return r;
3185 r = enable_irq_wake(INT_A9_M2A_0);
3186 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003187 pr_err("smd_core_init: "
3188 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003189
Eric Holmberg98c6c642012-02-24 11:29:35 -07003190 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003191 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003192 if (r < 0) {
3193 free_irq(INT_A9_M2A_0, 0);
3194 return r;
3195 }
3196 r = enable_irq_wake(INT_A9_M2A_5);
3197 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003198 pr_err("smd_core_init: "
3199 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003200
Brian Swetland37521a32009-07-01 18:30:47 -07003201#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003202#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3203 flags |= IRQF_SHARED;
3204#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003205 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003206 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003207 if (r < 0) {
3208 free_irq(INT_A9_M2A_0, 0);
3209 free_irq(INT_A9_M2A_5, 0);
3210 return r;
3211 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003212
Eric Holmberg98c6c642012-02-24 11:29:35 -07003213 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3214 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003215 if (r < 0) {
3216 free_irq(INT_A9_M2A_0, 0);
3217 free_irq(INT_A9_M2A_5, 0);
3218 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3219 return r;
3220 }
3221
3222 r = enable_irq_wake(INT_ADSP_A11);
3223 if (r < 0)
3224 pr_err("smd_core_init: "
3225 "enable_irq_wake failed for INT_ADSP_A11\n");
3226
3227#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3228 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3229 if (r < 0)
3230 pr_err("smd_core_init: enable_irq_wake "
3231 "failed for INT_ADSP_A11_SMSM\n");
3232#endif
3233 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003234#endif
3235
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003236#if defined(CONFIG_DSPS)
3237 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3238 flags, "smd_dev", smd_dsps_irq_handler);
3239 if (r < 0) {
3240 free_irq(INT_A9_M2A_0, 0);
3241 free_irq(INT_A9_M2A_5, 0);
3242 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003243 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003244 return r;
3245 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003246
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003247 r = enable_irq_wake(INT_DSPS_A11);
3248 if (r < 0)
3249 pr_err("smd_core_init: "
3250 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003251#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003252
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003253#if defined(CONFIG_WCNSS)
3254 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3255 flags, "smd_dev", smd_wcnss_irq_handler);
3256 if (r < 0) {
3257 free_irq(INT_A9_M2A_0, 0);
3258 free_irq(INT_A9_M2A_5, 0);
3259 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003260 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003261 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3262 return r;
3263 }
3264
3265 r = enable_irq_wake(INT_WCNSS_A11);
3266 if (r < 0)
3267 pr_err("smd_core_init: "
3268 "enable_irq_wake failed for INT_WCNSS_A11\n");
3269
Eric Holmberg98c6c642012-02-24 11:29:35 -07003270 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3271 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003272 if (r < 0) {
3273 free_irq(INT_A9_M2A_0, 0);
3274 free_irq(INT_A9_M2A_5, 0);
3275 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003276 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003277 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3278 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3279 return r;
3280 }
3281
3282 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3283 if (r < 0)
3284 pr_err("smd_core_init: "
3285 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3286#endif
3287
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003288#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003289 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3290 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003291 if (r < 0) {
3292 free_irq(INT_A9_M2A_0, 0);
3293 free_irq(INT_A9_M2A_5, 0);
3294 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003295 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003296 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3297 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003298 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003299 return r;
3300 }
3301
3302 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3303 if (r < 0)
3304 pr_err("smd_core_init: "
3305 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3306#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003307 SMD_INFO("smd_core_init() done\n");
3308
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003309 return 0;
3310}
3311
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303312static int intr_init(struct interrupt_config_item *private_irq,
3313 struct smd_irq_config *platform_irq,
3314 struct platform_device *pdev
3315 )
3316{
3317 int irq_id;
3318 int ret;
3319 int ret_wake;
3320
3321 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3322 private_irq->out_offset = platform_irq->out_offset;
3323 private_irq->out_base = platform_irq->out_base;
3324
3325 irq_id = platform_get_irq_byname(
3326 pdev,
3327 platform_irq->irq_name
3328 );
3329 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3330 platform_irq->irq_name, irq_id);
3331 ret = request_irq(irq_id,
3332 private_irq->irq_handler,
3333 platform_irq->flags,
3334 platform_irq->device_name,
3335 (void *)platform_irq->dev_id
3336 );
3337 if (ret < 0) {
3338 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003339 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303340 } else {
3341 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003342 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303343 ret_wake = enable_irq_wake(irq_id);
3344 if (ret_wake < 0) {
3345 pr_err("smd: enable_irq_wake failed on %s",
3346 platform_irq->irq_name);
3347 }
3348 }
3349
3350 return ret;
3351}
3352
Jeff Hugobdc734d2012-03-26 16:05:39 -06003353int sort_cmp_func(const void *a, const void *b)
3354{
3355 struct smem_area *left = (struct smem_area *)(a);
3356 struct smem_area *right = (struct smem_area *)(b);
3357
3358 return left->phys_addr - right->phys_addr;
3359}
3360
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303361int smd_core_platform_init(struct platform_device *pdev)
3362{
3363 int i;
3364 int ret;
3365 uint32_t num_ss;
3366 struct smd_platform *smd_platform_data;
3367 struct smd_subsystem_config *smd_ss_config_list;
3368 struct smd_subsystem_config *cfg;
3369 int err_ret = 0;
Jeff Hugobdc734d2012-03-26 16:05:39 -06003370 struct smd_smem_regions *smd_smem_areas;
3371 int smem_idx = 0;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303372
3373 smd_platform_data = pdev->dev.platform_data;
3374 num_ss = smd_platform_data->num_ss_configs;
3375 smd_ss_config_list = smd_platform_data->smd_ss_configs;
3376
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06003377 if (smd_platform_data->smd_ssr_config)
3378 disable_smsm_reset_handshake = smd_platform_data->
3379 smd_ssr_config->disable_smsm_reset_handshake;
3380
Jeff Hugobdc734d2012-03-26 16:05:39 -06003381 smd_smem_areas = smd_platform_data->smd_smem_areas;
3382 if (smd_smem_areas) {
3383 num_smem_areas = smd_platform_data->num_smem_areas;
3384 smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
3385 GFP_KERNEL);
3386 if (!smem_areas) {
3387 pr_err("%s: smem_areas kmalloc failed\n", __func__);
3388 err_ret = -ENOMEM;
3389 goto smem_areas_alloc_fail;
3390 }
3391
3392 for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
3393 smem_areas[smem_idx].phys_addr =
3394 smd_smem_areas[smem_idx].phys_addr;
3395 smem_areas[smem_idx].size =
3396 smd_smem_areas[smem_idx].size;
3397 smem_areas[smem_idx].virt_addr = ioremap_nocache(
3398 (unsigned long)(smem_areas[smem_idx].phys_addr),
3399 smem_areas[smem_idx].size);
3400 if (!smem_areas[smem_idx].virt_addr) {
3401 pr_err("%s: ioremap_nocache() of addr:%p"
3402 " size: %x\n", __func__,
3403 smem_areas[smem_idx].phys_addr,
3404 smem_areas[smem_idx].size);
3405 err_ret = -ENOMEM;
3406 ++smem_idx;
3407 goto smem_failed;
3408 }
3409 }
3410 sort(smem_areas, num_smem_areas,
3411 sizeof(struct smem_area),
3412 sort_cmp_func, NULL);
3413 }
3414
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303415 for (i = 0; i < num_ss; i++) {
3416 cfg = &smd_ss_config_list[i];
3417
3418 ret = intr_init(
3419 &private_intr_config[cfg->irq_config_id].smd,
3420 &cfg->smd_int,
3421 pdev
3422 );
3423
3424 if (ret < 0) {
3425 err_ret = ret;
3426 pr_err("smd: register irq failed on %s\n",
3427 cfg->smd_int.irq_name);
Jeff Hugobdc734d2012-03-26 16:05:39 -06003428 goto intr_failed;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303429 }
3430
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003431 /* only init smsm structs if this edge supports smsm */
3432 if (cfg->smsm_int.irq_id)
3433 ret = intr_init(
3434 &private_intr_config[cfg->irq_config_id].smsm,
3435 &cfg->smsm_int,
3436 pdev
3437 );
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303438
3439 if (ret < 0) {
3440 err_ret = ret;
3441 pr_err("smd: register irq failed on %s\n",
3442 cfg->smsm_int.irq_name);
Jeff Hugobdc734d2012-03-26 16:05:39 -06003443 goto intr_failed;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303444 }
Eric Holmberg17992c12012-02-29 12:54:44 -07003445
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003446 if (cfg->subsys_name)
3447 strlcpy(edge_to_pids[cfg->edge].subsys_name,
Eric Holmberg17992c12012-02-29 12:54:44 -07003448 cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303449 }
3450
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303451
3452 SMD_INFO("smd_core_platform_init() done\n");
3453 return 0;
3454
Jeff Hugobdc734d2012-03-26 16:05:39 -06003455intr_failed:
3456 pr_err("smd: deregistering IRQs\n");
3457 for (i = 0; i < num_ss; ++i) {
3458 cfg = &smd_ss_config_list[i];
3459
3460 if (cfg->smd_int.irq_id >= 0)
3461 free_irq(cfg->smd_int.irq_id,
3462 (void *)cfg->smd_int.dev_id
3463 );
3464 if (cfg->smsm_int.irq_id >= 0)
3465 free_irq(cfg->smsm_int.irq_id,
3466 (void *)cfg->smsm_int.dev_id
3467 );
3468 }
3469smem_failed:
3470 for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
3471 iounmap(smem_areas[smem_idx].virt_addr);
3472 kfree(smem_areas);
3473smem_areas_alloc_fail:
3474 return err_ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303475}
3476
Gregory Bean4416e9e2010-07-28 10:22:12 -07003477static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003478{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303479 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003480
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303481 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003482 INIT_WORK(&probe_work, smd_channel_probe_worker);
3483
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003484 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3485 if (IS_ERR(channel_close_wq)) {
3486 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3487 return -ENOMEM;
3488 }
3489
3490 if (smsm_init()) {
3491 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003492 return -1;
3493 }
3494
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303495 if (pdev) {
3496 if (pdev->dev.of_node) {
3497 pr_err("SMD: Device tree not currently supported\n");
3498 return -ENODEV;
3499 } else if (pdev->dev.platform_data) {
3500 ret = smd_core_platform_init(pdev);
3501 if (ret) {
3502 pr_err(
3503 "SMD: smd_core_platform_init() failed\n");
3504 return -ENODEV;
3505 }
3506 } else {
3507 ret = smd_core_init();
3508 if (ret) {
3509 pr_err("smd_core_init() failed\n");
3510 return -ENODEV;
3511 }
3512 }
3513 } else {
3514 pr_err("SMD: PDEV not found\n");
3515 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003516 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003517
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003518 smd_initialized = 1;
3519
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003520 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003521 smsm_irq_handler(0, 0);
3522 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003523
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003524 return 0;
3525}
3526
static int restart_notifier_cb(struct notifier_block *this,
				  unsigned long code,
				  void *data);

/* Subsystem-restart notifier registrations: one entry per remote
 * subsystem name, each mapping to the SMD processor whose channels must
 * be reset when that subsystem restarts.
 * NOTE(review): "gss" intentionally-looking maps to SMD_MODEM as well —
 * confirm that GSS shares the modem edge on this target. */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3538
3539static int restart_notifier_cb(struct notifier_block *this,
3540 unsigned long code,
3541 void *data)
3542{
3543 if (code == SUBSYS_AFTER_SHUTDOWN) {
3544 struct restart_notifier_block *notifier;
3545
3546 notifier = container_of(this,
3547 struct restart_notifier_block, nb);
3548 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3549 __func__, notifier->processor,
3550 notifier->name);
3551
3552 smd_channel_reset(notifier->processor);
3553 }
3554
3555 return NOTIFY_DONE;
3556}
3557
3558static __init int modem_restart_late_init(void)
3559{
3560 int i;
3561 void *handle;
3562 struct restart_notifier_block *nb;
3563
3564 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3565 nb = &restart_notifiers[i];
3566 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3567 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3568 __func__, nb->name, handle);
3569 }
3570 return 0;
3571}
3572late_initcall(modem_restart_late_init);
3573
/* Platform driver for the SMD core; msm_smd_probe() runs when a
 * platform device named MODULE_NAME is registered by board code. */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3581
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003582int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003583{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003584 static bool registered;
3585
3586 if (registered)
3587 return 0;
3588
3589 registered = true;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003590 return platform_driver_register(&msm_smd_driver);
3591}
3592
3593module_init(msm_smd_init);
3594
/* Module metadata reported by modinfo */
MODULE_DESCRIPTION("MSM Shared Memory Core");
MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
MODULE_LICENSE("GPL");