blob: e33f87b72ffbab20d45c2c02317ab2ac240a95cc [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070038#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070040#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053042#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070043#include <mach/proc_comm.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053044#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070045
46#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060050 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060051 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070052#define CONFIG_QDSP6 1
53#endif
54
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060055#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
56 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057#define CONFIG_DSPS 1
58#endif
59
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060#if defined(CONFIG_ARCH_MSM8960) \
61 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060063#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070065
66#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067#define SMEM_VERSION 0x000B
68#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070069#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060070#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72uint32_t SMSM_NUM_ENTRIES = 8;
73uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
Eric Holmberge8a39322012-04-03 15:14:02 -060075/* Legacy SMSM interrupt notifications */
76#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
77 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070078
79enum {
80 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 MSM_SMSM_DEBUG = 1U << 1,
82 MSM_SMD_INFO = 1U << 2,
83 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070084 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085};
86
/* Cached pointers into the SMSM region of shared memory. */
struct smsm_shared_info {
	uint32_t *state;	/* per-entry SMSM state words */
	uint32_t *intr_mask;	/* per-entry, per-host interrupt masks */
	uint32_t *intr_mux;	/* legacy interrupt mux counters (8x50) */
};
92
93static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060094static struct kfifo smsm_snapshot_fifo;
95static struct wake_lock smsm_snapshot_wakelock;
96static int smsm_snapshot_count;
97static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098
/* Layout of the SMSM size-info item in shared memory; tells the apps
 * processor how many hosts/entries the remote protocol supports. */
struct smsm_size_info_type {
	uint32_t num_hosts;
	uint32_t num_entries;
	uint32_t reserved0;
	uint32_t reserved1;
};
105
/* One registered SMSM state-change callback. */
struct smsm_state_cb_info {
	struct list_head cb_list;	/* link in smsm_state_info.callbacks */
	uint32_t mask;			/* state bits this client cares about */
	void *data;			/* opaque client cookie */
	void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
};
112
/* Per-SMSM-entry bookkeeping for local callback clients. */
struct smsm_state_info {
	struct list_head callbacks;	/* list of smsm_state_cb_info */
	uint32_t last_value;		/* last state value delivered */
	/* aggregate local interrupt mask requests (set/clear pairs) */
	uint32_t intr_mask_set;
	uint32_t intr_mask_clear;
};
119
/* Incoming handler plus outgoing-interrupt register description for one
 * SMD or SMSM link to a remote processor. */
struct interrupt_config_item {
	/* must be initialized */
	irqreturn_t (*irq_handler)(int req, void *data);
	/* outgoing interrupt config (set from platform data) */
	uint32_t out_bit_pos;		/* bit to write to raise the irq */
	void __iomem *out_base;		/* NULL => use legacy trigger macro */
	uint32_t out_offset;		/* register offset from out_base */
	int irq_id;
};
129
/* SMD + SMSM interrupt configuration for one remote subsystem. */
struct interrupt_config {
	struct interrupt_config_item smd;
	struct interrupt_config_item smsm;
};
134
135static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700136static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530137static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700138static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530139static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700140static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530141static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700142static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600143static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530144static irqreturn_t smsm_irq_handler(int irq, void *data);
145
/* Static handler table, indexed by remote subsystem ID; the outgoing
 * register fields are filled in later from platform data. */
static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
	[SMD_MODEM] = {
		.smd.irq_handler = smd_modem_irq_handler,
		.smsm.irq_handler = smsm_modem_irq_handler,
	},
	[SMD_Q6] = {
		.smd.irq_handler = smd_dsp_irq_handler,
		.smsm.irq_handler = smsm_dsp_irq_handler,
	},
	[SMD_DSPS] = {
		.smd.irq_handler = smd_dsps_irq_handler,
		.smsm.irq_handler = smsm_dsps_irq_handler,
	},
	[SMD_WCNSS] = {
		.smd.irq_handler = smd_wcnss_irq_handler,
		.smsm.irq_handler = smsm_wcnss_irq_handler,
	},
	[SMD_RPM] = {
		.smd.irq_handler = smd_rpm_irq_handler,
		.smsm.irq_handler = NULL, /* does not support smsm */
	},
};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600168
/* One physically-contiguous shared-memory region and its ioremap'd
 * virtual mapping; used by smem_range_check() for address translation. */
struct smem_area {
	void *phys_addr;
	unsigned size;
	void __iomem *virt_addr;
};
174static uint32_t num_smem_areas;
175static struct smem_area *smem_areas;
176static void *smem_range_check(void *base, unsigned offset);
177
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700178struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530179
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700180#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
181#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
182 entry * SMSM_NUM_HOSTS + host)
183#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
184
185/* Internal definitions which are not exported in some targets */
186enum {
187 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700188};
189
190static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700191module_param_named(debug_mask, msm_smd_debug_mask,
192 int, S_IRUGO | S_IWUSR | S_IWGRP);
193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194#if defined(CONFIG_MSM_SMD_DEBUG)
195#define SMD_DBG(x...) do { \
196 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
197 printk(KERN_DEBUG x); \
198 } while (0)
199
200#define SMSM_DBG(x...) do { \
201 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
202 printk(KERN_DEBUG x); \
203 } while (0)
204
205#define SMD_INFO(x...) do { \
206 if (msm_smd_debug_mask & MSM_SMD_INFO) \
207 printk(KERN_INFO x); \
208 } while (0)
209
210#define SMSM_INFO(x...) do { \
211 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
212 printk(KERN_INFO x); \
213 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700214#define SMx_POWER_INFO(x...) do { \
215 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
216 printk(KERN_INFO x); \
217 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218#else
219#define SMD_DBG(x...) do { } while (0)
220#define SMSM_DBG(x...) do { } while (0)
221#define SMD_INFO(x...) do { } while (0)
222#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700223#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700224#endif
225
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700226static unsigned last_heap_free = 0xffffffff;
227
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228static inline void smd_write_intr(unsigned int val,
229 const void __iomem *addr);
230
231#if defined(CONFIG_ARCH_MSM7X30)
232#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530233 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700234#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530235 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530237 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700238#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530239 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700240#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600241#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242#define MSM_TRIG_A2WCNSS_SMD_INT
243#define MSM_TRIG_A2WCNSS_SMSM_INT
244#elif defined(CONFIG_ARCH_MSM8X60)
245#define MSM_TRIG_A2M_SMD_INT \
246 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
247#define MSM_TRIG_A2Q6_SMD_INT \
248 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
249#define MSM_TRIG_A2M_SMSM_INT \
250 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
251#define MSM_TRIG_A2Q6_SMSM_INT \
252 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
253#define MSM_TRIG_A2DSPS_SMD_INT \
254 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600255#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256#define MSM_TRIG_A2WCNSS_SMD_INT
257#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600258#elif defined(CONFIG_ARCH_MSM9615)
259#define MSM_TRIG_A2M_SMD_INT \
260 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
261#define MSM_TRIG_A2Q6_SMD_INT \
262 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
263#define MSM_TRIG_A2M_SMSM_INT \
264 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
265#define MSM_TRIG_A2Q6_SMSM_INT \
266 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
267#define MSM_TRIG_A2DSPS_SMD_INT
268#define MSM_TRIG_A2DSPS_SMSM_INT
269#define MSM_TRIG_A2WCNSS_SMD_INT
270#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700271#elif defined(CONFIG_ARCH_FSM9XXX)
272#define MSM_TRIG_A2Q6_SMD_INT \
273 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
274#define MSM_TRIG_A2Q6_SMSM_INT \
275 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
276#define MSM_TRIG_A2M_SMD_INT \
277 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
278#define MSM_TRIG_A2M_SMSM_INT \
279 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
280#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600281#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700282#define MSM_TRIG_A2WCNSS_SMD_INT
283#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700284#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285#define MSM_TRIG_A2M_SMD_INT \
286 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700287#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288#define MSM_TRIG_A2M_SMSM_INT \
289 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700290#define MSM_TRIG_A2Q6_SMSM_INT
291#define MSM_TRIG_A2DSPS_SMD_INT
292#define MSM_TRIG_A2DSPS_SMSM_INT
293#define MSM_TRIG_A2WCNSS_SMD_INT
294#define MSM_TRIG_A2WCNSS_SMSM_INT
295#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
296#define MSM_TRIG_A2M_SMD_INT \
297 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
298#define MSM_TRIG_A2Q6_SMD_INT
299#define MSM_TRIG_A2M_SMSM_INT \
300 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
301#define MSM_TRIG_A2Q6_SMSM_INT
302#define MSM_TRIG_A2DSPS_SMD_INT
303#define MSM_TRIG_A2DSPS_SMSM_INT
304#define MSM_TRIG_A2WCNSS_SMD_INT
305#define MSM_TRIG_A2WCNSS_SMSM_INT
306#else /* use platform device / device tree configuration */
307#define MSM_TRIG_A2M_SMD_INT
308#define MSM_TRIG_A2Q6_SMD_INT
309#define MSM_TRIG_A2M_SMSM_INT
310#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700311#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600312#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700313#define MSM_TRIG_A2WCNSS_SMD_INT
314#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700315#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700316
Jeff Hugoee40b152012-02-09 17:39:47 -0700317/*
318 * stub out legacy macros if they are not being used so that the legacy
319 * code compiles even though it is not used
320 *
321 * these definitions should not be used in active code and will cause
322 * an early failure
323 */
324#ifndef INT_A9_M2A_0
325#define INT_A9_M2A_0 -1
326#endif
327#ifndef INT_A9_M2A_5
328#define INT_A9_M2A_5 -1
329#endif
330#ifndef INT_ADSP_A11
331#define INT_ADSP_A11 -1
332#endif
333#ifndef INT_ADSP_A11_SMSM
334#define INT_ADSP_A11_SMSM -1
335#endif
336#ifndef INT_DSPS_A11
337#define INT_DSPS_A11 -1
338#endif
339#ifndef INT_DSPS_A11_SMSM
340#define INT_DSPS_A11_SMSM -1
341#endif
342#ifndef INT_WCNSS_A11
343#define INT_WCNSS_A11 -1
344#endif
345#ifndef INT_WCNSS_A11_SMSM
346#define INT_WCNSS_A11_SMSM -1
347#endif
348
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349#define SMD_LOOPBACK_CID 100
350
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600351#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
352static remote_spinlock_t remote_spinlock;
353
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700354static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700355static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600356static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700357
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600358static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700359static void notify_smsm_cb_clients_worker(struct work_struct *work);
360static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600361static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700362static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530363static int spinlocks_initialized;
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -0600364
365/**
366 * Variables to indicate smd module initialization.
367 * Dependents to register for smd module init notifier.
368 */
369static int smd_module_inited;
370static RAW_NOTIFIER_HEAD(smd_module_init_notifier_list);
371static DEFINE_MUTEX(smd_module_init_notifier_lock);
372static void smd_module_init_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373
/*
 * Write @val to the memory-mapped interrupt register @addr.
 *
 * The wmb() guarantees that all prior shared-memory updates are visible
 * to the remote processor before the interrupt that tells it to look.
 * Do not reorder these two statements.
 */
static inline void smd_write_intr(unsigned int val,
				const void __iomem *addr)
{
	wmb();
	__raw_writel(val, addr);
}
380
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700381static inline void notify_modem_smd(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700382{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530383 static const struct interrupt_config_item *intr
384 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700385 if (intr->out_base) {
386 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530387 smd_write_intr(intr->out_bit_pos,
388 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700389 } else {
390 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530391 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700392 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700393}
394
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700395static inline void notify_dsp_smd(void)
396{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530397 static const struct interrupt_config_item *intr
398 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700399 if (intr->out_base) {
400 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530401 smd_write_intr(intr->out_bit_pos,
402 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700403 } else {
404 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530405 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700406 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700407}
408
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530409static inline void notify_dsps_smd(void)
410{
411 static const struct interrupt_config_item *intr
412 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700413 if (intr->out_base) {
414 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530415 smd_write_intr(intr->out_bit_pos,
416 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700417 } else {
418 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530419 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700420 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530421}
422
423static inline void notify_wcnss_smd(void)
424{
425 static const struct interrupt_config_item *intr
426 = &private_intr_config[SMD_WCNSS].smd;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530427
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700428 if (intr->out_base) {
429 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530430 smd_write_intr(intr->out_bit_pos,
431 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700432 } else {
433 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530434 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700435 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530436}
437
/*
 * Raise the outgoing SMD interrupt toward the RPM.
 *
 * Unlike the other notify_*_smd() helpers there is deliberately no
 * legacy hard-coded fallback: RPM links exist only on targets that
 * supply interrupt configuration via platform data, so if out_base is
 * unset this is a no-op.
 */
static inline void notify_rpm_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_RPM].smd;

	if (intr->out_base) {
		++interrupt_stats[SMD_RPM].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}
449
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530450static inline void notify_modem_smsm(void)
451{
452 static const struct interrupt_config_item *intr
453 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700454 if (intr->out_base) {
455 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530456 smd_write_intr(intr->out_bit_pos,
457 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700458 } else {
459 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530460 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700461 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530462}
463
/* Raise the outgoing SMSM interrupt toward the Q6 DSP; configured
 * register write when available, legacy trigger macro otherwise. */
static inline void notify_dsp_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_Q6].smsm;
	if (intr->out_base) {
		++interrupt_stats[SMD_Q6].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
		MSM_TRIG_A2Q6_SMSM_INT;
	}
}
477
/* Raise the outgoing SMSM interrupt toward the DSPS processor;
 * configured register write when available, legacy macro otherwise. */
static inline void notify_dsps_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_DSPS].smsm;
	if (intr->out_base) {
		++interrupt_stats[SMD_DSPS].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
		MSM_TRIG_A2DSPS_SMSM_INT;
	}
}
491
/* Raise the outgoing SMSM interrupt toward the WCNSS processor;
 * configured register write when available, legacy macro otherwise. */
static inline void notify_wcnss_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_WCNSS].smsm;

	if (intr->out_base) {
		++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
		MSM_TRIG_A2WCNSS_SMSM_INT;
	}
}
506
/*
 * Interrupt every remote host whose SMSM interrupt mask shows interest
 * in the bits of @smsm_entry covered by @notify_mask, then snapshot
 * state for local (apps) callback clients.
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
				& notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
				& notify_mask)) {
		uint32_t mux_val;

		/* on 8x50 the DSP is signalled by bumping a shared mux
		 * counter before raising the interrupt */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
				& notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
				& notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
				& notify_mask)) {
		smsm_cb_snapshot(0);
	}
}
555
/*
 * Dump the remote processor's diagnostic and crash-log strings from
 * shared memory (if present), forcing NUL termination first since the
 * remote side is not trusted to terminate them.
 */
void smd_diag(void)
{
	char *x;
	int size;

	x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
	if (x != 0) {
		x[SZ_DIAG_ERR_MSG - 1] = 0;
		SMD_INFO("smem: DIAG '%s'\n", x);
	}

	x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
	if (x != 0) {
		x[size - 1] = 0;
		pr_err("smem: CRASH LOG\n'%s'\n", x);
	}
}
573
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700574
/*
 * Last-resort handler for a detected modem crash: log the shared-memory
 * diagnostics, then spin forever waiting for the modem or a hardware
 * watchdog to reset the system. The infinite loop is intentional —
 * there is nothing useful the apps processor can do past this point.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
589
/*
 * Check the modem's SMSM state word for the SMSM_RESET flag.
 *
 * Returns 0 if the modem looks healthy (or SMSM is not yet initialized);
 * on a detected crash it calls handle_modem_crash(), which never
 * returns, so the -1 return is effectively unreachable.
 */
int smsm_check_for_modem_crash(void)
{
	/* if the modem's not ready yet, we have to hope for the best */
	if (!smsm_info.state)
		return 0;

	if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
		handle_modem_crash();
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700603
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700604/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700605 * irq handler and code that mutates the channel
606 * list or fiddles with channel state
607 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700608static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700609DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700610
611/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700612 * operations to avoid races while creating or
613 * destroying smd_channel structures
614 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700615static DEFINE_MUTEX(smd_creation_mutex);
616
617static int smd_initialized;
618
/* Shared-memory channel layouts. v1 embeds the data FIFOs next to the
 * half-channel headers; v2 keeps headers only (FIFOs allocated
 * separately); the word-access variant is for processors that require
 * whole-word MMIO access (e.g. RPM). */
struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};

struct smd_shared_v2_word_access {
	struct smd_half_channel_word_access ch0;
	struct smd_half_channel_word_access ch1;
};
635
/*
 * Local bookkeeping for one SMD channel. The send/recv half-channel
 * layout varies by protocol version, so they are held as void* and all
 * access goes through the half_ch accessor functions and the per-type
 * read/write function pointers.
 */
struct smd_channel {
	volatile void *send; /* some variant of smd_half_channel */
	volatile void *recv; /* some variant of smd_half_channel */
	unsigned char *send_data;	/* outgoing FIFO */
	unsigned char *recv_data;	/* incoming FIFO */
	unsigned fifo_size;
	unsigned fifo_mask;		/* fifo_size - 1; size is power of 2 */
	struct list_head ch_list;

	unsigned current_packet;	/* bytes left in packet being read */
	unsigned n;			/* channel index in alloc table */
	void *priv;			/* client cookie for notify() */
	void (*notify)(void *priv, unsigned flags);

	/* stream- or packet-mode implementations, selected at open */
	int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
	int (*write)(smd_channel_t *ch, const void *data, int len,
			int user_buf);
	int (*read_avail)(smd_channel_t *ch);
	int (*write_avail)(smd_channel_t *ch);
	int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
			int user_buf);

	void (*update_state)(smd_channel_t *ch);
	unsigned last_state;		/* last remote state observed */
	void (*notify_other_cpu)(void);	/* outgoing interrupt raiser */

	char name[20];
	struct platform_device pdev;
	unsigned type;			/* edge (SMD_APPS_MODEM, ...) */

	int pending_pkt_sz;

	char is_pkt_ch;

	/*
	 * private internal functions to access *send and *recv.
	 * never to be exported outside of smd
	 */
	struct smd_half_channel_access *half_ch;
};
676
/* Local/remote processor IDs (and optional subsystem name) for one
 * SMD edge. */
struct edge_to_pid {
	uint32_t local_pid;
	uint32_t remote_pid;
	char subsys_name[SMD_MAX_CH_NAME_LEN];
};
682
/**
 * Maps edge type to local and remote processor ID's.
 *
 * NOTE(review): subsys_name is only populated for edges whose local end
 * is the apps processor — presumably only those are used for subsystem
 * lookup by name; confirm against the callers before relying on a name
 * being present for other edges.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
707
/**
 * Subsystem-restart notifier registration record.
 * @processor: processor ID the notification applies to
 * @name:      human-readable subsystem name
 * @nb:        embedded notifier block registered with the restart framework
 */
struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};
713
/* when set, skip the SMSM_RESET handshake during modem restart */
static int disable_smsm_reset_handshake;
/* platform device used to register the SMD loopback TTY */
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channel lifecycle lists (closed / closing / queued for final close) */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
/* per-edge lists of channels, scanned by the edge's interrupt handler */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* tracks which entries of the 64-slot allocation table have been probed */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
738
739static void smd_channel_probe_worker(struct work_struct *work)
740{
741 struct smd_alloc_elm *shared;
742 unsigned n;
743 uint32_t type;
744
745 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
746
747 if (!shared) {
748 pr_err("%s: allocation table not initialized\n", __func__);
749 return;
750 }
751
752 mutex_lock(&smd_probe_lock);
753 for (n = 0; n < 64; n++) {
754 if (smd_ch_allocated[n])
755 continue;
756
757 /* channel should be allocated only if APPS
758 processor is involved */
759 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600760 if (type >= ARRAY_SIZE(edge_to_pids) ||
761 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700762 continue;
763 if (!shared[n].ref_count)
764 continue;
765 if (!shared[n].name[0])
766 continue;
767
768 if (!smd_alloc_channel(&shared[n]))
769 smd_ch_allocated[n] = 1;
770 else
771 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
772 }
773 mutex_unlock(&smd_probe_lock);
774}
775
776/**
777 * Lookup processor ID and determine if it belongs to the proved edge
778 * type.
779 *
780 * @shared2: Pointer to v2 shared channel structure
781 * @type: Edge type
782 * @pid: Processor ID of processor on edge
783 * @local_ch: Channel that belongs to processor @pid
784 * @remote_ch: Other side of edge contained @pid
Jeff Hugo70a7e562012-09-07 11:24:32 -0600785 * @is_word_access_ch: Bool, is this a word aligned access channel
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700786 *
787 * Returns 0 for not on edge, 1 for found on edge
788 */
Jeff Hugo70a7e562012-09-07 11:24:32 -0600789static int pid_is_on_edge(void *shared2,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700790 uint32_t type, uint32_t pid,
Jeff Hugo70a7e562012-09-07 11:24:32 -0600791 void **local_ch,
792 void **remote_ch,
793 int is_word_access_ch
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700794 )
795{
796 int ret = 0;
797 struct edge_to_pid *edge;
Jeff Hugo70a7e562012-09-07 11:24:32 -0600798 void *ch0;
799 void *ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700800
801 *local_ch = 0;
802 *remote_ch = 0;
803
804 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
805 return 0;
806
Jeff Hugo70a7e562012-09-07 11:24:32 -0600807 if (is_word_access_ch) {
808 ch0 = &((struct smd_shared_v2_word_access *)(shared2))->ch0;
809 ch1 = &((struct smd_shared_v2_word_access *)(shared2))->ch1;
810 } else {
811 ch0 = &((struct smd_shared_v2 *)(shared2))->ch0;
812 ch1 = &((struct smd_shared_v2 *)(shared2))->ch1;
813 }
814
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700815 edge = &edge_to_pids[type];
816 if (edge->local_pid != edge->remote_pid) {
817 if (pid == edge->local_pid) {
Jeff Hugo70a7e562012-09-07 11:24:32 -0600818 *local_ch = ch0;
819 *remote_ch = ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700820 ret = 1;
821 } else if (pid == edge->remote_pid) {
Jeff Hugo70a7e562012-09-07 11:24:32 -0600822 *local_ch = ch1;
823 *remote_ch = ch0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700824 ret = 1;
825 }
826 }
827
828 return ret;
829}
830
Eric Holmberg17992c12012-02-29 12:54:44 -0700831/*
832 * Returns a pointer to the subsystem name or NULL if no
833 * subsystem name is available.
834 *
835 * @type - Edge definition
836 */
837const char *smd_edge_to_subsystem(uint32_t type)
838{
839 const char *subsys = NULL;
840
841 if (type < ARRAY_SIZE(edge_to_pids)) {
842 subsys = edge_to_pids[type].subsys_name;
843 if (subsys[0] == 0x0)
844 subsys = NULL;
845 }
846 return subsys;
847}
848EXPORT_SYMBOL(smd_edge_to_subsystem);
849
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700850/*
851 * Returns a pointer to the subsystem name given the
852 * remote processor ID.
853 *
854 * @pid Remote processor ID
855 * @returns Pointer to subsystem name or NULL if not found
856 */
857const char *smd_pid_to_subsystem(uint32_t pid)
858{
859 const char *subsys = NULL;
860 int i;
861
862 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
863 if (pid == edge_to_pids[i].remote_pid &&
864 edge_to_pids[i].subsys_name[0] != 0x0
865 ) {
866 subsys = edge_to_pids[i].subsys_name;
867 break;
868 }
869 }
870
871 return subsys;
872}
873EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700874
Jeff Hugo70a7e562012-09-07 11:24:32 -0600875static void smd_reset_edge(void *void_ch, unsigned new_state,
876 int is_word_access_ch)
Eric Holmberg2a563c32011-10-05 14:51:43 -0600877{
Jeff Hugo70a7e562012-09-07 11:24:32 -0600878 if (is_word_access_ch) {
879 struct smd_half_channel_word_access *ch =
880 (struct smd_half_channel_word_access *)(void_ch);
881 if (ch->state != SMD_SS_CLOSED) {
882 ch->state = new_state;
883 ch->fDSR = 0;
884 ch->fCTS = 0;
885 ch->fCD = 0;
886 ch->fSTATE = 1;
887 }
888 } else {
889 struct smd_half_channel *ch =
890 (struct smd_half_channel *)(void_ch);
891 if (ch->state != SMD_SS_CLOSED) {
892 ch->state = new_state;
893 ch->fDSR = 0;
894 ch->fCTS = 0;
895 ch->fCD = 0;
896 ch->fSTATE = 1;
897 }
Eric Holmberg2a563c32011-10-05 14:51:43 -0600898 }
899}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700900
/*
 * Walk the allocation table and force the local end of every channel
 * on an edge involving @pid into @new_state.  Used during subsystem
 * restart; callers hold smd_lock and smd_probe_lock.
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	void *shared2;
	uint32_t type;
	void *local_ch;
	void *remote_ch;
	int is_word_access;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip unreferenced or unnamed table slots */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		is_word_access = is_word_access_ch(type);
		/* pick the shared-structure size matching the access width */
		if (is_word_access)
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2_word_access));
		else
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch,
							is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch, is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);
	}
}
942
943
/*
 * Reset all SMD/SMSM state associated with the restarting processor
 * @restart_pid: release remote spinlocks it may hold, clear its SMSM
 * state entry, then walk every channel twice -- first forcing remote
 * state to CLOSING, then to CLOSED -- notifying the other processors
 * after each pass so they observe the transitions.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
1012
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001013/* how many bytes are available for reading */
1014static int smd_stream_read_avail(struct smd_channel *ch)
1015{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001016 return (ch->half_ch->get_head(ch->recv) -
1017 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001018}
1019
1020/* how many bytes we are free to write */
1021static int smd_stream_write_avail(struct smd_channel *ch)
1022{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001023 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1024 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001025}
1026
1027static int smd_packet_read_avail(struct smd_channel *ch)
1028{
1029 if (ch->current_packet) {
1030 int n = smd_stream_read_avail(ch);
1031 if (n > ch->current_packet)
1032 n = ch->current_packet;
1033 return n;
1034 } else {
1035 return 0;
1036 }
1037}
1038
1039static int smd_packet_write_avail(struct smd_channel *ch)
1040{
1041 int n = smd_stream_write_avail(ch);
1042 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1043}
1044
1045static int ch_is_open(struct smd_channel *ch)
1046{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001047 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1048 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1049 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001050}
1051
1052/* provide a pointer and length to readable data in the fifo */
1053static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1054{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001055 unsigned head = ch->half_ch->get_head(ch->recv);
1056 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001057 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001058
1059 if (tail <= head)
1060 return head - tail;
1061 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001062 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001063}
1064
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001065static int read_intr_blocked(struct smd_channel *ch)
1066{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001067 return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001068}
1069
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001070/* advance the fifo read pointer after data from ch_read_buffer is consumed */
1071static void ch_read_done(struct smd_channel *ch, unsigned count)
1072{
1073 BUG_ON(count > smd_stream_read_avail(ch));
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001074 ch->half_ch->set_tail(ch->recv,
1075 (ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001076 wmb();
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001077 ch->half_ch->set_fTAIL(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001078}
1079
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	unsigned char *dst = _data;
	int remaining = len;
	unsigned avail;
	void *ptr;
	int r = 0;

	while (remaining > 0) {
		avail = ch_read_buffer(ch, &ptr);
		if (avail == 0)
			break;

		if (avail > remaining)
			avail = remaining;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(dst, ptr, avail);
				if (r > 0)
					pr_err("%s: copy_to_user could not copy %i bytes.\n",
						__func__, r);
			} else {
				memcpy(dst, ptr, avail);
			}
		}

		dst += avail;
		remaining -= avail;
		ch_read_done(ch, avail);
	}

	/* number of bytes actually consumed from the fifo */
	return len - remaining;
}
1120
/* per-transfer-type state hook; intentionally empty for streams */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1125
1126static void update_packet_state(struct smd_channel *ch)
1127{
1128 unsigned hdr[5];
1129 int r;
1130
1131 /* can't do anything if we're in the middle of a packet */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001132 while (ch->current_packet == 0) {
1133 /* discard 0 length packets if any */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001134
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001135 /* don't bother unless we can get the full header */
1136 if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
1137 return;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001138
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001139 r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
1140 BUG_ON(r != SMD_HEADER_SIZE);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001141
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001142 ch->current_packet = hdr[0];
1143 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001144}
1145
1146/* provide a pointer and length to next free space in the fifo */
1147static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1148{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001149 unsigned head = ch->half_ch->get_head(ch->send);
1150 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001151 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001152
1153 if (head < tail) {
1154 return tail - head - 1;
1155 } else {
1156 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001157 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001158 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001159 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001160 }
1161}
1162
1163/* advace the fifo write pointer after freespace
1164 * from ch_write_buffer is filled
1165 */
1166static void ch_write_done(struct smd_channel *ch, unsigned count)
1167{
1168 BUG_ON(count > smd_stream_write_avail(ch));
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001169 ch->half_ch->set_head(ch->send,
1170 (ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001171 wmb();
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001172 ch->half_ch->set_fHEAD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001173}
1174
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001175static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001176{
1177 if (n == SMD_SS_OPENED) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001178 ch->half_ch->set_fDSR(ch->send, 1);
1179 ch->half_ch->set_fCTS(ch->send, 1);
1180 ch->half_ch->set_fCD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001181 } else {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001182 ch->half_ch->set_fDSR(ch->send, 0);
1183 ch->half_ch->set_fCTS(ch->send, 0);
1184 ch->half_ch->set_fCD(ch->send, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001185 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001186 ch->half_ch->set_state(ch->send, n);
1187 ch->half_ch->set_fSTATE(ch->send, 1);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001188 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001189}
1190
/*
 * Kick the channel probe worker whenever the SMEM heap free offset has
 * moved since the last check, i.e. new SMEM allocations (possibly new
 * channels) may have appeared.
 */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}
1199
/*
 * React to a remote state transition (@last -> @next) observed on the
 * receive half-channel by advancing the local (send) half-channel
 * state machine.  Called with smd_lock held from the irq handlers.
 */
static void smd_state_change(struct smd_channel *ch,
		unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote is (re)opening: reset fifo indices and follow */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* both sides open: tell the client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed under us: drop packet state, notify client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both ends closing: hand off to the close workqueue */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
				&finalize_channel_close_work);
		}
		break;
	}
}
1245
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001246static void handle_smd_irq_closing_list(void)
1247{
1248 unsigned long flags;
1249 struct smd_channel *ch;
1250 struct smd_channel *index;
1251 unsigned tmp;
1252
1253 spin_lock_irqsave(&smd_lock, flags);
1254 list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001255 if (ch->half_ch->get_fSTATE(ch->recv))
1256 ch->half_ch->set_fSTATE(ch->recv, 0);
1257 tmp = ch->half_ch->get_state(ch->recv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001258 if (tmp != ch->last_state)
1259 smd_state_change(ch, ch->last_state, tmp);
1260 }
1261 spin_unlock_irqrestore(&smd_lock, flags);
1262}
1263
/*
 * Core interrupt service routine shared by all edges: for every open
 * channel on @list, latch and clear the remote event flags, run the
 * local state machine on any remote state change, and deliver
 * SMD_EVENT_DATA / SMD_EVENT_STATUS callbacks.  @notify raises the
 * outbound interrupt for this edge when a state change must be acked.
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* bit 1: new data written (fHEAD) */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			/* bit 2: data consumed by remote (fTAIL) */
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			/* bit 4: remote state/flag change (fSTATE) */
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* suppress STATUS event if a state change already fired */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1314
/* SMD interrupt from the modem processor */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1323
/* SMD interrupt from the LPASS/Q6 processor */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001332
/* SMD interrupt from the DSPS (sensors) processor */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001341
/* SMD interrupt from the WCNSS (wireless) processor */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1350
/* SMD interrupt from the RPM processor */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int RPM->Apps\n");
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001359
/*
 * Tasklet body: service every edge's channel list as if its interrupt
 * had fired.  Scheduled from smd_sleep_exit() and also called directly
 * during channel reset to flush pending events.
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}
1369
1370static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1371
Brian Swetland37521a32009-07-01 18:30:47 -07001372static inline int smd_need_int(struct smd_channel *ch)
1373{
1374 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001375 if (ch->half_ch->get_fHEAD(ch->recv) ||
1376 ch->half_ch->get_fTAIL(ch->recv) ||
1377 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001378 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001379 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001380 return 1;
1381 }
1382 return 0;
1383}
1384
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001385void smd_sleep_exit(void)
1386{
1387 unsigned long flags;
1388 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001389 int need_int = 0;
1390
1391 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001392 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1393 if (smd_need_int(ch)) {
1394 need_int = 1;
1395 break;
1396 }
1397 }
1398 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1399 if (smd_need_int(ch)) {
1400 need_int = 1;
1401 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001402 }
1403 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001404 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1405 if (smd_need_int(ch)) {
1406 need_int = 1;
1407 break;
1408 }
1409 }
1410 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1411 if (smd_need_int(ch)) {
1412 need_int = 1;
1413 break;
1414 }
1415 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001416 spin_unlock_irqrestore(&smd_lock, flags);
1417 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001418
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001419 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001420 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001421 tasklet_schedule(&smd_fake_irq_tasklet);
1422 }
1423}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001424EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001425
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001426static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001427{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001428 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1429 return 0;
1430 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001431 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001432
1433 /* for cases where xfer type is 0 */
1434 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001435 return 0;
1436
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001437 /* for cases where xfer type is 0 */
1438 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1439 return 0;
1440
1441 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001442 return 1;
1443 else
1444 return 0;
1445}
1446
/*
 * Write up to @len bytes of stream data into the channel's shared-memory FIFO.
 *
 * @ch:       open SMD channel
 * @_data:    source buffer (kernel or user space, see @user_buf)
 * @len:      number of bytes to write; <0 -> -EINVAL, 0 -> 0
 * @user_buf: nonzero if @_data is a userspace pointer (copied with
 *            copy_from_user), zero for a kernel buffer (memcpy)
 *
 * Returns the number of bytes actually committed to the FIFO, which may be
 * less than @len if the FIFO fills up.  The remote processor is interrupted
 * only if at least one byte was written.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
				int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	/* ch_write_buffer() yields the largest contiguous free span; the
	 * FIFO wrap point may force multiple iterations for one write.
	 */
	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			/* channel dropped mid-write: restoring len makes the
			 * function report 0 bytes written below
			 */
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				/* NOTE(review): on partial copy the write
				 * still commits xfer bytes below, so the tail
				 * of this span holds stale FIFO data
				 */
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);	/* advance the write index */
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* only kick the remote side if something was actually written */
	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1492
/*
 * Write one complete packet: a 5-word header (word 0 = payload length,
 * words 1-4 zero) followed by @len payload bytes, all through the
 * underlying stream writer.
 *
 * Requires header + payload to fit in the FIFO up front (-ENOMEM
 * otherwise) so a packet is never left half-written.  Returns @len on
 * success, -EINVAL for negative length, 0 for an empty write, -1 if the
 * header write fails, or a short/negative payload-write result.
 */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
				int user_buf)
{
	int ret;
	unsigned hdr[5];	/* SMD packet header: {len, 0, 0, 0, 0} */

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	/* reserve room for header and payload atomically w.r.t. this caller */
	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	/* header always comes from kernel memory, hence user_buf == 0 */
	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}


	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}
1529
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001530static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001531{
1532 int r;
1533
1534 if (len < 0)
1535 return -EINVAL;
1536
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001537 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001538 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001539 if (!read_intr_blocked(ch))
1540 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001541
1542 return r;
1543}
1544
/*
 * Read up to @len bytes of the current packet's payload.
 *
 * Reads never cross a packet boundary: @len is clamped to the remaining
 * bytes of the in-progress packet.  Packet-progress bookkeeping
 * (current_packet / update_packet_state) is done under smd_lock because
 * the SMD interrupt path updates the same state.
 * Returns bytes read or -EINVAL.
 */
static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
{
	unsigned long flags;
	int r;

	if (len < 0)
		return -EINVAL;

	/* never read past the end of the current packet */
	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	spin_lock_irqsave(&smd_lock, flags);
	ch->current_packet -= r;
	update_packet_state(ch);	/* may latch the next packet header */
	spin_unlock_irqrestore(&smd_lock, flags);

	return r;
}
1568
/*
 * Packet read variant for use from the channel notify callback, which
 * already runs with smd_lock held -- so unlike smd_packet_read() the
 * packet-state update here is done without re-taking the lock.
 * Semantics otherwise match smd_packet_read().
 */
static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
					int user_buf)
{
	int r;

	if (len < 0)
		return -EINVAL;

	/* clamp to the remainder of the in-progress packet */
	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	/* caller already holds smd_lock; update state directly */
	ch->current_packet -= r;
	update_packet_state(ch);

	return r;
}
1590
#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
/*
 * Package v3/v4 targets: the channel control structures and the data FIFO
 * live in separate SMEM items.  Attach ch->send/ch->recv to the shared
 * control block and split the FIFO item into equal TX and RX halves.
 * Returns 0 on success, -EINVAL if the SMEM items are absent or malformed.
 */
static int smd_alloc_v2(struct smd_channel *ch)
{
	void *buffer;
	unsigned buffer_sz;

	/* word-access edges use a layout with 32-bit-only register fields */
	if (is_word_access_ch(ch->type)) {
		struct smd_shared_v2_word_access *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	} else {
		struct smd_shared_v2 *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	}
	/* accessor vtable matching the chosen half-channel layout */
	ch->half_ch = get_half_ch_funcs(ch->type);

	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -EINVAL;
	}

	/* buffer must be a power-of-two size */
	if (buffer_sz & (buffer_sz - 1)) {
		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
		return -EINVAL;
	}
	/* first half = TX FIFO, second half = RX FIFO */
	buffer_sz /= 2;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;

	return 0;
}

/* v1 layout does not exist on pkg3/pkg4 targets */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}

#else /* define v1 for older targets */
/* v2 layout does not exist on pre-pkg3 targets */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}

/*
 * Legacy targets: control block and fixed-size data FIFOs are packed in a
 * single SMEM item (struct smd_shared_v1).
 */
static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	ch->half_ch = get_half_ch_funcs(ch->type);
	return 0;
}

#endif
1668
/*
 * Create the local bookkeeping for a channel announced in the SMEM
 * allocation table: bind shared memory (v2 with v1 fallback), select the
 * remote-notify hook and the packet vs. stream operation set, then place
 * the channel on the closed list and register a platform device so the
 * matching client driver can probe.  Returns 0 on success, -1 on failure.
 */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	/* try the v2 shared layout first, fall back to legacy v1 */
	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	/* fifo_size is a power of two, so this is a valid index mask */
	ch->fifo_mask = ch->fifo_size - 1;

	/* probe_worker guarentees ch->type will be a valid type */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else if (ch->type == SMD_APPS_WCNSS)
		ch->notify_other_cpu = notify_wcnss_smd;
	else if (ch->type == SMD_APPS_RPM)
		ch->notify_other_cpu = notify_rpm_smd;

	/* packet channels frame data with headers; streams are raw bytes */
	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;	/* force NUL termination */

	/* platform bus matches on name; id disambiguates the edge */
	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}
1740
1741static inline void notify_loopback_smd(void)
1742{
1743 unsigned long flags;
1744 struct smd_channel *ch;
1745
1746 spin_lock_irqsave(&smd_lock, flags);
1747 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1748 ch->notify(ch->priv, SMD_EVENT_DATA);
1749 }
1750 spin_unlock_irqrestore(&smd_lock, flags);
1751}
1752
/*
 * Build the synthetic "local_loopback" channel used for testing: both the
 * send and receive sides point at the same static control block and data
 * buffer, so everything written is immediately readable back.  Registers
 * a platform device like a real channel.  Returns 0 on success, -1 on OOM.
 */
static int smd_alloc_loopback_channel(void)
{
	/* static backing store -- loopback needs no SMEM allocation */
	static struct smd_half_channel smd_loopback_ctl;
	static char smd_loopback_data[SMD_BUF_SIZE];
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("%s: out of memory\n", __func__);
		return -1;
	}
	ch->n = SMD_LOOPBACK_CID;

	/* send and recv share one control block and one buffer */
	ch->send = &smd_loopback_ctl;
	ch->recv = &smd_loopback_ctl;
	ch->send_data = smd_loopback_data;
	ch->recv_data = smd_loopback_data;
	ch->fifo_size = SMD_BUF_SIZE;

	ch->fifo_mask = ch->fifo_size - 1;
	ch->type = SMD_LOOPBACK_TYPE;
	ch->notify_other_cpu = notify_loopback_smd;

	/* loopback is always a raw stream channel */
	ch->read = smd_stream_read;
	ch->write = smd_stream_write;
	ch->read_avail = smd_stream_read_avail;
	ch->write_avail = smd_stream_write_avail;
	ch->update_state = update_stream_state;
	ch->read_from_cb = smd_stream_read;

	/* NOTE(review): literal 20 presumably matches the name field size
	 * (SMD_MAX_CH_NAME_LEN) -- confirm against the struct definition
	 */
	memset(ch->name, 0, 20);
	memcpy(ch->name, "local_loopback", 14);

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	return 0;
}
1798
/* Default notify callback installed when a client passes none (or while a
 * channel is being torn down) so ch->notify is always safe to call.
 */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1802
/*
 * Workqueue handler that completes channel closure: move every channel on
 * the to-close list back to the closed (reusable) list, tell the client it
 * may reopen, then neuter the notify pointer.  Lock order is
 * smd_creation_mutex (protects the closed list) then smd_lock (protects
 * the to-close list), matching the rest of the file.
 */
static void finalize_channel_close_fn(struct work_struct *work)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;

	mutex_lock(&smd_creation_mutex);
	spin_lock_irqsave(&smd_lock, flags);
	/* _safe variant: entries are unlinked while iterating */
	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
		list_del(&ch->ch_list);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		/* notify before resetting the callback so the client sees it */
		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
		ch->notify = do_nothing_notify;
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_creation_mutex);
}
1820
1821struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001822{
1823 struct smd_channel *ch;
1824
1825 mutex_lock(&smd_creation_mutex);
1826 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001827 if (!strcmp(name, ch->name) &&
1828 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001829 list_del(&ch->ch_list);
1830 mutex_unlock(&smd_creation_mutex);
1831 return ch;
1832 }
1833 }
1834 mutex_unlock(&smd_creation_mutex);
1835
1836 return NULL;
1837}
1838
/*
 * Open the channel named @name on the given @edge.
 *
 * @name:   channel name from the SMEM allocation table
 * @edge:   SMD_APPS_* edge (or SMD_LOOPBACK_TYPE)
 * @_ch:    out parameter receiving the opened channel handle
 * @priv:   opaque cookie handed back to @notify
 * @notify: event callback; NULL installs a no-op handler
 *
 * Returns 0 on success, -ENODEV if SMD is not initialized or the channel
 * does not exist, -EAGAIN if the channel exists but is still closing.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side; mark it open immediately and raise
	 * the modem-status signals ourselves
	 */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* file the channel on its edge's active list so the matching
	 * interrupt handler will service it
	 */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1927
1928
1929int smd_open(const char *name, smd_channel_t **_ch,
1930 void *priv, void (*notify)(void *, unsigned))
1931{
1932 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1933 notify);
1934}
1935EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001936
/*
 * Close an SMD channel.  The local side is moved to SMD_SS_CLOSED (for
 * loopback, the shared signals are dropped directly).  If the remote side
 * is still open the channel parks on the closing list until the peer
 * closes; otherwise it returns straight to the reusable closed list.
 * Returns 0, or -1 for a NULL handle.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	/* remove from the per-edge active list */
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* no remote processor: clear signals/state by hand */
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* peer still open: defer final cleanup to the state machine */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
1970
/*
 * Begin a segmented packet write of @len payload bytes: validate the
 * channel, record the pending payload size, and emit the packet header.
 * Payload follows via smd_write_segment(); smd_write_end() checks
 * completion.  Returns 0 on success; -ENODEV/-EACCES/-EINVAL for bad
 * arguments, -EBUSY if a packet is already in progress, -EAGAIN if the
 * header does not fit, -EPERM if the header write fails.
 */
int smd_write_start(smd_channel_t *ch, int len)
{
	int ret;
	unsigned hdr[5];	/* {payload len, 0, 0, 0, 0} */

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (!ch->is_pkt_ch) {
		pr_err("%s: non-packet channel specified\n", __func__);
		return -EACCES;
	}
	if (len < 1) {
		pr_err("%s: invalid length: %d\n", __func__, len);
		return -EINVAL;
	}

	if (ch->pending_pkt_sz) {
		pr_err("%s: packet of size: %d in progress\n", __func__,
			ch->pending_pkt_sz);
		return -EBUSY;
	}
	/* record first so concurrent plain writes see -EBUSY */
	ch->pending_pkt_sz = len;

	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
		ch->pending_pkt_sz = 0;
		SMD_DBG("%s: no space to write packet header\n", __func__);
		return -EAGAIN;
	}

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		/* roll back so the channel is usable again */
		ch->pending_pkt_sz = 0;
		pr_err("%s: packet header failed to write\n", __func__);
		return -EPERM;
	}
	return 0;
}
EXPORT_SYMBOL(smd_write_start);
2015
2016int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
2017{
2018 int bytes_written;
2019
2020 if (!ch) {
2021 pr_err("%s: Invalid channel specified\n", __func__);
2022 return -ENODEV;
2023 }
2024 if (len < 1) {
2025 pr_err("%s: invalid length: %d\n", __func__, len);
2026 return -EINVAL;
2027 }
2028
2029 if (!ch->pending_pkt_sz) {
2030 pr_err("%s: no transaction in progress\n", __func__);
2031 return -ENOEXEC;
2032 }
2033 if (ch->pending_pkt_sz - len < 0) {
2034 pr_err("%s: segment of size: %d will make packet go over "
2035 "length\n", __func__, len);
2036 return -EINVAL;
2037 }
2038
2039 bytes_written = smd_stream_write(ch, data, len, user_buf);
2040
2041 ch->pending_pkt_sz -= bytes_written;
2042
2043 return bytes_written;
2044}
2045EXPORT_SYMBOL(smd_write_segment);
2046
2047int smd_write_end(smd_channel_t *ch)
2048{
2049
2050 if (!ch) {
2051 pr_err("%s: Invalid channel specified\n", __func__);
2052 return -ENODEV;
2053 }
2054 if (ch->pending_pkt_sz) {
2055 pr_err("%s: current packet not completely written\n", __func__);
2056 return -E2BIG;
2057 }
2058
2059 return 0;
2060}
2061EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002062
2063int smd_read(smd_channel_t *ch, void *data, int len)
2064{
Jack Pham1b236d12012-03-19 15:27:18 -07002065 if (!ch) {
2066 pr_err("%s: Invalid channel specified\n", __func__);
2067 return -ENODEV;
2068 }
2069
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002070 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002071}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002072EXPORT_SYMBOL(smd_read);
2073
2074int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2075{
Jack Pham1b236d12012-03-19 15:27:18 -07002076 if (!ch) {
2077 pr_err("%s: Invalid channel specified\n", __func__);
2078 return -ENODEV;
2079 }
2080
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002081 return ch->read(ch, data, len, 1);
2082}
2083EXPORT_SYMBOL(smd_read_user_buffer);
2084
2085int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2086{
Jack Pham1b236d12012-03-19 15:27:18 -07002087 if (!ch) {
2088 pr_err("%s: Invalid channel specified\n", __func__);
2089 return -ENODEV;
2090 }
2091
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002092 return ch->read_from_cb(ch, data, len, 0);
2093}
2094EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002095
2096int smd_write(smd_channel_t *ch, const void *data, int len)
2097{
Jack Pham1b236d12012-03-19 15:27:18 -07002098 if (!ch) {
2099 pr_err("%s: Invalid channel specified\n", __func__);
2100 return -ENODEV;
2101 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002102
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002103 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002104}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002105EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002106
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002107int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002108{
Jack Pham1b236d12012-03-19 15:27:18 -07002109 if (!ch) {
2110 pr_err("%s: Invalid channel specified\n", __func__);
2111 return -ENODEV;
2112 }
2113
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002114 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002115}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002116EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002117
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002118int smd_read_avail(smd_channel_t *ch)
2119{
Jack Pham1b236d12012-03-19 15:27:18 -07002120 if (!ch) {
2121 pr_err("%s: Invalid channel specified\n", __func__);
2122 return -ENODEV;
2123 }
2124
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002125 return ch->read_avail(ch);
2126}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002127EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002128
2129int smd_write_avail(smd_channel_t *ch)
2130{
Jack Pham1b236d12012-03-19 15:27:18 -07002131 if (!ch) {
2132 pr_err("%s: Invalid channel specified\n", __func__);
2133 return -ENODEV;
2134 }
2135
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002136 return ch->write_avail(ch);
2137}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002138EXPORT_SYMBOL(smd_write_avail);
2139
2140void smd_enable_read_intr(smd_channel_t *ch)
2141{
2142 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002143 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002144}
2145EXPORT_SYMBOL(smd_enable_read_intr);
2146
2147void smd_disable_read_intr(smd_channel_t *ch)
2148{
2149 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002150 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002151}
2152EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002153
/**
 * Enable/disable receive interrupts for the remote processor used by a
 * particular channel.
 * @ch: open channel handle to use for the edge
 * @mask: 1 = mask interrupts; 0 = unmask interrupts
 * @returns: 0 for success; < 0 for failure
 *
 * Note that this enables/disables all interrupts from the remote subsystem for
 * all channels.  As such, it should be used with care and only for specific
 * use cases such as power-collapse sequencing.
 */
int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
{
	struct irq_chip *irq_chip;
	struct irq_data *irq_data;
	struct interrupt_config_item *int_cfg;

	if (!ch)
		return -EINVAL;

	/* ch->type indexes the per-edge table; bounds-check it first */
	if (ch->type >= ARRAY_SIZE(edge_to_pids))
		return -ENODEV;

	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;

	/* negative irq_id means no interrupt is configured for this edge */
	if (int_cfg->irq_id < 0)
		return -ENODEV;

	irq_chip = irq_get_chip(int_cfg->irq_id);
	if (!irq_chip)
		return -ENODEV;

	irq_data = irq_get_irq_data(int_cfg->irq_id);
	if (!irq_data)
		return -ENODEV;

	/* mask/unmask at the irq-chip level: affects the whole edge */
	if (mask) {
		SMx_POWER_INFO("SMD Masking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_mask(irq_data);
	} else {
		SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_unmask(irq_data);
	}

	return 0;
}
EXPORT_SYMBOL(smd_mask_receive_interrupt);
2203
/* Unimplemented stub kept for API compatibility; always fails (-1). */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2208
/* Unimplemented stub kept for API compatibility; always fails (-1). */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2213
2214int smd_cur_packet_size(smd_channel_t *ch)
2215{
Jack Pham1b236d12012-03-19 15:27:18 -07002216 if (!ch) {
2217 pr_err("%s: Invalid channel specified\n", __func__);
2218 return -ENODEV;
2219 }
2220
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002221 return ch->current_packet;
2222}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002223EXPORT_SYMBOL(smd_cur_packet_size);
2224
/*
 * Report TTY modem-control bits for the channel.  Inbound signals
 * (DSR/CTS/CD/RI) come from the receive half-channel; our own outbound
 * RTS/DTR are read back from the flags we set on the send half-channel
 * (fCTS/fDSR respectively, mirroring how smd_tiocmset_from_cb writes
 * them).  Returns the TIOCM_* bitmask or -ENODEV.
 */
int smd_tiocmget(smd_channel_t *ch)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
		(ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
		(ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
		(ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
		(ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
		(ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
}
EXPORT_SYMBOL(smd_tiocmget);
2240
/* this api will be called while holding smd_lock */
/*
 * Set/clear our outbound modem-control signals on the send half-channel:
 * DTR maps to fDSR and RTS to fCTS as seen by the peer.  Raises fSTATE
 * and interrupts the remote processor so it re-reads the signals.
 * Returns 0, or -ENODEV for a NULL channel.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	/* flag a state change for the peer to notice */
	ch->half_ch->set_fSTATE(ch->send, 1);
	/* ensure shared-memory updates are visible before the interrupt */
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2269
2270int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2271{
2272 unsigned long flags;
2273
Jack Pham1b236d12012-03-19 15:27:18 -07002274 if (!ch) {
2275 pr_err("%s: Invalid channel specified\n", __func__);
2276 return -ENODEV;
2277 }
2278
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002279 spin_lock_irqsave(&smd_lock, flags);
2280 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002281 spin_unlock_irqrestore(&smd_lock, flags);
2282
2283 return 0;
2284}
2285EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002286
/*
 * Check whether a complete packet header has been received on a packet
 * channel: returns 1 if a packet is pending, 0 if not, -EINVAL for a NULL
 * or non-packet channel.  Falls back to refreshing the packet state under
 * smd_lock when no packet is currently latched.
 */
int smd_is_pkt_avail(smd_channel_t *ch)
{
	unsigned long flags;

	if (!ch || !ch->is_pkt_ch)
		return -EINVAL;

	/* fast path: a packet header is already latched */
	if (ch->current_packet)
		return 1;

	/* update_packet_state() races with the IRQ path -> take the lock */
	spin_lock_irqsave(&smd_lock, flags);
	update_packet_state(ch);
	spin_unlock_irqrestore(&smd_lock, flags);

	return ch->current_packet ? 1 : 0;
}
EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002304
2305
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002306/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002307
/*
 * Shared Memory Range Check
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the aux smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL. Expects the array
 * of smem regions to be in ascending physical address order.
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 */
static void *smem_range_check(void *base, unsigned offset)
{
	int i;
	void *phys_addr;
	unsigned size;

	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;
		/* regions are sorted ascending: once base is below the
		 * current region start it cannot be in any later region
		 */
		if (base < phys_addr)
			return NULL;
		if (base > phys_addr + size)
			continue;
		/* both base and base+offset must land inside this region */
		if (base >= phys_addr && base + offset < phys_addr + size)
			return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}
2338
/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.
 */
void *smem_alloc(unsigned id, unsigned size)
{
	/* Despite its name this never allocates: it is a size-checked lookup
	 * of an existing item (see smem_alloc2 for the allocating variant).
	 */
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002347
/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
 * it allocates it and then returns the pointer to it.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	/* heap granularity is 8 bytes; lookups compare the aligned size */
	size_in = ALIGN(size_in, 8);
	/* the TOC and heap info are shared with remote processors, so the
	 * remote spinlock must be held across the read-modify-write
	 */
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		/* only dynamic items may be allocated here; fixed items are
		 * owned by other code
		 */
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* ensure offset/size are visible to remote readers
			 * before the entry is marked allocated
			 */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	/* flush all shared-memory writes before releasing the remote lock */
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002396
/* Looks up an smem item by id.  On success returns its virtual address and
 * stores its size in *size; otherwise returns NULL (with *size set to 0 if
 * the entry exists but is unallocated).  Takes the remote spinlock only once
 * it has been initialized, so this is safe to call during early boot.
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		/* a non-zero base in 'reserved' means the item lives in an
		 * aux smem region rather than the main shared RAM bank
		 */
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002429
2430void *smem_find(unsigned id, unsigned size_in)
2431{
2432 unsigned size;
2433 void *ptr;
2434
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002435 ptr = smem_get_entry(id, &size);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002436 if (!ptr)
2437 return 0;
2438
2439 size_in = ALIGN(size_in, 8);
2440 if (size_in != size) {
2441 pr_err("smem_find(%d, %d): wrong size %d\n",
2442 id, size_in, size);
2443 return 0;
2444 }
2445
2446 return ptr;
2447}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002448EXPORT_SYMBOL(smem_find);
2449
2450static int smsm_cb_init(void)
2451{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002452 struct smsm_state_info *state_info;
2453 int n;
2454 int ret = 0;
2455
2456 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2457 GFP_KERNEL);
2458
2459 if (!smsm_states) {
2460 pr_err("%s: SMSM init failed\n", __func__);
2461 return -ENOMEM;
2462 }
2463
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002464 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2465 if (!smsm_cb_wq) {
2466 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2467 kfree(smsm_states);
2468 return -EFAULT;
2469 }
2470
Eric Holmbergc8002902011-09-16 13:55:57 -06002471 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002472 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2473 state_info = &smsm_states[n];
2474 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002475 state_info->intr_mask_set = 0x0;
2476 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002477 INIT_LIST_HEAD(&state_info->callbacks);
2478 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002479 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002480
2481 return ret;
2482}
2483
/* One-time SMSM initialization: discovers the entry/host counts from shared
 * memory, allocates the snapshot FIFO and wakelock, maps (or allocates) the
 * shared state, interrupt-mask and interrupt-mux arrays, and finally sets up
 * the callback infrastructure.  Returns 0 on success or a negative errno.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;

	/* if the remote side published sizing info, override the defaults */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	/* FIFO holds up to SMSM_SNAPSHOT_CNT full state snapshots */
	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			/* newer modem images also expect the DEM entry
			 * cleared -- NOTE(review): 0xB version threshold
			 * taken as-is, meaning not visible here
			 */
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			/* start with all interrupts masked for apps ... */
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	/* make all of the above visible before SMSM is used */
	wmb();
	return 0;
}
2550
2551void smsm_reset_modem(unsigned mode)
2552{
2553 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2554 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2555 } else if (mode == SMSM_MODEM_WAIT) {
2556 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2557 } else { /* reset_mode is SMSM_RESET or default */
2558 mode = SMSM_RESET;
2559 }
2560
2561 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2562}
2563EXPORT_SYMBOL(smsm_reset_modem);
2564
2565void smsm_reset_modem_cont(void)
2566{
2567 unsigned long flags;
2568 uint32_t state;
2569
2570 if (!smsm_info.state)
2571 return;
2572
2573 spin_lock_irqsave(&smem_lock, flags);
2574 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
2575 & ~SMSM_MODEM_WAIT;
2576 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2577 wmb();
2578 spin_unlock_irqrestore(&smem_lock, flags);
2579}
2580EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002581
/* Captures the current value of every SMSM state entry into the snapshot
 * FIFO (followed by the wakelock-usage flag) and queues the callback worker
 * to process it.  When @use_wakelock is non-zero, a wakelock is held until
 * the worker has drained the matching snapshot.  Called with interrupts
 * disabled from the SMSM IRQ path.
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	/* bail early if a full snapshot would not fit */
	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 * 1) increment snapshot count
	 * 2) insert data into FIFO
	 *
	 * Potentially in parallel, the worker:
	 * a) verifies >= 1 snapshots are in FIFO
	 * b) processes snapshot
	 * c) decrements reference count
	 *
	 * This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	queue_work(smsm_cb_wq, &smsm_cb_work);
	return;

restore_snapshot_count:
	/* a partial snapshot was written; undo the count/wakelock taken above
	 * (the partial FIFO contents are left for the worker's underflow
	 * handling)
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002656
/* Common SMSM interrupt handler.  For the ADSP interrupt it only snapshots
 * state for the callback worker.  For all other sources it runs the
 * apps<->modem handshake state machine: acking resets, mirroring the modem's
 * init progression into the apps entry, and notifying the remote side of any
 * apps-state bits that changed.
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* track the Q6->apps intr mux value on 8x50 targets */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			/* modem initiated a reset; ack with SMSM_RESET and
			 * flush caches so shared data survives the restart
			 */
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			/* mirror the modem's init progression; SMSM_RUN is
			 * set once INIT, SMDINIT and RPCINIT are all present
			 */
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			/* inform remote processors which bits toggled */
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2739
/* SMSM interrupt from the modem: log, count for stats, then delegate to the
 * common handler.
 */
static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002746
/* SMSM interrupt from the LPASS/Q6 DSP: log, count for stats, then delegate
 * to the common handler.
 */
static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2753
/* SMSM interrupt from the sensors DSPS: log, count for stats, then delegate
 * to the common handler.
 */
static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2760
/* SMSM interrupt from the WCNSS (wireless) core: log, count for stats, then
 * delegate to the common handler.
 */
static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2767
Eric Holmberge8a39322012-04-03 15:14:02 -06002768/*
2769 * Changes the global interrupt mask. The set and clear masks are re-applied
2770 * every time the global interrupt mask is updated for callback registration
2771 * and de-registration.
2772 *
2773 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2774 * mask and the set mask, the result will be that the interrupt is set.
2775 *
2776 * @smsm_entry SMSM entry to change
2777 * @clear_mask 1 = clear bit, 0 = no-op
2778 * @set_mask 1 = set bit, 0 = no-op
2779 *
2780 * @returns 0 for success, < 0 for error
2781 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002782int smsm_change_intr_mask(uint32_t smsm_entry,
2783 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002784{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002785 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002786 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002787
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002788 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2789 pr_err("smsm_change_state: Invalid entry %d\n",
2790 smsm_entry);
2791 return -EINVAL;
2792 }
2793
2794 if (!smsm_info.intr_mask) {
2795 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002796 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002797 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002798
2799 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002800 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2801 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002802
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002803 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2804 new_mask = (old_mask & ~clear_mask) | set_mask;
2805 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002806
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002807 wmb();
2808 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002809
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002810 return 0;
2811}
2812EXPORT_SYMBOL(smsm_change_intr_mask);
2813
2814int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2815{
2816 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2817 pr_err("smsm_change_state: Invalid entry %d\n",
2818 smsm_entry);
2819 return -EINVAL;
2820 }
2821
2822 if (!smsm_info.intr_mask) {
2823 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2824 return -EIO;
2825 }
2826
2827 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2828 return 0;
2829}
2830EXPORT_SYMBOL(smsm_get_intr_mask);
2831
2832int smsm_change_state(uint32_t smsm_entry,
2833 uint32_t clear_mask, uint32_t set_mask)
2834{
2835 unsigned long flags;
2836 uint32_t old_state, new_state;
2837
2838 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2839 pr_err("smsm_change_state: Invalid entry %d",
2840 smsm_entry);
2841 return -EINVAL;
2842 }
2843
2844 if (!smsm_info.state) {
2845 pr_err("smsm_change_state <SM NO STATE>\n");
2846 return -EIO;
2847 }
2848 spin_lock_irqsave(&smem_lock, flags);
2849
2850 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2851 new_state = (old_state & ~clear_mask) | set_mask;
2852 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2853 SMSM_DBG("smsm_change_state %x\n", new_state);
2854 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002855
2856 spin_unlock_irqrestore(&smem_lock, flags);
2857
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002858 return 0;
2859}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002860EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002861
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002862uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002863{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002864 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002865
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002866 /* needs interface change to return error code */
2867 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2868 pr_err("smsm_change_state: Invalid entry %d",
2869 smsm_entry);
2870 return 0;
2871 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002872
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002873 if (!smsm_info.state) {
2874 pr_err("smsm_get_state <SM NO STATE>\n");
2875 } else {
2876 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2877 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002878
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002879 return rv;
2880}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002881EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002882
/**
 * Performs SMSM callback client notifiction.
 *
 * Drains complete snapshots from the snapshot FIFO (one uint32_t per SMSM
 * entry followed by a wakelock-usage flag), invokes every registered
 * callback whose mask overlaps the changed bits, and releases the snapshot
 * wakelock reference taken by smsm_cb_snapshot().
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	if (!smd_initialized)
		return;

	/* process only whole snapshots; a partial one stays queued */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
						n, state_info->last_value,
						new_state);
				/* fire each callback whose mask overlaps the
				 * bits that actually changed
				 */
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			mutex_unlock(&smsm_lock);
			return;
		}
		mutex_unlock(&smsm_lock);

		/* drop the reference taken when the snapshot was queued */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						   " wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
						__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
2960
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002961
/**
 * Registers callback for SMSM state notifications when the specified
 * bits change.
 *
 * @smsm_entry  Processor entry to register against
 * @mask        Bits that should trigger the callback
 * @notify      Notification function to register
 * @data        Opaque data passed in to callback
 *
 * @returns Status code
 *          <0 error code
 *          0  inserted new entry
 *          1  updated mask of existing entry
 */
int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_info *state;
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_found = 0;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		ret = -ENODEV;
		goto cleanup;
	}

	/* if this (notify, data) pair is already registered, just widen its
	 * mask; accumulate the union of all masks for the interrupt mask
	 */
	state = &smsm_states[smsm_entry];
	list_for_each_entry(cb_info,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask |= mask;
			cb_found = cb_info;
			ret = 1;
		}
		new_mask |= cb_info->mask;
	}

	if (!cb_found) {
		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
			GFP_ATOMIC);
		if (!cb_info) {
			ret = -ENOMEM;
			goto cleanup;
		}

		cb_info->mask = mask;
		cb_info->notify = notify;
		cb_info->data = data;
		INIT_LIST_HEAD(&cb_info->cb_list);
		list_add_tail(&cb_info->cb_list,
			&state->callbacks);
		new_mask |= mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* re-apply the global clear/set overrides on top of the
		 * union of callback masks (see smsm_change_intr_mask)
		 */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

cleanup:
	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_register);
3046
3047
/**
 * Deregisters for SMSM state notifications for the specified bits.
 *
 * @smsm_entry  Processor entry to deregister
 * @mask        Bits to deregister (if result is 0, callback is removed)
 * @notify      Notification function to deregister
 * @data        Opaque data passed in to callback
 *
 * @returns Status code
 *          <0 error code
 *          0  not found
 *          1  updated mask
 *          2  removed callback
 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	/* _safe iteration: entries whose mask drops to zero are removed
	 * in-loop; new_mask accumulates the union of the remaining masks
	 */
	state = &smsm_states[smsm_entry];
	list_for_each_entry_safe(cb_info, cb_tmp,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				continue;
			}
		}
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* re-apply the global clear/set overrides on top of the
		 * union of remaining callback masks
		 */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3120
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003121int smd_module_init_notifier_register(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003122{
3123 int ret;
3124 if (!nb)
3125 return -EINVAL;
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003126 mutex_lock(&smd_module_init_notifier_lock);
3127 ret = raw_notifier_chain_register(&smd_module_init_notifier_list, nb);
3128 if (smd_module_inited)
3129 nb->notifier_call(nb, 0, NULL);
3130 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003131 return ret;
3132}
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003133EXPORT_SYMBOL(smd_module_init_notifier_register);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003134
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003135int smd_module_init_notifier_unregister(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003136{
3137 int ret;
3138 if (!nb)
3139 return -EINVAL;
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003140 mutex_lock(&smd_module_init_notifier_lock);
3141 ret = raw_notifier_chain_unregister(&smd_module_init_notifier_list,
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003142 nb);
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003143 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003144 return ret;
3145}
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003146EXPORT_SYMBOL(smd_module_init_notifier_unregister);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003147
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003148static void smd_module_init_notify(uint32_t state, void *data)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003149{
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003150 mutex_lock(&smd_module_init_notifier_lock);
3151 smd_module_inited = 1;
3152 raw_notifier_call_chain(&smd_module_init_notifier_list,
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003153 state, data);
Karthikeyan Ramasubramanianed92ac22012-08-22 18:08:14 -06003154 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003155}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003156
/*
 * smd_core_init() - legacy (non-platform-data) SMD/SMSM interrupt setup.
 *
 * Requests the inbound modem SMD/SMSM interrupts and, depending on kernel
 * config, the QDSP6, DSPS and WCNSS interrupts, marking each as a wakeup
 * source.  On any request_irq() failure, all previously requested IRQs are
 * freed and the error is returned.
 *
 * NOTE(review): the error paths inside the CONFIG_DSPS/CONFIG_WCNSS/
 * CONFIG_DSPS_SMSM sections unconditionally free the INT_ADSP_A11* IRQs,
 * which are only requested when CONFIG_QDSP6 is enabled -- confirm every
 * shipping config that enables the later options also enables CONFIG_QDSP6.
 *
 * Returns 0 on success, negative errno on failure.
 */
int smd_core_init(void)
{
	int r;
	unsigned long flags = IRQF_TRIGGER_RISING;
	SMD_INFO("smd_core_init()\n");

	/* dev_id 0 (NULL) is used for the non-shared modem IRQs */
	r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
			flags, "smd_dev", 0);
	if (r < 0)
		return r;
	/* wake capability is best-effort: log, but keep going */
	r = enable_irq_wake(INT_A9_M2A_0);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_A9_M2A_0\n");

	r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
			flags, "smsm_dev", 0);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		return r;
	}
	r = enable_irq_wake(INT_A9_M2A_5);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_A9_M2A_5\n");

#if defined(CONFIG_QDSP6)
#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
	/* SMD and SMSM share one line on this target: must request shared */
	flags |= IRQF_SHARED;
#endif
	/*
	 * Shared IRQs need a unique non-NULL dev_id; the handler pointer
	 * itself is used as that token here (and in free_irq below).
	 */
	r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
			flags, "smd_dev", smd_dsp_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		return r;
	}

	r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
			flags, "smsm_dev", smsm_dsp_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_ADSP_A11);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_ADSP_A11\n");

#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
	/* separate SMSM line: it needs its own wake enable */
	r = enable_irq_wake(INT_ADSP_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: enable_irq_wake "
		       "failed for INT_ADSP_A11_SMSM\n");
#endif
	/* IRQF_SHARED only applied to the ADSP lines; clear it again */
	flags &= ~IRQF_SHARED;
#endif

#if defined(CONFIG_DSPS)
	r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
			flags, "smd_dev", smd_dsps_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_DSPS_A11);
	if (r < 0)
		/*
		 * NOTE(review): message says INT_ADSP_A11 but this is the
		 * INT_DSPS_A11 wake enable -- looks like a copy-paste slip.
		 */
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_ADSP_A11\n");
#endif

#if defined(CONFIG_WCNSS)
	r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
			flags, "smd_dev", smd_wcnss_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_WCNSS_A11);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_WCNSS_A11\n");

	r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
			flags, "smsm_dev", smsm_wcnss_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_WCNSS_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
#endif

#if defined(CONFIG_DSPS_SMSM)
	r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
			flags, "smsm_dev", smsm_dsps_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
		free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
		return r;
	}

	r = enable_irq_wake(INT_DSPS_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
#endif
	SMD_INFO("smd_core_init() done\n");

	return 0;
}
3293
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303294static int intr_init(struct interrupt_config_item *private_irq,
3295 struct smd_irq_config *platform_irq,
3296 struct platform_device *pdev
3297 )
3298{
3299 int irq_id;
3300 int ret;
3301 int ret_wake;
3302
3303 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3304 private_irq->out_offset = platform_irq->out_offset;
3305 private_irq->out_base = platform_irq->out_base;
3306
3307 irq_id = platform_get_irq_byname(
3308 pdev,
3309 platform_irq->irq_name
3310 );
3311 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3312 platform_irq->irq_name, irq_id);
3313 ret = request_irq(irq_id,
3314 private_irq->irq_handler,
3315 platform_irq->flags,
3316 platform_irq->device_name,
3317 (void *)platform_irq->dev_id
3318 );
3319 if (ret < 0) {
3320 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003321 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303322 } else {
3323 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003324 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303325 ret_wake = enable_irq_wake(irq_id);
3326 if (ret_wake < 0) {
3327 pr_err("smd: enable_irq_wake failed on %s",
3328 platform_irq->irq_name);
3329 }
3330 }
3331
3332 return ret;
3333}
3334
Jeff Hugobdc734d2012-03-26 16:05:39 -06003335int sort_cmp_func(const void *a, const void *b)
3336{
3337 struct smem_area *left = (struct smem_area *)(a);
3338 struct smem_area *right = (struct smem_area *)(b);
3339
3340 return left->phys_addr - right->phys_addr;
3341}
3342
/*
 * smd_core_platform_init() - platform-data driven SMD bring-up.
 *
 * Maps any auxiliary SMEM regions described by the platform data (sorted
 * by physical address for later lookup), then requests the SMD and
 * optional SMSM interrupts for every configured subsystem edge.
 *
 * Error handling is goto-based and falls through deliberately:
 * intr_failed frees all successfully requested IRQs and then continues
 * into smem_failed, which unmaps the regions mapped so far and frees the
 * table.  Returns 0 on success, negative errno on failure.
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;
	struct smd_smem_regions *smd_smem_areas;
	int smem_idx = 0;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	/* optional SSR tuning: some targets skip the SMSM reset handshake */
	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			   smd_ssr_config->disable_smsm_reset_handshake;

	smd_smem_areas = smd_platform_data->smd_smem_areas;
	if (smd_smem_areas) {
		num_smem_areas = smd_platform_data->num_smem_areas;
		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
						GFP_KERNEL);
		if (!smem_areas) {
			pr_err("%s: smem_areas kmalloc failed\n", __func__);
			err_ret = -ENOMEM;
			goto smem_areas_alloc_fail;
		}

		for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
			smem_areas[smem_idx].phys_addr =
					smd_smem_areas[smem_idx].phys_addr;
			smem_areas[smem_idx].size =
					smd_smem_areas[smem_idx].size;
			smem_areas[smem_idx].virt_addr = ioremap_nocache(
				(unsigned long)(smem_areas[smem_idx].phys_addr),
				smem_areas[smem_idx].size);
			if (!smem_areas[smem_idx].virt_addr) {
				pr_err("%s: ioremap_nocache() of addr:%p"
					" size: %x\n", __func__,
					smem_areas[smem_idx].phys_addr,
					smem_areas[smem_idx].size);
				err_ret = -ENOMEM;
				/*
				 * bump smem_idx so the cleanup loop (which
				 * pre-decrements) unmaps exactly the regions
				 * that succeeded
				 */
				++smem_idx;
				goto smem_failed;
			}
		}
		/* keep regions ordered by phys addr for address lookups */
		sort(smem_areas, num_smem_areas,
				sizeof(struct smem_area),
				sort_cmp_func, NULL);
	}

	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			goto intr_failed;
		}

		/* only init smsm structs if this edge supports smsm */
		if (cfg->smsm_int.irq_id)
			ret = intr_init(
				&private_intr_config[cfg->irq_config_id].smsm,
				&cfg->smsm_int,
				pdev
				);

		/*
		 * NOTE(review): when the edge has no SMSM interrupt, 'ret'
		 * still holds the (non-negative) SMD result here, so this
		 * check is effectively a no-op in that case -- appears
		 * intentional, confirm.
		 */
		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			goto intr_failed;
		}

		if (cfg->subsys_name)
			strlcpy(edge_to_pids[cfg->edge].subsys_name,
				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}


	SMD_INFO("smd_core_platform_init() done\n");
	return 0;

intr_failed:
	/* irq_id >= 0 marks IRQs that were actually requested */
	pr_err("smd: deregistering IRQs\n");
	for (i = 0; i < num_ss; ++i) {
		cfg = &smd_ss_config_list[i];

		if (cfg->smd_int.irq_id >= 0)
			free_irq(cfg->smd_int.irq_id,
				(void *)cfg->smd_int.dev_id
				);
		if (cfg->smsm_int.irq_id >= 0)
			free_irq(cfg->smsm_int.irq_id,
				(void *)cfg->smsm_int.dev_id
				);
	}
	/* fall through: also undo the SMEM mappings */
smem_failed:
	for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
		iounmap(smem_areas[smem_idx].virt_addr);
	kfree(smem_areas);
smem_areas_alloc_fail:
	return err_ret;
}
3458
Gregory Bean4416e9e2010-07-28 10:22:12 -07003459static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003460{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303461 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003462
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303463 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003464 INIT_WORK(&probe_work, smd_channel_probe_worker);
3465
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003466 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3467 if (IS_ERR(channel_close_wq)) {
3468 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3469 return -ENOMEM;
3470 }
3471
3472 if (smsm_init()) {
3473 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003474 return -1;
3475 }
3476
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303477 if (pdev) {
3478 if (pdev->dev.of_node) {
3479 pr_err("SMD: Device tree not currently supported\n");
3480 return -ENODEV;
3481 } else if (pdev->dev.platform_data) {
3482 ret = smd_core_platform_init(pdev);
3483 if (ret) {
3484 pr_err(
3485 "SMD: smd_core_platform_init() failed\n");
3486 return -ENODEV;
3487 }
3488 } else {
3489 ret = smd_core_init();
3490 if (ret) {
3491 pr_err("smd_core_init() failed\n");
3492 return -ENODEV;
3493 }
3494 }
3495 } else {
3496 pr_err("SMD: PDEV not found\n");
3497 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003498 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003499
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003500 smd_initialized = 1;
3501
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003502 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003503 smsm_irq_handler(0, 0);
3504 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003505
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003506 return 0;
3507}
3508
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

/*
 * Subsystem-restart hooks: each entry maps a restartable subsystem name
 * to the SMD processor whose channels must be reset after it shuts down.
 * NOTE(review): "gss" is mapped to SMD_MODEM here -- presumably the GSS
 * shares the modem edge on these targets; confirm against the edge table.
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3520
3521static int restart_notifier_cb(struct notifier_block *this,
3522 unsigned long code,
3523 void *data)
3524{
3525 if (code == SUBSYS_AFTER_SHUTDOWN) {
3526 struct restart_notifier_block *notifier;
3527
3528 notifier = container_of(this,
3529 struct restart_notifier_block, nb);
3530 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3531 __func__, notifier->processor,
3532 notifier->name);
3533
3534 smd_channel_reset(notifier->processor);
3535 }
3536
3537 return NOTIFY_DONE;
3538}
3539
3540static __init int modem_restart_late_init(void)
3541{
3542 int i;
3543 void *handle;
3544 struct restart_notifier_block *nb;
3545
3546 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3547 nb = &restart_notifiers[i];
3548 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3549 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3550 __func__, nb->name, handle);
3551 }
3552 return 0;
3553}
3554late_initcall(modem_restart_late_init);
3555
/* Platform driver; matched against the board's MODULE_NAME device. */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3563
/*
 * msm_smd_init() - one-time SMD core bring-up: init the shared-memory
 * remote spinlock, register the platform driver, then notify modules
 * waiting on SMD init.
 *
 * May be invoked both directly by early init code and again via
 * module_init(); the static 'registered' latch makes repeat calls no-ops.
 * NOTE(review): 'registered' is set before the fallible steps, so a
 * failed init cannot be retried -- presumably intentional (init failure
 * is fatal); confirm.
 *
 * Returns 0 on success or if already initialized, negative errno on
 * failure.
 */
int __init msm_smd_init(void)
{
	static bool registered;
	int rc;

	if (registered)
		return 0;

	registered = true;
	/* guards SMEM allocation against the remote processors */
	rc = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
	if (rc) {
		pr_err("%s: remote spinlock init failed %d\n", __func__, rc);
		return rc;
	}
	spinlocks_initialized = 1;

	rc = platform_driver_register(&msm_smd_driver);
	if (rc) {
		pr_err("%s: msm_smd_driver register failed %d\n",
			__func__, rc);
		return rc;
	}

	/* wake anyone blocked on SMD module-init completion */
	smd_module_init_notify(0, NULL);

	return 0;
}

module_init(msm_smd_init);
3593
3594MODULE_DESCRIPTION("MSM Shared Memory Core");
3595MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3596MODULE_LICENSE("GPL");