blob: c7e082f56f5371fe73167fdbb20e4c3d94ccb7e6 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f942012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070038#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070040#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053042#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070043#include <mach/proc_comm.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053044#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070045
46#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060050 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060051 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070052#define CONFIG_QDSP6 1
53#endif
54
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060055#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
56 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057#define CONFIG_DSPS 1
58#endif
59
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060#if defined(CONFIG_ARCH_MSM8960) \
61 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060063#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070065
66#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067#define SMEM_VERSION 0x000B
68#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070069#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060070#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72uint32_t SMSM_NUM_ENTRIES = 8;
73uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
Eric Holmberge8a39322012-04-03 15:14:02 -060075/* Legacy SMSM interrupt notifications */
76#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
77 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070078
79enum {
80 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 MSM_SMSM_DEBUG = 1U << 1,
82 MSM_SMD_INFO = 1U << 2,
83 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070084 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085};
86
87struct smsm_shared_info {
88 uint32_t *state;
89 uint32_t *intr_mask;
90 uint32_t *intr_mux;
91};
92
93static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f942012-03-19 10:04:22 -060094static struct kfifo smsm_snapshot_fifo;
95static struct wake_lock smsm_snapshot_wakelock;
96static int smsm_snapshot_count;
97static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098
99struct smsm_size_info_type {
100 uint32_t num_hosts;
101 uint32_t num_entries;
102 uint32_t reserved0;
103 uint32_t reserved1;
104};
105
106struct smsm_state_cb_info {
107 struct list_head cb_list;
108 uint32_t mask;
109 void *data;
110 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
111};
112
113struct smsm_state_info {
114 struct list_head callbacks;
115 uint32_t last_value;
Eric Holmberge8a39322012-04-03 15:14:02 -0600116 uint32_t intr_mask_set;
117 uint32_t intr_mask_clear;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700118};
119
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530120struct interrupt_config_item {
121 /* must be initialized */
122 irqreturn_t (*irq_handler)(int req, void *data);
123 /* outgoing interrupt config (set from platform data) */
124 uint32_t out_bit_pos;
125 void __iomem *out_base;
126 uint32_t out_offset;
Eric Holmbergdeace152012-07-25 12:17:11 -0600127 int irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530128};
129
130struct interrupt_config {
131 struct interrupt_config_item smd;
132 struct interrupt_config_item smsm;
133};
134
135static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700136static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530137static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700138static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530139static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700140static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530141static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700142static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600143static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530144static irqreturn_t smsm_irq_handler(int irq, void *data);
145
146static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
147 [SMD_MODEM] = {
148 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700149 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530150 },
151 [SMD_Q6] = {
152 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700153 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530154 },
155 [SMD_DSPS] = {
156 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700157 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530158 },
159 [SMD_WCNSS] = {
160 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700161 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530162 },
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600163 [SMD_RPM] = {
164 .smd.irq_handler = smd_rpm_irq_handler,
165 .smsm.irq_handler = NULL, /* does not support smsm */
166 },
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530167};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600168
169struct smem_area {
170 void *phys_addr;
171 unsigned size;
172 void __iomem *virt_addr;
173};
174static uint32_t num_smem_areas;
175static struct smem_area *smem_areas;
176static void *smem_range_check(void *base, unsigned offset);
177
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700178struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530179
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700180#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
181#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
182 entry * SMSM_NUM_HOSTS + host)
183#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
184
185/* Internal definitions which are not exported in some targets */
186enum {
187 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700188};
189
190static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700191module_param_named(debug_mask, msm_smd_debug_mask,
192 int, S_IRUGO | S_IWUSR | S_IWGRP);
193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194#if defined(CONFIG_MSM_SMD_DEBUG)
195#define SMD_DBG(x...) do { \
196 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
197 printk(KERN_DEBUG x); \
198 } while (0)
199
200#define SMSM_DBG(x...) do { \
201 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
202 printk(KERN_DEBUG x); \
203 } while (0)
204
205#define SMD_INFO(x...) do { \
206 if (msm_smd_debug_mask & MSM_SMD_INFO) \
207 printk(KERN_INFO x); \
208 } while (0)
209
210#define SMSM_INFO(x...) do { \
211 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
212 printk(KERN_INFO x); \
213 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700214#define SMx_POWER_INFO(x...) do { \
215 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
216 printk(KERN_INFO x); \
217 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218#else
219#define SMD_DBG(x...) do { } while (0)
220#define SMSM_DBG(x...) do { } while (0)
221#define SMD_INFO(x...) do { } while (0)
222#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700223#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700224#endif
225
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700226static unsigned last_heap_free = 0xffffffff;
227
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228static inline void smd_write_intr(unsigned int val,
229 const void __iomem *addr);
230
231#if defined(CONFIG_ARCH_MSM7X30)
232#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530233 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700234#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530235 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530237 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700238#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530239 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700240#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600241#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242#define MSM_TRIG_A2WCNSS_SMD_INT
243#define MSM_TRIG_A2WCNSS_SMSM_INT
244#elif defined(CONFIG_ARCH_MSM8X60)
245#define MSM_TRIG_A2M_SMD_INT \
246 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
247#define MSM_TRIG_A2Q6_SMD_INT \
248 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
249#define MSM_TRIG_A2M_SMSM_INT \
250 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
251#define MSM_TRIG_A2Q6_SMSM_INT \
252 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
253#define MSM_TRIG_A2DSPS_SMD_INT \
254 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600255#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256#define MSM_TRIG_A2WCNSS_SMD_INT
257#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600258#elif defined(CONFIG_ARCH_MSM9615)
259#define MSM_TRIG_A2M_SMD_INT \
260 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
261#define MSM_TRIG_A2Q6_SMD_INT \
262 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
263#define MSM_TRIG_A2M_SMSM_INT \
264 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
265#define MSM_TRIG_A2Q6_SMSM_INT \
266 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
267#define MSM_TRIG_A2DSPS_SMD_INT
268#define MSM_TRIG_A2DSPS_SMSM_INT
269#define MSM_TRIG_A2WCNSS_SMD_INT
270#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700271#elif defined(CONFIG_ARCH_FSM9XXX)
272#define MSM_TRIG_A2Q6_SMD_INT \
273 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
274#define MSM_TRIG_A2Q6_SMSM_INT \
275 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
276#define MSM_TRIG_A2M_SMD_INT \
277 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
278#define MSM_TRIG_A2M_SMSM_INT \
279 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
280#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600281#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700282#define MSM_TRIG_A2WCNSS_SMD_INT
283#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700284#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285#define MSM_TRIG_A2M_SMD_INT \
286 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700287#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288#define MSM_TRIG_A2M_SMSM_INT \
289 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700290#define MSM_TRIG_A2Q6_SMSM_INT
291#define MSM_TRIG_A2DSPS_SMD_INT
292#define MSM_TRIG_A2DSPS_SMSM_INT
293#define MSM_TRIG_A2WCNSS_SMD_INT
294#define MSM_TRIG_A2WCNSS_SMSM_INT
295#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
296#define MSM_TRIG_A2M_SMD_INT \
297 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
298#define MSM_TRIG_A2Q6_SMD_INT
299#define MSM_TRIG_A2M_SMSM_INT \
300 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
301#define MSM_TRIG_A2Q6_SMSM_INT
302#define MSM_TRIG_A2DSPS_SMD_INT
303#define MSM_TRIG_A2DSPS_SMSM_INT
304#define MSM_TRIG_A2WCNSS_SMD_INT
305#define MSM_TRIG_A2WCNSS_SMSM_INT
306#else /* use platform device / device tree configuration */
307#define MSM_TRIG_A2M_SMD_INT
308#define MSM_TRIG_A2Q6_SMD_INT
309#define MSM_TRIG_A2M_SMSM_INT
310#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700311#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600312#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700313#define MSM_TRIG_A2WCNSS_SMD_INT
314#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700315#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700316
Jeff Hugoee40b152012-02-09 17:39:47 -0700317/*
318 * stub out legacy macros if they are not being used so that the legacy
319 * code compiles even though it is not used
320 *
321 * these definitions should not be used in active code and will cause
322 * an early failure
323 */
324#ifndef INT_A9_M2A_0
325#define INT_A9_M2A_0 -1
326#endif
327#ifndef INT_A9_M2A_5
328#define INT_A9_M2A_5 -1
329#endif
330#ifndef INT_ADSP_A11
331#define INT_ADSP_A11 -1
332#endif
333#ifndef INT_ADSP_A11_SMSM
334#define INT_ADSP_A11_SMSM -1
335#endif
336#ifndef INT_DSPS_A11
337#define INT_DSPS_A11 -1
338#endif
339#ifndef INT_DSPS_A11_SMSM
340#define INT_DSPS_A11_SMSM -1
341#endif
342#ifndef INT_WCNSS_A11
343#define INT_WCNSS_A11 -1
344#endif
345#ifndef INT_WCNSS_A11_SMSM
346#define INT_WCNSS_A11_SMSM -1
347#endif
348
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349#define SMD_LOOPBACK_CID 100
350
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600351#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
352static remote_spinlock_t remote_spinlock;
353
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700354static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700355static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600356static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700357
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600358static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700359static void notify_smsm_cb_clients_worker(struct work_struct *work);
360static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600361static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700362static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530363static int spinlocks_initialized;
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -0600364static RAW_NOTIFIER_HEAD(smsm_driver_state_notifier_list);
365static DEFINE_MUTEX(smsm_driver_state_notifier_lock);
366static void smsm_driver_state_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700367
368static inline void smd_write_intr(unsigned int val,
369 const void __iomem *addr)
370{
371 wmb();
372 __raw_writel(val, addr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700373}
374
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700375static inline void notify_modem_smd(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700376{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530377 static const struct interrupt_config_item *intr
378 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700379 if (intr->out_base) {
380 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530381 smd_write_intr(intr->out_bit_pos,
382 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700383 } else {
384 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530385 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700386 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700387}
388
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700389static inline void notify_dsp_smd(void)
390{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530391 static const struct interrupt_config_item *intr
392 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700393 if (intr->out_base) {
394 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530395 smd_write_intr(intr->out_bit_pos,
396 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700397 } else {
398 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530399 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700400 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700401}
402
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530403static inline void notify_dsps_smd(void)
404{
405 static const struct interrupt_config_item *intr
406 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700407 if (intr->out_base) {
408 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530409 smd_write_intr(intr->out_bit_pos,
410 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700411 } else {
412 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530413 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700414 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530415}
416
417static inline void notify_wcnss_smd(void)
418{
419 static const struct interrupt_config_item *intr
420 = &private_intr_config[SMD_WCNSS].smd;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530421
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700422 if (intr->out_base) {
423 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530424 smd_write_intr(intr->out_bit_pos,
425 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700426 } else {
427 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530428 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700429 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530430}
431
/* Raise the apps->RPM SMD interrupt.  The RPM is reachable only via a
 * platform-data-configured register (out_base); unlike the other edges
 * there is no legacy hardcoded fallback, so the call is a no-op until
 * platform data is installed.  The RPM also does not support SMSM. */
static inline void notify_rpm_smd(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_RPM].smd;

	if (intr->out_base) {
		++interrupt_stats[SMD_RPM].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}
443
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530444static inline void notify_modem_smsm(void)
445{
446 static const struct interrupt_config_item *intr
447 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700448 if (intr->out_base) {
449 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530450 smd_write_intr(intr->out_bit_pos,
451 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700452 } else {
453 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530454 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700455 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530456}
457
458static inline void notify_dsp_smsm(void)
459{
460 static const struct interrupt_config_item *intr
461 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700462 if (intr->out_base) {
463 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530464 smd_write_intr(intr->out_bit_pos,
465 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700466 } else {
467 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530468 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700469 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530470}
471
472static inline void notify_dsps_smsm(void)
473{
474 static const struct interrupt_config_item *intr
475 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700476 if (intr->out_base) {
477 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530478 smd_write_intr(intr->out_bit_pos,
479 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700480 } else {
481 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530482 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700483 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530484}
485
486static inline void notify_wcnss_smsm(void)
487{
488 static const struct interrupt_config_item *intr
489 = &private_intr_config[SMD_WCNSS].smsm;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530490
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700491 if (intr->out_base) {
492 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530493 smd_write_intr(intr->out_bit_pos,
494 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700495 } else {
496 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530497 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700498 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530499}
500
/* Fan out an SMSM state change to every remote host whose interrupt
 * mask includes a bit in notify_mask, plus local apps-side callbacks.
 *
 * @smsm_entry:  index of the SMSM state entry that changed
 * @notify_mask: bits of the entry that changed
 *
 * NOTE(review): the modem check is deliberately inverted relative to
 * the others — when smsm_info.intr_mask is absent (older protocol)
 * the modem is always notified, since legacy firmware predates the
 * per-host mask table. */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		/* on QSD8x50 the Q6 additionally counts interrupts via a
		 * shared mux word; bump it before raising the interrupt */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
	     & notify_mask)) {
		smsm_cb_snapshot(0);
	}
}
549
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700550void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700551{
552 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700553 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700554
555 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
556 if (x != 0) {
557 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700558 SMD_INFO("smem: DIAG '%s'\n", x);
559 }
560
561 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
562 if (x != 0) {
563 x[size - 1] = 0;
564 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700565 }
566}
567
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700568
/* Terminal handler for a modem crash: log the shared-memory crash
 * details, then spin forever.  The infinite loop is intentional — the
 * modem or a hardware watchdog is expected to reset the system. */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
583
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700584int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700585{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700586 /* if the modem's not ready yet, we have to hope for the best */
587 if (!smsm_info.state)
588 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700589
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700590 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700591 handle_modem_crash();
592 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700593 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700594 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700595}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700596EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700597
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700598/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700599 * irq handler and code that mutates the channel
600 * list or fiddles with channel state
601 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700602static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700603DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700604
605/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700606 * operations to avoid races while creating or
607 * destroying smd_channel structures
608 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700609static DEFINE_MUTEX(smd_creation_mutex);
610
611static int smd_initialized;
612
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700613struct smd_shared_v1 {
614 struct smd_half_channel ch0;
615 unsigned char data0[SMD_BUF_SIZE];
616 struct smd_half_channel ch1;
617 unsigned char data1[SMD_BUF_SIZE];
618};
619
620struct smd_shared_v2 {
621 struct smd_half_channel ch0;
622 struct smd_half_channel ch1;
623};
624
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600625struct smd_shared_v2_word_access {
626 struct smd_half_channel_word_access ch0;
627 struct smd_half_channel_word_access ch1;
628};
629
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700630struct smd_channel {
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600631 volatile void *send; /* some variant of smd_half_channel */
632 volatile void *recv; /* some variant of smd_half_channel */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700633 unsigned char *send_data;
634 unsigned char *recv_data;
635 unsigned fifo_size;
636 unsigned fifo_mask;
637 struct list_head ch_list;
638
639 unsigned current_packet;
640 unsigned n;
641 void *priv;
642 void (*notify)(void *priv, unsigned flags);
643
644 int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
645 int (*write)(smd_channel_t *ch, const void *data, int len,
646 int user_buf);
647 int (*read_avail)(smd_channel_t *ch);
648 int (*write_avail)(smd_channel_t *ch);
649 int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
650 int user_buf);
651
652 void (*update_state)(smd_channel_t *ch);
653 unsigned last_state;
654 void (*notify_other_cpu)(void);
655
656 char name[20];
657 struct platform_device pdev;
658 unsigned type;
659
660 int pending_pkt_sz;
661
662 char is_pkt_ch;
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600663
664 /*
665 * private internal functions to access *send and *recv.
666 * never to be exported outside of smd
667 */
668 struct smd_half_channel_access *half_ch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700669};
670
671struct edge_to_pid {
672 uint32_t local_pid;
673 uint32_t remote_pid;
Eric Holmberg17992c12012-02-29 12:54:44 -0700674 char subsys_name[SMD_MAX_CH_NAME_LEN];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700675};
676
/**
 * Maps edge type to local and remote processor ID's.
 *
 * Indexed by SMD edge type.  Entries written without a subsys_name
 * initializer leave the name zero-filled; smd_edge_to_subsystem()
 * reports such edges as having no subsystem name available.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
701
/* Per-subsystem registration record for restart notifications. */
struct restart_notifier_block {
	unsigned processor;	/* remote processor ID for this subsystem */
	char *name;		/* subsystem name */
	struct notifier_block nb;	/* embedded notifier callback */
};
707
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

static LIST_HEAD(smd_ch_closed_list);
/* channels waiting for the remote side to finish the close handshake */
static LIST_HEAD(smd_ch_closing_list);
/* channels handed off to finalize_channel_close_work */
static LIST_HEAD(smd_ch_to_close_list);
/* open channels, bucketed by the remote processor on their edge */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one flag per slot of the shared-memory channel allocation table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
732
733static void smd_channel_probe_worker(struct work_struct *work)
734{
735 struct smd_alloc_elm *shared;
736 unsigned n;
737 uint32_t type;
738
739 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
740
741 if (!shared) {
742 pr_err("%s: allocation table not initialized\n", __func__);
743 return;
744 }
745
746 mutex_lock(&smd_probe_lock);
747 for (n = 0; n < 64; n++) {
748 if (smd_ch_allocated[n])
749 continue;
750
751 /* channel should be allocated only if APPS
752 processor is involved */
753 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600754 if (type >= ARRAY_SIZE(edge_to_pids) ||
755 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700756 continue;
757 if (!shared[n].ref_count)
758 continue;
759 if (!shared[n].name[0])
760 continue;
761
762 if (!smd_alloc_channel(&shared[n]))
763 smd_ch_allocated[n] = 1;
764 else
765 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
766 }
767 mutex_unlock(&smd_probe_lock);
768}
769
770/**
771 * Lookup processor ID and determine if it belongs to the proved edge
772 * type.
773 *
774 * @shared2: Pointer to v2 shared channel structure
775 * @type: Edge type
776 * @pid: Processor ID of processor on edge
777 * @local_ch: Channel that belongs to processor @pid
778 * @remote_ch: Other side of edge contained @pid
Jeff Hugo70a7e562012-09-07 11:24:32 -0600779 * @is_word_access_ch: Bool, is this a word aligned access channel
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700780 *
781 * Returns 0 for not on edge, 1 for found on edge
782 */
Jeff Hugo70a7e562012-09-07 11:24:32 -0600783static int pid_is_on_edge(void *shared2,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700784 uint32_t type, uint32_t pid,
Jeff Hugo70a7e562012-09-07 11:24:32 -0600785 void **local_ch,
786 void **remote_ch,
787 int is_word_access_ch
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700788 )
789{
790 int ret = 0;
791 struct edge_to_pid *edge;
Jeff Hugo70a7e562012-09-07 11:24:32 -0600792 void *ch0;
793 void *ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700794
795 *local_ch = 0;
796 *remote_ch = 0;
797
798 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
799 return 0;
800
Jeff Hugo70a7e562012-09-07 11:24:32 -0600801 if (is_word_access_ch) {
802 ch0 = &((struct smd_shared_v2_word_access *)(shared2))->ch0;
803 ch1 = &((struct smd_shared_v2_word_access *)(shared2))->ch1;
804 } else {
805 ch0 = &((struct smd_shared_v2 *)(shared2))->ch0;
806 ch1 = &((struct smd_shared_v2 *)(shared2))->ch1;
807 }
808
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700809 edge = &edge_to_pids[type];
810 if (edge->local_pid != edge->remote_pid) {
811 if (pid == edge->local_pid) {
Jeff Hugo70a7e562012-09-07 11:24:32 -0600812 *local_ch = ch0;
813 *remote_ch = ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700814 ret = 1;
815 } else if (pid == edge->remote_pid) {
Jeff Hugo70a7e562012-09-07 11:24:32 -0600816 *local_ch = ch1;
817 *remote_ch = ch0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700818 ret = 1;
819 }
820 }
821
822 return ret;
823}
824
Eric Holmberg17992c12012-02-29 12:54:44 -0700825/*
826 * Returns a pointer to the subsystem name or NULL if no
827 * subsystem name is available.
828 *
829 * @type - Edge definition
830 */
831const char *smd_edge_to_subsystem(uint32_t type)
832{
833 const char *subsys = NULL;
834
835 if (type < ARRAY_SIZE(edge_to_pids)) {
836 subsys = edge_to_pids[type].subsys_name;
837 if (subsys[0] == 0x0)
838 subsys = NULL;
839 }
840 return subsys;
841}
842EXPORT_SYMBOL(smd_edge_to_subsystem);
843
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700844/*
845 * Returns a pointer to the subsystem name given the
846 * remote processor ID.
847 *
848 * @pid Remote processor ID
849 * @returns Pointer to subsystem name or NULL if not found
850 */
851const char *smd_pid_to_subsystem(uint32_t pid)
852{
853 const char *subsys = NULL;
854 int i;
855
856 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
857 if (pid == edge_to_pids[i].remote_pid &&
858 edge_to_pids[i].subsys_name[0] != 0x0
859 ) {
860 subsys = edge_to_pids[i].subsys_name;
861 break;
862 }
863 }
864
865 return subsys;
866}
867EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700868
/*
 * Force one half-channel into @new_state during a subsystem restart.
 * Control flags are dropped and fSTATE raised so the surviving side
 * sees the transition; an already-CLOSED half-channel is left alone.
 *
 * @void_ch:           Half-channel to reset
 * @new_state:         State to move the half-channel into
 * @is_word_access_ch: Nonzero for the word-aligned-access layout
 */
static void smd_reset_edge(void *void_ch, unsigned new_state,
				int is_word_access_ch)
{
	if (is_word_access_ch) {
		struct smd_half_channel_word_access *ch =
			(struct smd_half_channel_word_access *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	} else {
		struct smd_half_channel *ch =
			(struct smd_half_channel *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700894
/*
 * Walk the channel allocation table and reset the half-channel owned
 * by @pid on every edge it participates in.
 *
 * @shared:    Channel allocation table in shared memory
 * @new_state: State to force the affected half-channels into
 * @pid:       Processor ID of the restarting processor
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	void *shared2;
	uint32_t type;
	void *local_ch;
	void *remote_ch;
	int is_word_access;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip table slots that were never allocated or named */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		is_word_access = is_word_access_ch(type);
		if (is_word_access)
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2_word_access));
		else
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch,
							is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch, is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);
	}
}
936
937
/*
 * Reset all SMD state shared with a restarting processor.
 *
 * Releases remote spinlocks the dead processor may still hold, clears
 * its SMSM state entry, then walks every edge it sits on twice --
 * first forcing its half-channels to CLOSING, then to CLOSED -- with
 * interrupts to all processors after each pass so everyone observes
 * the intermediate state.
 *
 * @restart_pid: Processor ID of the restarting subsystem
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();	/* publish state writes before interrupting anyone */
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();	/* publish state writes before interrupting anyone */
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
1006
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001007/* how many bytes are available for reading */
1008static int smd_stream_read_avail(struct smd_channel *ch)
1009{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001010 return (ch->half_ch->get_head(ch->recv) -
1011 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001012}
1013
1014/* how many bytes we are free to write */
1015static int smd_stream_write_avail(struct smd_channel *ch)
1016{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001017 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1018 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001019}
1020
1021static int smd_packet_read_avail(struct smd_channel *ch)
1022{
1023 if (ch->current_packet) {
1024 int n = smd_stream_read_avail(ch);
1025 if (n > ch->current_packet)
1026 n = ch->current_packet;
1027 return n;
1028 } else {
1029 return 0;
1030 }
1031}
1032
1033static int smd_packet_write_avail(struct smd_channel *ch)
1034{
1035 int n = smd_stream_write_avail(ch);
1036 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1037}
1038
1039static int ch_is_open(struct smd_channel *ch)
1040{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001041 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1042 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1043 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001044}
1045
1046/* provide a pointer and length to readable data in the fifo */
1047static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1048{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001049 unsigned head = ch->half_ch->get_head(ch->recv);
1050 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001051 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001052
1053 if (tail <= head)
1054 return head - tail;
1055 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001056 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001057}
1058
/* nonzero when the remote side asked us not to interrupt it on reads */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1063
/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
	/* the tail update must be visible before fTAIL is raised */
	wmb();
	ch->half_ch->set_fTAIL(ch->send, 1);
}
1073
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* n = contiguous readable run starting at the fifo tail */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				/*
				 * NOTE(review): a partial copy_to_user
				 * failure is only logged; the bytes are
				 * still consumed from the fifo below.
				 */
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	/* number of bytes consumed from the fifo */
	return orig_len - len;
}
1114
/* per-read bookkeeping hook; packet channels use update_packet_state() */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1119
/*
 * When no packet is in progress, consume packet headers from the
 * stream (discarding zero-length packets) until a non-empty packet is
 * found or a complete header is no longer available.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		/* hdr[0] holds the payload length (see smd_packet_write) */
		ch->current_packet = hdr[0];
	}
}
1139
/* provide a pointer and length to next free space in the fifo */
static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->half_ch->get_head(ch->send);
	unsigned tail = ch->half_ch->get_tail(ch->send);
	*ptr = (void *) (ch->send_data + head);

	if (head < tail) {
		return tail - head - 1;
	} else {
		/* one byte stays unused so head == tail means empty */
		if (tail == 0)
			return ch->fifo_size - head - 1;
		else
			return ch->fifo_size - head;
	}
}
1156
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* the data must land before the head update is published */
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1168
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001169static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001170{
1171 if (n == SMD_SS_OPENED) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001172 ch->half_ch->set_fDSR(ch->send, 1);
1173 ch->half_ch->set_fCTS(ch->send, 1);
1174 ch->half_ch->set_fCD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001175 } else {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001176 ch->half_ch->set_fDSR(ch->send, 0);
1177 ch->half_ch->set_fCTS(ch->send, 0);
1178 ch->half_ch->set_fCD(ch->send, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001179 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001180 ch->half_ch->set_state(ch->send, n);
1181 ch->half_ch->set_fSTATE(ch->send, 1);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001182 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001183}
1184
1185static void do_smd_probe(void)
1186{
1187 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1188 if (shared->heap_info.free_offset != last_heap_free) {
1189 last_heap_free = shared->heap_info.free_offset;
1190 schedule_work(&probe_work);
1191 }
1192}
1193
/*
 * React to a remote half-channel state transition by advancing our
 * half-channel through the SMD open/close handshake.  Callers in this
 * file invoke this with smd_lock held.
 *
 * @ch:   Channel whose remote state changed
 * @last: Previously observed remote state
 * @next: Newly observed remote state
 */
static void smd_state_change(struct smd_channel *ch,
			     unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* fresh open: reset fifo indices before answering */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* both sides OPENED completes the open handshake */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed first: answer with CLOSING and drop any
		 * partially received packet
		 */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* our close is now acknowledged; finish it from the
		 * dedicated workqueue
		 */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}
1239
/*
 * Service state-change events for channels that are waiting on the
 * remote side to complete the close handshake.
 */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		/* acknowledge the remote's state-change flag */
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1257
/*
 * Service one edge's open-channel list: acknowledge the remote event
 * flags, propagate state changes, and deliver DATA/STATUS events to
 * channel owners.
 *
 * @list:   Per-edge list of open channels
 * @notify: Function that interrupts the remote processor on this edge
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* ch_flags bit 0: remote wrote data (fHEAD),
			 * bit 1: remote freed space (fTAIL),
			 * bit 2: remote changed state (fSTATE)
			 */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* suppress STATUS if the state-change path already ran */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1308
/* interrupt from the modem signalling SMD activity toward apps */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1317
/* interrupt from the LPASS Q6 signalling SMD activity toward apps */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001326
/* interrupt from the sensors DSPS signalling SMD activity toward apps */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001335
/* interrupt from WCNSS signalling SMD activity toward apps */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1344
/* interrupt from the RPM signalling SMD activity toward apps */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int RPM->Apps\n");
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001353
/*
 * Tasklet body: service every edge as if its interrupt had fired.
 * Used to pick up events that may have arrived while hardware
 * interrupts were not delivered (see smd_sleep_exit()).
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}
1363
1364static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1365
Brian Swetland37521a32009-07-01 18:30:47 -07001366static inline int smd_need_int(struct smd_channel *ch)
1367{
1368 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001369 if (ch->half_ch->get_fHEAD(ch->recv) ||
1370 ch->half_ch->get_fTAIL(ch->recv) ||
1371 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001372 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001373 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001374 return 1;
1375 }
1376 return 0;
1377}
1378
/*
 * Called on exit from sleep: if any open channel accumulated remote
 * activity while interrupts were not being delivered, schedule the
 * fake-irq tasklet to service it.
 *
 * NOTE(review): channels on smd_ch_list_rpm are not scanned here even
 * though smd_fake_irq_handler() services that list -- confirm the RPM
 * edge is intentionally excluded from the sleep-exit check.
 */
void smd_sleep_exit(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	int need_int = 0;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();

	if (need_int) {
		SMD_DBG("smd_sleep_exit need interrupt\n");
		tasklet_schedule(&smd_fake_irq_tasklet);
	}
}
EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001419
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001420static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001421{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001422 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1423 return 0;
1424 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001425 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001426
1427 /* for cases where xfer type is 0 */
1428 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001429 return 0;
1430
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001431 /* for cases where xfer type is 0 */
1432 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1433 return 0;
1434
1435 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001436 return 1;
1437 else
1438 return 0;
1439}
1440
/*
 * Copy up to @len bytes into the channel's send fifo and interrupt the
 * remote side if anything was queued.
 *
 * @ch:       Channel to write to
 * @_data:    Source buffer (kernel or user space)
 * @len:      Number of bytes to write
 * @user_buf: Nonzero if @_data is a user-space pointer
 *
 * Returns the number of bytes queued (may be short if the fifo fills),
 * 0 if the channel stopped being open mid-write, or -EINVAL.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
					int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			/* channel dropped mid-write: report 0 bytes */
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			/*
			 * NOTE(review): a partial copy_from_user failure
			 * is only logged; the write pointer still
			 * advances by the full chunk below.
			 */
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1486
/*
 * Write one complete SMD packet: an SMD_HEADER_SIZE header carrying
 * the payload length, followed by the payload.  Space for header plus
 * payload is verified up front so the write is all-or-nothing.
 *
 * @ch:       Channel to write to
 * @_data:    Payload buffer (kernel or user space)
 * @len:      Payload length
 * @user_buf: Nonzero if @_data is a user-space pointer
 *
 * Returns @len on success, 0 for a zero-length request, -EINVAL for a
 * negative length, -ENOMEM when the fifo lacks room.
 * NOTE(review): a header-write failure returns bare -1 rather than a
 * proper errno value -- confirm callers only test for < 0.
 */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
					int user_buf)
{
	int ret;
	unsigned hdr[5];

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}


	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}
1523
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001524static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001525{
1526 int r;
1527
1528 if (len < 0)
1529 return -EINVAL;
1530
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001531 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001532 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001533 if (!read_intr_blocked(ch))
1534 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001535
1536 return r;
1537}
1538
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001539static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001540{
1541 unsigned long flags;
1542 int r;
1543
1544 if (len < 0)
1545 return -EINVAL;
1546
1547 if (len > ch->current_packet)
1548 len = ch->current_packet;
1549
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001550 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001551 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001552 if (!read_intr_blocked(ch))
1553 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001554
1555 spin_lock_irqsave(&smd_lock, flags);
1556 ch->current_packet -= r;
1557 update_packet_state(ch);
1558 spin_unlock_irqrestore(&smd_lock, flags);
1559
1560 return r;
1561}
1562
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001563static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1564 int user_buf)
1565{
1566 int r;
1567
1568 if (len < 0)
1569 return -EINVAL;
1570
1571 if (len > ch->current_packet)
1572 len = ch->current_packet;
1573
1574 r = ch_read(ch, data, len, user_buf);
1575 if (r > 0)
1576 if (!read_intr_blocked(ch))
1577 ch->notify_other_cpu();
1578
1579 ch->current_packet -= r;
1580 update_packet_state(ch);
1581
1582 return r;
1583}
1584
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301585#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001586static int smd_alloc_v2(struct smd_channel *ch)
1587{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001588 void *buffer;
1589 unsigned buffer_sz;
1590
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001591 if (is_word_access_ch(ch->type)) {
1592 struct smd_shared_v2_word_access *shared2;
1593 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1594 sizeof(*shared2));
1595 if (!shared2) {
1596 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1597 return -EINVAL;
1598 }
1599 ch->send = &shared2->ch0;
1600 ch->recv = &shared2->ch1;
1601 } else {
1602 struct smd_shared_v2 *shared2;
1603 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1604 sizeof(*shared2));
1605 if (!shared2) {
1606 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1607 return -EINVAL;
1608 }
1609 ch->send = &shared2->ch0;
1610 ch->recv = &shared2->ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001611 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001612 ch->half_ch = get_half_ch_funcs(ch->type);
1613
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001614 buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
1615 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301616 SMD_INFO("smem_get_entry failed\n");
1617 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001618 }
1619
1620 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301621 if (buffer_sz & (buffer_sz - 1)) {
1622 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1623 return -EINVAL;
1624 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001625 buffer_sz /= 2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001626 ch->send_data = buffer;
1627 ch->recv_data = buffer + buffer_sz;
1628 ch->fifo_size = buffer_sz;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001629
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001630 return 0;
1631}
1632
/* v1 channel layout is not supported when the PKG3/PKG4 (v2) shared
 * layouts are compiled in; always fail so only smd_alloc_v2() is used. */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}
1637
1638#else /* define v1 for older targets */
/* v2 shared layout is unavailable on older targets; fail so the caller
 * falls back to smd_alloc_v1(). */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}
1643
/* Bind @ch to the legacy (v1) layout: a single smem item holds both
 * half-channel control blocks and both fixed-size data FIFOs.
 * Returns 0 on success, -EINVAL if the smem item does not exist. */
static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	ch->half_ch = get_half_ch_funcs(ch->type);
	return 0;
}
1660
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301661#endif
1662
/*
 * smd_alloc_channel() - instantiate a local channel object for an entry
 * found in the shared-memory channel allocation table.
 * @alloc_elm: allocation-table entry (cid, edge type, name)
 *
 * Allocates the bookkeeping struct, binds it to its shared-memory rings
 * (v2 layout first, legacy v1 as fallback), picks the per-edge remote
 * interrupt helper and the packet vs. stream operation set, parks the
 * channel on the closed list, and registers a platform device so client
 * drivers can probe against the channel name.
 *
 * Returns 0 on success, -1 on allocation or smem-lookup failure.
 */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	/* try the newer shared layout first, then the legacy one */
	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	/* fifo_size is a power of two, so the mask wraps ring indices */
	ch->fifo_mask = ch->fifo_size - 1;

	/* probe_worker guarentees ch->type will be a valid type */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else if (ch->type == SMD_APPS_WCNSS)
		ch->notify_other_cpu = notify_wcnss_smd;
	else if (ch->type == SMD_APPS_RPM)
		ch->notify_other_cpu = notify_rpm_smd;

	/* select packet- or stream-mode operations per the alloc table */
	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	/* alloc-table names need not be terminated; force termination */
	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}
1734
1735static inline void notify_loopback_smd(void)
1736{
1737 unsigned long flags;
1738 struct smd_channel *ch;
1739
1740 spin_lock_irqsave(&smd_lock, flags);
1741 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1742 ch->notify(ch->priv, SMD_EVENT_DATA);
1743 }
1744 spin_unlock_irqrestore(&smd_lock, flags);
1745}
1746
1747static int smd_alloc_loopback_channel(void)
1748{
1749 static struct smd_half_channel smd_loopback_ctl;
1750 static char smd_loopback_data[SMD_BUF_SIZE];
1751 struct smd_channel *ch;
1752
1753 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1754 if (ch == 0) {
1755 pr_err("%s: out of memory\n", __func__);
1756 return -1;
1757 }
1758 ch->n = SMD_LOOPBACK_CID;
1759
1760 ch->send = &smd_loopback_ctl;
1761 ch->recv = &smd_loopback_ctl;
1762 ch->send_data = smd_loopback_data;
1763 ch->recv_data = smd_loopback_data;
1764 ch->fifo_size = SMD_BUF_SIZE;
1765
1766 ch->fifo_mask = ch->fifo_size - 1;
1767 ch->type = SMD_LOOPBACK_TYPE;
1768 ch->notify_other_cpu = notify_loopback_smd;
1769
1770 ch->read = smd_stream_read;
1771 ch->write = smd_stream_write;
1772 ch->read_avail = smd_stream_read_avail;
1773 ch->write_avail = smd_stream_write_avail;
1774 ch->update_state = update_stream_state;
1775 ch->read_from_cb = smd_stream_read;
1776
1777 memset(ch->name, 0, 20);
1778 memcpy(ch->name, "local_loopback", 14);
1779
1780 ch->pdev.name = ch->name;
1781 ch->pdev.id = ch->type;
1782
1783 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001784
1785 mutex_lock(&smd_creation_mutex);
1786 list_add(&ch->ch_list, &smd_ch_closed_list);
1787 mutex_unlock(&smd_creation_mutex);
1788
1789 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001790 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001791}
1792
/* No-op notify callback installed on channels that are closed or opened
 * without a callback, so ch->notify is always safe to invoke. */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1796
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001797static void finalize_channel_close_fn(struct work_struct *work)
1798{
1799 unsigned long flags;
1800 struct smd_channel *ch;
1801 struct smd_channel *index;
1802
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001803 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001804 spin_lock_irqsave(&smd_lock, flags);
1805 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1806 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001807 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001808 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1809 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001810 }
1811 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001812 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001813}
1814
1815struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001816{
1817 struct smd_channel *ch;
1818
1819 mutex_lock(&smd_creation_mutex);
1820 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001821 if (!strcmp(name, ch->name) &&
1822 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001823 list_del(&ch->ch_list);
1824 mutex_unlock(&smd_creation_mutex);
1825 return ch;
1826 }
1827 }
1828 mutex_unlock(&smd_creation_mutex);
1829
1830 return NULL;
1831}
1832
/*
 * smd_named_open_on_edge() - open an SMD channel by name on a given edge.
 * @name:   channel name from the shared allocation table
 * @edge:   edge type (SMD_APPS_MODEM, ..., SMD_LOOPBACK_TYPE)
 * @_ch:    out-parameter receiving the opened channel handle
 * @priv:   opaque cookie passed back to @notify
 * @notify: event callback (may be NULL; a no-op is substituted)
 *
 * Claims the channel from the closed list, installs the callback, adds it
 * to the per-edge open list and kicks the state machine toward OPENING
 * (loopback channels are forced straight to OPENED locally).
 *
 * Returns 0 on success, -ENODEV if SMD is uninitialized or the channel
 * does not exist, -EAGAIN if the channel is still being closed.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side, so open it synchronously here */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	/* queue on the per-edge open list so the edge's IRQ handler sees it */
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1921
1922
1923int smd_open(const char *name, smd_channel_t **_ch,
1924 void *priv, void (*notify)(void *, unsigned))
1925{
1926 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1927 notify);
1928}
1929EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001930
/*
 * smd_close() - close a channel opened via smd_open()/smd_named_open_on_edge().
 *
 * Removes the channel from its per-edge open list and signals CLOSED to the
 * remote side (loopback flags are cleared directly, since no remote exists).
 * If the remote half is still OPENED the channel is parked on the closing
 * list until the remote acknowledges; otherwise it goes straight back onto
 * the closed list and is immediately reusable.
 *
 * Returns 0 on success, -1 if @ch is NULL.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	/* remote still open: defer reuse until it acknowledges the close */
	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
1964
1965int smd_write_start(smd_channel_t *ch, int len)
1966{
1967 int ret;
1968 unsigned hdr[5];
1969
1970 if (!ch) {
1971 pr_err("%s: Invalid channel specified\n", __func__);
1972 return -ENODEV;
1973 }
1974 if (!ch->is_pkt_ch) {
1975 pr_err("%s: non-packet channel specified\n", __func__);
1976 return -EACCES;
1977 }
1978 if (len < 1) {
1979 pr_err("%s: invalid length: %d\n", __func__, len);
1980 return -EINVAL;
1981 }
1982
1983 if (ch->pending_pkt_sz) {
1984 pr_err("%s: packet of size: %d in progress\n", __func__,
1985 ch->pending_pkt_sz);
1986 return -EBUSY;
1987 }
1988 ch->pending_pkt_sz = len;
1989
1990 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
1991 ch->pending_pkt_sz = 0;
1992 SMD_DBG("%s: no space to write packet header\n", __func__);
1993 return -EAGAIN;
1994 }
1995
1996 hdr[0] = len;
1997 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1998
1999
2000 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
2001 if (ret < 0 || ret != sizeof(hdr)) {
2002 ch->pending_pkt_sz = 0;
2003 pr_err("%s: packet header failed to write\n", __func__);
2004 return -EPERM;
2005 }
2006 return 0;
2007}
2008EXPORT_SYMBOL(smd_write_start);
2009
2010int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
2011{
2012 int bytes_written;
2013
2014 if (!ch) {
2015 pr_err("%s: Invalid channel specified\n", __func__);
2016 return -ENODEV;
2017 }
2018 if (len < 1) {
2019 pr_err("%s: invalid length: %d\n", __func__, len);
2020 return -EINVAL;
2021 }
2022
2023 if (!ch->pending_pkt_sz) {
2024 pr_err("%s: no transaction in progress\n", __func__);
2025 return -ENOEXEC;
2026 }
2027 if (ch->pending_pkt_sz - len < 0) {
2028 pr_err("%s: segment of size: %d will make packet go over "
2029 "length\n", __func__, len);
2030 return -EINVAL;
2031 }
2032
2033 bytes_written = smd_stream_write(ch, data, len, user_buf);
2034
2035 ch->pending_pkt_sz -= bytes_written;
2036
2037 return bytes_written;
2038}
2039EXPORT_SYMBOL(smd_write_segment);
2040
2041int smd_write_end(smd_channel_t *ch)
2042{
2043
2044 if (!ch) {
2045 pr_err("%s: Invalid channel specified\n", __func__);
2046 return -ENODEV;
2047 }
2048 if (ch->pending_pkt_sz) {
2049 pr_err("%s: current packet not completely written\n", __func__);
2050 return -E2BIG;
2051 }
2052
2053 return 0;
2054}
2055EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002056
2057int smd_read(smd_channel_t *ch, void *data, int len)
2058{
Jack Pham1b236d12012-03-19 15:27:18 -07002059 if (!ch) {
2060 pr_err("%s: Invalid channel specified\n", __func__);
2061 return -ENODEV;
2062 }
2063
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002064 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002065}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002066EXPORT_SYMBOL(smd_read);
2067
2068int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2069{
Jack Pham1b236d12012-03-19 15:27:18 -07002070 if (!ch) {
2071 pr_err("%s: Invalid channel specified\n", __func__);
2072 return -ENODEV;
2073 }
2074
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002075 return ch->read(ch, data, len, 1);
2076}
2077EXPORT_SYMBOL(smd_read_user_buffer);
2078
2079int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2080{
Jack Pham1b236d12012-03-19 15:27:18 -07002081 if (!ch) {
2082 pr_err("%s: Invalid channel specified\n", __func__);
2083 return -ENODEV;
2084 }
2085
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002086 return ch->read_from_cb(ch, data, len, 0);
2087}
2088EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002089
2090int smd_write(smd_channel_t *ch, const void *data, int len)
2091{
Jack Pham1b236d12012-03-19 15:27:18 -07002092 if (!ch) {
2093 pr_err("%s: Invalid channel specified\n", __func__);
2094 return -ENODEV;
2095 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002096
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002097 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002098}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002099EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002100
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002101int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002102{
Jack Pham1b236d12012-03-19 15:27:18 -07002103 if (!ch) {
2104 pr_err("%s: Invalid channel specified\n", __func__);
2105 return -ENODEV;
2106 }
2107
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002108 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002109}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002110EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002111
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002112int smd_read_avail(smd_channel_t *ch)
2113{
Jack Pham1b236d12012-03-19 15:27:18 -07002114 if (!ch) {
2115 pr_err("%s: Invalid channel specified\n", __func__);
2116 return -ENODEV;
2117 }
2118
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002119 return ch->read_avail(ch);
2120}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002121EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002122
2123int smd_write_avail(smd_channel_t *ch)
2124{
Jack Pham1b236d12012-03-19 15:27:18 -07002125 if (!ch) {
2126 pr_err("%s: Invalid channel specified\n", __func__);
2127 return -ENODEV;
2128 }
2129
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002130 return ch->write_avail(ch);
2131}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002132EXPORT_SYMBOL(smd_write_avail);
2133
2134void smd_enable_read_intr(smd_channel_t *ch)
2135{
2136 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002137 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002138}
2139EXPORT_SYMBOL(smd_enable_read_intr);
2140
2141void smd_disable_read_intr(smd_channel_t *ch)
2142{
2143 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002144 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002145}
2146EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002147
/**
 * Enable/disable receive interrupts for the remote processor used by a
 * particular channel.
 * @ch: open channel handle to use for the edge
 * @mask: 1 = mask interrupts; 0 = unmask interrupts
 * @returns: 0 for success; < 0 for failure
 *
 * Note that this enables/disables all interrupts from the remote subsystem for
 * all channels. As such, it should be used with care and only for specific
 * use cases such as power-collapse sequencing.
 */
int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
{
	struct irq_chip *irq_chip;
	struct irq_data *irq_data;
	struct interrupt_config_item *int_cfg;

	if (!ch)
		return -EINVAL;

	/* ch->type indexes the edge table; reject out-of-range edges */
	if (ch->type >= ARRAY_SIZE(edge_to_pids))
		return -ENODEV;

	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;

	/* a negative irq_id means no SMD interrupt is wired for this edge */
	if (int_cfg->irq_id < 0)
		return -ENODEV;

	irq_chip = irq_get_chip(int_cfg->irq_id);
	if (!irq_chip)
		return -ENODEV;

	irq_data = irq_get_irq_data(int_cfg->irq_id);
	if (!irq_data)
		return -ENODEV;

	if (mask) {
		SMx_POWER_INFO("SMD Masking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_mask(irq_data);
	} else {
		SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_unmask(irq_data);
	}

	return 0;
}
EXPORT_SYMBOL(smd_mask_receive_interrupt);
2197
/* Blocking wait is not implemented; always returns -1. Callers should
 * poll smd_read_avail() or rely on SMD_EVENT_DATA notifications. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2202
/* Blocking wait is not implemented; always returns -1. Callers should
 * poll smd_write_avail() or rely on notification callbacks. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2207
2208int smd_cur_packet_size(smd_channel_t *ch)
2209{
Jack Pham1b236d12012-03-19 15:27:18 -07002210 if (!ch) {
2211 pr_err("%s: Invalid channel specified\n", __func__);
2212 return -ENODEV;
2213 }
2214
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002215 return ch->current_packet;
2216}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002217EXPORT_SYMBOL(smd_cur_packet_size);
2218
2219int smd_tiocmget(smd_channel_t *ch)
2220{
Jack Pham1b236d12012-03-19 15:27:18 -07002221 if (!ch) {
2222 pr_err("%s: Invalid channel specified\n", __func__);
2223 return -ENODEV;
2224 }
2225
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002226 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2227 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2228 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2229 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2230 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2231 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002232}
2233EXPORT_SYMBOL(smd_tiocmget);
2234
/* this api will be called while holding smd_lock */
/*
 * Apply TIOCM_* set/clear masks to our signalling flags and notify the
 * remote processor. TIOCM_DTR maps to fDSR and TIOCM_RTS to fCTS on the
 * send half-channel; fSTATE flags the change for the remote state machine.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	ch->half_ch->set_fSTATE(ch->send, 1);
	/* make the flag updates visible before interrupting the remote */
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2263
/*
 * smd_tiocmset() - process-context wrapper for smd_tiocmset_from_cb().
 * Takes smd_lock (required by the _from_cb variant) around the update.
 * Returns 0 on success, -ENODEV if @ch is NULL.
 */
int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	unsigned long flags;

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	spin_lock_irqsave(&smd_lock, flags);
	smd_tiocmset_from_cb(ch, set, clear);
	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002280
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002281int smd_is_pkt_avail(smd_channel_t *ch)
2282{
Jeff Hugoa8549f12012-08-13 20:36:18 -06002283 unsigned long flags;
2284
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002285 if (!ch || !ch->is_pkt_ch)
2286 return -EINVAL;
2287
2288 if (ch->current_packet)
2289 return 1;
2290
Jeff Hugoa8549f12012-08-13 20:36:18 -06002291 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002292 update_packet_state(ch);
Jeff Hugoa8549f12012-08-13 20:36:18 -06002293 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002294
2295 return ch->current_packet ? 1 : 0;
2296}
2297EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002298
2299
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002300/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002301
/*
 * Shared Memory Range Check
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the aux smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL. Expects the array
 * of smem regions to be in ascending physical address order.
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 */
static void *smem_range_check(void *base, unsigned offset)
{
	int i;
	void *phys_addr;
	unsigned size;

	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;
		/* areas are sorted ascending: once base is below this area's
		 * start it cannot be in any later area either */
		if (base < phys_addr)
			return NULL;
		if (base > phys_addr + size)
			continue;
		/* NOTE(review): the translation below is virt_addr + offset,
		 * which is only correct when base == phys_addr of the
		 * matching area -- confirm callers pass area base addresses
		 * only. (void* arithmetic here is the GCC extension the
		 * kernel relies on.) */
		if (base >= phys_addr && base + offset < phys_addr + size)
			return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}
2332
/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.
 *
 * Pure lookup: unlike smem_alloc2(), this never allocates from the
 * shared heap; it just delegates to smem_find().
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002341
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002342/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
2343 * it allocates it and then returns the pointer to it.
2344 */
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302345void *smem_alloc2(unsigned id, unsigned size_in)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002346{
2347 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2348 struct smem_heap_entry *toc = shared->heap_toc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002349 unsigned long flags;
2350 void *ret = NULL;
2351
2352 if (!shared->heap_info.initialized) {
2353 pr_err("%s: smem heap info not initialized\n", __func__);
2354 return NULL;
2355 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002356
2357 if (id >= SMEM_NUM_ITEMS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002358 return NULL;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002359
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002360 size_in = ALIGN(size_in, 8);
2361 remote_spin_lock_irqsave(&remote_spinlock, flags);
2362 if (toc[id].allocated) {
2363 SMD_DBG("%s: %u already allocated\n", __func__, id);
2364 if (size_in != toc[id].size)
2365 pr_err("%s: wrong size %u (expected %u)\n",
2366 __func__, toc[id].size, size_in);
2367 else
2368 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2369 } else if (id > SMEM_FIXED_ITEM_LAST) {
2370 SMD_DBG("%s: allocating %u\n", __func__, id);
2371 if (shared->heap_info.heap_remaining >= size_in) {
2372 toc[id].offset = shared->heap_info.free_offset;
2373 toc[id].size = size_in;
2374 wmb();
2375 toc[id].allocated = 1;
2376
2377 shared->heap_info.free_offset += size_in;
2378 shared->heap_info.heap_remaining -= size_in;
2379 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2380 } else
2381 pr_err("%s: not enough memory %u (required %u)\n",
2382 __func__, shared->heap_info.heap_remaining,
2383 size_in);
2384 }
2385 wmb();
2386 remote_spin_unlock_irqrestore(&remote_spinlock, flags);
2387 return ret;
2388}
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302389EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002390
/* smem_get_entry - look up an allocated SMEM item and report its size.
 *
 * @id:   SMEM item identifier
 * @size: out parameter; set to the item's size, or 0 if unallocated
 *
 * Returns the item's virtual address, or 0 when the id is invalid, the
 * item is unallocated, or an aux-region address fails the range check.
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	/* early-boot callers may arrive before the remote spinlock is
	 * initialized; skip locking in that window */
	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		/* a nonzero reserved base means the item lives in an aux
		 * smem area rather than the main shared RAM region */
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002423
/* smem_find - look up an allocated SMEM item, validating its size.
 *
 * @id:      SMEM item identifier
 * @size_in: expected size in bytes (rounded up to an 8-byte multiple
 *           before comparison)
 *
 * Returns the item's virtual address, or 0 if the item is not allocated
 * or its recorded size does not match the aligned expected size.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned size;
	void *ptr;

	ptr = smem_get_entry(id, &size);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		/* %u matches the unsigned arguments (was %d) */
		pr_err("smem_find(%u, %u): wrong size %u\n",
		       id, size_in, size);
		return 0;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);
2443
/* smsm_cb_init - allocate per-entry SMSM callback state and the worker
 * queue used to deliver state-change notifications.
 *
 * Returns 0 on success, -ENOMEM if the state array cannot be allocated,
 * or -EFAULT if the workqueue cannot be created.
 */
static int smsm_cb_init(void)
{
	struct smsm_state_info *state_info;
	int n;
	int ret = 0;

	smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
		   GFP_KERNEL);

	if (!smsm_states) {
		pr_err("%s: SMSM init failed\n", __func__);
		return -ENOMEM;
	}

	smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
	if (!smsm_cb_wq) {
		pr_err("%s: smsm_cb_wq creation failed\n", __func__);
		kfree(smsm_states);
		return -EFAULT;
	}

	/* seed each entry's cached value from the live shared-memory state
	 * so the first notification only reports real changes */
	mutex_lock(&smsm_lock);
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		state_info = &smsm_states[n];
		state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
		state_info->intr_mask_set = 0x0;
		state_info->intr_mask_clear = 0x0;
		INIT_LIST_HEAD(&state_info->callbacks);
	}
	mutex_unlock(&smsm_lock);

	return ret;
}
2477
/* smsm_init - one-time SMSM bring-up.
 *
 * Initializes the remote spinlock, sizes the SMSM tables from shared
 * memory (when the size-info item exists), allocates the snapshot FIFO
 * and wakelock, claims/clears the shared state and interrupt-mask items,
 * and finally initializes the callback machinery.
 *
 * Returns 0 on success or a negative errno from the failing step.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;

	i = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
	if (i) {
		pr_err("%s: remote spinlock init failed %d\n", __func__, i);
		return i;
	}
	spinlocks_initialized = 1;

	/* newer firmware publishes the table dimensions; fall back to the
	 * compile-time defaults when the item is absent */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	/* FIFO holds SMSM_SNAPSHOT_CNT full snapshots of all entries */
	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			/* modem protocol >= 0xB also uses the DEM entry */
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			/* start with all interrupts masked for apps ... */
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	/* ensure all shared-memory writes land before announcing init */
	wmb();
	smsm_driver_state_notify(SMSM_INIT, NULL);
	return 0;
}
2552
2553void smsm_reset_modem(unsigned mode)
2554{
2555 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2556 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2557 } else if (mode == SMSM_MODEM_WAIT) {
2558 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2559 } else { /* reset_mode is SMSM_RESET or default */
2560 mode = SMSM_RESET;
2561 }
2562
2563 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2564}
2565EXPORT_SYMBOL(smsm_reset_modem);
2566
/* smsm_reset_modem_cont - allow a waiting modem to continue its reset by
 * clearing SMSM_MODEM_WAIT from the apps SMSM state.
 *
 * No-op if the shared state item has not been allocated yet.
 */
void smsm_reset_modem_cont(void)
{
	unsigned long flags;
	uint32_t state;

	if (!smsm_info.state)
		return;

	/* read-modify-write of the shared word under smem_lock */
	spin_lock_irqsave(&smem_lock, flags);
	state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
						& ~SMSM_MODEM_WAIT;
	__raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
	/* flush the write before releasing the lock */
	wmb();
	spin_unlock_irqrestore(&smem_lock, flags);
}
EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002583
Eric Holmbergda31d042012-03-28 14:01:02 -06002584static void smsm_cb_snapshot(uint32_t use_wakelock)
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002585{
2586 int n;
2587 uint32_t new_state;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002588 unsigned long flags;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002589 int ret;
2590
2591 ret = kfifo_avail(&smsm_snapshot_fifo);
Eric Holmbergda31d042012-03-28 14:01:02 -06002592 if (ret < SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002593 pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
2594 return;
2595 }
2596
Eric Holmberg96b55f62012-04-03 19:10:46 -06002597 /*
2598 * To avoid a race condition with notify_smsm_cb_clients_worker, the
2599 * following sequence must be followed:
2600 * 1) increment snapshot count
2601 * 2) insert data into FIFO
2602 *
2603 * Potentially in parallel, the worker:
2604 * a) verifies >= 1 snapshots are in FIFO
2605 * b) processes snapshot
2606 * c) decrements reference count
2607 *
2608 * This order ensures that 1 will always occur before abc.
2609 */
Eric Holmbergda31d042012-03-28 14:01:02 -06002610 if (use_wakelock) {
2611 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2612 if (smsm_snapshot_count == 0) {
2613 SMx_POWER_INFO("SMSM snapshot wake lock\n");
2614 wake_lock(&smsm_snapshot_wakelock);
2615 }
2616 ++smsm_snapshot_count;
2617 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2618 }
Eric Holmberg96b55f62012-04-03 19:10:46 -06002619
2620 /* queue state entries */
2621 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2622 new_state = __raw_readl(SMSM_STATE_ADDR(n));
2623
2624 ret = kfifo_in(&smsm_snapshot_fifo,
2625 &new_state, sizeof(new_state));
2626 if (ret != sizeof(new_state)) {
2627 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2628 goto restore_snapshot_count;
2629 }
2630 }
2631
2632 /* queue wakelock usage flag */
2633 ret = kfifo_in(&smsm_snapshot_fifo,
2634 &use_wakelock, sizeof(use_wakelock));
2635 if (ret != sizeof(use_wakelock)) {
2636 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2637 goto restore_snapshot_count;
2638 }
2639
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002640 queue_work(smsm_cb_wq, &smsm_cb_work);
Eric Holmberg96b55f62012-04-03 19:10:46 -06002641 return;
2642
2643restore_snapshot_count:
2644 if (use_wakelock) {
2645 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2646 if (smsm_snapshot_count) {
2647 --smsm_snapshot_count;
2648 if (smsm_snapshot_count == 0) {
2649 SMx_POWER_INFO("SMSM snapshot wake unlock\n");
2650 wake_unlock(&smsm_snapshot_wakelock);
2651 }
2652 } else {
2653 pr_err("%s: invalid snapshot count\n", __func__);
2654 }
2655 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2656 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002657}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002658
/* smsm_irq_handler - common SMSM interrupt service routine.
 *
 * For the ADSP interrupt (INT_ADSP_A11_SMSM) it only snapshots state for
 * the callback worker.  For all other sources it additionally runs the
 * legacy modem handshake state machine (reset ack, init/run progression,
 * system download) against the apps SMSM entry, then snapshots.
 *
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* track Q6->apps mux changes on 8x50 targets */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				/* ack the reset and flush caches so RAM is
				 * consistent for post-mortem/ramdump */
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			/* advance to RUN once INIT/SMDINIT/RPCINIT all set */
			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			/* publish the new apps state and ping the modem */
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2741
Eric Holmberg98c6c642012-02-24 11:29:35 -07002742static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002743{
Eric Holmberg98c6c642012-02-24 11:29:35 -07002744 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002745 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002746 return smsm_irq_handler(irq, data);
2747}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002748
Eric Holmberg98c6c642012-02-24 11:29:35 -07002749static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2750{
2751 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002752 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002753 return smsm_irq_handler(irq, data);
2754}
2755
/* per-source wrapper: log + count the DSPS SMSM interrupt, then defer
 * to the common handler */
static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2762
/* per-source wrapper: log + count the WCNSS SMSM interrupt, then defer
 * to the common handler */
static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2769
Eric Holmberge8a39322012-04-03 15:14:02 -06002770/*
2771 * Changes the global interrupt mask. The set and clear masks are re-applied
2772 * every time the global interrupt mask is updated for callback registration
2773 * and de-registration.
2774 *
2775 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2776 * mask and the set mask, the result will be that the interrupt is set.
2777 *
2778 * @smsm_entry SMSM entry to change
2779 * @clear_mask 1 = clear bit, 0 = no-op
2780 * @set_mask 1 = set bit, 0 = no-op
2781 *
2782 * @returns 0 for success, < 0 for error
2783 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002784int smsm_change_intr_mask(uint32_t smsm_entry,
2785 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002786{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002787 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002788 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002789
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002790 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2791 pr_err("smsm_change_state: Invalid entry %d\n",
2792 smsm_entry);
2793 return -EINVAL;
2794 }
2795
2796 if (!smsm_info.intr_mask) {
2797 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002798 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002799 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002800
2801 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002802 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2803 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002804
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002805 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2806 new_mask = (old_mask & ~clear_mask) | set_mask;
2807 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002808
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002809 wmb();
2810 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002811
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002812 return 0;
2813}
2814EXPORT_SYMBOL(smsm_change_intr_mask);
2815
2816int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2817{
2818 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2819 pr_err("smsm_change_state: Invalid entry %d\n",
2820 smsm_entry);
2821 return -EINVAL;
2822 }
2823
2824 if (!smsm_info.intr_mask) {
2825 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2826 return -EIO;
2827 }
2828
2829 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2830 return 0;
2831}
2832EXPORT_SYMBOL(smsm_get_intr_mask);
2833
2834int smsm_change_state(uint32_t smsm_entry,
2835 uint32_t clear_mask, uint32_t set_mask)
2836{
2837 unsigned long flags;
2838 uint32_t old_state, new_state;
2839
2840 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2841 pr_err("smsm_change_state: Invalid entry %d",
2842 smsm_entry);
2843 return -EINVAL;
2844 }
2845
2846 if (!smsm_info.state) {
2847 pr_err("smsm_change_state <SM NO STATE>\n");
2848 return -EIO;
2849 }
2850 spin_lock_irqsave(&smem_lock, flags);
2851
2852 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2853 new_state = (old_state & ~clear_mask) | set_mask;
2854 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2855 SMSM_DBG("smsm_change_state %x\n", new_state);
2856 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002857
2858 spin_unlock_irqrestore(&smem_lock, flags);
2859
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002860 return 0;
2861}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002862EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002863
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002864uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002865{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002866 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002867
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002868 /* needs interface change to return error code */
2869 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2870 pr_err("smsm_change_state: Invalid entry %d",
2871 smsm_entry);
2872 return 0;
2873 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002874
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002875 if (!smsm_info.state) {
2876 pr_err("smsm_get_state <SM NO STATE>\n");
2877 } else {
2878 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2879 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002880
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002881 return rv;
2882}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002883EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002884
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002885/**
2886 * Performs SMSM callback client notifiction.
2887 */
2888void notify_smsm_cb_clients_worker(struct work_struct *work)
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002889{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002890 struct smsm_state_cb_info *cb_info;
2891 struct smsm_state_info *state_info;
2892 int n;
2893 uint32_t new_state;
2894 uint32_t state_changes;
Eric Holmbergda31d042012-03-28 14:01:02 -06002895 uint32_t use_wakelock;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002896 int ret;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002897 unsigned long flags;
Brian Swetland03e00cd2009-07-01 17:58:37 -07002898
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002899 if (!smd_initialized)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002900 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002901
Eric Holmbergda31d042012-03-28 14:01:02 -06002902 while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002903 mutex_lock(&smsm_lock);
2904 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2905 state_info = &smsm_states[n];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002906
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002907 ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
2908 sizeof(new_state));
2909 if (ret != sizeof(new_state)) {
2910 pr_err("%s: snapshot underflow %d\n",
2911 __func__, ret);
2912 mutex_unlock(&smsm_lock);
2913 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002914 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002915
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002916 state_changes = state_info->last_value ^ new_state;
2917 if (state_changes) {
Eric Holmberg98c6c642012-02-24 11:29:35 -07002918 SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
2919 n, state_info->last_value,
2920 new_state);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002921 list_for_each_entry(cb_info,
2922 &state_info->callbacks, cb_list) {
2923
2924 if (cb_info->mask & state_changes)
2925 cb_info->notify(cb_info->data,
2926 state_info->last_value,
2927 new_state);
2928 }
2929 state_info->last_value = new_state;
2930 }
2931 }
Eric Holmberg59a9f942012-03-19 10:04:22 -06002932
Eric Holmbergda31d042012-03-28 14:01:02 -06002933 /* read wakelock flag */
2934 ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
2935 sizeof(use_wakelock));
2936 if (ret != sizeof(use_wakelock)) {
2937 pr_err("%s: snapshot underflow %d\n",
2938 __func__, ret);
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002939 mutex_unlock(&smsm_lock);
Eric Holmbergda31d042012-03-28 14:01:02 -06002940 return;
Eric Holmberg59a9f942012-03-19 10:04:22 -06002941 }
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002942 mutex_unlock(&smsm_lock);
Eric Holmbergda31d042012-03-28 14:01:02 -06002943
2944 if (use_wakelock) {
2945 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2946 if (smsm_snapshot_count) {
2947 --smsm_snapshot_count;
2948 if (smsm_snapshot_count == 0) {
2949 SMx_POWER_INFO("SMSM snapshot"
2950 " wake unlock\n");
2951 wake_unlock(&smsm_snapshot_wakelock);
2952 }
2953 } else {
2954 pr_err("%s: invalid snapshot count\n",
2955 __func__);
2956 }
2957 spin_unlock_irqrestore(&smsm_snapshot_count_lock,
2958 flags);
2959 }
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002960 }
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002961}
2962
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002963
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002964/**
2965 * Registers callback for SMSM state notifications when the specified
2966 * bits change.
2967 *
2968 * @smsm_entry Processor entry to deregister
2969 * @mask Bits to deregister (if result is 0, callback is removed)
2970 * @notify Notification function to deregister
2971 * @data Opaque data passed in to callback
2972 *
2973 * @returns Status code
2974 * <0 error code
2975 * 0 inserted new entry
2976 * 1 updated mask of existing entry
2977 */
2978int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
2979 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002980{
Eric Holmberge8a39322012-04-03 15:14:02 -06002981 struct smsm_state_info *state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002982 struct smsm_state_cb_info *cb_info;
2983 struct smsm_state_cb_info *cb_found = 0;
Eric Holmberge8a39322012-04-03 15:14:02 -06002984 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002985 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002986
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002987 if (smsm_entry >= SMSM_NUM_ENTRIES)
2988 return -EINVAL;
2989
Eric Holmbergc8002902011-09-16 13:55:57 -06002990 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002991
2992 if (!smsm_states) {
2993 /* smsm not yet initialized */
2994 ret = -ENODEV;
2995 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002996 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002997
Eric Holmberge8a39322012-04-03 15:14:02 -06002998 state = &smsm_states[smsm_entry];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002999 list_for_each_entry(cb_info,
Eric Holmberge8a39322012-04-03 15:14:02 -06003000 &state->callbacks, cb_list) {
3001 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003002 (cb_info->data == data)) {
3003 cb_info->mask |= mask;
3004 cb_found = cb_info;
3005 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003006 }
Eric Holmberge8a39322012-04-03 15:14:02 -06003007 new_mask |= cb_info->mask;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003008 }
3009
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003010 if (!cb_found) {
3011 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
3012 GFP_ATOMIC);
3013 if (!cb_info) {
3014 ret = -ENOMEM;
3015 goto cleanup;
3016 }
3017
3018 cb_info->mask = mask;
3019 cb_info->notify = notify;
3020 cb_info->data = data;
3021 INIT_LIST_HEAD(&cb_info->cb_list);
3022 list_add_tail(&cb_info->cb_list,
Eric Holmberge8a39322012-04-03 15:14:02 -06003023 &state->callbacks);
3024 new_mask |= mask;
3025 }
3026
3027 /* update interrupt notification mask */
3028 if (smsm_entry == SMSM_MODEM_STATE)
3029 new_mask |= LEGACY_MODEM_SMSM_MASK;
3030
3031 if (smsm_info.intr_mask) {
3032 unsigned long flags;
3033
3034 spin_lock_irqsave(&smem_lock, flags);
3035 new_mask = (new_mask & ~state->intr_mask_clear)
3036 | state->intr_mask_set;
3037 __raw_writel(new_mask,
3038 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
3039 wmb();
3040 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003041 }
3042
3043cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06003044 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003045 return ret;
3046}
3047EXPORT_SYMBOL(smsm_state_cb_register);
3048
3049
3050/**
3051 * Deregisters for SMSM state notifications for the specified bits.
3052 *
3053 * @smsm_entry Processor entry to deregister
3054 * @mask Bits to deregister (if result is 0, callback is removed)
3055 * @notify Notification function to deregister
3056 * @data Opaque data passed in to callback
3057 *
3058 * @returns Status code
3059 * <0 error code
3060 * 0 not found
3061 * 1 updated mask
3062 * 2 removed callback
3063 */
3064int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
3065 void (*notify)(void *, uint32_t, uint32_t), void *data)
3066{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003067 struct smsm_state_cb_info *cb_info;
Eric Holmberge8a39322012-04-03 15:14:02 -06003068 struct smsm_state_cb_info *cb_tmp;
3069 struct smsm_state_info *state;
3070 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003071 int ret = 0;
3072
3073 if (smsm_entry >= SMSM_NUM_ENTRIES)
3074 return -EINVAL;
3075
Eric Holmbergc8002902011-09-16 13:55:57 -06003076 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003077
3078 if (!smsm_states) {
3079 /* smsm not yet initialized */
Eric Holmbergc8002902011-09-16 13:55:57 -06003080 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003081 return -ENODEV;
3082 }
3083
Eric Holmberge8a39322012-04-03 15:14:02 -06003084 state = &smsm_states[smsm_entry];
3085 list_for_each_entry_safe(cb_info, cb_tmp,
3086 &state->callbacks, cb_list) {
3087 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003088 (cb_info->data == data)) {
3089 cb_info->mask &= ~mask;
3090 ret = 1;
3091 if (!cb_info->mask) {
3092 /* no mask bits set, remove callback */
3093 list_del(&cb_info->cb_list);
3094 kfree(cb_info);
3095 ret = 2;
Eric Holmberge8a39322012-04-03 15:14:02 -06003096 continue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003097 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003098 }
Eric Holmberge8a39322012-04-03 15:14:02 -06003099 new_mask |= cb_info->mask;
3100 }
3101
3102 /* update interrupt notification mask */
3103 if (smsm_entry == SMSM_MODEM_STATE)
3104 new_mask |= LEGACY_MODEM_SMSM_MASK;
3105
3106 if (smsm_info.intr_mask) {
3107 unsigned long flags;
3108
3109 spin_lock_irqsave(&smem_lock, flags);
3110 new_mask = (new_mask & ~state->intr_mask_clear)
3111 | state->intr_mask_set;
3112 __raw_writel(new_mask,
3113 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
3114 wmb();
3115 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003116 }
3117
Eric Holmbergc8002902011-09-16 13:55:57 -06003118 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003119 return ret;
3120}
3121EXPORT_SYMBOL(smsm_state_cb_deregister);
3122
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003123int smsm_driver_state_notifier_register(struct notifier_block *nb)
3124{
3125 int ret;
3126 if (!nb)
3127 return -EINVAL;
3128 mutex_lock(&smsm_driver_state_notifier_lock);
3129 ret = raw_notifier_chain_register(&smsm_driver_state_notifier_list, nb);
3130 mutex_unlock(&smsm_driver_state_notifier_lock);
3131 return ret;
3132}
3133EXPORT_SYMBOL(smsm_driver_state_notifier_register);
3134
3135int smsm_driver_state_notifier_unregister(struct notifier_block *nb)
3136{
3137 int ret;
3138 if (!nb)
3139 return -EINVAL;
3140 mutex_lock(&smsm_driver_state_notifier_lock);
3141 ret = raw_notifier_chain_unregister(&smsm_driver_state_notifier_list,
3142 nb);
3143 mutex_unlock(&smsm_driver_state_notifier_lock);
3144 return ret;
3145}
3146EXPORT_SYMBOL(smsm_driver_state_notifier_unregister);
3147
/*
 * smsm_driver_state_notify() - invoke every registered driver-state
 * callback with @state/@data.  The chain is a raw notifier, so the
 * mutex held here provides the serialization the raw API requires.
 */
static void smsm_driver_state_notify(uint32_t state, void *data)
{
	mutex_lock(&smsm_driver_state_notifier_lock);
	raw_notifier_call_chain(&smsm_driver_state_notifier_list,
			state, data);
	mutex_unlock(&smsm_driver_state_notifier_lock);
}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003155
3156int smd_core_init(void)
3157{
3158 int r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003159 unsigned long flags = IRQF_TRIGGER_RISING;
3160 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003161
Brian Swetland37521a32009-07-01 18:30:47 -07003162 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003163 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003164 if (r < 0)
3165 return r;
3166 r = enable_irq_wake(INT_A9_M2A_0);
3167 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003168 pr_err("smd_core_init: "
3169 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003170
Eric Holmberg98c6c642012-02-24 11:29:35 -07003171 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003172 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003173 if (r < 0) {
3174 free_irq(INT_A9_M2A_0, 0);
3175 return r;
3176 }
3177 r = enable_irq_wake(INT_A9_M2A_5);
3178 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003179 pr_err("smd_core_init: "
3180 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003181
Brian Swetland37521a32009-07-01 18:30:47 -07003182#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003183#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3184 flags |= IRQF_SHARED;
3185#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003186 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003187 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003188 if (r < 0) {
3189 free_irq(INT_A9_M2A_0, 0);
3190 free_irq(INT_A9_M2A_5, 0);
3191 return r;
3192 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003193
Eric Holmberg98c6c642012-02-24 11:29:35 -07003194 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3195 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003196 if (r < 0) {
3197 free_irq(INT_A9_M2A_0, 0);
3198 free_irq(INT_A9_M2A_5, 0);
3199 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3200 return r;
3201 }
3202
3203 r = enable_irq_wake(INT_ADSP_A11);
3204 if (r < 0)
3205 pr_err("smd_core_init: "
3206 "enable_irq_wake failed for INT_ADSP_A11\n");
3207
3208#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3209 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3210 if (r < 0)
3211 pr_err("smd_core_init: enable_irq_wake "
3212 "failed for INT_ADSP_A11_SMSM\n");
3213#endif
3214 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003215#endif
3216
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003217#if defined(CONFIG_DSPS)
3218 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3219 flags, "smd_dev", smd_dsps_irq_handler);
3220 if (r < 0) {
3221 free_irq(INT_A9_M2A_0, 0);
3222 free_irq(INT_A9_M2A_5, 0);
3223 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003224 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003225 return r;
3226 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003227
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003228 r = enable_irq_wake(INT_DSPS_A11);
3229 if (r < 0)
3230 pr_err("smd_core_init: "
3231 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003232#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003233
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003234#if defined(CONFIG_WCNSS)
3235 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3236 flags, "smd_dev", smd_wcnss_irq_handler);
3237 if (r < 0) {
3238 free_irq(INT_A9_M2A_0, 0);
3239 free_irq(INT_A9_M2A_5, 0);
3240 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003241 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003242 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3243 return r;
3244 }
3245
3246 r = enable_irq_wake(INT_WCNSS_A11);
3247 if (r < 0)
3248 pr_err("smd_core_init: "
3249 "enable_irq_wake failed for INT_WCNSS_A11\n");
3250
Eric Holmberg98c6c642012-02-24 11:29:35 -07003251 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3252 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003253 if (r < 0) {
3254 free_irq(INT_A9_M2A_0, 0);
3255 free_irq(INT_A9_M2A_5, 0);
3256 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003257 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003258 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3259 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3260 return r;
3261 }
3262
3263 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3264 if (r < 0)
3265 pr_err("smd_core_init: "
3266 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3267#endif
3268
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003269#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003270 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3271 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003272 if (r < 0) {
3273 free_irq(INT_A9_M2A_0, 0);
3274 free_irq(INT_A9_M2A_5, 0);
3275 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003276 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003277 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3278 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003279 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003280 return r;
3281 }
3282
3283 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3284 if (r < 0)
3285 pr_err("smd_core_init: "
3286 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3287#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003288 SMD_INFO("smd_core_init() done\n");
3289
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003290 return 0;
3291}
3292
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303293static int intr_init(struct interrupt_config_item *private_irq,
3294 struct smd_irq_config *platform_irq,
3295 struct platform_device *pdev
3296 )
3297{
3298 int irq_id;
3299 int ret;
3300 int ret_wake;
3301
3302 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3303 private_irq->out_offset = platform_irq->out_offset;
3304 private_irq->out_base = platform_irq->out_base;
3305
3306 irq_id = platform_get_irq_byname(
3307 pdev,
3308 platform_irq->irq_name
3309 );
3310 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3311 platform_irq->irq_name, irq_id);
3312 ret = request_irq(irq_id,
3313 private_irq->irq_handler,
3314 platform_irq->flags,
3315 platform_irq->device_name,
3316 (void *)platform_irq->dev_id
3317 );
3318 if (ret < 0) {
3319 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003320 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303321 } else {
3322 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003323 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303324 ret_wake = enable_irq_wake(irq_id);
3325 if (ret_wake < 0) {
3326 pr_err("smd: enable_irq_wake failed on %s",
3327 platform_irq->irq_name);
3328 }
3329 }
3330
3331 return ret;
3332}
3333
Jeff Hugobdc734d2012-03-26 16:05:39 -06003334int sort_cmp_func(const void *a, const void *b)
3335{
3336 struct smem_area *left = (struct smem_area *)(a);
3337 struct smem_area *right = (struct smem_area *)(b);
3338
3339 return left->phys_addr - right->phys_addr;
3340}
3341
/*
 * smd_core_platform_init() - platform-data driven SMD/SMSM initialization
 * @pdev: platform device whose platform_data is a struct smd_platform
 *
 * Optionally ioremaps and sorts the auxiliary shared-memory regions
 * described by the board, then registers the SMD (and, where supported,
 * SMSM) interrupts for every configured subsystem edge and records each
 * edge's subsystem name for later SSR lookups.
 *
 * Returns 0 on success or a negative errno; on failure, IRQs requested
 * so far are freed and mapped regions are unmapped (see labels below).
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;
	struct smd_smem_regions *smd_smem_areas;
	int smem_idx = 0;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	/* the board may opt out of the SMSM reset handshake during SSR */
	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			   smd_ssr_config->disable_smsm_reset_handshake;

	smd_smem_areas = smd_platform_data->smd_smem_areas;
	if (smd_smem_areas) {
		num_smem_areas = smd_platform_data->num_smem_areas;
		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
				GFP_KERNEL);
		if (!smem_areas) {
			pr_err("%s: smem_areas kmalloc failed\n", __func__);
			err_ret = -ENOMEM;
			goto smem_areas_alloc_fail;
		}

		/* map every auxiliary SMEM region described by the board */
		for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
			smem_areas[smem_idx].phys_addr =
					smd_smem_areas[smem_idx].phys_addr;
			smem_areas[smem_idx].size =
					smd_smem_areas[smem_idx].size;
			smem_areas[smem_idx].virt_addr = ioremap_nocache(
				(unsigned long)(smem_areas[smem_idx].phys_addr),
				smem_areas[smem_idx].size);
			if (!smem_areas[smem_idx].virt_addr) {
				pr_err("%s: ioremap_nocache() of addr:%p"
					" size: %x\n", __func__,
					smem_areas[smem_idx].phys_addr,
					smem_areas[smem_idx].size);
				err_ret = -ENOMEM;
				/* NOTE(review): this ++ makes the unwind at
				 * smem_failed also iounmap() the failed
				 * (NULL) slot -- confirm iounmap(NULL) is
				 * benign on this platform */
				++smem_idx;
				goto smem_failed;
			}
		}
		/* keep the regions ordered by physical address (presumably
		 * so address-to-region lookups can scan in order -- TODO
		 * confirm against the lookup code) */
		sort(smem_areas, num_smem_areas,
				sizeof(struct smem_area),
				sort_cmp_func, NULL);
	}

	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			goto intr_failed;
		}

		/* only init smsm structs if this edge supports smsm */
		if (cfg->smsm_int.irq_id)
			ret = intr_init(
				&private_intr_config[cfg->irq_config_id].smsm,
				&cfg->smsm_int,
				pdev
				);

		/* NOTE(review): when the edge has no smsm irq, 'ret' still
		 * holds the non-negative smd result here, so this check is
		 * a harmless no-op for such edges */
		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			goto intr_failed;
		}

		/* remember which subsystem owns this edge for SSR */
		if (cfg->subsys_name)
			strlcpy(edge_to_pids[cfg->edge].subsys_name,
				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}


	SMD_INFO("smd_core_platform_init() done\n");
	return 0;

intr_failed:
	pr_err("smd: deregistering IRQs\n");
	/* free every edge's IRQs; intr_init() stored negative ids for
	 * failed requests, so the >= 0 guards skip those.  NOTE(review):
	 * edges whose smsm intr_init() never ran keep the platform-data
	 * irq_id value -- confirm boards initialize it to a safe value */
	for (i = 0; i < num_ss; ++i) {
		cfg = &smd_ss_config_list[i];

		if (cfg->smd_int.irq_id >= 0)
			free_irq(cfg->smd_int.irq_id,
				(void *)cfg->smd_int.dev_id
				);
		if (cfg->smsm_int.irq_id >= 0)
			free_irq(cfg->smsm_int.irq_id,
				(void *)cfg->smsm_int.dev_id
				);
	}
smem_failed:
	/* also reached by fall-through from intr_failed: unmap in reverse */
	for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
		iounmap(smem_areas[smem_idx].virt_addr);
	kfree(smem_areas);
smem_areas_alloc_fail:
	return err_ret;
}
3457
Gregory Bean4416e9e2010-07-28 10:22:12 -07003458static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003459{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303460 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003461
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303462 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003463 INIT_WORK(&probe_work, smd_channel_probe_worker);
3464
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003465 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3466 if (IS_ERR(channel_close_wq)) {
3467 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3468 return -ENOMEM;
3469 }
3470
3471 if (smsm_init()) {
3472 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003473 return -1;
3474 }
3475
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303476 if (pdev) {
3477 if (pdev->dev.of_node) {
3478 pr_err("SMD: Device tree not currently supported\n");
3479 return -ENODEV;
3480 } else if (pdev->dev.platform_data) {
3481 ret = smd_core_platform_init(pdev);
3482 if (ret) {
3483 pr_err(
3484 "SMD: smd_core_platform_init() failed\n");
3485 return -ENODEV;
3486 }
3487 } else {
3488 ret = smd_core_init();
3489 if (ret) {
3490 pr_err("smd_core_init() failed\n");
3491 return -ENODEV;
3492 }
3493 }
3494 } else {
3495 pr_err("SMD: PDEV not found\n");
3496 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003497 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003498
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003499 smd_initialized = 1;
3500
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003501 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003502 smsm_irq_handler(0, 0);
3503 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003504
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003505 return 0;
3506}
3507
static int restart_notifier_cb(struct notifier_block *this,
				  unsigned long code,
				  void *data);

/*
 * Subsystem-restart hook table: maps each subsystem name known to the
 * subsystem_notif framework to the SMD processor whose channels must be
 * reset after that subsystem shuts down.
 * NOTE(review): "gss" maps to SMD_MODEM -- appears intentional (shares
 * the modem processor id); confirm.
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3519
3520static int restart_notifier_cb(struct notifier_block *this,
3521 unsigned long code,
3522 void *data)
3523{
3524 if (code == SUBSYS_AFTER_SHUTDOWN) {
3525 struct restart_notifier_block *notifier;
3526
3527 notifier = container_of(this,
3528 struct restart_notifier_block, nb);
3529 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3530 __func__, notifier->processor,
3531 notifier->name);
3532
3533 smd_channel_reset(notifier->processor);
3534 }
3535
3536 return NOTIFY_DONE;
3537}
3538
3539static __init int modem_restart_late_init(void)
3540{
3541 int i;
3542 void *handle;
3543 struct restart_notifier_block *nb;
3544
3545 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3546 nb = &restart_notifiers[i];
3547 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3548 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3549 __func__, nb->name, handle);
3550 }
3551 return 0;
3552}
3553late_initcall(modem_restart_late_init);
3554
/* Platform driver glue: binds to the platform device named MODULE_NAME
 * (defined elsewhere in this file) and runs msm_smd_probe() on match. */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3562
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003563int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003564{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003565 static bool registered;
3566
3567 if (registered)
3568 return 0;
3569
3570 registered = true;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003571 return platform_driver_register(&msm_smd_driver);
3572}
3573
/* msm_smd_init() guards against double registration, so this hook is
 * safe even if board code already called it directly. */
module_init(msm_smd_init);

MODULE_DESCRIPTION("MSM Shared Memory Core");
MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
MODULE_LICENSE("GPL");