/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/kthread.h>

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}

static struct device_attribute spi_dev_attrs[] = {
	__ATTR_RO(modalias),
	__ATTR_NULL,
};

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* suspend will stop irqs and dma; no more i/o */
	if (drv) {
		if (drv->suspend)
			value = drv->suspend(to_spi_device(dev), message);
		else
			dev_dbg(dev, "... can't suspend\n");
	}
	return value;
}

static int spi_legacy_resume(struct device *dev)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* resume may restart the i/o queue */
	if (drv) {
		if (drv->resume)
			value = drv->resume(to_spi_device(dev));
		else
			dev_dbg(dev, "... can't resume\n");
	}
	return value;
}

static int spi_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return spi_legacy_suspend(dev, PMSG_SUSPEND);
}

static int spi_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_freeze(dev);
	else
		return spi_legacy_suspend(dev, PMSG_FREEZE);
}

static int spi_pm_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_thaw(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_poweroff(dev);
	else
		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
}

static int spi_pm_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_restore(dev);
	else
		return spi_legacy_resume(dev);
}
#else
#define spi_pm_suspend	NULL
#define spi_pm_resume	NULL
#define spi_pm_freeze	NULL
#define spi_pm_thaw	NULL
#define spi_pm_poweroff	NULL
#define spi_pm_restore	NULL
#endif

static const struct dev_pm_ops spi_pm = {
	.suspend = spi_pm_suspend,
	.resume = spi_pm_resume,
	.freeze = spi_pm_freeze,
	.thaw = spi_pm_thaw,
	.poweroff = spi_pm_poweroff,
	.restore = spi_pm_restore,
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		pm_generic_runtime_idle
	)
};

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_attrs	= spi_dev_attrs,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.pm		= &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	return sdrv->probe(to_spi_device(dev));
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	return sdrv->remove(to_spi_device(dev));
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
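
/* Minimal registration sketch for a protocol driver (illustrative only,
 * not part of this file; the "foo_*" names are hypothetical).  A client
 * driver fills in a struct spi_driver and hands it to
 * spi_register_driver(), typically from its module init code:
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		// talk to the chip using spi_sync()/spi_async() ...
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.owner	= THIS_MODULE,
 *		},
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *
 *	// in module init:	spi_register_driver(&foo_driver);
 *	// in module exit:	spi_unregister_driver(&foo_driver);
 */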

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into board files like
 * arch/.../mach-.../board-YYY.c, along with other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately, so that it can fill in the spi_device
 * with device parameters before calling spi_add_device() on it.
 *
 * The caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;
	struct device		*dev = master->dev.parent;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof *spi, GFP_KERNEL);
	if (!spi) {
		dev_err(dev, "cannot alloc spi_device\n");
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct device *dev = spi->master->dev.parent;
	struct device *d;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= spi->master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			spi->master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
			spi->chip_select);


	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
	if (d != NULL) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		put_device(d);
		status = -EBUSY;
		goto done;
	}

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
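
/* Illustrative sketch of the alloc/add pair (not part of this file; the
 * modalias and numbers below are hypothetical).  An adapter driver that
 * learns about a chip out-of-band could hand-build the device instead of
 * relying on board info:
 *
 *	struct spi_device *spi;
 *
 *	spi = spi_alloc_device(master);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_0;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// discard if it could not be added
 *		return -ENODEV;
 *	}
 */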

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
				struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int __devinit
spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
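
/* Board-file sketch (illustrative only; the chip name and numbers are
 * hypothetical).  Early init code declares its hard-wired SPI devices
 * once, usually from board setup code run at arch_initcall time:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-sensor",
 *			.max_speed_hz	= 2000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	// during board init:
 *	//	spi_register_board_info(board_spi_devices,
 *	//				ARRAY_SIZE(board_spi_devices));
 */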

/*-------------------------------------------------------------------------*/

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and transfer each message.
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&master->queue_lock, flags);
	if (list_empty(&master->queue) || !master->running) {
		if (master->busy && master->unprepare_transfer_hardware) {
			ret = master->unprepare_transfer_hardware(master);
			if (ret) {
				spin_unlock_irqrestore(&master->queue_lock, flags);
				dev_err(&master->dev,
					"failed to unprepare transfer hardware\n");
				return;
			}
		}
		master->busy = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}
	/* Extract head of queue */
	master->cur_msg =
	    list_entry(master->queue.next, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");
			return;
		}
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker,
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return -ENOMEM;
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	if (list_empty(&master->queue))
		next = NULL;
	else
		next = list_entry(master->queue.next,
				  struct spi_message, queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	master->cur_msg = NULL;

	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
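
/* Sketch of how a queued controller driver typically uses the helpers above
 * (illustrative only; "foo_*" is hypothetical).  The core calls the driver's
 * transfer_one_message() from spi_pump_messages(), and the driver reports
 * completion back with spi_finalize_current_message():
 *
 *	static int foo_transfer_one_message(struct spi_master *master,
 *					    struct spi_message *msg)
 *	{
 *		struct spi_transfer *t;
 *
 *		list_for_each_entry(t, &msg->transfers, transfer_list) {
 *			// program the hardware and wait for completion ...
 *			msg->actual_length += t->len;
 *		}
 *		msg->status = 0;
 *		spi_finalize_current_message(master);
 *		return 0;
 *	}
 */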

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		msleep(10);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting the transfer
 * @msg: spi message which is to be handled; it is queued onto the driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (master->running && !master->busy)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->queued = true;
	master->transfer = spi_queued_transfer;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
err_init_queue:
	spi_destroy_queue(master);
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF) && !defined(CONFIG_SPARC)
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;
	const __be32 *prop;
	int rc;
	int len;

	if (!master->dev.of_node)
		return;

	for_each_child_of_node(master->dev.of_node, nc) {
		/* Alloc an spi_device */
		spi = spi_alloc_device(master);
		if (!spi) {
			dev_err(&master->dev, "spi_device alloc error for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Select device driver */
		if (of_modalias_node(nc, spi->modalias,
				     sizeof(spi->modalias)) < 0) {
			dev_err(&master->dev, "cannot find modalias for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Device address */
		prop = of_get_property(nc, "reg", &len);
		if (!prop || len < sizeof(*prop)) {
			dev_err(&master->dev, "%s has no 'reg' property\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}
		spi->chip_select = be32_to_cpup(prop);

		/* Mode (clock phase/polarity/etc.) */
		if (of_find_property(nc, "spi-cpha", NULL))
			spi->mode |= SPI_CPHA;
		if (of_find_property(nc, "spi-cpol", NULL))
			spi->mode |= SPI_CPOL;
		if (of_find_property(nc, "spi-cs-high", NULL))
			spi->mode |= SPI_CS_HIGH;

		/* Device speed */
		prop = of_get_property(nc, "spi-max-frequency", &len);
		if (!prop || len < sizeof(*prop)) {
			dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}
		spi->max_speed_hz = be32_to_cpup(prop);

		/* IRQ */
		spi->irq = irq_of_parse_and_map(nc, 0);

		/* Store a pointer to the node in the device structure */
		of_node_get(nc);
		spi->dev.of_node = nc;

		/* Register the new device */
		request_module(spi->modalias);
		rc = spi_add_device(spi);
		if (rc) {
			dev_err(&master->dev, "spi_device register error %s\n",
				nc->full_name);
			spi_dev_put(spi);
		}

	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};



/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof *master, GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_unregister(&master->dev);
			goto done;
		}
	}

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree */
	of_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
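
/* Controller-driver sketch (illustrative only; the "foo_*" names and the
 * platform device "pdev" are hypothetical).  A controller driver's probe()
 * allocates the master, fills in its methods and limits, then registers it:
 *
 *	struct spi_master *master;
 *	struct foo_priv *priv;
 *	int ret;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *	master->bus_num = pdev->id;
 *	master->num_chipselect = 4;
 *	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	master->setup = foo_setup;
 *	master->transfer_one_message = foo_transfer_one_message;
 *	master->prepare_transfer_hardware = foo_prepare_hardware;
 *	master->unprepare_transfer_hardware = foo_unprepare_hardware;
 *
 *	ret = spi_register_master(master);
 *	if (ret)
 *		spi_master_put(master);	// drop the reference on failure
 */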
 | 1032 |  | 
| David Lamparter | 3486008 | 2010-08-30 23:54:17 +0200 | [diff] [blame] | 1033 | static int __unregister(struct device *dev, void *null) | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1034 | { | 
| David Lamparter | 3486008 | 2010-08-30 23:54:17 +0200 | [diff] [blame] | 1035 | 	spi_unregister_device(to_spi_device(dev)); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1036 | 	return 0; | 
 | 1037 | } | 
 | 1038 |  | 
 | 1039 | /** | 
 | 1040 |  * spi_unregister_master - unregister SPI master controller | 
 | 1041 |  * @master: the master being unregistered | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 1042 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1043 |  * | 
 | 1044 |  * This call is used only by SPI master controller drivers, which are the | 
 | 1045 |  * only ones directly touching chip registers. | 
 | 1046 |  * | 
 | 1047 |  * This must be called from context that can sleep. | 
 | 1048 |  */ | 
 | 1049 | void spi_unregister_master(struct spi_master *master) | 
 | 1050 | { | 
| Jeff Garzik | 89fc9a1 | 2006-12-06 20:35:35 -0800 | [diff] [blame] | 1051 | 	int dummy; | 
 | 1052 |  | 
| Linus Walleij | ffbbdd2 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 1053 | 	if (master->queued) { | 
 | 1054 | 		if (spi_destroy_queue(master)) | 
 | 1055 | 			dev_err(&master->dev, "queue remove failed\n"); | 
 | 1056 | 	} | 
 | 1057 |  | 
| Feng Tang | 2b9603a | 2010-08-02 15:52:15 +0800 | [diff] [blame] | 1058 | 	mutex_lock(&board_lock); | 
 | 1059 | 	list_del(&master->list); | 
 | 1060 | 	mutex_unlock(&board_lock); | 
 | 1061 |  | 
| Sebastian Andrzej Siewior | 97dbf37 | 2010-12-21 17:24:31 -0800 | [diff] [blame] | 1062 | 	dummy = device_for_each_child(&master->dev, NULL, __unregister); | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 1063 | 	device_unregister(&master->dev); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1064 | } | 
 | 1065 | EXPORT_SYMBOL_GPL(spi_unregister_master); | 
 | 1066 |  | 
| Linus Walleij | ffbbdd2 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 1067 | int spi_master_suspend(struct spi_master *master) | 
 | 1068 | { | 
 | 1069 | 	int ret; | 
 | 1070 |  | 
 | 1071 | 	/* Basically no-ops for non-queued masters */ | 
 | 1072 | 	if (!master->queued) | 
 | 1073 | 		return 0; | 
 | 1074 |  | 
 | 1075 | 	ret = spi_stop_queue(master); | 
 | 1076 | 	if (ret) | 
 | 1077 | 		dev_err(&master->dev, "queue stop failed\n"); | 
 | 1078 |  | 
 | 1079 | 	return ret; | 
 | 1080 | } | 
 | 1081 | EXPORT_SYMBOL_GPL(spi_master_suspend); | 
 | 1082 |  | 
 | 1083 | int spi_master_resume(struct spi_master *master) | 
 | 1084 | { | 
 | 1085 | 	int ret; | 
 | 1086 |  | 
 | 1087 | 	if (!master->queued) | 
 | 1088 | 		return 0; | 
 | 1089 |  | 
 | 1090 | 	ret = spi_start_queue(master); | 
 | 1091 | 	if (ret) | 
 | 1092 | 		dev_err(&master->dev, "queue restart failed\n"); | 
 | 1093 |  | 
 | 1094 | 	return ret; | 
 | 1095 | } | 
 | 1096 | EXPORT_SYMBOL_GPL(spi_master_resume); | 
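
Editorial note: controller drivers typically wire these two helpers into their dev_pm_ops. A minimal sketch, with hypothetical foo_* names and assuming drvdata points at the spi_master:

	static int foo_suspend(struct device *dev)
	{
		struct spi_master *master = dev_get_drvdata(dev);

		return spi_master_suspend(master);	/* stops the message queue */
	}

	static int foo_resume(struct device *dev)
	{
		struct spi_master *master = dev_get_drvdata(dev);

		return spi_master_resume(master);	/* restarts the queue */
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);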
 | 1097 |  | 
| Dave Young | 5ed2c83 | 2008-01-22 15:14:18 +0800 | [diff] [blame] | 1098 | static int __spi_master_match(struct device *dev, void *data) | 
 | 1099 | { | 
 | 1100 | 	struct spi_master *m; | 
 | 1101 | 	u16 *bus_num = data; | 
 | 1102 |  | 
 | 1103 | 	m = container_of(dev, struct spi_master, dev); | 
 | 1104 | 	return m->bus_num == *bus_num; | 
 | 1105 | } | 
 | 1106 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1107 | /** | 
 | 1108 |  * spi_busnum_to_master - look up master associated with bus_num | 
 | 1109 |  * @bus_num: the master's bus number | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 1110 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1111 |  * | 
 | 1112 |  * This call may be used with devices that are registered after | 
 | 1113 |  * arch init time.  It returns a refcounted pointer to the relevant | 
 | 1114 |  * spi_master (which the caller must release), or NULL if there is | 
 | 1115 |  * no such master registered. | 
 | 1116 |  */ | 
 | 1117 | struct spi_master *spi_busnum_to_master(u16 bus_num) | 
 | 1118 | { | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 1119 | 	struct device		*dev; | 
| Atsushi Nemoto | 1e9a51d | 2007-01-26 00:56:54 -0800 | [diff] [blame] | 1120 | 	struct spi_master	*master = NULL; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1121 |  | 
| Greg Kroah-Hartman | 695794a | 2008-05-22 17:21:08 -0400 | [diff] [blame] | 1122 | 	dev = class_find_device(&spi_master_class, NULL, &bus_num, | 
| Dave Young | 5ed2c83 | 2008-01-22 15:14:18 +0800 | [diff] [blame] | 1123 | 				__spi_master_match); | 
 | 1124 | 	if (dev) | 
 | 1125 | 		master = container_of(dev, struct spi_master, dev); | 
 | 1126 | 	/* reference got in class_find_device */ | 
| Atsushi Nemoto | 1e9a51d | 2007-01-26 00:56:54 -0800 | [diff] [blame] | 1127 | 	return master; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1128 | } | 
 | 1129 | EXPORT_SYMBOL_GPL(spi_busnum_to_master); | 
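
Editorial note: since the lookup returns a refcounted pointer, callers must balance it with spi_master_put(). A sketch (the bus number 2 is arbitrary):

	static void find_bus2_example(void)
	{
		struct spi_master *master;

		master = spi_busnum_to_master(2);
		if (!master)
			return;		/* no such bus registered (yet) */

		/* ... e.g. instantiate a device with spi_new_device() ... */

		spi_master_put(master);	/* release the class_find_device() reference */
	}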
 | 1130 |  | 
 | 1131 |  | 
 | 1132 | /*-------------------------------------------------------------------------*/ | 
 | 1133 |  | 
| David Brownell | 7d07719 | 2009-06-17 16:26:03 -0700 | [diff] [blame] | 1134 | /* Core methods for SPI master protocol drivers.  Some of the | 
 | 1135 |  * other core methods are currently defined as inline functions. | 
 | 1136 |  */ | 
 | 1137 |  | 
 | 1138 | /** | 
 | 1139 |  * spi_setup - setup SPI mode and clock rate | 
 | 1140 |  * @spi: the device whose settings are being modified | 
 | 1141 |  * Context: can sleep, and no requests are queued to the device | 
 | 1142 |  * | 
 | 1143 |  * SPI protocol drivers may need to update the transfer mode if the | 
 | 1144 |  * device doesn't work with its default.  They may likewise need | 
 | 1145 |  * to update clock rates or word sizes from initial values.  This function | 
 | 1146 |  * changes those settings, and must be called from a context that can sleep. | 
 | 1147 |  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take | 
 | 1148 |  * effect the next time the device is selected and data is transferred to | 
 | 1149 |  * or from it.  When this function returns, the spi device is deselected. | 
 | 1150 |  * | 
 | 1151 |  * Note that this call will fail if the protocol driver specifies an option | 
 | 1152 |  * that the underlying controller or its driver does not support.  For | 
 | 1153 |  * example, not all hardware supports wire transfers using nine bit words, | 
 | 1154 |  * LSB-first wire encoding, or active-high chipselects. | 
 | 1155 |  */ | 
 | 1156 | int spi_setup(struct spi_device *spi) | 
 | 1157 | { | 
| David Brownell | e7db06b | 2009-06-17 16:26:04 -0700 | [diff] [blame] | 1158 | 	unsigned	bad_bits; | 
| David Brownell | 7d07719 | 2009-06-17 16:26:03 -0700 | [diff] [blame] | 1159 | 	int		status; | 
 | 1160 |  | 
| David Brownell | e7db06b | 2009-06-17 16:26:04 -0700 | [diff] [blame] | 1161 | 	/* help drivers fail *cleanly* when they need options | 
 | 1162 | 	 * that aren't supported with their current master | 
 | 1163 | 	 */ | 
 | 1164 | 	bad_bits = spi->mode & ~spi->master->mode_bits; | 
 | 1165 | 	if (bad_bits) { | 
| Linus Walleij | eb288a1 | 2010-10-21 21:06:44 +0200 | [diff] [blame] | 1166 | 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n", | 
| David Brownell | e7db06b | 2009-06-17 16:26:04 -0700 | [diff] [blame] | 1167 | 			bad_bits); | 
 | 1168 | 		return -EINVAL; | 
 | 1169 | 	} | 
 | 1170 |  | 
| David Brownell | 7d07719 | 2009-06-17 16:26:03 -0700 | [diff] [blame] | 1171 | 	if (!spi->bits_per_word) | 
 | 1172 | 		spi->bits_per_word = 8; | 
 | 1173 |  | 
 | 1174 | 	status = spi->master->setup(spi); | 
 | 1175 |  | 
 | 1176 | 	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s" | 
 | 1177 | 				"%u bits/w, %u Hz max --> %d\n", | 
 | 1178 | 			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)), | 
 | 1179 | 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", | 
 | 1180 | 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", | 
 | 1181 | 			(spi->mode & SPI_3WIRE) ? "3wire, " : "", | 
 | 1182 | 			(spi->mode & SPI_LOOP) ? "loopback, " : "", | 
 | 1183 | 			spi->bits_per_word, spi->max_speed_hz, | 
 | 1184 | 			status); | 
 | 1185 |  | 
 | 1186 | 	return status; | 
 | 1187 | } | 
 | 1188 | EXPORT_SYMBOL_GPL(spi_setup); | 
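
Editorial note: a typical caller is a protocol driver's probe(), adjusting the defaults before its first transfer. A hedged sketch with hypothetical bar_* names and made-up values:

	static int bar_probe(struct spi_device *spi)
	{
		int ret;

		spi->mode = SPI_MODE_3;		/* CPOL=1, CPHA=1 */
		spi->bits_per_word = 16;
		spi->max_speed_hz = 1000000;	/* 1 MHz; purely illustrative */

		ret = spi_setup(spi);
		if (ret < 0)
			dev_err(&spi->dev, "spi_setup() failed: %d\n", ret);
		return ret;
	}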
 | 1189 |  | 
| Ernst Schwab | cf32b71 | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 1190 | static int __spi_async(struct spi_device *spi, struct spi_message *message) | 
 | 1191 | { | 
 | 1192 | 	struct spi_master *master = spi->master; | 
 | 1193 |  | 
 | 1194 | 	/* Half-duplex links include original MicroWire, and ones with | 
 | 1195 | 	 * only one data pin like SPI_3WIRE (switches direction) or where | 
 | 1196 | 	 * either MOSI or MISO is missing.  They can also be caused by | 
 | 1197 | 	 * software limitations. | 
 | 1198 | 	 */ | 
 | 1199 | 	if ((master->flags & SPI_MASTER_HALF_DUPLEX) | 
 | 1200 | 			|| (spi->mode & SPI_3WIRE)) { | 
 | 1201 | 		struct spi_transfer *xfer; | 
 | 1202 | 		unsigned flags = master->flags; | 
 | 1203 |  | 
 | 1204 | 		list_for_each_entry(xfer, &message->transfers, transfer_list) { | 
 | 1205 | 			if (xfer->rx_buf && xfer->tx_buf) | 
 | 1206 | 				return -EINVAL; | 
 | 1207 | 			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) | 
 | 1208 | 				return -EINVAL; | 
 | 1209 | 			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) | 
 | 1210 | 				return -EINVAL; | 
 | 1211 | 		} | 
 | 1212 | 	} | 
 | 1213 |  | 
 | 1214 | 	message->spi = spi; | 
 | 1215 | 	message->status = -EINPROGRESS; | 
 | 1216 | 	return master->transfer(spi, message); | 
 | 1217 | } | 
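
Editorial note: the half-duplex check above rejects transfers that use tx_buf and rx_buf at the same time. A protocol driver targeting such a link splits the exchange into separate transfers, roughly like this (hypothetical helper, buffers assumed DMA-safe):

	static void bar_build_half_duplex_msg(struct spi_message *m,
					      struct spi_transfer t[2],
					      const u8 *cmd, u8 *data, size_t n)
	{
		memset(t, 0, 2 * sizeof(*t));
		t[0].tx_buf = cmd;		/* write-only transfer ... */
		t[0].len = 1;
		t[1].rx_buf = data;		/* ... then a read-only transfer */
		t[1].len = n;

		spi_message_init(m);
		spi_message_add_tail(&t[0], m);
		spi_message_add_tail(&t[1], m);
	}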
 | 1218 |  | 
| David Brownell | 568d069 | 2009-09-22 16:46:18 -0700 | [diff] [blame] | 1219 | /** | 
 | 1220 |  * spi_async - asynchronous SPI transfer | 
 | 1221 |  * @spi: device with which data will be exchanged | 
 | 1222 |  * @message: describes the data transfers, including completion callback | 
 | 1223 |  * Context: any (irqs may be blocked, etc) | 
 | 1224 |  * | 
 | 1225 |  * This call may be used from IRQ context and other contexts which can't sleep, | 
 | 1226 |  * as well as from task contexts which can sleep. | 
 | 1227 |  * | 
 | 1228 |  * The completion callback is invoked in a context which can't sleep. | 
 | 1229 |  * Before that invocation, the value of message->status is undefined. | 
 | 1230 |  * When the callback is issued, message->status holds either zero (to | 
 | 1231 |  * indicate complete success) or a negative error code.  After that | 
 | 1232 |  * callback returns, the driver which issued the transfer request may | 
 | 1233 |  * deallocate the associated memory; it's no longer in use by any SPI | 
 | 1234 |  * core or controller driver code. | 
 | 1235 |  * | 
 | 1236 |  * Note that although all messages to a spi_device are handled in | 
 | 1237 |  * FIFO order, messages may go to different devices in other orders. | 
 | 1238 |  * Some device might be higher priority, or have various "hard" access | 
 | 1239 |  * time requirements, for example. | 
 | 1240 |  * | 
 | 1241 |  * On detection of any fault during the transfer, processing of | 
 | 1242 |  * the entire message is aborted, and the device is deselected. | 
 | 1243 |  * Until returning from the associated message completion callback, | 
 | 1244 |  * no other spi_message queued to that device will be processed. | 
 | 1245 |  * (This rule applies equally to all the synchronous transfer calls, | 
 | 1246 |  * which are wrappers around this core asynchronous primitive.) | 
 | 1247 |  */ | 
 | 1248 | int spi_async(struct spi_device *spi, struct spi_message *message) | 
 | 1249 | { | 
 | 1250 | 	struct spi_master *master = spi->master; | 
| Ernst Schwab | cf32b71 | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 1251 | 	int ret; | 
 | 1252 | 	unsigned long flags; | 
| David Brownell | 568d069 | 2009-09-22 16:46:18 -0700 | [diff] [blame] | 1253 |  | 
| Ernst Schwab | cf32b71 | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 1254 | 	spin_lock_irqsave(&master->bus_lock_spinlock, flags); | 
| David Brownell | 568d069 | 2009-09-22 16:46:18 -0700 | [diff] [blame] | 1255 |  | 
| Ernst Schwab | cf32b71 | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 1256 | 	if (master->bus_lock_flag) | 
 | 1257 | 		ret = -EBUSY; | 
 | 1258 | 	else | 
 | 1259 | 		ret = __spi_async(spi, message); | 
| David Brownell | 568d069 | 2009-09-22 16:46:18 -0700 | [diff] [blame] | 1260 |  | 
| Ernst Schwab | cf32b71 | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 1261 | 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); | 
 | 1262 |  | 
 | 1263 | 	return ret; | 
| David Brownell | 568d069 | 2009-09-22 16:46:18 -0700 | [diff] [blame] | 1264 | } | 
 | 1265 | EXPORT_SYMBOL_GPL(spi_async); | 
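
Editorial note: a sketch of the asynchronous pattern described above: the caller allocates the message and buffers, and frees them only from the completion callback, once message->status is valid. All bar_* names are hypothetical.

	struct bar_req {
		struct spi_message	msg;
		struct spi_transfer	xfer;
		u8			buf[4];
	};

	static void bar_complete(void *context)
	{
		struct bar_req *req = context;

		/* runs in a context that cannot sleep; msg.status is now valid */
		if (req->msg.status)
			pr_err("bar: async transfer failed: %d\n", req->msg.status);
		kfree(req);
	}

	static int bar_start_read(struct spi_device *spi)
	{
		struct bar_req *req;

		req = kzalloc(sizeof(*req), GFP_ATOMIC);	/* callable from atomic context */
		if (!req)
			return -ENOMEM;

		req->xfer.rx_buf = req->buf;
		req->xfer.len = sizeof(req->buf);

		spi_message_init(&req->msg);
		spi_message_add_tail(&req->xfer, &req->msg);
		req->msg.complete = bar_complete;
		req->msg.context = req;

		return spi_async(spi, &req->msg);
	}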
 | 1266 |  | 
| Ernst Schwab | cf32b71 | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 1267 | /** | 
 | 1268 |  * spi_async_locked - version of spi_async with exclusive bus usage | 
 | 1269 |  * @spi: device with which data will be exchanged | 
 | 1270 |  * @message: describes the data transfers, including completion callback | 
 | 1271 |  * Context: any (irqs may be blocked, etc) | 
 | 1272 |  * | 
 | 1273 |  * This call may be used from IRQ context and other contexts which can't sleep, | 
 | 1274 |  * as well as from task contexts which can sleep. | 
 | 1275 |  * | 
 | 1276 |  * The completion callback is invoked in a context which can't sleep. | 
 | 1277 |  * Before that invocation, the value of message->status is undefined. | 
 | 1278 |  * When the callback is issued, message->status holds either zero (to | 
 | 1279 |  * indicate complete success) or a negative error code.  After that | 
 | 1280 |  * callback returns, the driver which issued the transfer request may | 
 | 1281 |  * deallocate the associated memory; it's no longer in use by any SPI | 
 | 1282 |  * core or controller driver code. | 
 | 1283 |  * | 
 | 1284 |  * Note that although all messages to a spi_device are handled in | 
 | 1285 |  * FIFO order, messages may go to different devices in other orders. | 
 | 1286 |  * Some device might be higher priority, or have various "hard" access | 
 | 1287 |  * time requirements, for example. | 
 | 1288 |  * | 
 | 1289 |  * On detection of any fault during the transfer, processing of | 
 | 1290 |  * the entire message is aborted, and the device is deselected. | 
 | 1291 |  * Until returning from the associated message completion callback, | 
 | 1292 |  * no other spi_message queued to that device will be processed. | 
 | 1293 |  * (This rule applies equally to all the synchronous transfer calls, | 
 | 1294 |  * which are wrappers around this core asynchronous primitive.) | 
 | 1295 |  */ | 
 | 1296 | int spi_async_locked(struct spi_device *spi, struct spi_message *message) | 
 | 1297 | { | 
 | 1298 | 	struct spi_master *master = spi->master; | 
 | 1299 | 	int ret; | 
 | 1300 | 	unsigned long flags; | 
 | 1301 |  | 
 | 1302 | 	spin_lock_irqsave(&master->bus_lock_spinlock, flags); | 
 | 1303 |  | 
 | 1304 | 	ret = __spi_async(spi, message); | 
 | 1305 |  | 
 | 1306 | 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); | 
 | 1307 |  | 
 | 1308 | 	return ret; | 
 | 1310 | } | 
 | 1311 | EXPORT_SYMBOL_GPL(spi_async_locked); | 
 | 1312 |  | 
| David Brownell | 7d07719 | 2009-06-17 16:26:03 -0700 | [diff] [blame] | 1313 |  | 
 | 1314 | /*-------------------------------------------------------------------------*/ | 
 | 1315 |  | 
 | 1316 | /* Utility methods for SPI master protocol drivers, layered on | 
 | 1317 |  * top of the core.  Some other utility methods are defined as | 
 | 1318 |  * inline functions. | 
 | 1319 |  */ | 
 | 1320 |  | 
| Andrew Morton | 5d870c8 | 2006-01-11 11:23:49 -0800 | [diff] [blame] | 1321 | static void spi_complete(void *arg) | 
 | 1322 | { | 
 | 1323 | 	complete(arg); | 
 | 1324 | } | 
 | 1325 |  | 
| Ernst Schwab | cf32b71 | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 1326 | static int __spi_sync(struct spi_device *spi, struct spi_message *message, | 
 | 1327 | 		      int bus_locked) | 
 | 1328 | { | 
 | 1329 | 	DECLARE_COMPLETION_ONSTACK(done); | 
 | 1330 | 	int status; | 
 | 1331 | 	struct spi_master *master = spi->master; | 
 | 1332 |  | 
 | 1333 | 	message->complete = spi_complete; | 
 | 1334 | 	message->context = &done; | 
 | 1335 |  | 
 | 1336 | 	if (!bus_locked) | 
 | 1337 | 		mutex_lock(&master->bus_lock_mutex); | 
 | 1338 |  | 
 | 1339 | 	status = spi_async_locked(spi, message); | 
 | 1340 |  | 
 | 1341 | 	if (!bus_locked) | 
 | 1342 | 		mutex_unlock(&master->bus_lock_mutex); | 
 | 1343 |  | 
 | 1344 | 	if (status == 0) { | 
 | 1345 | 		wait_for_completion(&done); | 
 | 1346 | 		status = message->status; | 
 | 1347 | 	} | 
 | 1348 | 	message->context = NULL; | 
 | 1349 | 	return status; | 
 | 1350 | } | 
 | 1351 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1352 | /** | 
 | 1353 |  * spi_sync - blocking/synchronous SPI data transfers | 
 | 1354 |  * @spi: device with which data will be exchanged | 
 | 1355 |  * @message: describes the data transfers | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 1356 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1357 |  * | 
 | 1358 |  * This call may only be used from a context that may sleep.  The sleep | 
 | 1359 |  * is non-interruptible, and has no timeout.  Low-overhead controller | 
 | 1360 |  * drivers may DMA directly into and out of the message buffers. | 
 | 1361 |  * | 
 | 1362 |  * Note that the SPI device's chip select is active during the message, | 
 | 1363 |  * and then is normally disabled between messages.  Drivers for some | 
 | 1364 |  * frequently-used devices may want to minimize costs of selecting a chip, | 
 | 1365 |  * by leaving it selected in anticipation that the next message will go | 
 | 1366 |  * to the same chip.  (That may increase power usage.) | 
 | 1367 |  * | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 1368 |  * Also, the caller is guaranteeing that the memory associated with the | 
 | 1369 |  * message will not be freed before this call returns. | 
 | 1370 |  * | 
| Marc Pignat | 9b938b7 | 2007-12-04 23:45:10 -0800 | [diff] [blame] | 1371 |  * It returns zero on success, else a negative error code. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1372 |  */ | 
 | 1373 | int spi_sync(struct spi_device *spi, struct spi_message *message) | 
 | 1374 | { | 
| Ernst Schwab | cf32b71 | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 1375 | 	return __spi_sync(spi, message, 0); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1376 | } | 
 | 1377 | EXPORT_SYMBOL_GPL(spi_sync); | 
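
Editorial note: a sketch of a blocking full-duplex exchange built on spi_sync(); bar_* is hypothetical, and the stack buffers are for brevity only (heap buffers are preferable when the controller DMAs into the message buffers).

	static int bar_xfer_word(struct spi_device *spi, u8 cmd, u8 *result)
	{
		u8			tx[2] = { cmd, 0 };
		u8			rx[2];
		struct spi_transfer	xfer = {
			.tx_buf	= tx,
			.rx_buf	= rx,
			.len	= sizeof(tx),
		};
		struct spi_message	msg;
		int			ret;

		spi_message_init(&msg);
		spi_message_add_tail(&xfer, &msg);

		ret = spi_sync(spi, &msg);	/* sleeps until the message completes */
		if (ret == 0)
			*result = rx[1];
		return ret;
	}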
 | 1378 |  | 
| Ernst Schwab | cf32b71 | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 1379 | /** | 
 | 1380 |  * spi_sync_locked - version of spi_sync with exclusive bus usage | 
 | 1381 |  * @spi: device with which data will be exchanged | 
 | 1382 |  * @message: describes the data transfers | 
 | 1383 |  * Context: can sleep | 
 | 1384 |  * | 
 | 1385 |  * This call may only be used from a context that may sleep.  The sleep | 
 | 1386 |  * is non-interruptible, and has no timeout.  Low-overhead controller | 
 | 1387 |  * drivers may DMA directly into and out of the message buffers. | 
 | 1388 |  * | 
 | 1389 |  * This call should be used by drivers that require exclusive access to the | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 1390 |  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must | 
| Ernst Schwab | cf32b71 | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 1391 |  * be released by a spi_bus_unlock call when the exclusive access is over. | 
 | 1392 |  * | 
 | 1393 |  * It returns zero on success, else a negative error code. | 
 | 1394 |  */ | 
 | 1395 | int spi_sync_locked(struct spi_device *spi, struct spi_message *message) | 
 | 1396 | { | 
 | 1397 | 	return __spi_sync(spi, message, 1); | 
 | 1398 | } | 
 | 1399 | EXPORT_SYMBOL_GPL(spi_sync_locked); | 
 | 1400 |  | 
 | 1401 | /** | 
 | 1402 |  * spi_bus_lock - obtain a lock for exclusive SPI bus usage | 
 | 1403 |  * @master: SPI bus master that should be locked for exclusive bus access | 
 | 1404 |  * Context: can sleep | 
 | 1405 |  * | 
 | 1406 |  * This call may only be used from a context that may sleep.  The sleep | 
 | 1407 |  * is non-interruptible, and has no timeout. | 
 | 1408 |  * | 
 | 1409 |  * This call should be used by drivers that require exclusive access to the | 
 | 1410 |  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the | 
 | 1411 |  * exclusive access is over. Data transfer must be done by spi_sync_locked | 
 | 1412 |  * and spi_async_locked calls when the SPI bus lock is held. | 
 | 1413 |  * | 
 | 1414 |  * It returns zero on success, else a negative error code. | 
 | 1415 |  */ | 
 | 1416 | int spi_bus_lock(struct spi_master *master) | 
 | 1417 | { | 
 | 1418 | 	unsigned long flags; | 
 | 1419 |  | 
 | 1420 | 	mutex_lock(&master->bus_lock_mutex); | 
 | 1421 |  | 
 | 1422 | 	spin_lock_irqsave(&master->bus_lock_spinlock, flags); | 
 | 1423 | 	master->bus_lock_flag = 1; | 
 | 1424 | 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); | 
 | 1425 |  | 
 | 1426 | 	/* mutex remains locked until spi_bus_unlock is called */ | 
 | 1427 |  | 
 | 1428 | 	return 0; | 
 | 1429 | } | 
 | 1430 | EXPORT_SYMBOL_GPL(spi_bus_lock); | 
 | 1431 |  | 
 | 1432 | /** | 
 | 1433 |  * spi_bus_unlock - release the lock for exclusive SPI bus usage | 
 | 1434 |  * @master: SPI bus master that was locked for exclusive bus access | 
 | 1435 |  * Context: can sleep | 
 | 1436 |  * | 
 | 1437 |  * This call may only be used from a context that may sleep.  The sleep | 
 | 1438 |  * is non-interruptible, and has no timeout. | 
 | 1439 |  * | 
 | 1440 |  * This call releases an SPI bus lock previously obtained by an spi_bus_lock | 
 | 1441 |  * call. | 
 | 1442 |  * | 
 | 1443 |  * It returns zero on success, else a negative error code. | 
 | 1444 |  */ | 
 | 1445 | int spi_bus_unlock(struct spi_master *master) | 
 | 1446 | { | 
 | 1447 | 	master->bus_lock_flag = 0; | 
 | 1448 |  | 
 | 1449 | 	mutex_unlock(&master->bus_lock_mutex); | 
 | 1450 |  | 
 | 1451 | 	return 0; | 
 | 1452 | } | 
 | 1453 | EXPORT_SYMBOL_GPL(spi_bus_unlock); | 
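
Editorial note: the locked primitives combine into the pattern below: take the bus, issue the messages that must not be interleaved with traffic to other devices, then release it. The helper and its arguments are hypothetical.

	static int bar_send_atomically(struct spi_device *spi,
				       struct spi_message *first,
				       struct spi_message *second)
	{
		struct spi_master *master = spi->master;
		int ret;

		/* unlocked spi_async() now fails with -EBUSY; spi_sync() waits */
		spi_bus_lock(master);

		ret = spi_sync_locked(spi, first);
		if (ret == 0)
			ret = spi_sync_locked(spi, second);

		spi_bus_unlock(master);
		return ret;
	}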
 | 1454 |  | 
| David Brownell | a9948b6 | 2006-04-02 10:37:40 -0800 | [diff] [blame] | 1455 | /* portable code must never pass more than 32 bytes */ | 
 | 1456 | #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES) | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1457 |  | 
 | 1458 | static u8	*buf; | 
 | 1459 |  | 
 | 1460 | /** | 
 | 1461 |  * spi_write_then_read - SPI synchronous write followed by read | 
 | 1462 |  * @spi: device with which data will be exchanged | 
 | 1463 |  * @txbuf: data to be written (need not be dma-safe) | 
 | 1464 |  * @n_tx: size of txbuf, in bytes | 
| Jiri Pirko | 2757049 | 2009-06-17 16:26:06 -0700 | [diff] [blame] | 1465 |  * @rxbuf: buffer into which data will be read (need not be dma-safe) | 
 | 1466 |  * @n_rx: size of rxbuf, in bytes | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 1467 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1468 |  * | 
 | 1469 |  * This performs a half duplex MicroWire style transaction with the | 
 | 1470 |  * device, sending txbuf and then reading rxbuf.  The return value | 
 | 1471 |  * is zero for success, else a negative errno status code. | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 1472 |  * This call may only be used from a context that may sleep. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1473 |  * | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 1474 |  * Parameters to this routine are always copied using a small buffer; | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 1475 |  * portable code should never use this for more than 32 bytes. | 
 | 1476 |  * Performance-sensitive or bulk transfer code should instead use | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 1477 |  * spi_{async,sync}() calls with dma-safe buffers. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1478 |  */ | 
 | 1479 | int spi_write_then_read(struct spi_device *spi, | 
| Mark Brown | 0c4a159 | 2011-05-11 00:09:30 +0200 | [diff] [blame] | 1480 | 		const void *txbuf, unsigned n_tx, | 
 | 1481 | 		void *rxbuf, unsigned n_rx) | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1482 | { | 
| David Brownell | 068f407 | 2007-12-04 23:45:09 -0800 | [diff] [blame] | 1483 | 	static DEFINE_MUTEX(lock); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1484 |  | 
 | 1485 | 	int			status; | 
 | 1486 | 	struct spi_message	message; | 
| David Brownell | bdff549 | 2009-04-13 14:39:57 -0700 | [diff] [blame] | 1487 | 	struct spi_transfer	x[2]; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1488 | 	u8			*local_buf; | 
 | 1489 |  | 
 | 1490 | 	/* Use the preallocated DMA-safe buffer.  We can't avoid copying here | 
 | 1491 | 	 * (this is purely a convenience helper), but we can keep heap costs | 
 | 1492 | 	 * out of the hot path ... | 
 | 1493 | 	 */ | 
 | 1494 | 	if ((n_tx + n_rx) > SPI_BUFSIZ) | 
 | 1495 | 		return -EINVAL; | 
 | 1496 |  | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 1497 | 	spi_message_init(&message); | 
| David Brownell | bdff549 | 2009-04-13 14:39:57 -0700 | [diff] [blame] | 1498 | 	memset(x, 0, sizeof x); | 
 | 1499 | 	if (n_tx) { | 
 | 1500 | 		x[0].len = n_tx; | 
 | 1501 | 		spi_message_add_tail(&x[0], &message); | 
 | 1502 | 	} | 
 | 1503 | 	if (n_rx) { | 
 | 1504 | 		x[1].len = n_rx; | 
 | 1505 | 		spi_message_add_tail(&x[1], &message); | 
 | 1506 | 	} | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 1507 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1508 | 	/* ... unless someone else is using the pre-allocated buffer */ | 
| David Brownell | 068f407 | 2007-12-04 23:45:09 -0800 | [diff] [blame] | 1509 | 	if (!mutex_trylock(&lock)) { | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1510 | 		local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); | 
 | 1511 | 		if (!local_buf) | 
 | 1512 | 			return -ENOMEM; | 
 | 1513 | 	} else | 
 | 1514 | 		local_buf = buf; | 
 | 1515 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1516 | 	memcpy(local_buf, txbuf, n_tx); | 
| David Brownell | bdff549 | 2009-04-13 14:39:57 -0700 | [diff] [blame] | 1517 | 	x[0].tx_buf = local_buf; | 
 | 1518 | 	x[1].rx_buf = local_buf + n_tx; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1519 |  | 
 | 1520 | 	/* do the i/o */ | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1521 | 	status = spi_sync(spi, &message); | 
| Marc Pignat | 9b938b7 | 2007-12-04 23:45:10 -0800 | [diff] [blame] | 1522 | 	if (status == 0) | 
| David Brownell | bdff549 | 2009-04-13 14:39:57 -0700 | [diff] [blame] | 1523 | 		memcpy(rxbuf, x[1].rx_buf, n_rx); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1524 |  | 
| David Brownell | bdff549 | 2009-04-13 14:39:57 -0700 | [diff] [blame] | 1525 | 	if (x[0].tx_buf == buf) | 
| David Brownell | 068f407 | 2007-12-04 23:45:09 -0800 | [diff] [blame] | 1526 | 		mutex_unlock(&lock); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1527 | 	else | 
 | 1528 | 		kfree(local_buf); | 
 | 1529 |  | 
 | 1530 | 	return status; | 
 | 1531 | } | 
 | 1532 | EXPORT_SYMBOL_GPL(spi_write_then_read); | 
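
Editorial note: the classic use of this helper is a small command/response exchange, such as reading one register. A sketch with a made-up command encoding:

	static int bar_read_reg(struct spi_device *spi, u8 reg, u8 *val)
	{
		u8 cmd = reg | 0x80;	/* hypothetical "read" flag in the command byte */

		/* buffers need not be DMA-safe; the core copies via its own buffer */
		return spi_write_then_read(spi, &cmd, 1, val, 1);
	}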
 | 1533 |  | 
 | 1534 | /*-------------------------------------------------------------------------*/ | 
 | 1535 |  | 
 | 1536 | static int __init spi_init(void) | 
 | 1537 | { | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 1538 | 	int	status; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1539 |  | 
| Christoph Lameter | e94b176 | 2006-12-06 20:33:17 -0800 | [diff] [blame] | 1540 | 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 1541 | 	if (!buf) { | 
 | 1542 | 		status = -ENOMEM; | 
 | 1543 | 		goto err0; | 
 | 1544 | 	} | 
 | 1545 |  | 
 | 1546 | 	status = bus_register(&spi_bus_type); | 
 | 1547 | 	if (status < 0) | 
 | 1548 | 		goto err1; | 
 | 1549 |  | 
 | 1550 | 	status = class_register(&spi_master_class); | 
 | 1551 | 	if (status < 0) | 
 | 1552 | 		goto err2; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1553 | 	return 0; | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 1554 |  | 
 | 1555 | err2: | 
 | 1556 | 	bus_unregister(&spi_bus_type); | 
 | 1557 | err1: | 
 | 1558 | 	kfree(buf); | 
 | 1559 | 	buf = NULL; | 
 | 1560 | err0: | 
 | 1561 | 	return status; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1562 | } | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 1563 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1564 | /* board_info is normally registered in arch_initcall(), | 
 | 1565 |  * but even essential drivers wait until later than that | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 1566 |  * | 
 | 1567 |  * REVISIT only boardinfo really needs static linking. the rest (device and | 
 | 1568 |  * driver registration) _could_ be dynamically linked (modular) ... costs | 
 | 1569 |  * include needing to have boardinfo data structures be much more public. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1570 |  */ | 
| David Brownell | 673c0c0 | 2008-10-15 22:02:46 -0700 | [diff] [blame] | 1571 | postcore_initcall(spi_init); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1572 |  |