1/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/mm.h>
17#include <linux/rbtree.h>
18#include <linux/idr.h>
19#include <linux/genalloc.h>
20#include <linux/of.h>
21#include <linux/io.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/seq_file.h>
25#include <mach/ocmem_priv.h>
26
27enum request_states {
28 R_FREE = 0x0, /* request is not allocated */
29 R_PENDING, /* request has a pending operation */
30 R_ALLOCATED, /* request has been allocated */
31 R_MUST_GROW, /* request must grow as a part of pending operation */
32 R_MUST_SHRINK, /* request must shrink as a part of pending operation */
33 R_MUST_MAP, /* request must be mapped before being used */
34 R_MUST_UNMAP, /* request must be unmapped when not being used */
35 R_MAPPED, /* request is mapped and actively used by client */
36 R_UNMAPPED, /* request is not mapped, so it's not in active use */
37 R_EVICTED, /* request is evicted and must be restored */
38};
39
40#define SET_STATE(x, val) (set_bit((val), &(x)->state))
41#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
42#define TEST_STATE(x, val) (test_bit((val), &(x)->state))
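/*
 * Request state is a bitmask manipulated with the atomic bitops above,
 * so a request can carry several states at once (for example
 * R_ALLOCATED together with R_MUST_MAP while a mapping is pending).
 */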
43
44enum op_res {
45 OP_COMPLETE = 0x0,
46 OP_RESCHED,
47 OP_PARTIAL,
48 OP_FAIL = ~0x0,
49};
50
51/* Represents various client priorities */
52/* Note: More than one client can share a priority level */
53enum client_prio {
54 MIN_PRIO = 0x0,
55 NO_PRIO = MIN_PRIO,
56 PRIO_SENSORS = 0x1,
57 PRIO_OTHER_OS = 0x1,
58 PRIO_LP_AUDIO = 0x1,
59 PRIO_HP_AUDIO = 0x2,
60 PRIO_VOICE = 0x3,
61 PRIO_GFX_GROWTH = 0x4,
62 PRIO_VIDEO = 0x5,
63 PRIO_GFX = 0x6,
64 PRIO_OCMEM = 0x7,
65 MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
66};
67
68static struct list_head sched_queue[MAX_OCMEM_PRIO];
69static struct mutex sched_queue_mutex;
70
71/* The duration in msecs before a pending operation is scheduled.
72 * This allows an idle window between use case boundaries where various
73 * hardware state changes can occur. The value will be tweaked on actual
74 * hardware.
75*/
76#define SCHED_DELAY 10
77
78static struct list_head rdm_queue;
79static struct mutex rdm_mutex;
80static struct workqueue_struct *ocmem_rdm_wq;
81static struct workqueue_struct *ocmem_eviction_wq;
82
83static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];
84
85struct ocmem_rdm_work {
86 int id;
87 struct ocmem_map_list *list;
88 struct ocmem_handle *handle;
89 int direction;
90 struct work_struct work;
91};
92
93/* OCMEM Operational modes */
94enum ocmem_client_modes {
95 OCMEM_PERFORMANCE = 1,
96 OCMEM_PASSIVE,
97 OCMEM_LOW_POWER,
98 OCMEM_MODE_MAX = OCMEM_LOW_POWER
99};
100
101/* OCMEM Addressing modes */
102enum ocmem_interconnects {
103 OCMEM_BLOCKED = 0,
104 OCMEM_PORT = 1,
105 OCMEM_OCMEMNOC = 2,
106 OCMEM_SYSNOC = 3,
107};
108
109/**
110 * Primary OCMEM Arbitration Table
111 **/
112struct ocmem_table {
113 int client_id;
114 int priority;
115 int mode;
116 int hw_interconnect;
117} ocmem_client_table[OCMEM_CLIENT_MAX] = {
118 {OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT},
119 {OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_PORT},
120 {OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC},
121 {OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED},
122 {OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED},
123 {OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC},
124 {OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
125 {OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
126};
127
128static struct rb_root sched_tree;
129static struct mutex sched_mutex;
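/*
 * All currently backed requests are grouped into regions and kept in
 * sched_tree, an rb-tree ordered by region start address. The tree and
 * the regions it holds are protected by sched_mutex.
 */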
130
131/* A region represents a continuous interval in OCMEM address space */
132struct ocmem_region {
133 /* Chain in Interval Tree */
134 struct rb_node region_rb;
135 /* Hash map of requests */
136 struct idr region_idr;
137 /* Chain in eviction list */
138 struct list_head eviction_list;
139 unsigned long r_start;
140 unsigned long r_end;
141 unsigned long r_sz;
142 /* Highest priority of all requests served by this region */
143 int max_prio;
144};
145
146/* Is OCMEM tightly coupled to the client? */
147static inline int is_tcm(int id)
148{
149 if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
150 ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
151 return 1;
152 else
153 return 0;
154}
155
156static inline int is_blocked(int id)
157{
158 return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
159}
160
161/* Returns the address that can be used by a device core to access OCMEM */
162static unsigned long device_address(int id, unsigned long addr)
163{
164 int hw_interconnect = ocmem_client_table[id].hw_interconnect;
165 unsigned long ret_addr = 0x0;
166
167 switch (hw_interconnect) {
168 case OCMEM_PORT:
169 ret_addr = phys_to_offset(addr);
170 break;
171 case OCMEM_OCMEMNOC:
172 case OCMEM_SYSNOC:
173 ret_addr = addr;
174 break;
175 case OCMEM_BLOCKED:
176 ret_addr = 0x0;
177 break;
178 }
179 return ret_addr;
180}
181
182/* Returns the address as viewed by the core */
183static unsigned long core_address(int id, unsigned long addr)
184{
185 int hw_interconnect = ocmem_client_table[id].hw_interconnect;
186 unsigned long ret_addr = 0x0;
187
188 switch (hw_interconnect) {
189 case OCMEM_PORT:
190 ret_addr = offset_to_phys(addr);
191 break;
192 case OCMEM_OCMEMNOC:
193 case OCMEM_SYSNOC:
194 ret_addr = addr;
195 break;
196 case OCMEM_BLOCKED:
197 ret_addr = 0x0;
198 break;
199 }
200 return ret_addr;
201}
202
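/*
 * Insert a region into sched_tree keyed by its start address. The
 * descent mirrors find_region(): go left when the new start lies below
 * an existing interval, right when it lies at or beyond the interval's
 * end. Presumably any overlap has already been resolved by the
 * scheduler before a region is inserted.
 */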
203static int insert_region(struct ocmem_region *region)
204{
205
206 struct rb_root *root = &sched_tree;
207 struct rb_node **p = &root->rb_node;
208 struct rb_node *parent = NULL;
209 struct ocmem_region *tmp = NULL;
210 unsigned long addr = region->r_start;
211
212 while (*p) {
213 parent = *p;
214 tmp = rb_entry(parent, struct ocmem_region, region_rb);
215
216 if (tmp->r_end > addr) {
217 if (tmp->r_start <= addr)
218 break;
219 p = &(*p)->rb_left;
220 } else if (tmp->r_end <= addr)
221 p = &(*p)->rb_right;
222 }
223 rb_link_node(&region->region_rb, parent, p);
224 rb_insert_color(&region->region_rb, root);
225 return 0;
226}
227
228static int remove_region(struct ocmem_region *region)
229{
230 struct rb_root *root = &sched_tree;
231 rb_erase(&region->region_rb, root);
232 return 0;
233}
234
235static struct ocmem_req *ocmem_create_req(void)
236{
237 struct ocmem_req *p = NULL;
238
239 p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
240 if (!p)
241 return NULL;
242
243 INIT_LIST_HEAD(&p->zone_list);
244 INIT_LIST_HEAD(&p->sched_list);
245 init_rwsem(&p->rw_sem);
246 SET_STATE(p, R_FREE);
247 return p;
248}
249
250static int ocmem_destroy_req(struct ocmem_req *req)
251{
252 kfree(req);
253 return 0;
254}
255
256static struct ocmem_region *create_region(void)
257{
258 struct ocmem_region *p = NULL;
259
260 p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
261 if (!p)
262 return NULL;
263 idr_init(&p->region_idr);
264 INIT_LIST_HEAD(&p->eviction_list);
265 p->r_start = p->r_end = p->r_sz = 0x0;
266 p->max_prio = NO_PRIO;
267 return p;
268}
269
270static int destroy_region(struct ocmem_region *region)
271{
272 kfree(region);
273 return 0;
274}
275
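/*
 * Attach a request to a region using the older IDR two-step idiom:
 * idr_pre_get() preloads memory and idr_get_new_above() is retried
 * while it returns -EAGAIN. The allocated id is stored in req->req_id
 * and is what the callers later pass to find_req_match() to look the
 * request back up.
 */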
276static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
277{
278 int ret, id;
279
280 while (1) {
281 if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
282 return -ENOMEM;
283
284 ret = idr_get_new_above(&region->region_idr, req, 1, &id);
285
286 if (ret != -EAGAIN)
287 break;
288 }
289
290 if (!ret) {
291 req->req_id = id;
292 pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
293 req, id, region);
294 return 0;
295 }
296 return -EINVAL;
297}
298
299static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
300{
301 idr_remove(&region->region_idr, req->req_id);
302 return 0;
303}
304
305static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
306{
307 region->r_start = req->req_start;
308 region->r_end = req->req_end;
309 region->r_sz = req->req_end - req->req_start + 1;
310 return 0;
311}
312
313static int region_req_count(int id, void *ptr, void *data)
314{
315 int *count = data;
316 *count = *count + 1;
317 return 0;
318}
319
320static int req_count(struct ocmem_region *region)
321{
322 int count = 0;
323 idr_for_each(&region->region_idr, region_req_count, &count);
324 return count;
325}
326
327static int compute_max_prio(int id, void *ptr, void *data)
328{
329 int *max = data;
330 struct ocmem_req *req = ptr;
331
332 if (req->prio > *max)
333 *max = req->prio;
334 return 0;
335}
336
337static int update_region_prio(struct ocmem_region *region)
338{
339 int max_prio;
340 if (req_count(region) != 0) {
341 idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
342 region->max_prio = max_prio;
343 } else {
344 region->max_prio = NO_PRIO;
345 }
346 pr_debug("ocmem: Updating prio of region %p as %d\n",
347 region, region->max_prio);
348
349 return 0;
350}
351
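/*
 * Find the region containing @addr. If no region contains it, the walk
 * returns the lowest region that still ends above @addr, or NULL when
 * there is none. find_region_intersection() builds on this to detect
 * overlap with [start, end).
 */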
352static struct ocmem_region *find_region(unsigned long addr)
353{
354 struct ocmem_region *region = NULL;
355 struct rb_node *rb_node = NULL;
356
357 rb_node = sched_tree.rb_node;
358
359 while (rb_node) {
360 struct ocmem_region *tmp_region = NULL;
361 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
362
363 if (tmp_region->r_end > addr) {
364 region = tmp_region;
365 if (tmp_region->r_start <= addr)
366 break;
367 rb_node = rb_node->rb_left;
368 } else {
369 rb_node = rb_node->rb_right;
370 }
371 }
372 return region;
373}
374
375static struct ocmem_region *find_region_intersection(unsigned long start,
376 unsigned long end)
377{
378
379 struct ocmem_region *region = NULL;
380 region = find_region(start);
381 if (region && end <= region->r_start)
382 region = NULL;
383 return region;
384}
385
386static struct ocmem_region *find_region_match(unsigned long start,
387 unsigned long end)
388{
389
390 struct ocmem_region *region = NULL;
391 region = find_region(start);
392 if (region && start == region->r_start && end == region->r_end)
393 return region;
394 return NULL;
395}
396
397static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
398{
399 struct ocmem_req *req = NULL;
400
401 if (!region)
402 return NULL;
403
404 req = idr_find(&region->region_idr, owner);
405
406 return req;
407}
408
409/* Must be called with req->sem held */
410static inline int is_mapped(struct ocmem_req *req)
411{
412 return TEST_STATE(req, R_MAPPED);
413}
414
415/* Must be called with sched_mutex held */
416static int __sched_unmap(struct ocmem_req *req)
417{
418 struct ocmem_req *matched_req = NULL;
419 struct ocmem_region *matched_region = NULL;
420
421 matched_region = find_region_match(req->req_start, req->req_end);
422 matched_req = find_req_match(req->req_id, matched_region);
423
424 if (!matched_region || !matched_req) {
425 pr_err("Could not find backing region for req");
426 goto invalid_op_error;
427 }
428
429 if (matched_req != req) {
430 pr_err("Request does not match backing req");
431 goto invalid_op_error;
432 }
433
434 if (!is_mapped(req)) {
435 pr_err("Request is not currently mapped");
436 goto invalid_op_error;
437 }
438
439 /* Update the request state */
440 CLEAR_STATE(req, R_MAPPED);
441 SET_STATE(req, R_MUST_MAP);
442
443 return OP_COMPLETE;
444
445invalid_op_error:
446 return OP_FAIL;
447}
448
449/* Must be called with sched_mutex held */
450static int __sched_map(struct ocmem_req *req)
451{
452 struct ocmem_req *matched_req = NULL;
453 struct ocmem_region *matched_region = NULL;
454
455 matched_region = find_region_match(req->req_start, req->req_end);
456 matched_req = find_req_match(req->req_id, matched_region);
457
458 if (!matched_region || !matched_req) {
459 pr_err("Could not find backing region for req");
460 goto invalid_op_error;
461 }
462
463 if (matched_req != req) {
464 pr_err("Request does not match backing req");
465 goto invalid_op_error;
466 }
467
468 /* Update the request state */
469 CLEAR_STATE(req, R_MUST_MAP);
470 SET_STATE(req, R_MAPPED);
471
472 return OP_COMPLETE;
473
474invalid_op_error:
475 return OP_FAIL;
476}
477
478static int do_map(struct ocmem_req *req)
479{
480 int rc = 0;
481
482 down_write(&req->rw_sem);
483
484 mutex_lock(&sched_mutex);
485 rc = __sched_map(req);
486 mutex_unlock(&sched_mutex);
487
488 up_write(&req->rw_sem);
489
490 if (rc == OP_FAIL)
491 return -EINVAL;
492
493 return 0;
494}
495
496static int do_unmap(struct ocmem_req *req)
497{
498 int rc = 0;
499
500 down_write(&req->rw_sem);
501
502 mutex_lock(&sched_mutex);
503 rc = __sched_unmap(req);
504 mutex_unlock(&sched_mutex);
505
506 up_write(&req->rw_sem);
507
508 if (rc == OP_FAIL)
509 return -EINVAL;
510
511 return 0;
512}
513
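/*
 * Mapping requires the OCMEM core and interface clocks to be enabled;
 * process_unmap() turns them back off once the unmap succeeds.
 */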
514static int process_map(struct ocmem_req *req, unsigned long start,
515 unsigned long end)
516{
517 int rc = 0;
518
519 rc = ocmem_enable_core_clock();
520
521 if (rc < 0)
522 goto core_clock_fail;
523
524 rc = ocmem_enable_iface_clock();
525
526 if (rc < 0)
527 goto process_map_fail;
528
529 return do_map(req);
530
531process_map_fail:
532 ocmem_disable_core_clock();
533core_clock_fail:
534 pr_err("ocmem: Failed to map ocmem request\n");
535 return rc;
536}
537
538static int process_unmap(struct ocmem_req *req, unsigned long start,
539 unsigned long end)
540{
541 int rc = 0;
542
543 rc = do_unmap(req);
544
545 if (rc < 0)
546 goto process_unmap_fail;
547
548 ocmem_disable_iface_clock();
549 ocmem_disable_core_clock();
550
551 return 0;
552
553process_unmap_fail:
554 pr_err("ocmem: Failed to unmap ocmem request\n");
555 return rc;
556}
557
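/*
 * Grow an existing allocation towards req_max. Must be called with
 * sched_mutex held. The current block is released and a larger one is
 * allocated in its place; if a higher priority region overlaps the
 * zone head, the growth size is backed off in req_step increments and
 * the request is left pending (OP_PARTIAL or OP_RESCHED).
 */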
558static int __sched_grow(struct ocmem_req *req, bool can_block)
559{
560 unsigned long min = req->req_min;
561 unsigned long max = req->req_max;
562 unsigned long step = req->req_step;
563 int owner = req->owner;
564 unsigned long curr_sz = 0;
565 unsigned long growth_sz = 0;
566 unsigned long curr_start = 0;
567 enum client_prio prio = req->prio;
568 unsigned long alloc_addr = 0x0;
569 bool retry;
570 struct ocmem_region *spanned_r = NULL;
571 struct ocmem_region *overlap_r = NULL;
572
573 struct ocmem_req *matched_req = NULL;
574 struct ocmem_region *matched_region = NULL;
575
576 struct ocmem_zone *zone = get_zone(owner);
577 struct ocmem_region *region = NULL;
578
579 matched_region = find_region_match(req->req_start, req->req_end);
580 matched_req = find_req_match(req->req_id, matched_region);
581
582 if (!matched_region || !matched_req) {
583 pr_err("Could not find backing region for req");
584 goto invalid_op_error;
585 }
586
587 if (matched_req != req) {
588 pr_err("Request does not match backing req");
589 goto invalid_op_error;
590 }
591
592 curr_sz = matched_req->req_sz;
593 curr_start = matched_req->req_start;
594 growth_sz = matched_req->req_max - matched_req->req_sz;
595
596 pr_debug("Attempting to grow req %p from %lx to %lx\n",
597 req, matched_req->req_sz, matched_req->req_max);
598
599 retry = false;
600
601 pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);
602
603retry_next_step:
604
605 spanned_r = NULL;
606 overlap_r = NULL;
607
608 spanned_r = find_region(zone->z_head);
609 overlap_r = find_region_intersection(zone->z_head,
610 zone->z_head + growth_sz);
611
612 if (overlap_r == NULL) {
613 /* no conflicting regions, schedule this region */
614 zone->z_ops->free(zone, curr_start, curr_sz);
615 alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz);
616
617 if (alloc_addr < 0) {
618 pr_err("ocmem: zone allocation operation failed\n");
619 goto internal_error;
620 }
621
622 curr_sz += growth_sz;
623 /* Detach the region from the interval tree */
624 /* This is to guarantee that any change in size
625 * causes the tree to be rebalanced if required */
626
627 detach_req(matched_region, req);
628 if (req_count(matched_region) == 0) {
629 remove_region(matched_region);
630 region = matched_region;
631 } else {
632 region = create_region();
633 if (!region) {
634 pr_err("ocmem: Unable to create region\n");
635 goto region_error;
636 }
637 }
638
639 /* update the request */
640 req->req_start = alloc_addr;
641 /* increment the size to reflect new length */
642 req->req_sz = curr_sz;
643 req->req_end = alloc_addr + req->req_sz - 1;
644
645 /* update request state */
646 CLEAR_STATE(req, R_MUST_GROW);
647 SET_STATE(req, R_ALLOCATED);
648 SET_STATE(req, R_MUST_MAP);
649 req->op = SCHED_MAP;
650
651 /* update the region with new req */
652 attach_req(region, req);
653 populate_region(region, req);
654 update_region_prio(region);
655
656 /* update the tree with new region */
657 if (insert_region(region)) {
658 pr_err("ocmem: Failed to insert the region\n");
659 goto region_error;
660 }
661
662 if (retry) {
663 SET_STATE(req, R_MUST_GROW);
664 SET_STATE(req, R_PENDING);
665 req->op = SCHED_GROW;
666 return OP_PARTIAL;
667 }
668 } else if (spanned_r != NULL && overlap_r != NULL) {
669 /* resolve conflicting regions based on priority */
670 if (overlap_r->max_prio < prio) {
671 /* Growth cannot be triggered unless a previous
672 * client of lower priority was evicted */
673 pr_err("ocmem: Invalid growth scheduled\n");
674 /* This is serious enough to fail */
675 BUG();
676 return OP_FAIL;
677 } else if (overlap_r->max_prio > prio) {
678 if (min == max) {
679 /* Cannot grow at this time, try later */
680 SET_STATE(req, R_PENDING);
681 SET_STATE(req, R_MUST_GROW);
682 return OP_RESCHED;
683 } else {
684 /* Try to grow in steps */
685 growth_sz -= step;
686 /* We are OOM at this point so need to retry */
687 if (growth_sz <= curr_sz) {
688 SET_STATE(req, R_PENDING);
689 SET_STATE(req, R_MUST_GROW);
690 return OP_RESCHED;
691 }
692 retry = true;
693 pr_debug("ocmem: Attempting with reduced size %lx\n",
694 growth_sz);
695 goto retry_next_step;
696 }
697 } else {
698 pr_err("ocmem: grow: New Region %p Existing %p\n",
699 matched_region, overlap_r);
700 pr_err("ocmem: Undetermined behavior\n");
701 /* This is serious enough to fail */
702 BUG();
703 }
704 } else if (spanned_r == NULL && overlap_r != NULL) {
705 goto err_not_supported;
706 }
707
708 return OP_COMPLETE;
709
710err_not_supported:
711 pr_err("ocmem: Scheduled unsupported operation\n");
712 return OP_FAIL;
713region_error:
714 zone->z_ops->free(zone, alloc_addr, curr_sz);
715 detach_req(region, req);
716 update_region_prio(region);
717 /* req is going to be destroyed by the caller anyway */
718internal_error:
719 destroy_region(region);
720invalid_op_error:
721 return OP_FAIL;
722}
723
724/* Must be called with sched_mutex held */
725static int __sched_free(struct ocmem_req *req)
726{
727 int owner = req->owner;
728 int ret = 0;
729
730 struct ocmem_req *matched_req = NULL;
731 struct ocmem_region *matched_region = NULL;
732
733 struct ocmem_zone *zone = get_zone(owner);
734
735 BUG_ON(!zone);
736
737 matched_region = find_region_match(req->req_start, req->req_end);
738 matched_req = find_req_match(req->req_id, matched_region);
739
740 if (!matched_region || !matched_req)
741 goto invalid_op_error;
742 if (matched_req != req)
743 goto invalid_op_error;
744
745 ret = zone->z_ops->free(zone,
746 matched_req->req_start, matched_req->req_sz);
747
748 if (ret < 0)
749 goto err_op_fail;
750
751 detach_req(matched_region, matched_req);
752 update_region_prio(matched_region);
753 if (req_count(matched_region) == 0) {
754 remove_region(matched_region);
755 destroy_region(matched_region);
756 }
757
758 /* Update the request */
759 req->req_start = 0x0;
760 req->req_sz = 0x0;
761 req->req_end = 0x0;
762 SET_STATE(req, R_FREE);
763 return OP_COMPLETE;
764invalid_op_error:
765 pr_err("ocmem: free: Failed to find matching region\n");
766err_op_fail:
767 pr_err("ocmem: free: Failed\n");
768 return OP_FAIL;
769}
770
771/* Must be called with sched_mutex held */
772static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
773{
774 int owner = req->owner;
775 int ret = 0;
776
777 struct ocmem_req *matched_req = NULL;
778 struct ocmem_region *matched_region = NULL;
779 struct ocmem_region *region = NULL;
780 unsigned long alloc_addr = 0x0;
781
782 struct ocmem_zone *zone = get_zone(owner);
783
784 BUG_ON(!zone);
785
786 /* The shrink should not be called for zero size */
787 BUG_ON(new_sz == 0);
788
789 matched_region = find_region_match(req->req_start, req->req_end);
790 matched_req = find_req_match(req->req_id, matched_region);
791
792 if (!matched_region || !matched_req)
793 goto invalid_op_error;
794 if (matched_req != req)
795 goto invalid_op_error;
796
797
798 ret = zone->z_ops->free(zone,
799 matched_req->req_start, matched_req->req_sz);
800
801 if (ret < 0) {
802 pr_err("Zone Allocation operation failed\n");
803 goto internal_error;
804 }
805
806 alloc_addr = zone->z_ops->allocate(zone, new_sz);
807
808 if (alloc_addr < 0) {
809 pr_err("Zone Allocation operation failed\n");
810 goto internal_error;
811 }
812
813 /* Detach the region from the interval tree */
814 /* This is to guarantee that the change in size
815 * causes the tree to be rebalanced if required */
816
817 detach_req(matched_region, req);
818 if (req_count(matched_region) == 0) {
819 remove_region(matched_region);
820 region = matched_region;
821 } else {
822 region = create_region();
823 if (!region) {
824 pr_err("ocmem: Unable to create region\n");
825 goto internal_error;
826 }
827 }
828 /* update the request */
829 req->req_start = alloc_addr;
830 req->req_sz = new_sz;
831 req->req_end = alloc_addr + req->req_sz - 1;
832
833 if (req_count(region) == 0) {
834 remove_region(matched_region);
835 destroy_region(matched_region);
836 }
837
838 /* update request state */
839 SET_STATE(req, R_MUST_GROW);
840 SET_STATE(req, R_MUST_MAP);
841 req->op = SCHED_MAP;
842
843 /* attach the request to the region */
844 attach_req(region, req);
845 populate_region(region, req);
846 update_region_prio(region);
847
848 /* update the tree with new region */
849 if (insert_region(region)) {
850 pr_err("ocmem: Failed to insert the region\n");
851 zone->z_ops->free(zone, alloc_addr, new_sz);
852 detach_req(region, req);
853 update_region_prio(region);
854 /* req will be destroyed by the caller */
855 goto region_error;
856 }
857 return OP_COMPLETE;
858
859region_error:
860 destroy_region(region);
861internal_error:
862 pr_err("ocmem: shrink: Failed\n");
863 return OP_FAIL;
864invalid_op_error:
865 pr_err("ocmem: shrink: Failed to find matching region\n");
866 return OP_FAIL;
867}
868
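/*
 * First-time allocation for a request: try for req_max and, when a
 * conflicting region blocks the zone head, step the size down by
 * req_step (never below req_min). A reduced allocation is returned as
 * OP_PARTIAL with req->op set to SCHED_GROW so the remainder can be
 * scheduled later.
 */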
869/* Must be called with sched_mutex held */
870static int __sched_allocate(struct ocmem_req *req, bool can_block,
871 bool can_wait)
872{
873 unsigned long min = req->req_min;
874 unsigned long max = req->req_max;
875 unsigned long step = req->req_step;
876 int owner = req->owner;
877 unsigned long sz = max;
878 enum client_prio prio = req->prio;
879 unsigned long alloc_addr = 0x0;
880 bool retry;
881
882 struct ocmem_region *spanned_r = NULL;
883 struct ocmem_region *overlap_r = NULL;
884
885 struct ocmem_zone *zone = get_zone(owner);
886 struct ocmem_region *region = NULL;
887
888 BUG_ON(!zone);
889
890 if (min > (zone->z_end - zone->z_start)) {
891 pr_err("ocmem: requested minimum size exceeds quota\n");
892 goto invalid_op_error;
893 }
894
895 if (max > (zone->z_end - zone->z_start)) {
896 pr_err("ocmem: requested maximum size exceeds quota\n");
897 goto invalid_op_error;
898 }
899
900 if (min > zone->z_free) {
901 pr_err("ocmem: out of memory for zone %d\n", owner);
902 goto invalid_op_error;
903 }
904
905 region = create_region();
906
907 if (!region) {
908 pr_err("ocmem: Unable to create region\n");
909 goto invalid_op_error;
910 }
911
912 retry = false;
913
914 pr_debug("ocmem: ALLOCATE: request size %lx\n", sz);
915
916retry_next_step:
917
918 spanned_r = NULL;
919 overlap_r = NULL;
920
921 spanned_r = find_region(zone->z_head);
922 overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);
923
924 if (overlap_r == NULL) {
925 /* no conflicting regions, schedule this region */
926 alloc_addr = zone->z_ops->allocate(zone, sz);
927
928 if (alloc_addr < 0) {
929 pr_err("Zone Allocation operation failed\n");
930 goto internal_error;
931 }
932
933 /* update the request */
934 req->req_start = alloc_addr;
935 req->req_end = alloc_addr + sz - 1;
936 req->req_sz = sz;
937 req->zone = zone;
938
939 /* update request state */
940 CLEAR_STATE(req, R_FREE);
941 SET_STATE(req, R_ALLOCATED);
942 SET_STATE(req, R_MUST_MAP);
943 req->op = SCHED_NOP;
944
945 /* attach the request to the region */
946 attach_req(region, req);
947 populate_region(region, req);
948 update_region_prio(region);
949
950 /* update the tree with new region */
951 if (insert_region(region)) {
952 pr_err("ocmem: Failed to insert the region\n");
953 zone->z_ops->free(zone, alloc_addr, sz);
954 detach_req(region, req);
955 update_region_prio(region);
956 /* req will be destroyed by the caller */
957 goto internal_error;
958 }
959
960 if (retry) {
961 SET_STATE(req, R_MUST_GROW);
962 SET_STATE(req, R_PENDING);
963 req->op = SCHED_GROW;
964 return OP_PARTIAL;
965 }
966 } else if (spanned_r != NULL && overlap_r != NULL) {
967 /* resolve conflicting regions based on priority */
968 if (overlap_r->max_prio < prio) {
969 if (min == max) {
970 pr_err("ocmem: Requires eviction support\n");
971 goto err_not_supported;
972 } else {
973 /* Try to allocate at least 'min' immediately */
974 sz -= step;
975 if (sz < min)
976 goto err_out_of_mem;
977 retry = true;
978 pr_debug("ocmem: Attempting with reduced size %lx\n",
979 sz);
980 goto retry_next_step;
981 }
982 } else if (overlap_r->max_prio > prio) {
983 if (can_block == true) {
984 SET_STATE(req, R_PENDING);
985 SET_STATE(req, R_MUST_GROW);
986 return OP_RESCHED;
987 } else {
988 if (min == max) {
989 pr_err("Cannot allocate %lx synchronously\n",
990 sz);
991 goto err_out_of_mem;
992 } else {
993 sz -= step;
994 if (sz < min)
995 goto err_out_of_mem;
996 retry = true;
997 pr_debug("ocmem: Attempting reduced size %lx\n",
998 sz);
999 goto retry_next_step;
1000 }
1001 }
1002 } else {
1003 pr_err("ocmem: Undetermined behavior\n");
1004 pr_err("ocmem: New Region %p Existing %p\n", region,
1005 overlap_r);
1006 /* This is serious enough to fail */
1007 BUG();
1008 }
1009 } else if (spanned_r == NULL && overlap_r != NULL)
1010 goto err_not_supported;
1011
1012 return OP_COMPLETE;
1013
1014err_not_supported:
1015 pr_err("ocmem: Scheduled unsupported operation\n");
1016 return OP_FAIL;
1017
1018err_out_of_mem:
1019 pr_err("ocmem: Out of memory during allocation\n");
1020internal_error:
1021 destroy_region(region);
1022invalid_op_error:
1023 return OP_FAIL;
1024}
1025
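/*
 * Requests that could not be satisfied immediately are parked on
 * sched_queue[] and picked up later by the delayed scheduler work.
 * Note that sched_enqueue() indexes the array by owner id while
 * ocmem_fetch_req() walks it by priority level.
 */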
1026static int sched_enqueue(struct ocmem_req *priv)
1027{
1028 struct ocmem_req *next = NULL;
1029 mutex_lock(&sched_queue_mutex);
1030 list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
1031 pr_debug("enqueued req %p\n", priv);
1032 list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
1033 pr_debug("pending requests for client %p\n", next);
1034 }
1035 mutex_unlock(&sched_queue_mutex);
1036 return 0;
1037}
1038
1039static struct ocmem_req *ocmem_fetch_req(void)
1040{
1041 int i;
1042 struct ocmem_req *req = NULL;
1043 struct ocmem_req *next = NULL;
1044
1045 mutex_lock(&sched_queue_mutex);
1046 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
1047 if (list_empty(&sched_queue[i]))
1048 continue;
1049 list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
1050 {
1051 if (req) {
1052 pr_debug("ocmem: Fetched pending request %p\n",
1053 req);
1054 list_del(&req->sched_list);
1055 break;
1056 }
1057 }
1058 }
1059 mutex_unlock(&sched_queue_mutex);
1060 return req;
1061}
1062
1063
1064unsigned long process_quota(int id)
1065{
1066 struct ocmem_zone *zone = NULL;
1067
1068 if (is_blocked(id))
1069 return 0;
1070
1071 zone = get_zone(id);
1072
1073 if (zone && zone->z_pool)
1074 return zone->z_end - zone->z_start;
1075 else
1076 return 0;
1077}
1078
1079static int do_grow(struct ocmem_req *req)
1080{
1081 struct ocmem_buf *buffer = NULL;
1082 bool can_block = true;
1083 int rc = 0;
1084
1085 down_write(&req->rw_sem);
1086 buffer = req->buffer;
1087
1088 /* Take the scheduler mutex */
1089 mutex_lock(&sched_mutex);
1090 rc = __sched_grow(req, can_block);
1091 mutex_unlock(&sched_mutex);
1092
1093 if (rc == OP_FAIL)
1094 goto err_op_fail;
1095
1096 if (rc == OP_RESCHED) {
1097 pr_debug("ocmem: Enqueue this allocation");
1098 sched_enqueue(req);
1099 }
1100
1101 else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
1102 buffer->addr = device_address(req->owner, req->req_start);
1103 buffer->len = req->req_sz;
1104 }
1105
1106 up_write(&req->rw_sem);
1107 return 0;
1108err_op_fail:
1109 up_write(&req->rw_sem);
1110 return -EINVAL;
1111}
1112
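/*
 * Complete a scheduled grow: enlarge the backing allocation, remap it
 * for tightly-coupled clients, switch on the OCMEM memory macros for
 * the request and notify the owner with OCMEM_ALLOC_GROW.
 */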
1113static int process_grow(struct ocmem_req *req)
1114{
1115 int rc = 0;
1116 unsigned long offset = 0;
1117
1118 /* Attempt to grow the region */
1119 rc = do_grow(req);
1120
1121 if (rc < 0)
1122 return -EINVAL;
1123
1124 /* Map the newly grown region */
1125 if (is_tcm(req->owner)) {
1126 rc = process_map(req, req->req_start, req->req_end);
1127 if (rc < 0)
1128 return -EINVAL;
1129 }
1130
1131 offset = phys_to_offset(req->req_start);
1132
1133 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
1134
1135 if (rc < 0) {
1136 pr_err("Failed to switch ON memory macros\n");
1137 goto power_ctl_error;
1138 }
1139
1140 /* Notify the client about the buffer growth */
1141 rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
1142 if (rc < 0) {
1143 pr_err("No notifier callback to cater for req %p event: %d\n",
1144 req, OCMEM_ALLOC_GROW);
1145 BUG();
1146 }
1147 return 0;
1148power_ctl_error:
1149 return -EINVAL;
1150}
1151
1152static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
1153{
1154
1155 int rc = 0;
1156 struct ocmem_buf *buffer = NULL;
1157
1158 down_write(&req->rw_sem);
1159 buffer = req->buffer;
1160
1161 /* Take the scheduler mutex */
1162 mutex_lock(&sched_mutex);
1163 rc = __sched_shrink(req, shrink_size);
1164 mutex_unlock(&sched_mutex);
1165
1166 if (rc == OP_FAIL)
1167 goto err_op_fail;
1168
1169 else if (rc == OP_COMPLETE) {
1170 buffer->addr = device_address(req->owner, req->req_start);
1171 buffer->len = req->req_sz;
1172 }
1173
1174 up_write(&req->rw_sem);
1175 return 0;
1176err_op_fail:
1177 up_write(&req->rw_sem);
1178 return -EINVAL;
1179}
1180
1181static void ocmem_sched_wk_func(struct work_struct *work);
1182DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);
1183
1184static int ocmem_schedule_pending(void)
1185{
1186 schedule_delayed_work(&ocmem_sched_thread,
1187 msecs_to_jiffies(SCHED_DELAY));
1188 return 0;
1189}
1190
1191static int do_free(struct ocmem_req *req)
1192{
1193 int rc = 0;
1194 struct ocmem_buf *buffer = req->buffer;
1195
1196 down_write(&req->rw_sem);
1197
1198 if (is_mapped(req)) {
1199 pr_err("ocmem: Buffer needs to be unmapped before free\n");
1200 goto err_free_fail;
1201 }
1202
1203 /* Grab the sched mutex */
1204 mutex_lock(&sched_mutex);
1205 rc = __sched_free(req);
1206 mutex_unlock(&sched_mutex);
1207
1208 switch (rc) {
1209
1210 case OP_COMPLETE:
1211 buffer->addr = 0x0;
1212 buffer->len = 0x0;
1213 break;
1214 case OP_FAIL:
1215 default:
1216 goto err_free_fail;
1217 break;
1218 }
1219
1220 up_write(&req->rw_sem);
1221 return 0;
1222err_free_fail:
1223 up_write(&req->rw_sem);
1224 pr_err("ocmem: freeing req %p failed\n", req);
1225 return -EINVAL;
1226}
1227
1228int process_free(int id, struct ocmem_handle *handle)
1229{
1230 struct ocmem_req *req = NULL;
1231 struct ocmem_buf *buffer = NULL;
1232 unsigned long offset = 0;
1233 int rc = 0;
1234
1235 if (is_blocked(id)) {
1236 pr_err("Client %d cannot request free\n", id);
1237 return -EINVAL;
1238 }
1239
1240 req = handle_to_req(handle);
1241 buffer = handle_to_buffer(handle);
1242
1243 if (!req)
1244 return -EINVAL;
1245
1246 if (req->req_start != core_address(id, buffer->addr)) {
1247 pr_err("Invalid buffer handle passed for free\n");
1248 return -EINVAL;
1249 }
1250
1251 if (is_tcm(req->owner)) {
1252 rc = process_unmap(req, req->req_start, req->req_end);
1253 if (rc < 0)
1254 return -EINVAL;
1255 }
1256
1257
1258 if (req->req_sz != 0) {
1259
1260 offset = phys_to_offset(req->req_start);
1261
1262 rc = ocmem_memory_off(req->owner, offset, req->req_sz);
1263
1264 if (rc < 0) {
1265 pr_err("Failed to switch OFF memory macros\n");
1266 return -EINVAL;
1267 }
1268
1269 }
1270
1271 rc = do_free(req);
1272
1273 if (rc < 0)
1274 return -EINVAL;
1275
1276 ocmem_destroy_req(req);
1277 handle->req = NULL;
1278
1279 ocmem_schedule_pending();
1280 return 0;
1281}
1282
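/*
 * RDM transfers between OCMEM and DDR run asynchronously on
 * ocmem_rdm_wq. The worker performs the transfer and then dispatches
 * the matching MAP/UNMAP DONE or FAIL notification to the client.
 */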
1283static void ocmem_rdm_worker(struct work_struct *work)
1284{
1285 int offset = 0;
1286 int rc = 0;
1287 int event;
1288 struct ocmem_rdm_work *work_data = container_of(work,
1289 struct ocmem_rdm_work, work);
1290 int id = work_data->id;
1291 struct ocmem_map_list *list = work_data->list;
1292 int direction = work_data->direction;
1293 struct ocmem_handle *handle = work_data->handle;
1294 struct ocmem_req *req = handle_to_req(handle);
1295 struct ocmem_buf *buffer = handle_to_buffer(handle);
1296
1297 down_write(&req->rw_sem);
1298 offset = phys_to_offset(req->req_start);
1299 rc = ocmem_rdm_transfer(id, list, offset, direction);
1300 if (work_data->direction == TO_OCMEM)
1301 event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
1302 else
1303 event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
1304 up_write(&req->rw_sem);
1305 kfree(work_data);
1306 dispatch_notification(id, event, buffer);
1307}
1308
1309int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
1310 struct ocmem_map_list *list, int direction)
1311{
1312 struct ocmem_rdm_work *work_data = NULL;
1313
1314 down_write(&req->rw_sem);
1315
1316 work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
1317 if (!work_data)
1318 BUG();
1319
1320 work_data->handle = handle;
1321 work_data->list = list;
1322 work_data->id = req->owner;
1323 work_data->direction = direction;
1324 INIT_WORK(&work_data->work, ocmem_rdm_worker);
1325 up_write(&req->rw_sem);
1326 queue_work(ocmem_rdm_wq, &work_data->work);
1327 return 0;
1328}
1329
1330int process_xfer_out(int id, struct ocmem_handle *handle,
1331 struct ocmem_map_list *list)
1332{
1333 struct ocmem_req *req = NULL;
1334 int rc = 0;
1335
1336 req = handle_to_req(handle);
1337
1338 if (!req)
1339 return -EINVAL;
1340
1341 if (!is_mapped(req)) {
1342 pr_err("Buffer is not already mapped\n");
1343 goto transfer_out_error;
1344 }
1345
1346 rc = process_unmap(req, req->req_start, req->req_end);
1347 if (rc < 0) {
1348 pr_err("Unmapping the buffer failed\n");
1349 goto transfer_out_error;
1350 }
1351
1352 rc = queue_transfer(req, handle, list, TO_DDR);
1353
1354 if (rc < 0) {
1355 pr_err("Failed to queue rdm transfer to DDR\n");
1356 goto transfer_out_error;
1357 }
1358
1359
1360 return 0;
1361
1362transfer_out_error:
1363 return -EINVAL;
1364}
1365
1366int process_xfer_in(int id, struct ocmem_handle *handle,
1367 struct ocmem_map_list *list)
1368{
1369 struct ocmem_req *req = NULL;
1370 int rc = 0;
1371
1372 req = handle_to_req(handle);
1373
1374 if (!req)
1375 return -EINVAL;
1376
1377 if (is_mapped(req)) {
1378 pr_err("Buffer is already mapped\n");
1379 goto transfer_in_error;
1380 }
1381
1382 rc = process_map(req, req->req_start, req->req_end);
1383 if (rc < 0) {
1384 pr_err("Mapping the buffer failed\n");
1385 goto transfer_in_error;
1386 }
1387
1388 rc = queue_transfer(req, handle, list, TO_OCMEM);
1389
1390 if (rc < 0) {
1391 pr_err("Failed to queue rdm transfer to OCMEM\n");
1392 goto transfer_in_error;
1393 }
1394
1395 return 0;
1396transfer_in_error:
1397 return -EINVAL;
1398}
1399
1400int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
1401{
1402 struct ocmem_req *req = NULL;
1403 struct ocmem_buf *buffer = NULL;
1404 struct ocmem_eviction_data *edata = NULL;
1405 int rc = 0;
1406
1407 if (is_blocked(id)) {
1408 pr_err("Client %d cannot request free\n", id);
1409 return -EINVAL;
1410 }
1411
1412 req = handle_to_req(handle);
1413 buffer = handle_to_buffer(handle);
1414
1415 if (!req)
1416 return -EINVAL;
1417
1418 if (req->req_start != core_address(id, buffer->addr)) {
1419 pr_err("Invalid buffer handle passed for shrink\n");
1420 return -EINVAL;
1421 }
1422
1423 edata = req->edata;
1424
1425 if (is_tcm(req->owner))
1426 do_unmap(req);
1427
1428 if (size == 0) {
1429 pr_info("req %p being shrunk to zero\n", req);
1430 rc = do_free(req);
1431 if (rc < 0)
1432 return -EINVAL;
1433 } else {
1434 rc = do_shrink(req, size);
1435 if (rc < 0)
1436 return -EINVAL;
1437 }
1438
1439 edata->pending--;
1440 if (edata->pending == 0) {
1441 pr_debug("All regions evicted");
1442 complete(&edata->completion);
1443 }
1444
1445 return 0;
1446}
1447
1448int process_xfer(int id, struct ocmem_handle *handle,
1449 struct ocmem_map_list *list, int direction)
1450{
1451 int rc = 0;
1452
1453 if (is_tcm(id)) {
1454 WARN(1, "Mapping operation is invalid for client\n");
1455 return -EINVAL;
1456 }
1457
1458 if (direction == TO_DDR)
1459 rc = process_xfer_out(id, handle, list);
1460 else if (direction == TO_OCMEM)
1461 rc = process_xfer_in(id, handle, list);
1462 return rc;
1463}
1464
1465int ocmem_eviction_thread(struct work_struct *work)
1466{
1467 return 0;
1468}
1469
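/*
 * Eviction protocol: walk the region tree and, for every region of
 * lower priority than the requesting client, ask its owners to shrink
 * via OCMEM_ALLOC_SHRINK. Each victim answers through
 * process_shrink(), which decrements edata->pending and completes the
 * eviction once all regions have been given up; process_restore()
 * later re-queues the evicted requests.
 */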
1470int process_evict(int id)
1471{
1472 struct ocmem_eviction_data *edata = NULL;
1473 int prio = ocmem_client_table[id].priority;
1474 struct rb_node *rb_node = NULL;
1475 struct ocmem_req *req = NULL;
1476 struct ocmem_buf buffer;
1477 int j = 0;
1478
1479 edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);
1480
1481 INIT_LIST_HEAD(&edata->victim_list);
1482 INIT_LIST_HEAD(&edata->req_list);
1483 edata->prio = prio;
1484 edata->pending = 0;
1485 edata->passive = 1;
1486 evictions[id] = edata;
1487
1488 mutex_lock(&sched_mutex);
1489
1490 for (rb_node = rb_first(&sched_tree); rb_node;
1491 rb_node = rb_next(rb_node)) {
1492 struct ocmem_region *tmp_region = NULL;
1493 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
1494 if (tmp_region->max_prio < prio) {
1495 for (j = id - 1; j > NO_PRIO; j--) {
1496 req = find_req_match(j, tmp_region);
1497 if (req) {
1498 pr_info("adding %p to eviction list\n",
1499 tmp_region);
1500 list_add_tail(
1501 &tmp_region->eviction_list,
1502 &edata->victim_list);
1503 list_add_tail(
1504 &req->eviction_list,
1505 &edata->req_list);
1506 edata->pending++;
1507 req->edata = edata;
1508 buffer.addr = req->req_start;
1509 buffer.len = 0x0;
1510 dispatch_notification(req->owner,
1511 OCMEM_ALLOC_SHRINK, &buffer);
1512 }
1513 }
1514 } else {
1515 pr_info("skipping %p from eviction\n", tmp_region);
1516 }
1517 }
1518 mutex_unlock(&sched_mutex);
1519 pr_debug("Waiting for all regions to be shrunk\n");
1520 if (edata->pending > 0) {
1521 init_completion(&edata->completion);
1522 wait_for_completion(&edata->completion);
1523 }
1524 return 0;
1525}
1526
1527static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
1528{
1529 int rc = 0;
1530 struct ocmem_buf *buffer = req->buffer;
1531
1532 down_write(&req->rw_sem);
1533
1534 /* Take the scheduler mutex */
1535 mutex_lock(&sched_mutex);
1536 rc = __sched_allocate(req, can_block, can_wait);
1537 mutex_unlock(&sched_mutex);
1538
1539 if (rc == OP_FAIL)
1540 goto err_allocate_fail;
1541
1542 if (rc == OP_RESCHED) {
1543 buffer->addr = 0x0;
1544 buffer->len = 0x0;
1545 pr_debug("ocmem: Enqueuing req %p\n", req);
1546 sched_enqueue(req);
1547 } else if (rc == OP_PARTIAL) {
1548 buffer->addr = device_address(req->owner, req->req_start);
1549 buffer->len = req->req_sz;
1550 pr_debug("ocmem: Enqueuing req %p\n", req);
1551 sched_enqueue(req);
1552 } else if (rc == OP_COMPLETE) {
1553 buffer->addr = device_address(req->owner, req->req_start);
1554 buffer->len = req->req_sz;
1555 }
1556
1557 up_write(&req->rw_sem);
1558 return 0;
1559err_allocate_fail:
1560 up_write(&req->rw_sem);
1561 return -EINVAL;
1562}
1563
1564int process_restore(int id)
1565{
1566 struct ocmem_req *req = NULL;
1567 struct ocmem_req *next = NULL;
1568 struct ocmem_eviction_data *edata = evictions[id];
1569
1570 if (!edata)
1571 return 0;
1572
1573 list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
1574 {
1575 if (req) {
1576 pr_debug("ocmem: Fetched evicted request %p\n",
1577 req);
1578 list_del(&req->sched_list);
1579 req->op = SCHED_ALLOCATE;
1580 sched_enqueue(req);
1581 }
1582 }
1583 kfree(edata);
1584 evictions[id] = NULL;
1585 pr_debug("Restore all evicted regions\n");
1586 ocmem_schedule_pending();
1587 return 0;
1588}
1589
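/*
 * Client-facing allocation entry point: build a request, schedule the
 * allocation, map it for tightly-coupled clients and power on the
 * memory macros backing it. Failures unwind through the labels below.
 */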
1590int process_allocate(int id, struct ocmem_handle *handle,
1591 unsigned long min, unsigned long max,
1592 unsigned long step, bool can_block, bool can_wait)
1593{
1594
1595 struct ocmem_req *req = NULL;
1596 struct ocmem_buf *buffer = NULL;
1597 int rc = 0;
1598 unsigned long offset = 0;
1599
1600 /* sanity checks */
1601 if (is_blocked(id)) {
1602 pr_err("Client %d cannot request allocation\n", id);
1603 return -EINVAL;
1604 }
1605
1606 if (handle->req != NULL) {
1607 pr_err("Invalid handle passed in\n");
1608 return -EINVAL;
1609 }
1610
1611 buffer = handle_to_buffer(handle);
1612 BUG_ON(buffer == NULL);
1613
1614 /* prepare a request structure to represent this transaction */
1615 req = ocmem_create_req();
1616 if (!req)
1617 return -ENOMEM;
1618
1619 req->owner = id;
1620 req->req_min = min;
1621 req->req_max = max;
1622 req->req_step = step;
1623 req->prio = ocmem_client_table[id].priority;
1624 req->op = SCHED_ALLOCATE;
1625 req->buffer = buffer;
1626
1627 rc = do_allocate(req, can_block, can_wait);
1628
1629 if (rc < 0)
1630 goto do_allocate_error;
1631
1632 handle->req = req;
1633
1634 if (is_tcm(id)) {
1635 rc = process_map(req, req->req_start, req->req_end);
1636 if (rc < 0)
1637 goto map_error;
1638 }
1639
1640 if (req->req_sz != 0) {
1641
1642 offset = phys_to_offset(req->req_start);
1643
1644 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
1645
1646 if (rc < 0) {
1647 pr_err("Failed to switch ON memory macros\n");
1648 goto power_ctl_error;
1649 }
1650 }
1651
1652 return 0;
1653
1654power_ctl_error:
1655map_error:
1656 handle->req = NULL;
1657 do_free(req);
1658do_allocate_error:
1659 ocmem_destroy_req(req);
1660 return -EINVAL;
1661}
1662
1663int process_delayed_allocate(struct ocmem_req *req)
1664{
1665
1666 struct ocmem_handle *handle = NULL;
1667 int rc = 0;
1668 int id = req->owner;
1669 unsigned long offset = 0;
1670
1671 handle = req_to_handle(req);
1672 BUG_ON(handle == NULL);
1673
1674 rc = do_allocate(req, true, false);
1675
1676
1677 if (rc < 0)
1678 goto do_allocate_error;
1679
1680 if (is_tcm(id)) {
1681 rc = process_map(req, req->req_start, req->req_end);
1682 if (rc < 0)
1683 goto map_error;
1684 }
1685
1686 if (req->req_sz != 0) {
1687
1688 offset = phys_to_offset(req->req_start);
1689
1690 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
1691
1692 if (rc < 0) {
1693 pr_err("Failed to switch ON memory macros\n");
1694 goto power_ctl_error;
1695 }
1696 }
1697
1698 /* Notify the client about the buffer growth */
1699 rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
1700 if (rc < 0) {
1701 pr_err("No notifier callback to cater for req %p event: %d\n",
1702 req, OCMEM_ALLOC_GROW);
1703 BUG();
1704 }
1705 return 0;
1706
1707power_ctl_error:
1708map_error:
1709 handle->req = NULL;
1710 do_free(req);
1711do_allocate_error:
1712 ocmem_destroy_req(req);
1713 return -EINVAL;
1714}
1715
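/*
 * Delayed scheduler work: each run pulls a single pending request off
 * the queues and retries the operation recorded in req->op (grow or
 * delayed allocate).
 */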
1716static void ocmem_sched_wk_func(struct work_struct *work)
1717{
1718
1719 struct ocmem_buf *buffer = NULL;
1720 struct ocmem_handle *handle = NULL;
1721 struct ocmem_req *req = ocmem_fetch_req();
1722
1723 if (!req) {
1724 pr_debug("No Pending Requests found\n");
1725 return;
1726 }
1727
1728 pr_debug("ocmem: sched_wk pending req %p\n", req);
1729 handle = req_to_handle(req);
1730 buffer = handle_to_buffer(handle);
1731 BUG_ON(req->op == SCHED_NOP);
1732
1733 switch (req->op) {
1734 case SCHED_GROW:
1735 process_grow(req);
1736 break;
1737 case SCHED_ALLOCATE:
1738 process_delayed_allocate(req);
1739 break;
1740 default:
1741 pr_err("ocmem: Unknown operation encountered\n");
1742 break;
1743 }
1744 return;
1745}
1746
1747int ocmem_sched_init(void)
1748{
1749 int i = 0;
1750 sched_tree = RB_ROOT;
1751 mutex_init(&sched_mutex);
1752 mutex_init(&sched_queue_mutex);
1753 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
1754 INIT_LIST_HEAD(&sched_queue[i]);
1755
1756 mutex_init(&rdm_mutex);
1757 INIT_LIST_HEAD(&rdm_queue);
1758 ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
1759 if (!ocmem_rdm_wq)
1760 return -ENOMEM;
1761 ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
1762 if (!ocmem_eviction_wq)
1763 return -ENOMEM;
1764 return 0;
1765}