Indirect-to-direct-scheduling experiment
This commit is contained in:
parent
098b690e4d
commit
76280ef51b
11
TODO
11
TODO
|
@@ -14,3 +14,14 @@ last-value-cache type thing.
|
|||
|
||||
Website
|
||||
- http://www.flickr.com/photos/elemishra/158211069/
|
||||
|
||||
Switch from indirect scheduling to direct scheduling
|
||||
- before, on walk: 173kHz test1/test3
|
||||
- after, no change. Probably because we only have a single thread
|
||||
active in the system when running test1/test3, so every time some
|
||||
work comes in, it baton-passes to the next guy in the chain. The
|
||||
change *would* be an improvement but we have a separate worklist
|
||||
(to process now) and runlist (to process after polling for events
|
||||
once) so it always goes back to the scheduler because the worklist
|
||||
is never longer than one.
|
||||
- Revisit once we start testing multiple concurrent streams.
|
||||
|
|
|
@@ -0,0 +1,74 @@
|
|||
diff --git a/harness.c b/harness.c
|
||||
index 9c891b3..74061af 100644
|
||||
--- a/harness.c
|
||||
+++ b/harness.c
|
||||
@@ -50,18 +50,38 @@ Process *current_process = NULL;
|
||||
static ucontext_t scheduler;
|
||||
static queue_t runlist = EMPTY_PROCESS_QUEUE;
|
||||
static queue_t deadlist = EMPTY_PROCESS_QUEUE;
|
||||
+static queue_t current_worklist = EMPTY_PROCESS_QUEUE;
|
||||
|
||||
static void enqueue_runlist(Process *p) {
|
||||
p->state = PROCESS_RUNNING;
|
||||
enqueue(&runlist, p);
|
||||
}
|
||||
|
||||
+static void clean_dead_processes(void) {
|
||||
+ Process *deadp;
|
||||
+ while ((deadp = dequeue(&deadlist)) != NULL) {
|
||||
+ free(deadp->stack_base);
|
||||
+ free(deadp);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
static void schedule(void) {
|
||||
- //info("schedule %p\n", current_process);
|
||||
if (current_process == NULL) {
|
||||
ICHECK(setcontext(&scheduler), "schedule setcontext");
|
||||
} else {
|
||||
- ICHECK(swapcontext(&current_process->context, &scheduler), "schedule swapcontext");
|
||||
+ Process *current = current_process;
|
||||
+ Process *target_process = dequeue(&current_worklist);
|
||||
+ ucontext_t *target;
|
||||
+
|
||||
+ if (target_process == NULL) {
|
||||
+ target = &scheduler;
|
||||
+ } else {
|
||||
+ target = &target_process->context;
|
||||
+ current_process = target_process;
|
||||
+ }
|
||||
+
|
||||
+ clean_dead_processes(); /* safe because we know we're not dead ourselves at this point */
|
||||
+ ICHECK(swapcontext(&current->context, target), "schedule swapcontext");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -255,14 +275,6 @@ void iohandle_settimeout(IOHandle *h, int timeout_read, int timeout_write) {
|
||||
bufferevent_settimeout(h->io, timeout_read, timeout_write);
|
||||
}
|
||||
|
||||
-static void clean_dead_processes(void) {
|
||||
- Process *deadp;
|
||||
- while ((deadp = dequeue(&deadlist)) != NULL) {
|
||||
- free(deadp->stack_base);
|
||||
- free(deadp);
|
||||
- }
|
||||
-}
|
||||
-
|
||||
void boot_harness(void) {
|
||||
stdin_h = new_iohandle(0);
|
||||
stdout_h = new_iohandle(1);
|
||||
@@ -272,10 +284,10 @@ void boot_harness(void) {
|
||||
|
||||
while (1) {
|
||||
while (runlist.count) {
|
||||
- queue_t work = runlist;
|
||||
+ current_worklist = runlist;
|
||||
runlist = EMPTY_PROCESS_QUEUE;
|
||||
- //info("Processing %d jobs\n", work.count);
|
||||
- while ((current_process = dequeue(&work)) != NULL) {
|
||||
+ //info("Processing %d jobs\n", current_worklist.count);
|
||||
+ while ((current_process = dequeue(&current_worklist)) != NULL) {
|
||||
//info("entering %p\n", current_process);
|
||||
ICHECK(swapcontext(&scheduler, &current_process->context), "boot_harness swapcontext");
|
||||
clean_dead_processes();
|
Loading…
Reference in New Issue