source: libcfa/src/concurrency/clib/cfathread.cfa@fe8c31e

Last change on this file since fe8c31e was 75c7252, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

CPU workstealing now has correct remote unpark.

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// clib/cfathread.cfa --
//
// Author           : Thierry Delisle
// Created On       : Tue Sep 22 15:31:20 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

// #define EPOLL_FOR_SOCKETS

#include "fstream.hfa"
#include "locks.hfa"
#include "kernel.hfa"
#include "stats.hfa"
#include "thread.hfa"
#include "time.hfa"

#include "cfathread.h"

extern "C" {
	#include <string.h>
	#include <errno.h>
}

extern void ?{}(processor &, const char[], cluster &, thread$ *);
extern "C" {
	extern void __cfactx_invoke_thread(void (*main)(void *), void * this);
	extern int accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen, int flags);
}

extern Time __kernel_get_time();
extern unsigned register_proc_id( void );

//================================================================================
// Epoll support for sockets
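//
// Two-level design, used when EPOLL_FOR_SOCKETS is defined: a dedicated
// pthread runs master_epoll() on a single master epoll instance, while a
// small pool of leaf_poller user-threads each own one leaf epoll instance.
// File descriptors are armed with EPOLLET | EPOLLONESHOT and the epoll_event
// carries the waiting thread$ pointer: a thread parks after (re)arming its
// fd and is unparked by whichever poller sees the event.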
#if defined(EPOLL_FOR_SOCKETS)
	extern "C" {
		#include <sys/epoll.h>
		#include <sys/resource.h>
	}

	static pthread_t master_poller;
	static int master_epollfd = 0;
	static size_t poller_cnt = 0;
	static int * poller_fds = 0p;
	static struct leaf_poller * pollers = 0p;

	struct __attribute__((aligned)) fd_info_t {
		int pollid;
		size_t rearms;
	};
	rlim_t fd_limit = 0;
	static fd_info_t * volatile * fd_map = 0p;
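	// fd_map is a lock-free registration table indexed by file descriptor:
	//   0p            -> fd not registered with any leaf poller yet
	//   1p            -> registration in progress (sentinel; never dereferenced)
	//   anything else -> fd_info_t describing which leaf poller owns the fd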

	void * master_epoll( __attribute__((unused)) void * args ) {
		unsigned id = register_proc_id();

		enum { MAX_EVENTS = 5 };
		struct epoll_event events[MAX_EVENTS];
		for() {
			int ret = epoll_wait(master_epollfd, events, MAX_EVENTS, -1);
			if ( ret < 0 ) {
				abort | "Master epoll error: " | strerror(errno);
			}

			// Each ready event carries the parked thread$ that armed it.
			for(i; ret) {
				thread$ * thrd = (thread$ *)events[i].data.u64;
				unpark( thrd );
			}
		}

		return 0p;
	}

	// Re-arm 'fd' on the given epoll instance and park the calling thread
	// until a poller sees the next event and unparks it.
	static inline int epoll_rearm(int epollfd, int fd, uint32_t event) {
		struct epoll_event eevent;
		eevent.events = event | EPOLLET | EPOLLONESHOT;
		eevent.data.u64 = (uint64_t)active_thread();

		if(0 != epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &eevent))
		{
			if(errno == ENOENT) return -1;
			abort | acquire | "epoll" | epollfd | "ctl rearm" | fd | "error: " | errno | strerror(errno);
		}

		park();
		return 0;
	}

	thread leaf_poller {
		int epollfd;
	};

	void ?{}(leaf_poller & this, int fd) { this.epollfd = fd; }

	void main(leaf_poller & this) {
		enum { MAX_EVENTS = 1024 };
		struct epoll_event events[MAX_EVENTS];
		const int max_retries = 5;
		int retries = max_retries;

		// Register this poller thread on the master epoll instance so it can
		// park when its own instance stays idle.
		struct epoll_event event;
		event.events = EPOLLIN | EPOLLET | EPOLLONESHOT;
		event.data.u64 = (uint64_t)&(thread&)this;

		if(0 != epoll_ctl(master_epollfd, EPOLL_CTL_ADD, this.epollfd, &event))
		{
			abort | "master epoll ctl add leaf: " | errno | strerror(errno);
		}

		park();

		for() {
			yield();
			int ret = epoll_wait(this.epollfd, events, MAX_EVENTS, 0);
			if ( ret < 0 ) {
				abort | "Leaf epoll error: " | errno | strerror(errno);
			}

			if(ret) {
				// Events were found: restart the idle countdown, otherwise a
				// single exhaustion would re-park after every empty poll.
				retries = max_retries;
				for(i; ret) {
					thread$ * thrd = (thread$ *)events[i].data.u64;
					unpark( thrd, UNPARK_REMOTE );
				}
			}
			else if(0 >= --retries) {
				// Idle for too long: go back to sleep on the master instance.
				epoll_rearm(master_epollfd, this.epollfd, EPOLLIN);
			}
		}
	}
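
	// Polling strategy: each leaf poller busy-polls its epoll instance with a
	// zero timeout, yielding between attempts so it shares its processor, and
	// after max_retries consecutive empty polls re-arms itself on the master
	// instance and parks until master_epoll() unparks it.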
	void setup_epoll( void ) __attribute__(( constructor ));
	void setup_epoll( void ) {
		if(master_epollfd) abort | "Master epoll already setup";

		master_epollfd = epoll_create1(0);
		if(master_epollfd == -1) {
			abort | "failed to create master epoll: " | errno | strerror(errno);
		}

		struct rlimit rlim;
		if(int ret = getrlimit(RLIMIT_NOFILE, &rlim); 0 != ret) {
			abort | "failed to get nofile limit: " | errno | strerror(errno);
		}

		fd_limit = rlim.rlim_cur;
		fd_map = alloc(fd_limit);
		for(i; fd_limit) {
			fd_map[i] = 0p;
		}

		poller_cnt = 2;
		poller_fds = alloc(poller_cnt);
		pollers = alloc(poller_cnt);
		for(i; poller_cnt) {
			poller_fds[i] = epoll_create1(0);
			if(poller_fds[i] == -1) {
				abort | "failed to create leaf epoll [" | i | "]: " | errno | strerror(errno);
			}

			(pollers[i]){ poller_fds[i] };
		}

		pthread_attr_t attr;
		if (int ret = pthread_attr_init(&attr); 0 != ret) {
			abort | "failed to create master epoll thread attr: " | ret | strerror(ret);
		}

		if (int ret = pthread_create(&master_poller, &attr, master_epoll, 0p); 0 != ret) {
			abort | "failed to create master epoll thread: " | ret | strerror(ret);
		}
	}

	static inline int epoll_wait(int fd, uint32_t event) {
		// Fast path: the fd is already registered with a leaf poller. The
		// comparison is strictly above the 1p sentinel, which must not be
		// dereferenced.
		if(fd_map[fd] > 1p) {
			fd_map[fd]->rearms++;
			epoll_rearm(poller_fds[fd_map[fd]->pollid], fd, event);
			return 0;
		}

		for() {
			fd_info_t * expected = 0p;
			fd_info_t * sentinel = 1p;
			// Race to claim the slot: 0p -> 1p marks registration in progress.
			if(__atomic_compare_exchange_n( &(fd_map[fd]), &expected, sentinel, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)) {
				struct epoll_event eevent;
				eevent.events = event | EPOLLET | EPOLLONESHOT;
				eevent.data.u64 = (uint64_t)active_thread();

				// Assign the fd to a leaf poller picked at random.
				int id = thread_rand() % poller_cnt;
				if(0 != epoll_ctl(poller_fds[id], EPOLL_CTL_ADD, fd, &eevent))
				{
					abort | "epoll ctl add" | poller_fds[id] | fd | fd_map[fd] | expected | "error: " | errno | strerror(errno);
				}

				fd_info_t * ninfo = alloc();
				ninfo->pollid = id;
				ninfo->rearms = 0;
				__atomic_store_n( &fd_map[fd], ninfo, __ATOMIC_SEQ_CST);

				park();
				return 0;
			}

			// Another thread finished registering this fd: just re-arm it.
			if(expected > 1p) {
				fd_map[fd]->rearms++;
				epoll_rearm(poller_fds[fd_map[fd]->pollid], fd, event);
				return 0;
			}

			// Registration still in progress (sentinel observed): spin.
			Pause();
		}
	}
#endif

//================================================================================
// Thread run by the C Interface

struct cfathread_object {
	thread$ self;
	void * (*themain)( void * );
	void * arg;
	void * ret;
};
void main(cfathread_object & this);
void ^?{}(cfathread_object & mutex this);

static inline thread$ * get_thread( cfathread_object & this ) { return &this.self; }

typedef ThreadCancelled(cfathread_object) cfathread_exception;
typedef ThreadCancelled_vtable(cfathread_object) cfathread_vtable;

void defaultResumptionHandler(ThreadCancelled(cfathread_object) & except) {
	abort | "A thread was cancelled";
}

cfathread_vtable _cfathread_vtable_instance;

cfathread_vtable & const _default_vtable = _cfathread_vtable_instance;

cfathread_vtable const & get_exception_vtable(cfathread_exception *) {
	return _cfathread_vtable_instance;
}

static void ?{}( cfathread_object & this, cluster & cl, void *(*themain)( void * ), void * arg ) {
	this.themain = themain;
	this.arg = arg;
	(this.self){"C-thread", cl};
	__thrd_start(this, main);
}

void ^?{}(cfathread_object & mutex this) {
	^(this.self){};
}

void main( cfathread_object & this ) {
	__attribute__((unused)) void * const thrd_obj = (void*)&this;
	__attribute__((unused)) void * const thrd_hdl = (void*)active_thread();
	/* paranoid */ verify( thrd_obj == thrd_hdl );

	this.ret = this.themain( this.arg );
}

//================================================================================
// Special Init Thread responsible for the initialization of processors
struct __cfainit {
	thread$ self;
	void (*init)( void * );
	void * arg;
};
void main(__cfainit & this);
void ^?{}(__cfainit & mutex this);

static inline thread$ * get_thread( __cfainit & this ) { return &this.self; }

typedef ThreadCancelled(__cfainit) __cfainit_exception;
typedef ThreadCancelled_vtable(__cfainit) __cfainit_vtable;

void defaultResumptionHandler(ThreadCancelled(__cfainit) & except) {
	abort | "The init thread was cancelled";
}

__cfainit_vtable ___cfainit_vtable_instance;

__cfainit_vtable const & get_exception_vtable(__cfainit_exception *) {
	return ___cfainit_vtable_instance;
}

static void ?{}( __cfainit & this, void (*init)( void * ), void * arg ) {
	this.init = init;
	this.arg = arg;
	(this.self){"Processor Init"};

	// Don't use __thrd_start! Just prep the context manually: this thread is
	// handed directly to a new processor (see cfathread_cluster_add_worker
	// below) instead of being scheduled on a cluster.
	thread$ * this_thrd = get_thread(this);
	void (*main_p)(__cfainit &) = main;

	disable_interrupts();
	__cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);

	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
	/* paranoid */ verify( this_thrd->context.SP );

	this_thrd->state = Ready;
	enable_interrupts();
}

void ^?{}(__cfainit & mutex this) {
	^(this.self){};
}

void main( __cfainit & this ) {
	__attribute__((unused)) void * const thrd_obj = (void*)&this;
	__attribute__((unused)) void * const thrd_hdl = (void*)active_thread();
	/* paranoid */ verify( thrd_obj == thrd_hdl );

	this.init( this.arg );
}

//================================================================================
// Main Api
extern "C" {
	int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) {
		*cl = new();
		return 0;
	}

	cfathread_cluster_t cfathread_cluster_self(void) {
		return active_cluster();
	}

	int cfathread_cluster_print_stats( cfathread_cluster_t cl ) {
		#if !defined(__CFA_NO_STATISTICS__)
			print_stats_at_exit( *cl, CFA_STATS_READY_Q | CFA_STATS_IO );
			print_stats_now( *cl, CFA_STATS_READY_Q | CFA_STATS_IO );
		#endif
		return 0;
	}

	int cfathread_cluster_add_worker(cfathread_cluster_t cl, pthread_t* tid, void (*init_routine) (void *), void * arg) {
		__cfainit * it = 0p;
		if(init_routine) {
			it = alloc();
			(*it){init_routine, arg};
		}
		processor * proc = alloc();
		// Pass the init thread only if one was requested; the processor runs
		// it before anything else.
		(*proc){ "C-processor", *cl, it ? get_thread(*it) : 0p };

		// Wait for the init thread to return before continuing
		if(it) {
			^(*it){};
			free(it);
		}

		if(tid) *tid = proc->kernel_thread;
		return 0;
	}
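
	// Illustrative usage from C (sketch, not part of this file; 'my_init' is
	// a hypothetical client callback):
	//   cfathread_cluster_t cl;
	//   cfathread_cluster_create( &cl );
	//   pthread_t tid;
	//   cfathread_cluster_add_worker( cl, &tid, my_init, 0p );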

	int cfathread_cluster_pause (cfathread_cluster_t) {
		abort | "Pausing clusters is not supported";
		exit(1);
	}

	int cfathread_cluster_resume(cfathread_cluster_t) {
		abort | "Resuming clusters is not supported";
		exit(1);
	}

	//--------------------
	// Thread attributes
	int cfathread_attr_init(cfathread_attr_t *attr) __attribute__((nonnull (1))) {
		attr->cl = active_cluster();
		return 0;
	}

	//--------------------
	// Thread
	int cfathread_create( cfathread_t * handle, const cfathread_attr_t * attr, void *(*main)( void * ), void * arg ) __attribute__((nonnull (1))) {
		cluster * cl = attr ? attr->cl : active_cluster();
		cfathread_t thrd = alloc();
		(*thrd){ *cl, main, arg };
		*handle = thrd;
		return 0;
	}

	int cfathread_join( cfathread_t thrd, void ** retval ) {
		void * ret = join( *thrd ).ret;
		^( *thrd ){};
		free(thrd);
		if(retval) {
			*retval = ret;
		}
		return 0;
	}
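
	// Illustrative usage from C (sketch; 'worker' is a hypothetical client
	// function with the signature void * worker( void * )):
	//   cfathread_t t;
	//   cfathread_create( &t, 0p /* default attributes */, worker, arg );
	//   void * ret;
	//   cfathread_join( t, &ret );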

	int cfathread_get_errno(void) {
		return errno;
	}

	cfathread_t cfathread_self(void) {
		return (cfathread_t)active_thread();
	}

	int cfathread_usleep(useconds_t usecs) {
		sleep(usecs`us);
		return 0;
	}

	int cfathread_sleep(unsigned int secs) {
		sleep(secs`s);
		return 0;
	}

	void cfathread_park( void ) {
		park();
	}

	void cfathread_unpark( cfathread_t thrd ) {
		unpark( *thrd );
	}

	void cfathread_yield( void ) {
		yield();
	}

	typedef struct cfathread_mutex * cfathread_mutex_t;

	//--------------------
	// Mutex
	struct cfathread_mutex {
		linear_backoff_then_block_lock impl;
	};
	int cfathread_mutex_init(cfathread_mutex_t *restrict mut, const cfathread_mutexattr_t *restrict) __attribute__((nonnull (1))) { *mut = new(); return 0; }
	int cfathread_mutex_destroy(cfathread_mutex_t *mut) __attribute__((nonnull (1))) { delete( *mut ); return 0; }
	int cfathread_mutex_lock   (cfathread_mutex_t *mut) __attribute__((nonnull (1))) { lock( (*mut)->impl ); return 0; }
	int cfathread_mutex_unlock (cfathread_mutex_t *mut) __attribute__((nonnull (1))) { unlock( (*mut)->impl ); return 0; }
	int cfathread_mutex_trylock(cfathread_mutex_t *mut) __attribute__((nonnull (1))) {
		bool ret = try_lock( (*mut)->impl );
		if( ret ) return 0;
		else return EBUSY;
	}

	//--------------------
	// Condition
	struct cfathread_condition {
		condition_variable(linear_backoff_then_block_lock) impl;
	};
	int cfathread_cond_init(cfathread_cond_t *restrict cond, const cfathread_condattr_t *restrict) __attribute__((nonnull (1))) { *cond = new(); return 0; }
	int cfathread_cond_signal(cfathread_cond_t *cond) __attribute__((nonnull (1))) { notify_one( (*cond)->impl ); return 0; }
	int cfathread_cond_wait(cfathread_cond_t *restrict cond, cfathread_mutex_t *restrict mut) __attribute__((nonnull (1,2))) { wait( (*cond)->impl, (*mut)->impl ); return 0; }
	int cfathread_cond_timedwait(cfathread_cond_t *restrict cond, cfathread_mutex_t *restrict mut, const struct timespec *restrict abstime) __attribute__((nonnull (1,2,3))) {
		// The underlying wait takes a relative duration, so convert the
		// absolute deadline against the current CLOCK_REALTIME time.
		Time t = { *abstime };
		timespec curr;
		clock_gettime( CLOCK_REALTIME, &curr );
		Time c = { curr };
		if( wait( (*cond)->impl, (*mut)->impl, t - c ) ) {
			return 0;
		}
		errno = ETIMEDOUT;
		return ETIMEDOUT;
	}
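
	// Illustrative pairing of the wrappers above (sketch; 'pred', 'mut' and
	// 'cond' are hypothetical client state):
	//   cfathread_mutex_lock( &mut );
	//   while( !pred ) cfathread_cond_wait( &cond, &mut );
	//   cfathread_mutex_unlock( &mut );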
}

#include <iofwd.hfa>

extern "C" {
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/socket.h>

	//--------------------
	// IO operations
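	// Strategy: with EPOLL_FOR_SOCKETS each wrapper retries the plain
	// non-blocking syscall and, on EAGAIN/EWOULDBLOCK, blocks only the calling
	// user-thread through the epoll machinery above; otherwise it defers to
	// the Cforall asynchronous I/O calls (cfa_*) with CFA_IO_LAZY.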
	int cfathread_socket(int domain, int type, int protocol) {
		return socket(domain, type
		#if defined(EPOLL_FOR_SOCKETS)
			| SOCK_NONBLOCK
		#endif
		, protocol);
	}
	int cfathread_bind(int socket, const struct sockaddr *address, socklen_t address_len) {
		return bind(socket, address, address_len);
	}

	int cfathread_listen(int socket, int backlog) {
		return listen(socket, backlog);
	}

	int cfathread_accept(int socket, struct sockaddr *restrict address, socklen_t *restrict address_len) {
		#if defined(EPOLL_FOR_SOCKETS)
			int ret;
			for() {
				yield();
				ret = accept4(socket, address, address_len, SOCK_NONBLOCK);
				if(ret >= 0) break;
				if(errno != EAGAIN && errno != EWOULDBLOCK) break;

				epoll_wait(socket, EPOLLIN);
			}
			return ret;
		#else
			return cfa_accept4(socket, address, address_len, 0, CFA_IO_LAZY);
		#endif
	}

	int cfathread_connect(int socket, const struct sockaddr *address, socklen_t address_len) {
		#if defined(EPOLL_FOR_SOCKETS)
			int ret;
			for() {
				ret = connect(socket, address, address_len);
				if(ret >= 0) break;
				if(errno != EAGAIN && errno != EWOULDBLOCK) break;

				epoll_wait(socket, EPOLLIN);
			}
			return ret;
		#else
			return cfa_connect(socket, address, address_len, CFA_IO_LAZY);
		#endif
	}

	int cfathread_dup(int fildes) {
		return dup(fildes);
	}

	int cfathread_close(int fildes) {
		return cfa_close(fildes, CFA_IO_LAZY);
	}

	ssize_t cfathread_sendmsg(int socket, const struct msghdr *message, int flags) {
		#if defined(EPOLL_FOR_SOCKETS)
			ssize_t ret;
			__STATS__( false, io.ops.sockwrite++; )
			for() {
				ret = sendmsg(socket, message, flags);
				if(ret >= 0) break;
				if(errno != EAGAIN && errno != EWOULDBLOCK) break;

				__STATS__( false, io.ops.epllwrite++; )
				epoll_wait(socket, EPOLLOUT);
			}
		#else
			ssize_t ret = cfa_sendmsg(socket, message, flags, CFA_IO_LAZY);
		#endif
		return ret;
	}

	ssize_t cfathread_write(int fildes, const void *buf, size_t nbyte) {
		// Use send rather than write for sockets since it's faster.
		#if defined(EPOLL_FOR_SOCKETS)
			ssize_t ret;
			// __STATS__( false, io.ops.sockwrite++; )
			for() {
				ret = send(fildes, buf, nbyte, 0);
				if(ret >= 0) break;
				if(errno != EAGAIN && errno != EWOULDBLOCK) break;

				// __STATS__( false, io.ops.epllwrite++; )
				epoll_wait(fildes, EPOLLOUT);
			}
		#else
			ssize_t ret = cfa_send(fildes, buf, nbyte, 0, CFA_IO_LAZY);
		#endif
		return ret;
	}

	ssize_t cfathread_recvfrom(int socket, void *restrict buffer, size_t length, int flags, struct sockaddr *restrict address, socklen_t *restrict address_len) {
		// Build a msghdr so both paths can share the recvmsg interface.
		struct iovec iov;
		iov.iov_base = buffer;
		iov.iov_len = length;

		struct msghdr msg;
		msg.msg_name = address;
		msg.msg_namelen = address_len ? (socklen_t)*address_len : (socklen_t)0;
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = 0p;
		msg.msg_controllen = 0;

		#if defined(EPOLL_FOR_SOCKETS)
			ssize_t ret;
			yield();
			for() {
				ret = recvmsg(socket, &msg, flags);
				if(ret >= 0) break;
				if(errno != EAGAIN && errno != EWOULDBLOCK) break;

				epoll_wait(socket, EPOLLIN);
			}
		#else
			ssize_t ret = cfa_recvmsg(socket, &msg, flags, CFA_IO_LAZY);
		#endif

		if(address_len) *address_len = msg.msg_namelen;
		return ret;
	}

	ssize_t cfathread_read(int fildes, void *buf, size_t nbyte) {
		// Use recv rather than read for sockets since it's faster.
		#if defined(EPOLL_FOR_SOCKETS)
			ssize_t ret;
			__STATS__( false, io.ops.sockread++; )
			yield();
			for() {
				ret = recv(fildes, buf, nbyte, 0);
				if(ret >= 0) break;
				if(errno != EAGAIN && errno != EWOULDBLOCK) break;

				__STATS__( false, io.ops.epllread++; )
				epoll_wait(fildes, EPOLLIN);
			}
		#else
			ssize_t ret = cfa_recv(fildes, buf, nbyte, 0, CFA_IO_LAZY);
		#endif
		return ret;
	}

}