Changeset 2489d31

Timestamp: Apr 23, 2020, 4:21:49 PM (4 years ago)
Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: ecf6b46
Parents: 8962722
Files: 3 edited
benchmark/io/readv.cfa
(diff r8962722 -> r2489d31)

 	extern bool traceHeapOn();
-	extern ssize_t async_preadv2(int fd, const struct iovec *iov, int iovcnt, off_t offset, int flags);
+	extern ssize_t cfa_preadv2(int fd, const struct iovec *iov, int iovcnt, off_t offset, int flags);

 	int fd;
…
 	while(__atomic_load_n(&run, __ATOMIC_RELAXED)) {
-		async_preadv2(fd, &iov, 1, 0, 0);
+		cfa_preadv2(fd, &iov, 1, 0, 0);
 		__atomic_fetch_add( &count, 1, __ATOMIC_SEQ_CST );
 	}
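For context on what this benchmark exercises: when IORING_OP_READV is unavailable, cfa_preadv2 degrades to the ordinary synchronous preadv2 call (see io.cfa below), so the loop is behaviorally equivalent to the following plain-C sketch. The globals and buffer size here are illustrative stand-ins, not the benchmark's actual declarations.

#define _GNU_SOURCE
#include <stdatomic.h>
#include <sys/uio.h>

static int fd;                              // opened elsewhere
static _Atomic unsigned long long count;    // reads completed so far
static _Atomic _Bool run = 1;               // cleared by a timer to stop the loop
static char buffer[4096];

void reader_loop(void) {
	struct iovec iov = { buffer, sizeof(buffer) };
	while (atomic_load_explicit(&run, memory_order_relaxed)) {
		// Synchronous read at offset 0, no flags: the fallback path
		// taken when IORING_OP_READV is not defined.
		preadv2(fd, &iov, 1, 0, 0);
		atomic_fetch_add(&count, 1);        // seq_cst, as in the benchmark
	}
}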
examples/io/simple/server.cfa
(diff r8962722 -> r2489d31)

 	//----------
-	extern ssize_t async_recvmsg(int sockfd, struct msghdr *msg, int flags);
-	extern int async_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen, int flags);
-	extern int async_close(int fd);
+	extern ssize_t cfa_recvmsg(int sockfd, struct msghdr *msg, int flags);
+	extern int cfa_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen, int flags);
+	extern int cfa_close(int fd);

 	//----------
…
 	struct sockaddr_in cli_addr;
 	__socklen_t clilen = sizeof(cli_addr);
-	int newsock = async_accept4(sock, (struct sockaddr *) &cli_addr, &clilen, 0);
+	int newsock = cfa_accept4(sock, (struct sockaddr *) &cli_addr, &clilen, 0);
 	if (newsock < 0) {
 		error( printer, "accept", -newsock);
…
 	while(1) {
-		int res = async_recvmsg(newsock, &msg, 0);
+		int res = cfa_recvmsg(newsock, &msg, 0);
 		if(res == 0) break;
 		if(res < 0) {
…
 	}

-	ret = async_close(newsock);
+	ret = cfa_close(newsock);
 	if(ret < 0) {
 		error( printer, "close new", -ret);
…
 	}

-	ret = async_close(sock);
+	ret = cfa_close(sock);
 	if(ret < 0) {
 		error( printer, "close old", -ret);
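The server's accept/receive/close loop maps directly onto the POSIX calls the cfa_ wrappers fall back to when the corresponding IORING_OP_* opcodes are missing. One visible difference in calling convention: the wrappers appear to return the negated errno on failure (hence error( printer, "accept", -newsock ) above), while the raw calls return -1 and set errno. A minimal plain-C sketch of the same loop, with illustrative identifiers and error handling elided:

#define _GNU_SOURCE
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

// Accept one client on listening socket 'sock' and drain its messages.
static void serve_one(int sock) {
	struct sockaddr_in cli_addr;
	socklen_t clilen = sizeof(cli_addr);
	int newsock = accept4(sock, (struct sockaddr *)&cli_addr, &clilen, 0);
	if (newsock < 0) return;                 // accept4 sets errno on failure

	char buf[1024];
	struct iovec iov = { buf, sizeof(buf) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	for (;;) {
		ssize_t res = recvmsg(newsock, &msg, 0);
		if (res == 0) break;                 // peer closed the connection
		if (res < 0) break;                  // error
	}
	close(newsock);
}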
libcfa/src/concurrency/io.cfa
(diff r8962722 -> r2489d31)

The __submit_alloc/__submit helpers and the io_uring_sqe constructors change only in indentation in this revision, so the block is shown once:

 	static inline [* struct io_uring_sqe, uint32_t] __submit_alloc( struct io_ring & ring ) {
 		// Wait for a spot to be available
 		P(ring.submit);

 		// Allocate the sqe
 		uint32_t idx = __atomic_fetch_add(&ring.submit_q.alloc, 1ul32, __ATOMIC_SEQ_CST);

 		// Check that nothing overflowed
 		/* paranoid */ verify( true );

 		// Check that it goes head -> tail -> alloc and never head -> alloc -> tail
 		/* paranoid */ verify( true );

 		// Return the sqe
 		return [&ring.submit_q.sqes[ idx & (*ring.submit_q.mask)], idx];
 	}

 	static inline void __submit( struct io_ring & ring, uint32_t idx ) {
 		// get mutual exclusion
 		lock(ring.submit_q.lock __cfaabi_dbg_ctx2);

 		// Append to the list of ready entries
 		uint32_t * tail = ring.submit_q.tail;
 		const uint32_t mask = *ring.submit_q.mask;

 		ring.submit_q.array[ (*tail) & mask ] = idx & mask;
 		__atomic_fetch_add(tail, 1ul32, __ATOMIC_SEQ_CST);

 		// Submit however many entries need to be submitted
 		int ret = syscall( __NR_io_uring_enter, ring.fd, 1, 0, 0, 0p, 0);
 		// __cfaabi_bits_print_safe( STDERR_FILENO, "Performed io_submit, returned %d\n", ret );
 		if( ret < 0 ) {
 			switch((int)errno) {
 				default:
 					abort( "KERNEL ERROR: IO_URING SUBMIT - %s\n", strerror(errno) );
 			}
 		}

 		unlock(ring.submit_q.lock);
 		// Make sure that idx was submitted
 		// Be careful not to report a false positive if we cycled the entire list or someone else submitted for us
 	}

 	static inline void ?{}(struct io_uring_sqe & this, uint8_t opcode, int fd) {
 		this.opcode = opcode;
 		#if !defined(IOSQE_ASYNC)
 			this.flags = 0;
 		#else
 			this.flags = IOSQE_ASYNC;
 		#endif
 		this.ioprio = 0;
 		this.fd = fd;
 		this.off = 0;
 		this.addr = 0;
 		this.len = 0;
 		this.rw_flags = 0;
 		this.__pad2[0] = this.__pad2[1] = this.__pad2[2] = 0;
 	}

 	static inline void ?{}(struct io_uring_sqe & this, uint8_t opcode, int fd, void * addr, uint32_t len, uint64_t off ) {
 		(this){ opcode, fd };
 		this.off = off;
 		this.addr = (uint64_t)addr;
 		this.len = len;
 	}

 	//=============================================================================================
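__submit publishes a filled sqe in two steps: write the ring slot, then bump the shared tail atomically so the kernel never sees a half-published entry, and finally call io_uring_enter to submit. A rough plain-C sketch of the same publish-then-notify pattern, assuming kernel headers that define __NR_io_uring_enter; struct subq and its fields are illustrative stand-ins, not CFA's actual types:

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

// Illustrative bookkeeping for the submission queue; in real code these
// pointers target the mmap'd SQ ring shared with the kernel.
struct subq {
	pthread_mutex_t lock;
	uint32_t *tail;    // kernel reads this
	uint32_t  mask;    // ring capacity - 1
	uint32_t *array;   // ring slots -> sqe indices
	int       ring_fd;
};

// Publish sqe index 'idx' to the ring, then notify the kernel.
static void submit(struct subq *q, uint32_t idx) {
	pthread_mutex_lock(&q->lock);

	// Fill the slot before bumping the tail; the atomic increment makes the
	// slot visible to the kernel only once it is complete. SEQ_CST mirrors
	// the original; a release store on the tail would also suffice.
	q->array[*q->tail & q->mask] = idx & q->mask;
	__atomic_fetch_add(q->tail, 1, __ATOMIC_SEQ_CST);

	// to_submit = 1, min_complete = 0, flags = 0, no sigmask.
	syscall(__NR_io_uring_enter, q->ring_fd, 1, 0, 0, NULL, (size_t)0);

	pthread_mutex_unlock(&q->lock);
}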
…
 	//-----------------------------------------------------------------------------
 	// Asynchronous operations
-	ssize_t async_preadv2(int fd, const struct iovec *iov, int iovcnt, off_t offset, int flags) {
+	ssize_t cfa_preadv2(int fd, const struct iovec *iov, int iovcnt, off_t offset, int flags) {
 		#if !defined(IORING_OP_READV)
 			return preadv2(fd, iov, iovcnt, offset, flags);
…
 	}

-	ssize_t async_pwritev2(int fd, const struct iovec *iov, int iovcnt, off_t offset, int flags) {
+	ssize_t cfa_pwritev2(int fd, const struct iovec *iov, int iovcnt, off_t offset, int flags) {
 		#if !defined(IORING_OP_WRITEV)
 			return pwritev2(fd, iov, iovcnt, offset, flags);
…
 	}

-	int async_fsync(int fd) {
+	int cfa_fsync(int fd) {
 		#if !defined(IORING_OP_FSYNC)
 			return fsync(fd);
…
 	}

-	int async_sync_file_range(int fd, int64_t offset, int64_t nbytes, unsigned int flags) {
+	int cfa_sync_file_range(int fd, int64_t offset, int64_t nbytes, unsigned int flags) {
 		#if !defined(IORING_OP_SYNC_FILE_RANGE)
 			return sync_file_range(fd, offset, nbytes, flags);
…

-	ssize_t async_sendmsg(int sockfd, const struct msghdr *msg, int flags) {
+	ssize_t cfa_sendmsg(int sockfd, const struct msghdr *msg, int flags) {
 		#if !defined(IORING_OP_SENDMSG)
 			return sendmsg(sockfd, msg, flags);
…
 	}

-	ssize_t async_recvmsg(int sockfd, struct msghdr *msg, int flags) {
+	ssize_t cfa_recvmsg(int sockfd, struct msghdr *msg, int flags) {
 		#if !defined(IORING_OP_RECVMSG)
 			return recvmsg(sockfd, msg, flags);
…
 	}

-	ssize_t async_send(int sockfd, const void *buf, size_t len, int flags) {
+	ssize_t cfa_send(int sockfd, const void *buf, size_t len, int flags) {
 		#if !defined(IORING_OP_SEND)
 			return send( sockfd, buf, len, flags );
…
 	}

-	ssize_t async_recv(int sockfd, void *buf, size_t len, int flags) {
+	ssize_t cfa_recv(int sockfd, void *buf, size_t len, int flags) {
 		#if !defined(IORING_OP_RECV)
 			return recv( sockfd, buf, len, flags );
…
 	}

-	int async_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen, int flags) {
+	int cfa_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen, int flags) {
 		#if !defined(IORING_OP_ACCEPT)
 			__SOCKADDR_ARG _addr;
…
 	}
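Every wrapper follows the same compile-time fallback shape: if the installed kernel headers lack the relevant IORING_OP_* opcode, the wrapper degrades to the ordinary synchronous syscall; otherwise it builds an sqe and goes through the ring. A minimal C rendering of that shape, where submit_and_wait is a hypothetical stand-in for the elided sqe-allocate/fill/submit sequence:

#define _GNU_SOURCE
#include <stdint.h>
#include <sys/uio.h>
#if defined(__has_include)
#  if __has_include(<linux/io_uring.h>)
#    include <linux/io_uring.h>   // provides the IORING_OP_* opcodes
#  endif
#endif

// Hypothetical helper: queue one request on the ring and block the calling
// user-level thread until its completion entry arrives.
extern ssize_t submit_and_wait(uint8_t opcode, int fd,
                               uint64_t addr, uint32_t len, uint64_t off);

ssize_t my_preadv2(int fd, const struct iovec *iov, int iovcnt,
                   off_t offset, int flags) {
#if !defined(IORING_OP_READV)
	// Headers predate io_uring readv support: plain synchronous call.
	return preadv2(fd, iov, iovcnt, offset, flags);
#else
	(void)flags;   // illustrative: this sketch does not forward preadv2 flags
	return submit_and_wait(IORING_OP_READV, fd,
	                       (uint64_t)(uintptr_t)iov, (uint32_t)iovcnt,
	                       (uint64_t)offset);
#endif
}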
-	int async_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) {
+	int cfa_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) {
 		#if !defined(IORING_OP_CONNECT)
 			__CONST_SOCKADDR_ARG _addr;
…
 	}

-	int async_fallocate(int fd, int mode, uint64_t offset, uint64_t len) {
+	int cfa_fallocate(int fd, int mode, uint64_t offset, uint64_t len) {
 		#if !defined(IORING_OP_FALLOCATE)
 			return fallocate( fd, mode, offset, len );
…
 	}

-	int async_fadvise(int fd, uint64_t offset, uint64_t len, int advice) {
+	int cfa_fadvise(int fd, uint64_t offset, uint64_t len, int advice) {
 		#if !defined(IORING_OP_FADVISE)
 			return posix_fadvise( fd, offset, len, advice );
…
 	}

-	int async_madvise(void *addr, size_t length, int advice) {
+	int cfa_madvise(void *addr, size_t length, int advice) {
 		#if !defined(IORING_OP_MADVISE)
 			return madvise( addr, length, advice );
…
 	}

-	int async_openat(int dirfd, const char *pathname, int flags, mode_t mode) {
+	int cfa_openat(int dirfd, const char *pathname, int flags, mode_t mode) {
 		#if !defined(IORING_OP_OPENAT)
 			return openat( dirfd, pathname, flags, mode );
…
 	}

-	int async_close(int fd) {
+	int cfa_close(int fd) {
 		#if !defined(IORING_OP_CLOSE)
 			return close( fd );
…
 	}

-	int async_statx(int dirfd, const char *pathname, int flags, unsigned int mask, struct statx *statxbuf) {
+	int cfa_statx(int dirfd, const char *pathname, int flags, unsigned int mask, struct statx *statxbuf) {
 		#if !defined(IORING_OP_STATX)
 			//return statx( dirfd, pathname, flags, mask, statxbuf );
…

-	ssize_t async_read(int fd, void *buf, size_t count) {
+	ssize_t cfa_read(int fd, void *buf, size_t count) {
 		#if !defined(IORING_OP_READ)
 			return read( fd, buf, count );
…
 	}

-	ssize_t async_write(int fd, void *buf, size_t count) {
+	ssize_t cfa_write(int fd, void *buf, size_t count) {
 		#if !defined(IORING_OP_WRITE)
 			return write( fd, buf, count );
…
 	}
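The commented-out return statx( … ) in cfa_statx's fallback suggests the C library wrapper was not available at the time, so the fallback must reach the system call directly. A sketch of that route, assuming kernel headers new enough (4.11+) to define __NR_statx and struct statx:

#define _GNU_SOURCE
#include <linux/stat.h>     // struct statx, STATX_* masks
#include <sys/syscall.h>
#include <unistd.h>

// Non-io_uring fallback when glibc lacks a statx() wrapper:
// invoke the system call directly.
static int statx_fallback(int dirfd, const char *pathname, int flags,
                          unsigned int mask, struct statx *statxbuf) {
	return (int)syscall(__NR_statx, dirfd, pathname, flags, mask, statxbuf);
}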
…
 	if( /*func == (fptr_t)preadv2 || */
-		func == (fptr_t)async_preadv2 )
+		func == (fptr_t)cfa_preadv2 )
 		#define _CFA_IO_FEATURE_IORING_OP_READV ,
 		return IS_DEFINED(IORING_OP_READV);

 	if( /*func == (fptr_t)pwritev2 || */
-		func == (fptr_t)async_pwritev2 )
+		func == (fptr_t)cfa_pwritev2 )
 		#define _CFA_IO_FEATURE_IORING_OP_WRITEV ,
 		return IS_DEFINED(IORING_OP_WRITEV);

 	if( /*func == (fptr_t)fsync || */
-		func == (fptr_t)async_fsync )
+		func == (fptr_t)cfa_fsync )
 		#define _CFA_IO_FEATURE_IORING_OP_FSYNC ,
 		return IS_DEFINED(IORING_OP_FSYNC);

 	if( /*func == (fptr_t)sync_file_range || */
-		func == (fptr_t)async_sync_file_range )
+		func == (fptr_t)cfa_sync_file_range )
 		#define _CFA_IO_FEATURE_IORING_OP_SYNC_FILE_RANGE ,
 		return IS_DEFINED(IORING_OP_SYNC_FILE_RANGE);

 	if( /*func == (fptr_t)sendmsg || */
-		func == (fptr_t)async_sendmsg )
+		func == (fptr_t)cfa_sendmsg )
 		#define _CFA_IO_FEATURE_IORING_OP_SENDMSG ,
 		return IS_DEFINED(IORING_OP_SENDMSG);

 	if( /*func == (fptr_t)recvmsg || */
-		func == (fptr_t)async_recvmsg )
+		func == (fptr_t)cfa_recvmsg )
 		#define _CFA_IO_FEATURE_IORING_OP_RECVMSG ,
 		return IS_DEFINED(IORING_OP_RECVMSG);

 	if( /*func == (fptr_t)send || */
-		func == (fptr_t)async_send )
+		func == (fptr_t)cfa_send )
 		#define _CFA_IO_FEATURE_IORING_OP_SEND ,
 		return IS_DEFINED(IORING_OP_SEND);

 	if( /*func == (fptr_t)recv || */
-		func == (fptr_t)async_recv )
+		func == (fptr_t)cfa_recv )
 		#define _CFA_IO_FEATURE_IORING_OP_RECV ,
 		return IS_DEFINED(IORING_OP_RECV);

 	if( /*func == (fptr_t)accept4 || */
-		func == (fptr_t)async_accept4 )
+		func == (fptr_t)cfa_accept4 )
 		#define _CFA_IO_FEATURE_IORING_OP_ACCEPT ,
 		return IS_DEFINED(IORING_OP_ACCEPT);

 	if( /*func == (fptr_t)connect || */
-		func == (fptr_t)async_connect )
+		func == (fptr_t)cfa_connect )
 		#define _CFA_IO_FEATURE_IORING_OP_CONNECT ,
 		return IS_DEFINED(IORING_OP_CONNECT);

 	if( /*func == (fptr_t)fallocate || */
-		func == (fptr_t)async_fallocate )
+		func == (fptr_t)cfa_fallocate )
 		#define _CFA_IO_FEATURE_IORING_OP_FALLOCATE ,
 		return IS_DEFINED(IORING_OP_FALLOCATE);

 	if( /*func == (fptr_t)fadvise || */
-		func == (fptr_t)async_fadvise )
+		func == (fptr_t)cfa_fadvise )
 		#define _CFA_IO_FEATURE_IORING_OP_FADVISE ,
 		return IS_DEFINED(IORING_OP_FADVISE);

 	if( /*func == (fptr_t)madvise || */
-		func == (fptr_t)async_madvise )
+		func == (fptr_t)cfa_madvise )
 		#define _CFA_IO_FEATURE_IORING_OP_MADVISE ,
 		return IS_DEFINED(IORING_OP_MADVISE);

 	if( /*func == (fptr_t)openat || */
-		func == (fptr_t)async_openat )
+		func == (fptr_t)cfa_openat )
 		#define _CFA_IO_FEATURE_IORING_OP_OPENAT ,
 		return IS_DEFINED(IORING_OP_OPENAT);

 	if( /*func == (fptr_t)close || */
-		func == (fptr_t)async_close )
+		func == (fptr_t)cfa_close )
 		#define _CFA_IO_FEATURE_IORING_OP_CLOSE ,
 		return IS_DEFINED(IORING_OP_CLOSE);

 	if( /*func == (fptr_t)statx || */
-		func == (fptr_t)async_statx )
+		func == (fptr_t)cfa_statx )
 		#define _CFA_IO_FEATURE_IORING_OP_STATX ,
 		return IS_DEFINED(IORING_OP_STATX);

 	if( /*func == (fptr_t)read || */
-		func == (fptr_t)async_read )
+		func == (fptr_t)cfa_read )
 		#define _CFA_IO_FEATURE_IORING_OP_READ ,
 		return IS_DEFINED(IORING_OP_READ);

 	if( /*func == (fptr_t)write || */
-		func == (fptr_t)async_write )
+		func == (fptr_t)cfa_write )
 		#define _CFA_IO_FEATURE_IORING_OP_WRITE ,
 		return IS_DEFINED(IORING_OP_WRITE);
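The trailing-comma defines feeding IS_DEFINED use the same preprocessor idiom as the Linux kernel's IS_ENABLED: a probe token is pasted onto the feature macro's expansion, and a stub that expands to a bare comma shifts which argument a variadic selector sees. The placement of the #define between the if and the return is legal because preprocessing runs before parsing; the stub only has to appear textually before IS_DEFINED is expanded. A self-contained C sketch of the idiom (macro names here are illustrative, not CFA's actual definitions):

#include <stdio.h>

// DEFINED(X) pastes STUB_ onto the *expansion* of X:
//  - if X is defined (say, to 1), the paste yields the junk token STUB_1,
//    so SELECT's first argument is "STUB_1 0" and it returns 1;
//  - if X is undefined, the paste yields STUB_X itself, whose comma stub
//    splits the argument list so SELECT returns 0 instead.
#define SELECT(ignored, val, ...) val
#define DEFINED__(expansion)      SELECT(expansion 0, 1, 0)
#define DEFINED_(x)               DEFINED__(STUB_##x)
#define DEFINED(x)                DEFINED_(x)

#define FOO 1
// One comma stub per probed name, mirroring the changeset's
// "#define _CFA_IO_FEATURE_IORING_OP_READV ," lines:
#define STUB_FOO ,
#define STUB_BAR ,

int main(void) {
	printf("FOO defined: %d\n", DEFINED(FOO));   // prints 1
	printf("BAR defined: %d\n", DEFINED(BAR));   // prints 0
	return 0;
}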