Changeset ecf6b46
- Timestamp: Apr 23, 2020, 4:46:15 PM (5 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: d384787
- Parents: 2489d31
- File: 1 edited
Legend:
- unchanged lines: no prefix
- added lines: prefixed with "+"
- removed lines: prefixed with "-"
- "…": lines omitted from this view
libcfa/src/concurrency/io.cfa
(diff from r2489d31 to recf6b46)

+//
+// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// io.cfa --
+//
+// Author : Thierry Delisle
+// Created On : Thu Apr 23 17:31:00 2020
+// Last Modified By :
+// Last Modified On :
+// Update Count :
+//
+
#include "kernel.hfa"

…

void __kernel_io_shutdown( cluster & this ) {
    // Nothing to do without io_uring
-}
-
-bool is_async( void (*)() ) {
-    return false;
}

…

        this.len = len;
    }
+#endif

//=============================================================================================
// I/O Interface
//=============================================================================================
-extern "C" {
-    #define __USE_GNU
-    #define _GNU_SOURCE
-    #include <fcntl.h>
-    #include <sys/uio.h>
-    #include <sys/socket.h>
-    #include <sys/stat.h>
-}
-
+extern "C" {
+    #define __USE_GNU
+    #define _GNU_SOURCE
+    #include <fcntl.h>
+    #include <sys/uio.h>
+    #include <sys/socket.h>
+    #include <sys/stat.h>
+}
+
+#if defined(HAVE_LINUX_IO_URING_H)
#define __submit_prelude \
    struct io_ring & ring = active_cluster()->io; \

…

    park( __cfaabi_dbg_ctx ); \
    return data.result;
+#endif

//-----------------------------------------------------------------------------
// Asynchronous operations
-
-#if !defined(IORING_OP_READV)
…
-#if !defined(IORING_OP_WRITEV)
…
-#if !defined(IORING_OP_FSYNC)
…
-#if !defined(IORING_OP_SYNC_FILE_RANGE)
…
-#if !defined(IORING_OP_SENDMSG)
…
-#if !defined(IORING_OP_RECVMSG)
…
-#if !defined(IORING_OP_SEND)
…
-#if !defined(IORING_OP_RECV)
…
-#if !defined(IORING_OP_ACCEPT)
…
-#if !defined(IORING_OP_CONNECT)
…
-#if !defined(IORING_OP_FALLOCATE)
…
-#if !defined(IORING_OP_FADVISE)
…
-#if !defined(IORING_OP_MADVISE)
…
-#if !defined(IORING_OP_OPENAT)
…
-#if !defined(IORING_OP_CLOSE)
…
-#if !defined(IORING_OP_STATX)
…
-#if !defined(IORING_OP_READ)
…
-#if !defined(IORING_OP_WRITE)
…
+ssize_t cfa_preadv2(int fd, const struct iovec *iov, int iovcnt, off_t offset, int flags) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_READV)
+        return preadv2(fd, iov, iovcnt, offset, flags);
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_READV, fd, iov, iovcnt, offset };
+
+        __submit_wait
+    #endif
+}
+
+ssize_t cfa_pwritev2(int fd, const struct iovec *iov, int iovcnt, off_t offset, int flags) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_WRITEV)
+        return pwritev2(fd, iov, iovcnt, offset, flags);
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_WRITEV, fd, iov, iovcnt, offset };
+
+        __submit_wait
+    #endif
+}
+
+int cfa_fsync(int fd) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_FSYNC)
+        return fsync(fd);
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_FSYNC, fd };
+
+        __submit_wait
+    #endif
+}
+
+int cfa_sync_file_range(int fd, int64_t offset, int64_t nbytes, unsigned int flags) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_SYNC_FILE_RANGE)
+        return sync_file_range(fd, offset, nbytes, flags);
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_SYNC_FILE_RANGE, fd };
+        sqe->off = offset;
+        sqe->len = nbytes;
+        sqe->sync_range_flags = flags;
+
+        __submit_wait
+    #endif
+}
+
+ssize_t cfa_sendmsg(int sockfd, const struct msghdr *msg, int flags) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_SENDMSG)
+        return sendmsg(sockfd, msg, flags);
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_SENDMSG, sockfd, msg, 1, 0 };
+        sqe->msg_flags = flags;
+
+        __submit_wait
+    #endif
+}
+
+ssize_t cfa_recvmsg(int sockfd, struct msghdr *msg, int flags) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_RECVMSG)
+        return recvmsg(sockfd, msg, flags);
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_RECVMSG, sockfd, msg, 1, 0 };
+        sqe->msg_flags = flags;
+
+        __submit_wait
+    #endif
+}
+
+ssize_t cfa_send(int sockfd, const void *buf, size_t len, int flags) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_SEND)
+        return send( sockfd, buf, len, flags );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_SEND, sockfd };
+        sqe->addr = (uint64_t)buf;
+        sqe->len = len;
+        sqe->msg_flags = flags;
+
+        __submit_wait
+    #endif
+}
+
+ssize_t cfa_recv(int sockfd, void *buf, size_t len, int flags) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_RECV)
+        return recv( sockfd, buf, len, flags );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_RECV, sockfd };
+        sqe->addr = (uint64_t)buf;
+        sqe->len = len;
+        sqe->msg_flags = flags;
+
+        __submit_wait
+    #endif
+}
+
+int cfa_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen, int flags) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_ACCEPT)
+        __SOCKADDR_ARG _addr;
+        _addr.__sockaddr__ = addr;
+        return accept4( sockfd, _addr, addrlen, flags );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_ACCEPT, sockfd };
+        sqe->addr = addr;
+        sqe->addr2 = addrlen;
+        sqe->accept_flags = flags;
+
+        __submit_wait
+    #endif
+}
+
+int cfa_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_CONNECT)
+        __CONST_SOCKADDR_ARG _addr;
+        _addr.__sockaddr__ = addr;
+        return connect( sockfd, _addr, addrlen );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_CONNECT, sockfd };
+        sqe->addr = (uint64_t)addr;
+        sqe->off = addrlen;
+
+        __submit_wait
+    #endif
+}
+
+int cfa_fallocate(int fd, int mode, uint64_t offset, uint64_t len) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_FALLOCATE)
+        return fallocate( fd, mode, offset, len );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_FALLOCATE, fd };
+        sqe->off = offset;
+        sqe->len = len;
+        sqe->mode = mode;
+
+        __submit_wait
+    #endif
+}
+
+int cfa_fadvise(int fd, uint64_t offset, uint64_t len, int advice) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_FADVISE)
+        return posix_fadvise( fd, offset, len, advice );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_FADVISE, fd };
+        sqe->off = (uint64_t)offset;
+        sqe->len = len;
+        sqe->fadvise_advice = advice;
+
+        __submit_wait
+    #endif
+}
+
+int cfa_madvise(void *addr, size_t length, int advice) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_MADVISE)
+        return madvise( addr, length, advice );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_MADVISE, 0 };
+        sqe->addr = (uint64_t)addr;
+        sqe->len = length;
+        sqe->fadvise_advice = advice;
+
+        __submit_wait
+    #endif
+}
+
+int cfa_openat(int dirfd, const char *pathname, int flags, mode_t mode) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_OPENAT)
+        return openat( dirfd, pathname, flags, mode );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_OPENAT, dirfd };
+        sqe->addr = (uint64_t)pathname;
+        sqe->open_flags = flags;
+        sqe->mode = mode;
+
+        __submit_wait
+    #endif
+}
+
+int cfa_close(int fd) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_CLOSE)
+        return close( fd );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_CLOSE, fd };
+
+        __submit_wait
+    #endif
+}
+
+int cfa_statx(int dirfd, const char *pathname, int flags, unsigned int mask, struct statx *statxbuf) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_STATX)
+        //return statx( dirfd, pathname, flags, mask, statxbuf );
+        return syscall( __NR_statx, dirfd, pathname, flags, mask, statxbuf );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_STATX, dirfd };
+        sqe->addr = (uint64_t)pathname;
+        sqe->statx_flags = flags;
+        sqe->len = mask;
+        sqe->off = (uint64_t)statxbuf;
+
+        __submit_wait
+    #endif
+}
+
+ssize_t cfa_read(int fd, void *buf, size_t count) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_READ)
+        return read( fd, buf, count );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_READ, fd, buf, count, 0 };
+
+        __submit_wait
+    #endif
+}
+
+ssize_t cfa_write(int fd, void *buf, size_t count) {
+    #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_WRITE)
+        return write( fd, buf, count );
+    #else
+        __submit_prelude
+
+        (*sqe){ IORING_OP_WRITE, fd, buf, count, 0 };
+
+        __submit_wait
+    #endif
+}

//-----------------------------------------------------------------------------

…

// Macro magic to reduce the size of the following switch case
…
-bool is_async( fptr_t func ) {
…
+    #define IS_DEFINED_APPLY(f, ...) f(__VA_ARGS__)
+    #define IS_DEFINED_SECOND(first, second, ...) second
+    #define IS_DEFINED_TEST(expansion) _CFA_IO_FEATURE_##expansion
+    #define IS_DEFINED(macro) IS_DEFINED_APPLY( IS_DEFINED_SECOND,IS_DEFINED_TEST(macro) false, true)
+
+bool has_user_level_blocking( fptr_t func ) {
+    #if defined(HAVE_LINUX_IO_URING_H)
    if( /*func == (fptr_t)preadv2 || */
        func == (fptr_t)cfa_preadv2 )
…

    if( /*func == (fptr_t)pwritev2 || */
        func == (fptr_t)cfa_pwritev2 )
        #define _CFA_IO_FEATURE_IORING_OP_WRITEV ,
        return IS_DEFINED(IORING_OP_WRITEV);

    if( /*func == (fptr_t)fsync || */
        func == (fptr_t)cfa_fsync )
        #define _CFA_IO_FEATURE_IORING_OP_FSYNC ,
        return IS_DEFINED(IORING_OP_FSYNC);

    if( /*func == (fptr_t)sync_file_range || */
        func == (fptr_t)cfa_sync_file_range )
        #define _CFA_IO_FEATURE_IORING_OP_SYNC_FILE_RANGE ,
        return IS_DEFINED(IORING_OP_SYNC_FILE_RANGE);

    if( /*func == (fptr_t)sendmsg || */
        func == (fptr_t)cfa_sendmsg )
        #define _CFA_IO_FEATURE_IORING_OP_SENDMSG ,
        return IS_DEFINED(IORING_OP_SENDMSG);

    if( /*func == (fptr_t)recvmsg || */
        func == (fptr_t)cfa_recvmsg )
        #define _CFA_IO_FEATURE_IORING_OP_RECVMSG ,
        return IS_DEFINED(IORING_OP_RECVMSG);
…

    if( /*func == (fptr_t)read || */
        func == (fptr_t)cfa_read )
        #define _CFA_IO_FEATURE_IORING_OP_READ ,
        return IS_DEFINED(IORING_OP_READ);

    if( /*func == (fptr_t)write || */
        func == (fptr_t)cfa_write )
        #define _CFA_IO_FEATURE_IORING_OP_WRITE ,
        return IS_DEFINED(IORING_OP_WRITE);
-
-    return false;
-}
-
-#endif
+    #endif
+
+    return false;
+}
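Every wrapper above follows the same compile-time shape: if linux/io_uring.h is unavailable, or the specific IORING_OP_* opcode is not provided, the body collapses to the ordinary blocking call; otherwise it grabs an sqe on the cluster's ring (__submit_prelude), fills it, and parks the user-level thread until the completion arrives (__submit_wait). A minimal standalone C sketch of that guard shape; my_read is illustrative and not part of the changeset, and its second branch is only a placeholder for the real submission path:

    #include <stddef.h>
    #include <unistd.h>

    #if defined(HAVE_LINUX_IO_URING_H)
        #include <linux/io_uring.h>   /* provides the IORING_OP_* opcodes, when present */
    #endif

    /* Illustrative stand-in for one cfa_* wrapper: same guard, simplified body. */
    ssize_t my_read( int fd, void *buf, size_t count ) {
        #if !defined(HAVE_LINUX_IO_URING_H) || !defined(IORING_OP_READ)
            /* No usable ring support detected at compile time: plain blocking syscall. */
            return read( fd, buf, count );
        #else
            /* The changeset would run __submit_prelude here, fill the sqe with
               (*sqe){ IORING_OP_READ, fd, buf, count, 0 }, then __submit_wait. */
            return read( fd, buf, count );   /* placeholder for the submission path */
        #endif
    }

    int main(void) {
        char buf[32];
        return my_read( 0, buf, sizeof(buf) ) < 0;
    }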
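For comparison, the __submit_prelude / __submit_wait pair corresponds roughly to the get-sqe / submit / wait-cqe sequence that liburing exposes, except that the changeset fills the raw sqe of the cluster's own ring and parks only the calling user-level thread while waiting. A hedged, self-contained liburing sketch of that sequence (liburing is used purely for illustration here; it is not what the changeset links against); build with -luring:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>
    #include <liburing.h>

    int main(void) {
        struct io_uring ring;
        if( io_uring_queue_init( 8, &ring, 0 ) < 0 ) return 1;

        int fd = open( "/etc/hostname", O_RDONLY );
        if( fd < 0 ) { io_uring_queue_exit( &ring ); return 1; }

        char buf[256];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };

        /* Roughly __submit_prelude: obtain a free submission-queue entry. */
        struct io_uring_sqe *sqe = io_uring_get_sqe( &ring );
        if( !sqe ) { close( fd ); io_uring_queue_exit( &ring ); return 1; }

        /* Roughly (*sqe){ IORING_OP_READV, fd, iov, iovcnt, offset } in the changeset. */
        io_uring_prep_readv( sqe, fd, &iov, 1, 0 );

        /* Roughly __submit_wait: submit, wait for the completion, read its result. */
        io_uring_submit( &ring );
        struct io_uring_cqe *cqe;
        io_uring_wait_cqe( &ring, &cqe );
        int res = cqe->res;
        io_uring_cqe_seen( &ring, cqe );

        if( res >= 0 ) { buf[res] = '\0'; printf( "read %d bytes: %s", res, buf ); }

        close( fd );
        io_uring_queue_exit( &ring );
        return res < 0;
    }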
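The IS_DEFINED machinery added at the bottom is a "comma sentinel" preprocessor trick, in the same family as the Linux kernel's IS_ENABLED macro. Each _CFA_IO_FEATURE_X sentinel is defined to a single comma, but it is only reached when X itself is not an object-like macro; the extra comma then shifts the argument list of IS_DEFINED_SECOND so the false branch is selected, whereas a defined X expands first and the true branch is selected. In other words, IS_DEFINED(X) reports whether the header provided X as a #define. A standalone C sketch with illustrative names (FEATURE_FOO, FEATURE_BAR, SENTINEL_*) that are not from the changeset:

    #include <stdio.h>

    #define IS_DEFINED_APPLY(f, ...) f(__VA_ARGS__)
    #define IS_DEFINED_SECOND(first, second, ...) second
    #define IS_DEFINED_TEST(expansion) SENTINEL_##expansion
    #define IS_DEFINED(macro) IS_DEFINED_APPLY( IS_DEFINED_SECOND, IS_DEFINED_TEST(macro) 0, 1 )

    #define FEATURE_FOO 1          /* "present": FEATURE_FOO is an object-like macro          */
    /* FEATURE_BAR is intentionally left undefined ("absent")                                  */

    #define SENTINEL_FEATURE_FOO , /* reached only if FEATURE_FOO does not expand to a value  */
    #define SENTINEL_FEATURE_BAR , /* reached only if FEATURE_BAR does not expand to a value  */

    int main(void) {
        printf( "FEATURE_FOO detected: %d\n", IS_DEFINED(FEATURE_FOO) );   /* prints 1 */
        printf( "FEATURE_BAR detected: %d\n", IS_DEFINED(FEATURE_BAR) );   /* prints 0 */
        return 0;
    }

As with the changeset's version, the trick relies on the preprocessor tolerating an empty variadic argument list (the GCC/Clang default), and it only detects features exposed as #defines; a constant that exists purely as an enumerator reports 0.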