|
301 | 301 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) |
302 | 302 | #include <crypto/internal/sig.h> |
303 | 303 | #endif /* linux ver >= 6.13 */ |
| 304 | + #ifdef WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES |
| 305 | + #include <linux/kprobes.h> |
| 306 | + #endif |
304 | 307 |
|
305 | 308 | /* the LKCAPI assumes that expanded encrypt and decrypt keys will stay |
306 | 309 | * loaded simultaneously, and the Linux in-tree implementations have two |
|
712 | 715 | #endif |
713 | 716 | #endif |
714 | 717 |
|
| 718 | + typeof(preempt_count) *preempt_count; |
| 719 | + typeof(_raw_spin_lock_irqsave) *_raw_spin_lock_irqsave; |
| 720 | + typeof(_raw_spin_trylock) *_raw_spin_trylock; |
| 721 | + typeof(_raw_spin_unlock_irqrestore) *_raw_spin_unlock_irqrestore; |
| 722 | + typeof(_cond_resched) *_cond_resched; |
| 723 | + |
715 | 724 | const void *_last_slot; |
716 | 725 | }; |
717 | 726 |
|
|
874 | 883 | #define dump_stack (wolfssl_linuxkm_get_pie_redirect_table()->dump_stack) |
875 | 884 | #endif |
876 | 885 |
|
| 886 | + #undef preempt_count /* just in case -- not a macro on x86. */ |
| 887 | + #define preempt_count (wolfssl_linuxkm_get_pie_redirect_table()->preempt_count) |
| 888 | + #define _raw_spin_lock_irqsave (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_lock_irqsave) |
| 889 | + #define _raw_spin_trylock (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_trylock) |
| 890 | + #define _raw_spin_unlock_irqrestore (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_unlock_irqrestore) |
| 891 | + #define _cond_resched (wolfssl_linuxkm_get_pie_redirect_table()->_cond_resched) |
| 892 | + |
| 893 | + /* this is defined in linux/spinlock.h as an inline that calls the unshimmed |
| 894 | + * raw_spin_unlock_irqrestore(). use a macro here to supersede it. |
| 895 | + */ |
| 896 | + #define spin_unlock_irqrestore(lock, flags) raw_spin_unlock_irqrestore(&((lock)->rlock), flags) |
| 897 | + |
877 | 898 | #endif /* __PIE__ */ |
878 | 899 |
|
879 | 900 | #endif /* USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE */ |
|
932 | 953 | * above, with the bevy of warnings suppressed, and the below include will |
933 | 954 | * be a redundant no-op. |
934 | 955 | */ |
935 | | - #include <linux/mutex.h> |
936 | | - typedef struct mutex wolfSSL_Mutex; |
937 | | - #define WOLFSSL_MUTEX_INITIALIZER(lockname) __MUTEX_INITIALIZER(lockname) |
| 956 | + |
| 957 | + /* Copied from wc_port.h: For FIPS keep the function names the same */ |
| 958 | + #ifdef HAVE_FIPS |
| 959 | + #define wc_InitMutex InitMutex |
| 960 | + #define wc_FreeMutex FreeMutex |
| 961 | + #define wc_LockMutex LockMutex |
| 962 | + #define wc_UnLockMutex UnLockMutex |
| 963 | + #endif /* HAVE_FIPS */ |
| 964 | + |
| 965 | + #ifdef WOLFSSL_LINUXKM_USE_MUTEXES |
| 966 | + #ifdef LINUXKM_LKCAPI_REGISTER |
| 967 | + /* must use spin locks when registering implementations with the |
| 968 | + * kernel, because mutexes are forbidden when calling with nonzero |
| 969 | + * irq_count(). |
| 970 | + */ |
| 971 | + #error WOLFSSL_LINUXKM_USE_MUTEXES is incompatible with LINUXKM_LKCAPI_REGISTER. |
| 972 | + #endif |
| 973 | + |
| 974 | + #include <linux/mutex.h> |
| 975 | + typedef struct mutex wolfSSL_Mutex; |
| 976 | + #define WOLFSSL_MUTEX_INITIALIZER(lockname) __MUTEX_INITIALIZER(lockname) |
| 977 | + |
| 978 | + /* Linux kernel mutex routines are voids, alas. */ |
| 979 | + |
| 980 | + static inline int wc_InitMutex(wolfSSL_Mutex* m) |
| 981 | + { |
| 982 | + mutex_init(m); |
| 983 | + return 0; |
| 984 | + } |
| 985 | + |
| 986 | + static inline int wc_FreeMutex(wolfSSL_Mutex* m) |
| 987 | + { |
| 988 | + mutex_destroy(m); |
| 989 | + return 0; |
| 990 | + } |
| 991 | + |
| 992 | + static inline int wc_LockMutex(wolfSSL_Mutex* m) |
| 993 | + { |
| 994 | + if (in_nmi() || in_hardirq() || in_softirq()) |
| 995 | + return BAD_STATE_E; |
| 996 | + mutex_lock(m); |
| 997 | + return 0; |
| 998 | + } |
| 999 | + |
| 1000 | + static inline int wc_UnLockMutex(wolfSSL_Mutex* m) |
| 1001 | + { |
| 1002 | + mutex_unlock(m); |
| 1003 | + return 0; |
| 1004 | + } |
| 1005 | + #else |
| 1006 | + typedef struct { |
| 1007 | + spinlock_t lock; |
| 1008 | + unsigned long irq_flags; |
| 1009 | + } wolfSSL_Mutex; |
| 1010 | + #define WOLFSSL_MUTEX_INITIALIZER(lockname) { .lock =__SPIN_LOCK_UNLOCKED(lockname), .irq_flags = 0 } |
| 1011 | + |
| 1012 | + static __always_inline int wc_InitMutex(wolfSSL_Mutex* m) |
| 1013 | + { |
| 1014 | + m->lock = __SPIN_LOCK_UNLOCKED(m); |
| 1015 | + m->irq_flags = 0; |
| 1016 | + |
| 1017 | + return 0; |
| 1018 | + } |
| 1019 | + |
| 1020 | + static __always_inline int wc_FreeMutex(wolfSSL_Mutex* m) |
| 1021 | + { |
| 1022 | + (void)m; |
| 1023 | + return 0; |
| 1024 | + } |
| 1025 | + |
    /* Acquire m, disabling local interrupts for the duration of the hold.
     * Always returns 0 -- every acquisition path below retries until it
     * succeeds.
     *
     * The saved interrupt state is stashed in m->irq_flags for
     * wc_UnLockMutex() to restore; this is safe because the lock holder
     * has exclusive use of the struct until it unlocks.
     */
    static __always_inline int wc_LockMutex(wolfSSL_Mutex* m)
    {
        unsigned long irq_flags;
        /* first, try the cheap way. */
        if (spin_trylock_irqsave(&m->lock, irq_flags)) {
            m->irq_flags = irq_flags;
            return 0;
        }
        if (irq_count() != 0) {
            /* Note, this catches calls while SAVE_VECTOR_REGISTERS()ed as
             * required, because in_softirq() is always true while saved,
             * even for WC_FPU_INHIBITED_FLAG contexts.
             */
            /* In interrupt context we cannot reschedule, so fall back to a
             * plain (spinning) spin_lock_irqsave().
             */
            spin_lock_irqsave(&m->lock, irq_flags);
            m->irq_flags = irq_flags;
            return 0;
        }
        else {
            /* In task context, yield the CPU between trylock attempts
             * rather than busy-spinning with interrupts disabled.
             */
            for (;;) {
                if (spin_trylock_irqsave(&m->lock, irq_flags)) {
                    m->irq_flags = irq_flags;
                    return 0;
                }
                cond_resched();
            }
        }
        /* both branches above return; tell the compiler so. */
        __builtin_unreachable();
    }
| 1054 | + |
| 1055 | + static __always_inline int wc_UnLockMutex(wolfSSL_Mutex* m) |
| 1056 | + { |
| 1057 | + spin_unlock_irqrestore(&m->lock, m->irq_flags); |
| 1058 | + return 0; |
| 1059 | + } |
| 1060 | + |
| 1061 | + #endif |
| 1062 | + |
| 1063 | + /* Undo copied defines from wc_port.h, to avoid redefinition warnings. */ |
| 1064 | + #ifdef HAVE_FIPS |
| 1065 | + #undef wc_InitMutex |
| 1066 | + #undef wc_FreeMutex |
| 1067 | + #undef wc_LockMutex |
| 1068 | + #undef wc_UnLockMutex |
| 1069 | + #endif /* HAVE_FIPS */ |
938 | 1070 |
|
939 | 1071 | /* prevent gcc's mm_malloc.h from being included, since it unconditionally |
940 | 1072 | * includes stdlib.h, which is kernel-incompatible. |
|
953 | 1085 | _alloc_sz; \ |
954 | 1086 | }) |
    #ifdef HAVE_KVMALLOC
        /* NOTE(review): kvmalloc_node() with GFP_ATOMIC is only supported on
         * kernels >= 5.17 (mm commit a421ef303008, "mm: allow !GFP_KERNEL
         * allocations for kvmalloc"); older kernels require GFP_KERNEL-
         * compatible flags for kvmalloc and may WARN or fail the vmalloc
         * fallback.  Confirm against the minimum supported kernel version.
         */
        #define malloc(size) kvmalloc_node(WC_LINUXKM_ROUND_UP_P_OF_2(size), GFP_ATOMIC, NUMA_NO_NODE)
        #define free(ptr) kvfree(ptr)
        void *lkm_realloc(void *ptr, size_t newsize);
        #define realloc(ptr, newsize) lkm_realloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize))
    #else
        /* GFP_ATOMIC allocations never sleep, so these are safe from any
         * context, at the cost of drawing on the atomic reserves.
         */
        #define malloc(size) kmalloc(WC_LINUXKM_ROUND_UP_P_OF_2(size), GFP_ATOMIC)
        #define free(ptr) kfree(ptr)
        #define realloc(ptr, newsize) krealloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize), GFP_ATOMIC)
    #endif
965 | 1097 |
|
966 | 1098 | #ifndef static_assert |
|
0 commit comments